* [PATCH 1/8] block: add WRITE_BG
2016-08-31 17:05 [PATCHSET v6] Throttled background buffered writeback Jens Axboe
@ 2016-08-31 17:05 ` Jens Axboe
2016-09-01 7:55 ` Johannes Thumshirn
2016-08-31 17:05 ` [PATCH 2/8] writeback: add wbc_to_write_flags() Jens Axboe
` (6 subsequent siblings)
7 siblings, 1 reply; 23+ messages in thread
From: Jens Axboe @ 2016-08-31 17:05 UTC (permalink / raw)
To: axboe, linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
This adds a new request flag, REQ_BG, that callers can use to tell
the block layer that this is background (non-urgent) IO.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
include/linux/blk_types.h | 4 +++-
include/linux/fs.h | 3 +++
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 436f43f87da9..be4409b6ae4f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -155,6 +155,7 @@ enum rq_flag_bits {
__REQ_INTEGRITY, /* I/O includes block integrity payload */
__REQ_FUA, /* forced unit access */
__REQ_PREFLUSH, /* request for cache flush */
+ __REQ_BG, /* background activity */
/* bio only flags */
__REQ_RAHEAD, /* read ahead, can fail anytime */
@@ -198,7 +199,7 @@ enum rq_flag_bits {
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
- REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
+ REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE | REQ_BG)
#define REQ_CLONE_MASK REQ_COMMON_MASK
/* This mask is used for both bio and request merge checking */
@@ -223,6 +224,7 @@ enum rq_flag_bits {
#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
+#define REQ_BG (1ULL << __REQ_BG)
#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
#define REQ_PM (1ULL << __REQ_PM)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3523bf62f328..5a5f567c416a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -189,6 +189,8 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded
* by a cache flush and data is guaranteed to be on
* non-volatile media on completion.
+ * WRITE_BG Background write. This is for background activity like
+ * the periodic flush and background threshold writeback
*
*/
#define RW_MASK REQ_OP_WRITE
@@ -202,6 +204,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA)
#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)
+#define WRITE_BG (REQ_NOIDLE | REQ_BG)
/*
* Attribute flags. These should be or-ed together to figure out what
--
2.7.4
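For illustration, a caller that wants the block layer to treat a write as
non-urgent tags the bio with WRITE_BG (REQ_NOIDLE | REQ_BG). The helper below
is a hypothetical sketch using the 4.8-era bio API, not code from this series:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static void bg_write_end_io(struct bio *bio)
{
        /* real code would end page writeback here */
        bio_put(bio);
}

/* hypothetical: write one page as background, non-urgent IO */
static void submit_bg_write_page(struct block_device *bdev, struct page *page,
                                 sector_t sector)
{
        struct bio *bio = bio_alloc(GFP_NOFS, 1);

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = bg_write_end_io;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        /* WRITE_BG == REQ_NOIDLE | REQ_BG: mark the write as background */
        bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_BG);
        submit_bio(bio);
}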
* Re: [PATCH 1/8] block: add WRITE_BG
2016-08-31 17:05 ` [PATCH 1/8] block: add WRITE_BG Jens Axboe
@ 2016-09-01 7:55 ` Johannes Thumshirn
2016-09-01 7:59 ` Johannes Thumshirn
0 siblings, 1 reply; 23+ messages in thread
From: Johannes Thumshirn @ 2016-09-01 7:55 UTC (permalink / raw)
To: Jens Axboe; +Cc: axboe, linux-kernel, linux-fsdevel, linux-block
On Wed, Aug 31, 2016 at 11:05:44AM -0600, Jens Axboe wrote:
> This adds a new request flag, REQ_BG, that callers can use to tell
> the block layer that this is background (non-urgent) IO.
>
> Signed-off-by: Jens Axboe <axboe@fb.com>
> ---
s/WRITE_BG/REQ_BG/ in Subject?
--
Johannes Thumshirn Storage
jthumshirn@suse.de +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850
* Re: [PATCH 1/8] block: add WRITE_BG
2016-09-01 7:55 ` Johannes Thumshirn
@ 2016-09-01 7:59 ` Johannes Thumshirn
0 siblings, 0 replies; 23+ messages in thread
From: Johannes Thumshirn @ 2016-09-01 7:59 UTC (permalink / raw)
To: Jens Axboe; +Cc: axboe, linux-kernel, linux-fsdevel, linux-block
On Thu, Sep 01, 2016 at 09:55:18AM +0200, Johannes Thumshirn wrote:
> On Wed, Aug 31, 2016 at 11:05:44AM -0600, Jens Axboe wrote:
> > This adds a new request flag, REQ_BG, that callers can use to tell
> > the block layer that this is background (non-urgent) IO.
> >
> > Signed-off-by: Jens Axboe <axboe@fb.com>
> > ---
>
> s/WRITE_BG/REQ_BG/ in Subject?
Gah rm $LAST_MAIL
--
Johannes Thumshirn Storage
jthumshirn@suse.de +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850
* [PATCH 2/8] writeback: add wbc_to_write_flags()
2016-08-31 17:05 [PATCHSET v6] Throttled background buffered writeback Jens Axboe
2016-08-31 17:05 ` [PATCH 1/8] block: add WRITE_BG Jens Axboe
@ 2016-08-31 17:05 ` Jens Axboe
2016-08-31 23:32 ` Omar Sandoval
2016-08-31 17:05 ` [PATCH 3/8] writeback: use WRITE_BG for kupdate and background writeback Jens Axboe
` (5 subsequent siblings)
7 siblings, 1 reply; 23+ messages in thread
From: Jens Axboe @ 2016-08-31 17:05 UTC (permalink / raw)
To: axboe, linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
Add wbc_to_write_flags(), which returns the write modifier flags to use,
based on a struct writeback_control. No functional changes in this
patch, but it prepares us for factoring other wbc fields for write type.
Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Jan Kara <jack@suse.cz>
---
fs/buffer.c | 2 +-
fs/f2fs/data.c | 2 +-
fs/f2fs/node.c | 2 +-
fs/gfs2/meta_io.c | 3 +--
fs/mpage.c | 2 +-
fs/xfs/xfs_aops.c | 7 +++----
include/linux/writeback.h | 8 ++++++++
7 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 9c8eb9b6db6a..6a5f1a01102e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1698,7 +1698,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
struct buffer_head *bh, *head;
unsigned int blocksize, bbits;
int nr_underway = 0;
- int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+ int write_flags = wbc_to_write_flags(wbc);
head = create_page_buffers(page, inode,
(1 << BH_Dirty)|(1 << BH_Uptodate));
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ccb401eebc11..cb0528b31eb0 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1240,7 +1240,7 @@ static int f2fs_write_data_page(struct page *page,
.sbi = sbi,
.type = DATA,
.op = REQ_OP_WRITE,
- .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+ .op_flags = wbc_to_write_flags(wbc),
.page = page,
.encrypted_page = NULL,
};
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index f75d197d5beb..c1713da2542f 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1561,7 +1561,7 @@ static int f2fs_write_node_page(struct page *page,
.sbi = sbi,
.type = NODE,
.op = REQ_OP_WRITE,
- .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+ .op_flags = wbc_to_write_flags(wbc),
.page = page,
.encrypted_page = NULL,
};
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 950b8be68e41..7991c62e9d6f 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,8 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
{
struct buffer_head *bh, *head;
int nr_underway = 0;
- int write_flags = REQ_META | REQ_PRIO |
- (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+ int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page));
diff --git a/fs/mpage.c b/fs/mpage.c
index d2413af0823a..d6f1afe3397a 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -489,7 +489,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
struct buffer_head map_bh;
loff_t i_size = i_size_read(inode);
int ret = 0;
- int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+ int op_flags = wbc_to_write_flags(wbc);
if (page_has_buffers(page)) {
struct buffer_head *head = page_buffers(page);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 7575cfc3ad15..a68645abde56 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -447,8 +447,8 @@ xfs_submit_ioend(
ioend->io_bio->bi_private = ioend;
ioend->io_bio->bi_end_io = xfs_end_bio;
- bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
- (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+ bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, wbc_to_write_flags(wbc));
+
/*
* If we are failing the IO now, just mark the ioend with an
* error and finish it. This will run IO completion immediately
@@ -519,8 +519,7 @@ xfs_chain_bio(
bio_chain(ioend->io_bio, new);
bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
- bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
- (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+ bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, wbc_to_write_flags(wbc));
submit_bio(ioend->io_bio);
ioend->io_bio = new;
}
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fc1e16c25a29..e1fc25172397 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -100,6 +100,14 @@ struct writeback_control {
#endif
};
+static inline int wbc_to_write_flags(struct writeback_control *wbc)
+{
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ return WRITE_SYNC;
+
+ return WRITE;
+}
+
/*
* A wb_domain represents a domain that wb's (bdi_writeback's) belong to
* and are measured against each other in. There always is one global
--
2.7.4
* Re: [PATCH 2/8] writeback: add wbc_to_write_flags()
2016-08-31 17:05 ` [PATCH 2/8] writeback: add wbc_to_write_flags() Jens Axboe
@ 2016-08-31 23:32 ` Omar Sandoval
2016-09-01 3:54 ` Jens Axboe
0 siblings, 1 reply; 23+ messages in thread
From: Omar Sandoval @ 2016-08-31 23:32 UTC (permalink / raw)
To: Jens Axboe; +Cc: axboe, linux-kernel, linux-fsdevel, linux-block
On Wed, Aug 31, 2016 at 11:05:45AM -0600, Jens Axboe wrote:
> Add wbc_to_write_flags(), which returns the write modifier flags to use,
> based on a struct writeback_control. No functional changes in this
> patch, but it prepares us for factoring other wbc fields for write type.
>
> Signed-off-by: Jens Axboe <axboe@fb.com>
> Reviewed-by: Jan Kara <jack@suse.cz>
[snip]
> diff --git a/include/linux/writeback.h b/include/linux/writeback.h
> index fc1e16c25a29..e1fc25172397 100644
> --- a/include/linux/writeback.h
> +++ b/include/linux/writeback.h
> @@ -100,6 +100,14 @@ struct writeback_control {
> #endif
> };
>
> +static inline int wbc_to_write_flags(struct writeback_control *wbc)
> +{
> + if (wbc->sync_mode == WB_SYNC_ALL)
> + return WRITE_SYNC;
> +
> + return WRITE;
I think this should be `return 0;` after the op/flags split. WRITE == 1,
so this would get interpreted as REQ_FAILFAST_DEV in bi_opf.
From 2a222ca992c3 ("fs: have submit_bh users pass in op and flags
separately"):
@@ -1697,7 +1697,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
struct buffer_head *bh, *head;
unsigned int blocksize, bbits;
int nr_underway = 0;
- int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+ int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
--
Omar
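To spell this out with the values in this tree: WRITE expands to RW_MASK ==
REQ_OP_WRITE == 1, and REQ_FAILFAST_DEV is bit 0 of the request flags, so it
is also 1. A hypothetical sketch of the buffer_head path (the function is
illustrative; submit_bh() takes op and op_flags separately after the commit
cited above):

#include <linux/buffer_head.h>
#include <linux/writeback.h>

static void example_write_bh(struct buffer_head *bh,
                             struct writeback_control *wbc)
{
        int write_flags = wbc_to_write_flags(wbc);  /* WB_SYNC_NONE -> WRITE == 1 */

        /*
         * The flags are OR-ed into bio->bi_opf on the submit_bh() path, so
         * bit 0 -- which now means REQ_FAILFAST_DEV, not "write" -- gets set.
         */
        submit_bh(REQ_OP_WRITE, write_flags, bh);
}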
* Re: [PATCH 2/8] writeback: add wbc_to_write_flags()
2016-08-31 23:32 ` Omar Sandoval
@ 2016-09-01 3:54 ` Jens Axboe
0 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-09-01 3:54 UTC (permalink / raw)
To: Omar Sandoval; +Cc: axboe, linux-kernel, linux-fsdevel, linux-block
On 08/31/2016 05:32 PM, Omar Sandoval wrote:
> On Wed, Aug 31, 2016 at 11:05:45AM -0600, Jens Axboe wrote:
>> Add wbc_to_write_flags(), which returns the write modifier flags to use,
>> based on a struct writeback_control. No functional changes in this
>> patch, but it prepares us for factoring other wbc fields for write type.
>>
>> Signed-off-by: Jens Axboe <axboe@fb.com>
>> Reviewed-by: Jan Kara <jack@suse.cz>
>
> [snip]
>
>> diff --git a/include/linux/writeback.h b/include/linux/writeback.h
>> index fc1e16c25a29..e1fc25172397 100644
>> --- a/include/linux/writeback.h
>> +++ b/include/linux/writeback.h
>> @@ -100,6 +100,14 @@ struct writeback_control {
>> #endif
>> };
>>
>> +static inline int wbc_to_write_flags(struct writeback_control *wbc)
>> +{
>> + if (wbc->sync_mode == WB_SYNC_ALL)
>> + return WRITE_SYNC;
>> +
>> + return WRITE;
>
> I think this should be `return 0;` after the op/flags split. WRITE == 1,
> so this would get interpreted as REQ_FAILFAST_DEV in bi_opf.
Good catch, thanks! Fixed up.
--
Jens Axboe
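Presumably the fixed-up helper, with Omar's change and patch 3 folded in, ends
up as follows; only the final return changes:

static inline int wbc_to_write_flags(struct writeback_control *wbc)
{
        if (wbc->sync_mode == WB_SYNC_ALL)
                return WRITE_SYNC;
        else if (wbc->for_kupdate || wbc->for_background)
                return WRITE_BG;

        return 0;
}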
* [PATCH 3/8] writeback: use WRITE_BG for kupdate and background writeback
2016-08-31 17:05 [PATCHSET v6] Throttled background buffered writeback Jens Axboe
2016-08-31 17:05 ` [PATCH 1/8] block: add WRITE_BG Jens Axboe
2016-08-31 17:05 ` [PATCH 2/8] writeback: add wbc_to_write_flags() Jens Axboe
@ 2016-08-31 17:05 ` Jens Axboe
2016-08-31 17:05 ` [PATCH 4/8] writeback: track if we're sleeping on progress in balance_dirty_pages() Jens Axboe
` (4 subsequent siblings)
7 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-08-31 17:05 UTC (permalink / raw)
To: axboe, linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
If we're doing background-type writes (kupdate or background threshold
writeback), then use the appropriate write command, WRITE_BG, for them.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
include/linux/writeback.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index e1fc25172397..7fb80c78f159 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -104,6 +104,8 @@ static inline int wbc_to_write_flags(struct writeback_control *wbc)
{
if (wbc->sync_mode == WB_SYNC_ALL)
return WRITE_SYNC;
+ else if (wbc->for_kupdate || wbc->for_background)
+ return WRITE_BG;
return WRITE;
}
--
2.7.4
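For context, the two wbc bits tested above are copied from the queued work item
by the flusher thread. A simplified sketch of the writeback_control setup in
fs/fs-writeback.c's writeback_sb_inodes() (not the exact code):

        struct writeback_control wbc = {
                .sync_mode      = work->sync_mode,      /* WB_SYNC_NONE for flusher work */
                .for_kupdate    = work->for_kupdate,    /* periodic (dirty_writeback_centisecs) */
                .for_background = work->for_background, /* over the background dirty threshold */
                .range_cyclic   = work->range_cyclic,
        };

        /* ...__writeback_single_inode() eventually reaches ->writepages(),
         * where wbc_to_write_flags(&wbc) now yields WRITE_BG for both cases */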
* [PATCH 4/8] writeback: track if we're sleeping on progress in balance_dirty_pages()
2016-08-31 17:05 [PATCHSET v6] Throttled background buffered writeback Jens Axboe
` (2 preceding siblings ...)
2016-08-31 17:05 ` [PATCH 3/8] writeback: use WRITE_BG for kupdate and background writeback Jens Axboe
@ 2016-08-31 17:05 ` Jens Axboe
2016-08-31 17:05 ` [PATCH 5/8] block: add code to track actual device queue depth Jens Axboe
` (3 subsequent siblings)
7 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-08-31 17:05 UTC (permalink / raw)
To: axboe, linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
Note in the bdi_writeback structure if a task is currently being
limited in balance_dirty_pages(), waiting for writeback to
proceed.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
include/linux/backing-dev-defs.h | 2 ++
mm/backing-dev.c | 1 +
mm/page-writeback.c | 2 ++
3 files changed, 5 insertions(+)
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c357f27d5483..5a56588573f6 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,6 +116,8 @@ struct bdi_writeback {
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
+ atomic_t dirty_sleeping; /* waiting on dirty limit exceeded */
+
struct list_head bdi_node; /* anchored at bdi->wb_list */
#ifdef CONFIG_CGROUP_WRITEBACK
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8fde443f36d7..ffd607c9f00d 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -310,6 +310,7 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
spin_lock_init(&wb->work_lock);
INIT_LIST_HEAD(&wb->work_list);
INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
+ atomic_set(&wb->dirty_sleeping, 0);
wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
if (!wb->congested)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f4cd7d8005c9..c8e9e7a487c6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1778,7 +1778,9 @@ pause:
pause,
start_time);
__set_current_state(TASK_KILLABLE);
+ atomic_inc(&wb->dirty_sleeping);
io_schedule_timeout(pause);
+ atomic_dec(&wb->dirty_sleeping);
current->dirty_paused_when = now + pause;
current->nr_dirtied = 0;
--
2.7.4
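The counter gets its consumer in patch 7: if a task is currently blocked in
balance_dirty_pages(), buffered writeback is holding real work up, so the
throttler stops restricting itself to the background depth. Simplified from
get_limit() in patch 7:

        /* simplified from get_limit() in the wbt patch */
        if (atomic_read(&rwb->bdi->wb.dirty_sleeping))
                limit = rwb->wb_max;            /* someone waits on us, use full depth */
        else
                limit = rwb->wb_background;     /* nobody waits, stay out of the way */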
* [PATCH 5/8] block: add code to track actual device queue depth
2016-08-31 17:05 [PATCHSET v6] Throttled background buffered writeback Jens Axboe
` (3 preceding siblings ...)
2016-08-31 17:05 ` [PATCH 4/8] writeback: track if we're sleeping on progress in balance_dirty_pages() Jens Axboe
@ 2016-08-31 17:05 ` Jens Axboe
2016-08-31 17:05 ` [PATCH 6/8] block: add scalable completion tracking of requests Jens Axboe
` (2 subsequent siblings)
7 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-08-31 17:05 UTC (permalink / raw)
To: axboe, linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
For blk-mq, ->nr_requests does track queue depth, at least at init
time. But for the older queue paths, it's simply a soft setting.
On top of that, it's generally larger than the hardware setting
on purpose, to allow backup of requests for merging.
Fill a hole in struct request_queue with a 'queue_depth' member, and
add a blk_set_queue_depth() helper that drivers can call to more
closely inform the block layer of the real device queue depth.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
block/blk-settings.c | 12 ++++++++++++
drivers/scsi/scsi.c | 3 +++
include/linux/blkdev.h | 11 +++++++++++
3 files changed, 26 insertions(+)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f679ae122843..f7e122e717e8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -832,6 +832,18 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
/**
+ * blk_set_queue_depth - tell the block layer about the device queue depth
+ * @q: the request queue for the device
+ * @depth: queue depth
+ *
+ */
+void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
+{
+ q->queue_depth = depth;
+}
+EXPORT_SYMBOL(blk_set_queue_depth);
+
+/**
* blk_queue_write_cache - configure queue's write cache
* @q: the request queue for the device
* @wc: write back cache on or off
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1f36aca44394..f3de98abe7a9 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -621,6 +621,9 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
wmb();
}
+ if (sdev->request_queue)
+ blk_set_queue_depth(sdev->request_queue, depth);
+
return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e79055c8b577..1d12aa664112 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -327,6 +327,8 @@ struct request_queue {
struct blk_mq_ctx __percpu *queue_ctx;
unsigned int nr_queues;
+ unsigned int queue_depth;
+
/* hw dispatch queues */
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
@@ -683,6 +685,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
return false;
}
+static inline unsigned int blk_queue_depth(struct request_queue *q)
+{
+ if (q->queue_depth)
+ return q->queue_depth;
+
+ return q->nr_requests;
+}
+
/*
* q->prep_rq_fn return values
*/
@@ -999,6 +1009,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
--
2.7.4
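As a usage sketch beyond the SCSI hookup above, a hypothetical legacy-path
driver would call the new helper at queue setup time (the driver name and the
depth of 32 are made up):

#include <linux/blkdev.h>

static struct request_queue *mydrv_alloc_queue(request_fn_proc *rfn,
                                               spinlock_t *lock)
{
        struct request_queue *q = blk_init_queue(rfn, lock);

        if (!q)
                return NULL;

        /* hardware can only keep 32 commands in flight; without this,
         * blk_queue_depth() falls back to q->nr_requests (128 by default) */
        blk_set_queue_depth(q, 32);
        return q;
}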
* [PATCH 6/8] block: add scalable completion tracking of requests
2016-08-31 17:05 [PATCHSET v6] Throttled background buffered writeback Jens Axboe
` (4 preceding siblings ...)
2016-08-31 17:05 ` [PATCH 5/8] block: add code to track actual device queue depth Jens Axboe
@ 2016-08-31 17:05 ` Jens Axboe
2016-08-31 17:05 ` [PATCH 7/8] wbt: add general throttling mechanism Jens Axboe
2016-08-31 17:05 ` [PATCH 8/8] writeback: throttle buffered writeback Jens Axboe
7 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-08-31 17:05 UTC (permalink / raw)
To: axboe, linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
Track request completion latencies. For legacy block, we simply track
them in the request queue. For blk-mq, we track them on a per-software
queue basis, which we can then sum up through the hardware queues and
finally into a per-device state.
The stats are tracked in, roughly, 0.1s interval windows.
Add sysfs files to display the stats.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
block/Makefile | 2 +-
block/blk-core.c | 4 +
block/blk-mq-sysfs.c | 47 +++++++++++
block/blk-mq.c | 14 ++++
block/blk-mq.h | 3 +
block/blk-stat.c | 204 ++++++++++++++++++++++++++++++++++++++++++++++
block/blk-stat.h | 17 ++++
block/blk-sysfs.c | 26 ++++++
include/linux/blk_types.h | 12 +++
include/linux/blkdev.h | 4 +
10 files changed, 332 insertions(+), 1 deletion(-)
create mode 100644 block/blk-stat.c
create mode 100644 block/blk-stat.h
diff --git a/block/Makefile b/block/Makefile
index 9eda2322b2d4..3446e0472df0 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-lib.o blk-mq.o blk-mq-tag.o \
+ blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
badblocks.o partitions/
diff --git a/block/blk-core.c b/block/blk-core.c
index 36c7ac328d8c..4075cbeb720e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2475,6 +2475,8 @@ void blk_start_request(struct request *req)
{
blk_dequeue_request(req);
+ req->issue_time = ktime_to_ns(ktime_get());
+
/*
* We are now handing the request to the hardware, initialize
* resid_len to full count and add the timeout handler.
@@ -2542,6 +2544,8 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
trace_block_rq_complete(req->q, req, nr_bytes);
+ blk_stat_add(&req->q->rq_stats[rq_data_dir(req)], req);
+
if (!req->bio)
return false;
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index fe822aa5b8e4..b66bbf13cc12 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -247,6 +247,47 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
return ret;
}
+static void blk_mq_stat_clear(struct blk_mq_hw_ctx *hctx)
+{
+ struct blk_mq_ctx *ctx;
+ unsigned int i;
+
+ hctx_for_each_ctx(hctx, ctx, i) {
+ blk_stat_init(&ctx->stat[0]);
+ blk_stat_init(&ctx->stat[1]);
+ }
+}
+
+static ssize_t blk_mq_hw_sysfs_stat_store(struct blk_mq_hw_ctx *hctx,
+ const char *page, size_t count)
+{
+ blk_mq_stat_clear(hctx);
+ return count;
+}
+
+static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
+{
+ return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
+ pre, (long long) stat->nr_samples,
+ (long long) stat->mean, (long long) stat->min,
+ (long long) stat->max);
+}
+
+static ssize_t blk_mq_hw_sysfs_stat_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ struct blk_rq_stat stat[2];
+ ssize_t ret;
+
+ blk_stat_init(&stat[0]);
+ blk_stat_init(&stat[1]);
+
+ blk_hctx_stat_get(hctx, stat);
+
+ ret = print_stat(page, &stat[0], "read :");
+ ret += print_stat(page + ret, &stat[1], "write:");
+ return ret;
+}
+
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
@@ -304,6 +345,11 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
.attr = {.name = "io_poll", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_poll_show,
};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_stat = {
+ .attr = {.name = "stats", .mode = S_IRUGO | S_IWUSR },
+ .show = blk_mq_hw_sysfs_stat_show,
+ .store = blk_mq_hw_sysfs_stat_store,
+};
static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
@@ -314,6 +360,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_cpus.attr,
&blk_mq_hw_sysfs_active.attr,
&blk_mq_hw_sysfs_poll.attr,
+ &blk_mq_hw_sysfs_stat.attr,
NULL,
};
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 13f5a6c1de76..712f141a6f1a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -29,6 +29,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
+#include "blk-stat.h"
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
@@ -400,10 +401,19 @@ static void blk_mq_ipi_complete_request(struct request *rq)
put_cpu();
}
+static void blk_mq_stat_add(struct request *rq)
+{
+ struct blk_rq_stat *stat = &rq->mq_ctx->stat[rq_data_dir(rq)];
+
+ blk_stat_add(stat, rq);
+}
+
static void __blk_mq_complete_request(struct request *rq)
{
struct request_queue *q = rq->q;
+ blk_mq_stat_add(rq);
+
if (!q->softirq_done_fn)
blk_mq_end_request(rq, rq->errors);
else
@@ -447,6 +457,8 @@ void blk_mq_start_request(struct request *rq)
if (unlikely(blk_bidi_rq(rq)))
rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
+ rq->issue_time = ktime_to_ns(ktime_get());
+
blk_add_timer(rq);
/*
@@ -1795,6 +1807,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
spin_lock_init(&__ctx->lock);
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
+ blk_stat_init(&__ctx->stat[0]);
+ blk_stat_init(&__ctx->stat[1]);
/* If the cpu isn't online, the cpu is mapped to first hctx */
if (!cpu_online(i))
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9087b11037b7..e107f700ff17 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -1,6 +1,8 @@
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H
+#include "blk-stat.h"
+
struct blk_mq_tag_set;
struct blk_mq_ctx {
@@ -20,6 +22,7 @@ struct blk_mq_ctx {
/* incremented at completion time */
unsigned long ____cacheline_aligned_in_smp rq_completed[2];
+ struct blk_rq_stat stat[2];
struct request_queue *queue;
struct kobject kobj;
diff --git a/block/blk-stat.c b/block/blk-stat.c
new file mode 100644
index 000000000000..76cf2e2092c1
--- /dev/null
+++ b/block/blk-stat.c
@@ -0,0 +1,204 @@
+/*
+ * Block stat tracking code
+ *
+ * Copyright (C) 2016 Jens Axboe
+ */
+#include <linux/kernel.h>
+#include <linux/blk-mq.h>
+
+#include "blk-stat.h"
+#include "blk-mq.h"
+
+static void blk_stat_flush_batch(struct blk_rq_stat *stat)
+{
+ if (!stat->nr_batch)
+ return;
+ if (!stat->nr_samples)
+ stat->mean = div64_s64(stat->batch, stat->nr_batch);
+ else {
+ stat->mean = div64_s64((stat->mean * stat->nr_samples) +
+ stat->batch,
+ stat->nr_samples + stat->nr_batch);
+ }
+
+ stat->nr_samples += stat->nr_batch;
+ stat->nr_batch = stat->batch = 0;
+}
+
+void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+{
+ if (!src->nr_samples)
+ return;
+
+ blk_stat_flush_batch(src);
+
+ dst->min = min(dst->min, src->min);
+ dst->max = max(dst->max, src->max);
+
+ if (!dst->nr_samples)
+ dst->mean = src->mean;
+ else {
+ dst->mean = div64_s64((src->mean * src->nr_samples) +
+ (dst->mean * dst->nr_samples),
+ dst->nr_samples + src->nr_samples);
+ }
+ dst->nr_samples += src->nr_samples;
+}
+
+static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int i, j, nr;
+
+ blk_stat_init(&dst[0]);
+ blk_stat_init(&dst[1]);
+
+ nr = 0;
+ do {
+ uint64_t newest = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ if (!ctx->stat[0].nr_samples &&
+ !ctx->stat[1].nr_samples)
+ continue;
+ if (ctx->stat[0].time > newest)
+ newest = ctx->stat[0].time;
+ if (ctx->stat[1].time > newest)
+ newest = ctx->stat[1].time;
+ }
+ }
+
+ /*
+ * No samples
+ */
+ if (!newest)
+ break;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ if (ctx->stat[0].time == newest) {
+ blk_stat_sum(&dst[0], &ctx->stat[0]);
+ nr++;
+ }
+ if (ctx->stat[1].time == newest) {
+ blk_stat_sum(&dst[1], &ctx->stat[1]);
+ nr++;
+ }
+ }
+ }
+ /*
+ * If we race on finding an entry, just loop back again.
+ * Should be very rare.
+ */
+ } while (!nr);
+}
+
+void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
+{
+ if (q->mq_ops)
+ blk_mq_stat_get(q, dst);
+ else {
+ memcpy(&dst[0], &q->rq_stats[0], sizeof(struct blk_rq_stat));
+ memcpy(&dst[1], &q->rq_stats[1], sizeof(struct blk_rq_stat));
+ }
+}
+
+void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
+{
+ struct blk_mq_ctx *ctx;
+ unsigned int i, nr;
+
+ nr = 0;
+ do {
+ uint64_t newest = 0;
+
+ hctx_for_each_ctx(hctx, ctx, i) {
+ if (!ctx->stat[0].nr_samples &&
+ !ctx->stat[1].nr_samples)
+ continue;
+
+ if (ctx->stat[0].time > newest)
+ newest = ctx->stat[0].time;
+ if (ctx->stat[1].time > newest)
+ newest = ctx->stat[1].time;
+ }
+
+ if (!newest)
+ break;
+
+ hctx_for_each_ctx(hctx, ctx, i) {
+ if (ctx->stat[0].time == newest) {
+ blk_stat_sum(&dst[0], &ctx->stat[0]);
+ nr++;
+ }
+ if (ctx->stat[1].time == newest) {
+ blk_stat_sum(&dst[1], &ctx->stat[1]);
+ nr++;
+ }
+ }
+ /*
+ * If we race on finding an entry, just loop back again.
+ * Should be very rare, as the window is only updated
+ * occasionally
+ */
+ } while (!nr);
+}
+
+static void __blk_stat_init(struct blk_rq_stat *stat, s64 time_now)
+{
+ stat->min = -1ULL;
+ stat->max = stat->nr_samples = stat->mean = 0;
+ stat->batch = stat->nr_batch = 0;
+ stat->time = time_now & BLK_STAT_MASK;
+}
+
+void blk_stat_init(struct blk_rq_stat *stat)
+{
+ __blk_stat_init(stat, ktime_to_ns(ktime_get()));
+}
+
+void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
+{
+ s64 now, value;
+
+ now = ktime_to_ns(ktime_get());
+ if (now < rq->issue_time)
+ return;
+
+ if ((now & BLK_STAT_MASK) != (stat->time & BLK_STAT_MASK))
+ __blk_stat_init(stat, now);
+
+ value = now - rq->issue_time;
+ if (value > stat->max)
+ stat->max = value;
+ if (value < stat->min)
+ stat->min = value;
+
+ if (stat->batch + value < stat->batch ||
+ stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
+ blk_stat_flush_batch(stat);
+
+ stat->batch += value;
+ stat->nr_batch++;
+}
+
+void blk_stat_clear(struct request_queue *q)
+{
+ if (q->mq_ops) {
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int i, j;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ blk_stat_init(&ctx->stat[0]);
+ blk_stat_init(&ctx->stat[1]);
+ }
+ }
+ } else {
+ blk_stat_init(&q->rq_stats[0]);
+ blk_stat_init(&q->rq_stats[1]);
+ }
+}
diff --git a/block/blk-stat.h b/block/blk-stat.h
new file mode 100644
index 000000000000..d77548dbf196
--- /dev/null
+++ b/block/blk-stat.h
@@ -0,0 +1,17 @@
+#ifndef BLK_STAT_H
+#define BLK_STAT_H
+
+/*
+ * ~0.13s window as a power-of-2 (2^27 nsecs)
+ */
+#define BLK_STAT_NSEC 134217728ULL
+#define BLK_STAT_MASK ~(BLK_STAT_NSEC - 1)
+
+void blk_stat_add(struct blk_rq_stat *, struct request *);
+void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
+void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
+void blk_stat_clear(struct request_queue *q);
+void blk_stat_init(struct blk_rq_stat *);
+void blk_stat_sum(struct blk_rq_stat *, struct blk_rq_stat *);
+
+#endif
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index f87a7e747d36..0b9e435fec97 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -384,6 +384,26 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page)
return queue_var_show(blk_queue_dax(q), page);
}
+static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
+{
+ return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
+ pre, (long long) stat->nr_samples,
+ (long long) stat->mean, (long long) stat->min,
+ (long long) stat->max);
+}
+
+static ssize_t queue_stats_show(struct request_queue *q, char *page)
+{
+ struct blk_rq_stat stat[2];
+ ssize_t ret;
+
+ blk_queue_stat_get(q, stat);
+
+ ret = print_stat(page, &stat[0], "read :");
+ ret += print_stat(page + ret, &stat[1], "write:");
+ return ret;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
@@ -526,6 +546,11 @@ static struct queue_sysfs_entry queue_dax_entry = {
.show = queue_dax_show,
};
+static struct queue_sysfs_entry queue_stats_entry = {
+ .attr = {.name = "stats", .mode = S_IRUGO },
+ .show = queue_stats_show,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -553,6 +578,7 @@ static struct attribute *default_attrs[] = {
&queue_poll_entry.attr,
&queue_wc_entry.attr,
&queue_dax_entry.attr,
+ &queue_stats_entry.attr,
NULL,
};
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index be4409b6ae4f..95fbfa1fe010 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -266,4 +266,16 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
+#define BLK_RQ_STAT_BATCH 64
+
+struct blk_rq_stat {
+ s64 mean;
+ u64 min;
+ u64 max;
+ s32 nr_samples;
+ s32 nr_batch;
+ u64 batch;
+ s64 time;
+};
+
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1d12aa664112..259eba88f991 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -151,6 +151,7 @@ struct request {
struct gendisk *rq_disk;
struct hd_struct *part;
unsigned long start_time;
+ s64 issue_time;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
@@ -414,6 +415,9 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
+
+ struct blk_rq_stat rq_stats[2];
+
/*
* Number of active block driver functions for which blk_drain_queue()
* must wait. Must be incremented around functions that unlock the
--
2.7.4
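A worked example of the windowing with made-up timestamps: BLK_STAT_MASK
truncates a ktime to the start of its 2^27 ns (~134 ms) window, and
blk_stat_add() re-initializes the bucket whenever a sample lands in a new
window.

/*
 * BLK_STAT_NSEC = 1 << 27 = 134217728 ns
 * BLK_STAT_MASK = ~(BLK_STAT_NSEC - 1)
 *
 * t1 = 1000200000 ns  ->  t1 & BLK_STAT_MASK = 939524096   (window 7)
 * t2 = 1050000000 ns  ->  t2 & BLK_STAT_MASK = 939524096   (same window,
 *                                              sample is batched in)
 * t3 = 1200000000 ns  ->  t3 & BLK_STAT_MASK = 1073741824  (new window,
 *                                              __blk_stat_init() re-arms)
 */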
* [PATCH 7/8] wbt: add general throttling mechanism
2016-08-31 17:05 [PATCHSET v6] Throttled background buffered writeback Jens Axboe
` (5 preceding siblings ...)
2016-08-31 17:05 ` [PATCH 6/8] block: add scalable completion tracking of requests Jens Axboe
@ 2016-08-31 17:05 ` Jens Axboe
2016-09-01 18:05 ` Omar Sandoval
2016-08-31 17:05 ` [PATCH 8/8] writeback: throttle buffered writeback Jens Axboe
7 siblings, 1 reply; 23+ messages in thread
From: Jens Axboe @ 2016-08-31 17:05 UTC (permalink / raw)
To: axboe, linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
We can hook this up to the block layer, to help throttle buffered
writes. Or NFS can tap into it, to accomplish the same.
wbt registers a few trace points that can be used to track what is
happening in the system:
wbt_lat: 259:0: latency 2446318
wbt_stat: 259:0: rmean=2446318, rmin=2446318, rmax=2446318, rsamples=1,
wmean=518866, wmin=15522, wmax=5330353, wsamples=57
wbt_step: 259:0: step down: step=1, window=72727272, background=8, normal=16, max=32
This shows a sync issue event (wbt_lat) that exceeded its latency
target. wbt_stat dumps the current read/write stats for that window,
and wbt_step shows a step-down event where we now scale back writes.
Each trace includes the device, 259:0 in this case.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
include/linux/wbt.h | 118 +++++++++
include/trace/events/wbt.h | 122 ++++++++++
lib/Kconfig | 4 +
lib/Makefile | 1 +
lib/wbt.c | 587 +++++++++++++++++++++++++++++++++++++++++++++
5 files changed, 832 insertions(+)
create mode 100644 include/linux/wbt.h
create mode 100644 include/trace/events/wbt.h
create mode 100644 lib/wbt.c
diff --git a/include/linux/wbt.h b/include/linux/wbt.h
new file mode 100644
index 000000000000..14473d550a18
--- /dev/null
+++ b/include/linux/wbt.h
@@ -0,0 +1,118 @@
+#ifndef WB_THROTTLE_H
+#define WB_THROTTLE_H
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/ktime.h>
+
+enum {
+ ISSUE_STAT_TRACKED = 1ULL << 63,
+ ISSUE_STAT_READ = 1ULL << 62,
+ ISSUE_STAT_MASK = ISSUE_STAT_TRACKED | ISSUE_STAT_READ,
+ ISSUE_STAT_TIME_MASK = ~ISSUE_STAT_MASK,
+
+ WBT_TRACKED = 1,
+ WBT_READ = 2,
+};
+
+struct wb_issue_stat {
+ u64 time;
+};
+
+static inline void wbt_issue_stat_set_time(struct wb_issue_stat *stat)
+{
+ stat->time = (stat->time & ISSUE_STAT_MASK) |
+ (ktime_to_ns(ktime_get()) & ISSUE_STAT_TIME_MASK);
+}
+
+static inline u64 wbt_issue_stat_get_time(struct wb_issue_stat *stat)
+{
+ return stat->time & ISSUE_STAT_TIME_MASK;
+}
+
+static inline void wbt_mark_tracked(struct wb_issue_stat *stat)
+{
+ stat->time |= ISSUE_STAT_TRACKED;
+}
+
+static inline void wbt_clear_state(struct wb_issue_stat *stat)
+{
+ stat->time &= ~(ISSUE_STAT_TRACKED | ISSUE_STAT_READ);
+}
+
+static inline bool wbt_tracked(struct wb_issue_stat *stat)
+{
+ return (stat->time & ISSUE_STAT_TRACKED) != 0;
+}
+
+static inline void wbt_mark_read(struct wb_issue_stat *stat)
+{
+ stat->time |= ISSUE_STAT_READ;
+}
+
+static inline bool wbt_is_read(struct wb_issue_stat *stat)
+{
+ return (stat->time & ISSUE_STAT_READ) != 0;
+}
+
+struct wb_stat_ops {
+ void (*get)(void *, struct blk_rq_stat *);
+ void (*clear)(void *);
+};
+
+struct rq_wb {
+ /*
+ * Settings that govern how we throttle
+ */
+ unsigned int wb_background; /* background writeback */
+ unsigned int wb_normal; /* normal writeback */
+ unsigned int wb_max; /* max throughput writeback */
+ unsigned int scale_step;
+
+ u64 win_nsec; /* default window size */
+ u64 cur_win_nsec; /* current window size */
+
+ /*
+ * Number of consecutive periods where we don't have enough
+ * information to make a firm scale up/down decision.
+ */
+ unsigned int unknown_cnt;
+
+ struct timer_list window_timer;
+
+ s64 sync_issue;
+ void *sync_cookie;
+
+ unsigned int wc;
+ unsigned int queue_depth;
+
+ unsigned long last_issue; /* last non-throttled issue */
+ unsigned long last_comp; /* last non-throttled comp */
+ unsigned long min_lat_nsec;
+ struct backing_dev_info *bdi;
+ struct request_queue *q;
+ wait_queue_head_t wait;
+ atomic_t inflight;
+
+ struct wb_stat_ops *stat_ops;
+ void *ops_data;
+};
+
+struct backing_dev_info;
+
+void __wbt_done(struct rq_wb *);
+void wbt_done(struct rq_wb *, struct wb_issue_stat *);
+unsigned int wbt_wait(struct rq_wb *, unsigned int, spinlock_t *);
+struct rq_wb *wbt_init(struct backing_dev_info *, struct wb_stat_ops *, void *);
+void wbt_exit(struct rq_wb *);
+void wbt_update_limits(struct rq_wb *);
+void wbt_requeue(struct rq_wb *, struct wb_issue_stat *);
+void wbt_issue(struct rq_wb *, struct wb_issue_stat *);
+void wbt_disable(struct rq_wb *);
+void wbt_track(struct wb_issue_stat *, unsigned int);
+
+void wbt_set_queue_depth(struct rq_wb *, unsigned int);
+void wbt_set_write_cache(struct rq_wb *, bool);
+
+#endif
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
new file mode 100644
index 000000000000..a4b8b2e57bb1
--- /dev/null
+++ b/include/trace/events/wbt.h
@@ -0,0 +1,122 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wbt
+
+#if !defined(_TRACE_WBT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WBT_H
+
+#include <linux/tracepoint.h>
+#include <linux/wbt.h>
+
+/**
+ * wbt_stat - trace stats for blk_wb
+ * @stat: array of read/write stats
+ */
+TRACE_EVENT(wbt_stat,
+
+ TP_PROTO(struct backing_dev_info *bdi, struct blk_rq_stat *stat),
+
+ TP_ARGS(bdi, stat),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(s64, rmean)
+ __field(u64, rmin)
+ __field(u64, rmax)
+ __field(s64, rnr_samples)
+ __field(s64, rtime)
+ __field(s64, wmean)
+ __field(u64, wmin)
+ __field(u64, wmax)
+ __field(s64, wnr_samples)
+ __field(s64, wtime)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ __entry->rmean = stat[0].mean;
+ __entry->rmin = stat[0].min;
+ __entry->rmax = stat[0].max;
+ __entry->rnr_samples = stat[0].nr_samples;
+ __entry->wmean = stat[1].mean;
+ __entry->wmin = stat[1].min;
+ __entry->wmax = stat[1].max;
+ __entry->wnr_samples = stat[1].nr_samples;
+ ),
+
+ TP_printk("%s: rmean=%llu, rmin=%llu, rmax=%llu, rsamples=%llu, "
+ "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu\n",
+ __entry->name, __entry->rmean, __entry->rmin, __entry->rmax,
+ __entry->rnr_samples, __entry->wmean, __entry->wmin,
+ __entry->wmax, __entry->wnr_samples)
+);
+
+/**
+ * wbt_lat - trace latency event
+ * @lat: latency trigger
+ */
+TRACE_EVENT(wbt_lat,
+
+ TP_PROTO(struct backing_dev_info *bdi, unsigned long lat),
+
+ TP_ARGS(bdi, lat),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(unsigned long, lat)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ __entry->lat = lat;
+ ),
+
+ TP_printk("%s: latency %llu\n", __entry->name,
+ (unsigned long long) __entry->lat)
+);
+
+/**
+ * wbt_step - trace wb event step
+ * @msg: context message
+ * @step: the current scale step count
+ * @window: the current monitoring window
+ * @bg: the current background queue limit
+ * @normal: the current normal writeback limit
+ * @max: the current max throughput writeback limit
+ */
+TRACE_EVENT(wbt_step,
+
+ TP_PROTO(struct backing_dev_info *bdi, const char *msg,
+ unsigned int step, unsigned long window, unsigned int bg,
+ unsigned int normal, unsigned int max),
+
+ TP_ARGS(bdi, msg, step, window, bg, normal, max),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(const char *, msg)
+ __field(unsigned int, step)
+ __field(unsigned long, window)
+ __field(unsigned int, bg)
+ __field(unsigned int, normal)
+ __field(unsigned int, max)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ __entry->msg = msg;
+ __entry->step = step;
+ __entry->window = window;
+ __entry->bg = bg;
+ __entry->normal = normal;
+ __entry->max = max;
+ ),
+
+ TP_printk("%s: %s: step=%u, window=%lu, background=%u, normal=%u, max=%u\n",
+ __entry->name, __entry->msg, __entry->step, __entry->window,
+ __entry->bg, __entry->normal, __entry->max)
+);
+
+#endif /* _TRACE_WBT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/lib/Kconfig b/lib/Kconfig
index d79909dc01ec..5a65a1f91889 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -550,4 +550,8 @@ config STACKDEPOT
bool
select STACKTRACE
+config WBT
+ bool
+ select SCALE_BITMAP
+
endmenu
diff --git a/lib/Makefile b/lib/Makefile
index cfa68eb269e4..c42f0eccd700 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -178,6 +178,7 @@ obj-$(CONFIG_SG_SPLIT) += sg_split.o
obj-$(CONFIG_SG_POOL) += sg_pool.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
+obj-$(CONFIG_WBT) += wbt.o
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
diff --git a/lib/wbt.c b/lib/wbt.c
new file mode 100644
index 000000000000..7da087700eb1
--- /dev/null
+++ b/lib/wbt.c
@@ -0,0 +1,587 @@
+/*
+ * buffered writeback throttling. Loosely based on CoDel. We can't drop
+ * packets for IO scheduling, so the logic is something like this:
+ *
+ * - Monitor latencies in a defined window of time.
+ * - If the minimum latency in the above window exceeds some target, increment
+ * scaling step and scale down queue depth by a factor of 2x. The monitoring
+ * window is then shrunk to 100 / sqrt(scaling step + 1).
+ * - For any window where we don't have solid data on what the latencies
+ * look like, retain status quo.
+ * - If latencies look good, decrement scaling step.
+ *
+ * Copyright (C) 2016 Jens Axboe
+ *
+ * Things that (may) need changing:
+ *
+ * - Different scaling of background/normal/high priority writeback.
+ * We may have to violate guarantees for max.
+ * - We can have mismatches between the stat window and our window.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/backing-dev.h>
+#include <linux/wbt.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/wbt.h>
+
+enum {
+ /*
+ * Might need to be higher
+ */
+ RWB_MAX_DEPTH = 64,
+
+ /*
+ * 100msec window
+ */
+ RWB_WINDOW_NSEC = 100 * 1000 * 1000ULL,
+
+ /*
+ * Disregard stats, if we don't meet these minimums
+ */
+ RWB_MIN_WRITE_SAMPLES = 3,
+ RWB_MIN_READ_SAMPLES = 1,
+
+ /*
+ * If we have this number of consecutive windows with not enough
+ * information to scale up or down, scale up.
+ */
+ RWB_UNKNOWN_BUMP = 5,
+};
+
+static inline bool rwb_enabled(struct rq_wb *rwb)
+{
+ return rwb && rwb->wb_normal != 0;
+}
+
+/*
+ * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
+ * false if 'v' + 1 would be bigger than 'below'.
+ */
+static bool atomic_inc_below(atomic_t *v, int below)
+{
+ int cur = atomic_read(v);
+
+ for (;;) {
+ int old;
+
+ if (cur >= below)
+ return false;
+ old = atomic_cmpxchg(v, cur, cur + 1);
+ if (old == cur)
+ break;
+ cur = old;
+ }
+
+ return true;
+}
+
+static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
+{
+ if (rwb_enabled(rwb)) {
+ const unsigned long cur = jiffies;
+
+ if (cur != *var)
+ *var = cur;
+ }
+}
+
+void __wbt_done(struct rq_wb *rwb)
+{
+ int inflight, limit;
+
+ inflight = atomic_dec_return(&rwb->inflight);
+
+ /*
+ * wbt got disabled with IO in flight. Wake up any potential
+ * waiters, we don't have to do more than that.
+ */
+ if (unlikely(!rwb_enabled(rwb))) {
+ wake_up_all(&rwb->wait);
+ return;
+ }
+
+ /*
+ * If the device does write back caching, drop further down
+ * before we wake people up.
+ */
+ if (rwb->wc && !atomic_read(&rwb->bdi->wb.dirty_sleeping))
+ limit = 0;
+ else
+ limit = rwb->wb_normal;
+
+ /*
+ * Don't wake anyone up if we are above the normal limit.
+ */
+ if (inflight && inflight >= limit)
+ return;
+
+ if (waitqueue_active(&rwb->wait)) {
+ int diff = limit - inflight;
+
+ if (!inflight || diff >= rwb->wb_background / 2)
+ wake_up_nr(&rwb->wait, 1);
+ }
+}
+
+/*
+ * Called on completion of a request. Note that it's also called when
+ * a request is merged, when the request gets freed.
+ */
+void wbt_done(struct rq_wb *rwb, struct wb_issue_stat *stat)
+{
+ if (!rwb)
+ return;
+
+ if (!wbt_tracked(stat)) {
+ if (rwb->sync_cookie == stat) {
+ rwb->sync_issue = 0;
+ rwb->sync_cookie = NULL;
+ }
+
+ if (wbt_is_read(stat))
+ wb_timestamp(rwb, &rwb->last_comp);
+ wbt_clear_state(stat);
+ } else {
+ WARN_ON_ONCE(stat == rwb->sync_cookie);
+ __wbt_done(rwb);
+ wbt_clear_state(stat);
+ }
+}
+
+static void calc_wb_limits(struct rq_wb *rwb)
+{
+ unsigned int depth;
+
+ if (!rwb->min_lat_nsec) {
+ rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
+ return;
+ }
+
+ /*
+ * For QD=1 devices, this is a special case. It's important for those
+ * to have one request ready when one completes, so force a depth of
+ * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
+ * since the device can't have more than that in flight. If we're
+ * scaling down, then keep a setting of 1/1/1.
+ */
+ if (rwb->queue_depth == 1) {
+ if (rwb->scale_step)
+ rwb->wb_max = rwb->wb_normal = 1;
+ else
+ rwb->wb_max = rwb->wb_normal = 2;
+ rwb->wb_background = 1;
+ } else {
+ depth = min_t(unsigned int, RWB_MAX_DEPTH, rwb->queue_depth);
+
+ /*
+ * Set our max/normal/bg queue depths based on how far
+ * we have scaled down (->scale_step).
+ */
+ rwb->wb_max = 1 + ((depth - 1) >> min(31U, rwb->scale_step));
+ rwb->wb_normal = (rwb->wb_max + 1) / 2;
+ rwb->wb_background = (rwb->wb_max + 3) / 4;
+ }
+}
+
+static bool inline stat_sample_valid(struct blk_rq_stat *stat)
+{
+ /*
+ * We need at least one read sample, and a minimum of
+ * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
+ * that it's writes impacting us, and not just some sole read on
+ * a device that is in a lower power state.
+ */
+ return stat[0].nr_samples >= 1 &&
+ stat[1].nr_samples >= RWB_MIN_WRITE_SAMPLES;
+}
+
+static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
+{
+ u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
+
+ if (!issue || !rwb->sync_cookie)
+ return 0;
+
+ now = ktime_to_ns(ktime_get());
+ return now - issue;
+}
+
+enum {
+ LAT_OK,
+ LAT_UNKNOWN,
+ LAT_EXCEEDED,
+};
+
+static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
+{
+ u64 thislat;
+
+ /*
+ * If our stored sync issue exceeds the window size, or it
+ * exceeds our min target AND we haven't logged any entries,
+ * flag the latency as exceeded. wbt works off completion latencies,
+ * but for a flooded device, a single sync IO can take a long time
+ * to complete after being issued. If this time exceeds our
+ * monitoring window AND we didn't see any other completions in that
+ * window, then count that sync IO as a violation of the latency.
+ */
+ thislat = rwb_sync_issue_lat(rwb);
+ if (thislat > rwb->cur_win_nsec ||
+ (thislat > rwb->min_lat_nsec && !stat[0].nr_samples)) {
+ trace_wbt_lat(rwb->bdi, thislat);
+ return LAT_EXCEEDED;
+ }
+
+ if (!stat_sample_valid(stat))
+ return LAT_UNKNOWN;
+
+ /*
+ * If the 'min' latency exceeds our target, step down.
+ */
+ if (stat[0].min > rwb->min_lat_nsec) {
+ trace_wbt_lat(rwb->bdi, stat[0].min);
+ trace_wbt_stat(rwb->bdi, stat);
+ return LAT_EXCEEDED;
+ }
+
+ if (rwb->scale_step)
+ trace_wbt_stat(rwb->bdi, stat);
+
+ return LAT_OK;
+}
+
+static int latency_exceeded(struct rq_wb *rwb)
+{
+ struct blk_rq_stat stat[2];
+
+ rwb->stat_ops->get(rwb->ops_data, stat);
+ return __latency_exceeded(rwb, stat);
+}
+
+static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
+{
+ trace_wbt_step(rwb->bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
+ rwb->wb_background, rwb->wb_normal, rwb->wb_max);
+}
+
+static void scale_up(struct rq_wb *rwb)
+{
+ /*
+ * If we're at 0, we can't go lower.
+ */
+ if (!rwb->scale_step)
+ return;
+
+ rwb->scale_step--;
+ rwb->unknown_cnt = 0;
+ rwb->stat_ops->clear(rwb->ops_data);
+ calc_wb_limits(rwb);
+
+ if (waitqueue_active(&rwb->wait))
+ wake_up_all(&rwb->wait);
+
+ rwb_trace_step(rwb, "step up");
+}
+
+static void scale_down(struct rq_wb *rwb)
+{
+ /*
+ * Stop scaling down when we've hit the limit. This also prevents
+ * ->scale_step from going to crazy values, if the device can't
+ * keep up.
+ */
+ if (rwb->wb_max == 1)
+ return;
+
+ rwb->scale_step++;
+ rwb->unknown_cnt = 0;
+ rwb->stat_ops->clear(rwb->ops_data);
+ calc_wb_limits(rwb);
+ rwb_trace_step(rwb, "step down");
+}
+
+static void rwb_arm_timer(struct rq_wb *rwb)
+{
+ unsigned long expires;
+
+ /*
+ * We should speed this up, using some variant of a fast integer
+ * inverse square root calculation. Since we only do this for
+ * every window expiration, it's not a huge deal, though.
+ */
+ rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
+ int_sqrt((rwb->scale_step + 1) << 8));
+ expires = jiffies + nsecs_to_jiffies(rwb->cur_win_nsec);
+ mod_timer(&rwb->window_timer, expires);
+}
+
+static void wb_timer_fn(unsigned long data)
+{
+ struct rq_wb *rwb = (struct rq_wb *) data;
+ int status;
+
+ /*
+ * If we exceeded the latency target, step down. If we did not,
+ * step one level up. If we don't know enough to say either exceeded
+ * or ok, then don't do anything.
+ */
+ status = latency_exceeded(rwb);
+ switch (status) {
+ case LAT_EXCEEDED:
+ scale_down(rwb);
+ break;
+ case LAT_OK:
+ scale_up(rwb);
+ break;
+ case LAT_UNKNOWN:
+ /*
+ * We had no read samples, start bumping up the write
+ * depth slowly
+ */
+ if (++rwb->unknown_cnt >= RWB_UNKNOWN_BUMP)
+ scale_up(rwb);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Re-arm timer, if we have IO in flight
+ */
+ if (rwb->scale_step || atomic_read(&rwb->inflight))
+ rwb_arm_timer(rwb);
+}
+
+void wbt_update_limits(struct rq_wb *rwb)
+{
+ rwb->scale_step = 0;
+ calc_wb_limits(rwb);
+
+ if (waitqueue_active(&rwb->wait))
+ wake_up_all(&rwb->wait);
+}
+
+static bool close_io(struct rq_wb *rwb)
+{
+ const unsigned long now = jiffies;
+
+ return time_before(now, rwb->last_issue + HZ / 10) ||
+ time_before(now, rwb->last_comp + HZ / 10);
+}
+
+#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)
+
+static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
+{
+ unsigned int limit;
+
+ /*
+ * At this point we know it's a buffered write. If REQ_SYNC is
+ * set, then it's WB_SYNC_ALL writeback, and we'll use the max
+ * limit for that. If the write is marked as a background write,
+ * then use the idle limit, or go to normal if we haven't had
+ * competing IO for a bit.
+ */
+ if ((rw & REQ_HIPRIO) || atomic_read(&rwb->bdi->wb.dirty_sleeping))
+ limit = rwb->wb_max;
+ else if ((rw & REQ_BG) || close_io(rwb)) {
+ /*
+ * If less than 100ms since we completed unrelated IO,
+ * limit us to half the depth for background writeback.
+ */
+ limit = rwb->wb_background;
+ } else
+ limit = rwb->wb_normal;
+
+ return limit;
+}
+
+static inline bool may_queue(struct rq_wb *rwb, unsigned long rw)
+{
+ /*
+ * inc it here even if disabled, since we'll dec it at completion.
+ * this only happens if the task was sleeping in __wbt_wait(),
+ * and someone turned it off at the same time.
+ */
+ if (!rwb_enabled(rwb)) {
+ atomic_inc(&rwb->inflight);
+ return true;
+ }
+
+ return atomic_inc_below(&rwb->inflight, get_limit(rwb, rw));
+}
+
+/*
+ * Block if we will exceed our limit, or if we are currently waiting for
+ * the timer to kick off queuing again.
+ */
+static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+{
+ DEFINE_WAIT(wait);
+
+ if (may_queue(rwb, rw))
+ return;
+
+ do {
+ prepare_to_wait_exclusive(&rwb->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ if (may_queue(rwb, rw))
+ break;
+
+ if (lock)
+ spin_unlock_irq(lock);
+
+ io_schedule();
+
+ if (lock)
+ spin_lock_irq(lock);
+ } while (1);
+
+ finish_wait(&rwb->wait, &wait);
+}
+
+static inline bool wbt_should_throttle(struct rq_wb *rwb, unsigned int rw)
+{
+ const int op = rw >> BIO_OP_SHIFT;
+
+ /*
+ * If not a WRITE (or a discard), do nothing
+ */
+ if (!(op == REQ_OP_WRITE || op == REQ_OP_DISCARD))
+ return false;
+
+ /*
+ * Don't throttle WRITE_ODIRECT
+ */
+ if ((rw & (REQ_SYNC | REQ_NOIDLE)) == REQ_SYNC)
+ return false;
+
+ return true;
+}
+
+/*
+ * Returns true if the IO request should be accounted, false if not.
+ * May sleep, if we have exceeded the writeback limits. Caller can pass
+ * in an irq held spinlock, if it holds one when calling this function.
+ * If we do sleep, we'll release and re-grab it.
+ */
+unsigned int wbt_wait(struct rq_wb *rwb, unsigned int rw, spinlock_t *lock)
+{
+ unsigned int ret = 0;
+
+ if (!rwb_enabled(rwb))
+ return 0;
+
+ if ((rw >> BIO_OP_SHIFT) == REQ_OP_READ)
+ ret = WBT_READ;
+
+ if (!wbt_should_throttle(rwb, rw)) {
+ if (ret & WBT_READ)
+ wb_timestamp(rwb, &rwb->last_issue);
+ return ret;
+ }
+
+ __wbt_wait(rwb, rw, lock);
+
+ if (!timer_pending(&rwb->window_timer))
+ rwb_arm_timer(rwb);
+
+ return ret | WBT_TRACKED;
+}
+
+void wbt_issue(struct rq_wb *rwb, struct wb_issue_stat *stat)
+{
+ if (!rwb_enabled(rwb))
+ return;
+
+ wbt_issue_stat_set_time(stat);
+
+ /*
+ * Track sync issue, in case it takes a long time to complete. Allows
+ * us to react quicker, if a sync IO takes a long time to complete.
+ * Note that this is just a hint. 'stat' can go away when the
+ * request completes, so it's important we never dereference it. We
+ * only use the address to compare with, which is why we store the
+ * sync_issue time locally.
+ */
+ if (wbt_is_read(stat) && !rwb->sync_issue) {
+ rwb->sync_cookie = stat;
+ rwb->sync_issue = wbt_issue_stat_get_time(stat);
+ }
+}
+
+void wbt_track(struct wb_issue_stat *stat, unsigned int wb_acct)
+{
+ if (wb_acct & WBT_TRACKED)
+ wbt_mark_tracked(stat);
+ else if (wb_acct & WBT_READ)
+ wbt_mark_read(stat);
+}
+
+void wbt_requeue(struct rq_wb *rwb, struct wb_issue_stat *stat)
+{
+ if (!rwb_enabled(rwb))
+ return;
+ if (stat == rwb->sync_cookie) {
+ rwb->sync_issue = 0;
+ rwb->sync_cookie = NULL;
+ }
+}
+
+void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+{
+ if (rwb) {
+ rwb->queue_depth = depth;
+ wbt_update_limits(rwb);
+ }
+}
+
+void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
+{
+ if (rwb)
+ rwb->wc = write_cache_on;
+}
+
+void wbt_disable(struct rq_wb *rwb)
+{
+ del_timer_sync(&rwb->window_timer);
+ rwb->win_nsec = rwb->min_lat_nsec = 0;
+ wbt_update_limits(rwb);
+}
+EXPORT_SYMBOL_GPL(wbt_disable);
+
+struct rq_wb *wbt_init(struct backing_dev_info *bdi, struct wb_stat_ops *ops,
+ void *ops_data)
+{
+ struct rq_wb *rwb;
+
+ rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
+ if (!rwb)
+ return ERR_PTR(-ENOMEM);
+
+ atomic_set(&rwb->inflight, 0);
+ init_waitqueue_head(&rwb->wait);
+ setup_timer(&rwb->window_timer, wb_timer_fn, (unsigned long) rwb);
+ rwb->wc = 1;
+ rwb->queue_depth = RWB_MAX_DEPTH;
+ rwb->last_comp = rwb->last_issue = jiffies;
+ rwb->bdi = bdi;
+ rwb->win_nsec = RWB_WINDOW_NSEC;
+ rwb->stat_ops = ops;
+ rwb->ops_data = ops_data;
+ wbt_update_limits(rwb);
+ return rwb;
+}
+
+void wbt_exit(struct rq_wb *rwb)
+{
+ if (rwb) {
+ del_timer_sync(&rwb->window_timer);
+ kfree(rwb);
+ }
+}
--
2.7.4
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [PATCH 7/8] wbt: add general throttling mechanism
2016-08-31 17:05 ` [PATCH 7/8] wbt: add general throttling mechanism Jens Axboe
@ 2016-09-01 18:05 ` Omar Sandoval
2016-09-01 18:51 ` Jens Axboe
0 siblings, 1 reply; 23+ messages in thread
From: Omar Sandoval @ 2016-09-01 18:05 UTC (permalink / raw)
To: Jens Axboe; +Cc: axboe, linux-kernel, linux-fsdevel, linux-block
On Wed, Aug 31, 2016 at 11:05:50AM -0600, Jens Axboe wrote:
> We can hook this up to the block layer, to help throttle buffered
> writes. Or NFS can tap into it, to accomplish the same.
>
> wbt registers a few trace points that can be used to track what is
> happening in the system:
>
> wbt_lat: 259:0: latency 2446318
> wbt_stat: 259:0: rmean=2446318, rmin=2446318, rmax=2446318, rsamples=1,
> wmean=518866, wmin=15522, wmax=5330353, wsamples=57
> wbt_step: 259:0: step down: step=1, window=72727272, background=8, normal=16, max=32
>
> This shows a sync issue event (wbt_lat) that exceeded its latency target. wbt_stat
> dumps the current read/write stats for that window, and wbt_step shows a
> step down event where we now scale back writes. Each trace includes the
> device, 259:0 in this case.
>
> Signed-off-by: Jens Axboe <axboe@fb.com>
> ---
> include/linux/wbt.h | 118 +++++++++
> include/trace/events/wbt.h | 122 ++++++++++
> lib/Kconfig | 4 +
> lib/Makefile | 1 +
> lib/wbt.c | 587 +++++++++++++++++++++++++++++++++++++++++++++
> 5 files changed, 832 insertions(+)
> create mode 100644 include/linux/wbt.h
> create mode 100644 include/trace/events/wbt.h
> create mode 100644 lib/wbt.c
>
[snip]
> diff --git a/lib/Kconfig b/lib/Kconfig
> index d79909dc01ec..5a65a1f91889 100644
> --- a/lib/Kconfig
> +++ b/lib/Kconfig
> @@ -550,4 +550,8 @@ config STACKDEPOT
> bool
> select STACKTRACE
>
> +config WBT
> + bool
> + select SCALE_BITMAP
Looks like this snuck in from your experiments to get this to work on
top of scale_bitmap?
[snip]
> +void __wbt_done(struct rq_wb *rwb)
> +{
> + int inflight, limit;
> +
> + inflight = atomic_dec_return(&rwb->inflight);
> +
> + /*
> + * wbt got disabled with IO in flight. Wake up any potential
> + * waiters, we don't have to do more than that.
> + */
> + if (unlikely(!rwb_enabled(rwb))) {
> + wake_up_all(&rwb->wait);
> + return;
> + }
> +
> + /*
> + * If the device does write back caching, drop further down
> + * before we wake people up.
> + */
> + if (rwb->wc && !atomic_read(&rwb->bdi->wb.dirty_sleeping))
> + limit = 0;
> + else
> + limit = rwb->wb_normal;
> +
> + /*
> + * Don't wake anyone up if we are above the normal limit.
> + */
> + if (inflight && inflight >= limit)
> + return;
> +
> + if (waitqueue_active(&rwb->wait)) {
> + int diff = limit - inflight;
> +
> + if (!inflight || diff >= rwb->wb_background / 2)
> + wake_up_nr(&rwb->wait, 1);
wake_up(&rwb->wait)?
--
Omar
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH 7/8] wbt: add general throttling mechanism
2016-09-01 18:05 ` Omar Sandoval
@ 2016-09-01 18:51 ` Jens Axboe
0 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-09-01 18:51 UTC (permalink / raw)
To: Omar Sandoval; +Cc: axboe, linux-kernel, linux-fsdevel, linux-block
On 09/01/2016 12:05 PM, Omar Sandoval wrote:
>> diff --git a/lib/Kconfig b/lib/Kconfig
>> index d79909dc01ec..5a65a1f91889 100644
>> --- a/lib/Kconfig
>> +++ b/lib/Kconfig
>> @@ -550,4 +550,8 @@ config STACKDEPOT
>> bool
>> select STACKTRACE
>>
>> +config WBT
>> + bool
>> + select SCALE_BITMAP
>
> Looks like this snuck in from your experiments to get this to work on
> top of scale_bitmap?
Oops yes, it is indeed. Killed, thanks.
>> + if (waitqueue_active(&rwb->wait)) {
>> + int diff = limit - inflight;
>> +
>> + if (!inflight || diff >= rwb->wb_background / 2)
>> + wake_up_nr(&rwb->wait, 1);
>
> wake_up(&rwb->wait)?
Yeah, that'd be cleaner. I think this is a leftover from when I
experimented with batched wakeups, with nr != 1. I'll change it to just
wake_up().
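For reference, with the exclusive waiters used here, wake_up() and
wake_up_nr(..., 1) end up doing the same thing; in include/linux/wait.h
they are (modulo the exact tree) just:

#define wake_up(x)		__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)	__wake_up(x, TASK_NORMAL, nr, NULL)

so the change is purely cosmetic.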
--
Jens Axboe
^ permalink raw reply [flat|nested] 23+ messages in thread
* [PATCH 8/8] writeback: throttle buffered writeback
2016-08-31 17:05 [PATCHSET v6] Throttled background buffered writeback Jens Axboe
` (6 preceding siblings ...)
2016-08-31 17:05 ` [PATCH 7/8] wbt: add general throttling mechanism Jens Axboe
@ 2016-08-31 17:05 ` Jens Axboe
7 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-08-31 17:05 UTC (permalink / raw)
To: axboe, linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
Test patch that throttles buffered writeback to make it a lot
smoother, with far less impact on other system activity.
Background writeback should be, by definition, background
activity. The fact that we flush huge bundles of it at a time
means that it potentially has heavy impacts on foreground workloads,
which isn't ideal. We can't easily limit the sizes of writes that
we do, since that would impact file system layout in the presence
of delayed allocation. So just throttle back buffered writeback,
unless someone is waiting for it.
The algorithm for when to throttle takes its inspiration from the
CoDel networking scheduling algorithm. Like CoDel, blk-wb monitors
the minimum latencies of requests over a window of time. In that
window of time, if the minimum latency of any request exceeds a
given target, then a scale count is incremented and the queue depth
is shrunk. The next monitoring window is shrunk accordingly. Unlike
CoDel, if we hit a window that exhibits good behavior, then we
simply increment the scale count and re-calculate the limits for that
scale value. This prevents us from oscillating between a
close-to-ideal value and max all the time, instead remaining in the
windows where we get good behavior.
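To illustrate just the depth-scaling side of that, here is a stand-alone
user space sketch. The halving rule and the names in it are assumptions
for illustration, not the actual lib/wbt.c code:

#include <stdio.h>

/*
 * Toy model of the scale step: each window that misses the latency
 * target bumps the step and roughly halves the write depth we allow.
 */
static unsigned int scaled_depth(unsigned int queue_depth, int scale_step)
{
	unsigned int depth = queue_depth;

	while (scale_step-- > 0 && depth > 1)
		depth = (depth + 1) / 2;

	return depth;
}

int main(void)
{
	int step;

	/* e.g. a device with a queue depth of 64 */
	for (step = 0; step <= 3; step++)
		printf("step=%d -> allowed write depth=%u\n",
		       step, scaled_depth(64, step));
	return 0;
}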
The patch registers two sysfs entries. The first one, 'wb_window_usec',
defines the window of monitoring. The second one, 'wb_lat_usec',
sets the latency target for the window. It defaults to 2 msec for
non-rotational storage, and 75 msec for rotational storage. Setting
this value to '0' disables blk-wb. Generally, a user would not have
to touch these settings.
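As a minimal example, the latency target can be tuned from user space by
writing a value in microseconds to the queue attribute. The device name
below is made up; the attribute name is the one the blk-sysfs.c hunk in
this patch registers (wbt_lat_usec), and writing 0 turns the throttling
off:

#include <stdio.h>

int main(void)
{
	/* 10000 usec == 10 msec read latency target */
	FILE *f = fopen("/sys/block/nvme0n1/queue/wbt_lat_usec", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 10000);
	return fclose(f) ? 1 : 0;
}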
We don't enable WBT on devices that are managed with CFQ, and have
a non-root block cgroup attached. If we have a proportional share setup
on this particular disk, then the wbt throttling will interfere with
that. We don't have a strong need for wbt for that case, since we will
rely on CFQ doing that for us.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
Documentation/block/queue-sysfs.txt | 13 ++++
block/Kconfig | 1 +
block/blk-core.c | 20 +++++-
block/blk-mq.c | 30 ++++++++-
block/blk-settings.c | 3 +
block/blk-stat.c | 5 +-
block/blk-sysfs.c | 119 ++++++++++++++++++++++++++++++++++++
block/cfq-iosched.c | 12 ++++
include/linux/blkdev.h | 6 +-
9 files changed, 200 insertions(+), 9 deletions(-)
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 2a3904030dea..2847219ebd8c 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -169,5 +169,18 @@ This is the number of bytes the device can write in a single write-same
command. A value of '0' means write-same is not supported by this
device.
+wb_lat_usec (RW)
+----------------
+If the device is registered for writeback throttling, then this file shows
+the target minimum read latency. If this latency is exceeded in a given
+window of time (see wb_window_usec), then the writeback throttling will start
+scaling back writes.
+
+wb_window_usec (RW)
+-------------------
+If the device is registered for writeback throttling, then this file shows
+the value of the monitoring window in which we'll look at the target
+latency. See wb_lat_usec.
+
Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/block/Kconfig b/block/Kconfig
index 161491d0a879..6da79e670709 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -4,6 +4,7 @@
menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
+ select WBT
help
Provide block layer support for the kernel.
diff --git a/block/blk-core.c b/block/blk-core.c
index 4075cbeb720e..4f4ce050290c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,6 +33,7 @@
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
+#include <linux/wbt.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -882,6 +883,8 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
fail:
blk_free_flush_queue(q->fq);
+ wbt_exit(q->rq_wb);
+ q->rq_wb = NULL;
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1346,6 +1349,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
+ wbt_requeue(q->rq_wb, &rq->wb_stat);
if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
@@ -1436,6 +1440,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
+ wbt_done(q->rq_wb, &req->wb_stat);
+
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -1667,6 +1673,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
+ unsigned int wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -1719,6 +1726,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, q->queue_lock);
+
/*
* This sync check and mask will be re-done in init_request_from_bio(),
* but we need to set it earlier to expose the sync flag to the
@@ -1738,11 +1747,15 @@ get_rq:
*/
req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) {
+ if (wb_acct & WBT_TRACKED)
+ __wbt_done(q->rq_wb);
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}
+ wbt_track(&req->wb_stat, wb_acct);
+
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
@@ -2475,7 +2488,7 @@ void blk_start_request(struct request *req)
{
blk_dequeue_request(req);
- req->issue_time = ktime_to_ns(ktime_get());
+ wbt_issue(req->q->rq_wb, &req->wb_stat);
/*
* We are now handing the request to the hardware, initialize
@@ -2713,9 +2726,10 @@ void blk_finish_request(struct request *req, int error)
blk_account_io_done(req);
- if (req->end_io)
+ if (req->end_io) {
+ wbt_done(req->q->rq_wb, &req->wb_stat);
req->end_io(req, error);
- else {
+ } else {
if (blk_bidi_rq(req))
__blk_put_request(req->next_rq->q, req->next_rq);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 712f141a6f1a..511289a4626a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -22,6 +22,7 @@
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
+#include <linux/wbt.h>
#include <trace/events/block.h>
@@ -319,6 +320,8 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
if (rq->cmd_flags & REQ_MQ_INFLIGHT)
atomic_dec(&hctx->nr_active);
+
+ wbt_done(q->rq_wb, &rq->wb_stat);
rq->cmd_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
@@ -351,6 +354,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
blk_account_io_done(rq);
if (rq->end_io) {
+ wbt_done(rq->q->rq_wb, &rq->wb_stat);
rq->end_io(rq, error);
} else {
if (unlikely(blk_bidi_rq(rq)))
@@ -457,7 +461,7 @@ void blk_mq_start_request(struct request *rq)
if (unlikely(blk_bidi_rq(rq)))
rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
- rq->issue_time = ktime_to_ns(ktime_get());
+ wbt_issue(q->rq_wb, &rq->wb_stat);
blk_add_timer(rq);
@@ -494,6 +498,7 @@ static void __blk_mq_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
trace_block_rq_requeue(q, rq);
+ wbt_requeue(q->rq_wb, &rq->wb_stat);
if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -1312,6 +1317,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
+ unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1326,9 +1332,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct & WBT_TRACKED)
+ __wbt_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ wbt_track(&rq->wb_stat, wb_acct);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1405,6 +1418,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
struct blk_map_ctx data;
struct request *rq;
blk_qc_t cookie;
+ unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1421,9 +1435,16 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
} else
request_count = blk_plug_queued_count(q);
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct & WBT_TRACKED)
+ __wbt_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ wbt_track(&rq->wb_stat, wb_acct);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -2147,6 +2168,9 @@ void blk_mq_free_queue(struct request_queue *q)
list_del_init(&q->all_q_node);
mutex_unlock(&all_q_mutex);
+ wbt_exit(q->rq_wb);
+ q->rq_wb = NULL;
+
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f7e122e717e8..746dc9fee1ac 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -840,6 +840,7 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
q->queue_depth = depth;
+ wbt_set_queue_depth(q->rq_wb, depth);
}
EXPORT_SYMBOL(blk_set_queue_depth);
@@ -863,6 +864,8 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
else
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
+
+ wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 76cf2e2092c1..d8cb9b56fced 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -162,15 +162,16 @@ void blk_stat_init(struct blk_rq_stat *stat)
void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
{
s64 now, value;
+ u64 rq_time = wbt_issue_stat_get_time(&rq->wb_stat);
now = ktime_to_ns(ktime_get());
- if (now < rq->issue_time)
+ if (now < rq_time)
return;
if ((now & BLK_STAT_MASK) != (stat->time & BLK_STAT_MASK))
__blk_stat_init(stat, now);
- value = now - rq->issue_time;
+ value = now - rq_time;
if (value > stat->max)
stat->max = value;
if (value < stat->min)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0b9e435fec97..7fcf02c9bfa7 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -10,6 +10,7 @@
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
+#include <linux/wbt.h>
#include "blk.h"
#include "blk-mq.h"
@@ -41,6 +42,19 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
+static ssize_t queue_var_store64(u64 *var, const char *page)
+{
+ int err;
+ u64 v;
+
+ err = kstrtou64(page, 10, &v);
+ if (err < 0)
+ return err;
+
+ *var = v;
+ return 0;
+}
+
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
return queue_var_show(q->nr_requests, (page));
@@ -347,6 +361,58 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_wb_win_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "%llu\n", div_u64(q->rq_wb->win_nsec, 1000));
+}
+
+static ssize_t queue_wb_win_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ ssize_t ret;
+ u64 val;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ ret = queue_var_store64(&val, page);
+ if (ret < 0)
+ return ret;
+
+ q->rq_wb->win_nsec = val * 1000ULL;
+ wbt_update_limits(q->rq_wb);
+ return count;
+}
+
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ ssize_t ret;
+ u64 val;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ ret = queue_var_store64(&val, page);
+ if (ret < 0)
+ return ret;
+
+ q->rq_wb->min_lat_nsec = val * 1000ULL;
+ wbt_update_limits(q->rq_wb);
+ return count;
+}
+
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -551,6 +617,18 @@ static struct queue_sysfs_entry queue_stats_entry = {
.show = queue_stats_show,
};
+static struct queue_sysfs_entry queue_wb_lat_entry = {
+ .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_lat_show,
+ .store = queue_wb_lat_store,
+};
+
+static struct queue_sysfs_entry queue_wb_win_entry = {
+ .attr = {.name = "wbt_window_usec", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_win_show,
+ .store = queue_wb_win_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -579,6 +657,8 @@ static struct attribute *default_attrs[] = {
&queue_wc_entry.attr,
&queue_dax_entry.attr,
&queue_stats_entry.attr,
+ &queue_wb_lat_entry.attr,
+ &queue_wb_win_entry.attr,
NULL,
};
@@ -693,6 +773,43 @@ struct kobj_type blk_queue_ktype = {
.release = blk_release_queue,
};
+static void blk_wb_stat_get(void *data, struct blk_rq_stat *stat)
+{
+ blk_queue_stat_get(data, stat);
+}
+
+static void blk_wb_stat_clear(void *data)
+{
+ blk_stat_clear(data);
+}
+
+static struct wb_stat_ops wb_stat_ops = {
+ .get = blk_wb_stat_get,
+ .clear = blk_wb_stat_clear,
+};
+
+static void blk_wb_init(struct request_queue *q)
+{
+ struct rq_wb *rwb;
+
+ rwb = wbt_init(&q->backing_dev_info, &wb_stat_ops, q);
+
+ /*
+ * If this fails, we don't get throttling
+ */
+ if (IS_ERR(rwb))
+ return;
+
+ if (blk_queue_nonrot(q))
+ rwb->min_lat_nsec = 2000000ULL;
+ else
+ rwb->min_lat_nsec = 75000000ULL;
+
+ wbt_set_queue_depth(rwb, blk_queue_depth(q));
+ wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+ q->rq_wb = rwb;
+}
+
int blk_register_queue(struct gendisk *disk)
{
int ret;
@@ -732,6 +849,8 @@ int blk_register_queue(struct gendisk *disk)
if (q->mq_ops)
blk_mq_register_disk(disk);
+ blk_wb_init(q);
+
if (!q->request_fn)
return 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cc2f6dbd4303..ef61bda76317 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3777,6 +3777,18 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
return;
/*
+ * If we have a non-root cgroup, we can depend on that to
+ * do proper throttling of writes. Turn off wbt for that
+ * case.
+ */
+ if (bio_blkcg(bio) != &blkcg_root) {
+ struct request_queue *q = cfqd->queue;
+
+ if (q->rq_wb)
+ wbt_disable(q->rq_wb);
+ }
+
+ /*
* Drop reference to queues. New queues will be assigned in new
* group upon arrival of fresh requests.
*/
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 259eba88f991..45256d75c4b7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -24,6 +24,7 @@
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
+#include <linux/wbt.h>
struct module;
struct scsi_ioctl_command;
@@ -37,6 +38,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
+struct rq_wb;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -151,7 +153,7 @@ struct request {
struct gendisk *rq_disk;
struct hd_struct *part;
unsigned long start_time;
- s64 issue_time;
+ struct wb_issue_stat wb_stat;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
@@ -303,6 +305,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ struct rq_wb *rq_wb;
+
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
* is used, root blkg allocates from @q->root_rl and all other
--
2.7.4
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [PATCH 8/8] writeback: throttle buffered writeback
2016-09-07 14:46 [PATCH 0/8] Throttled background buffered writeback v7 Jens Axboe
@ 2016-09-07 14:46 ` Jens Axboe
0 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-09-07 14:46 UTC (permalink / raw)
To: axboe, linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
Test patch that throttles buffered writeback to make it a lot
smoother, with far less impact on other system activity.
Background writeback should be, by definition, background
activity. The fact that we flush huge bundles of it at a time
means that it potentially has heavy impacts on foreground workloads,
which isn't ideal. We can't easily limit the sizes of writes that
we do, since that would impact file system layout in the presence
of delayed allocation. So just throttle back buffered writeback,
unless someone is waiting for it.
The algorithm for when to throttle takes its inspiration from the
CoDel networking scheduling algorithm. Like CoDel, blk-wb monitors
the minimum latencies of requests over a window of time. In that
window of time, if the minimum latency of any request exceeds a
given target, then a scale count is incremented and the queue depth
is shrunk. The next monitoring window is shrunk accordingly. Unlike
CoDel, if we hit a window that exhibits good behavior, then we
simply increment the scale count and re-calculate the limits for that
scale value. This prevents us from oscillating between a
close-to-ideal value and max all the time, instead remaining in the
windows where we get good behavior.
Unlike CoDel, blk-wb allows the scale count to go negative. This
happens if we primarily have writes going on. Unlike positive
scale counts, this doesn't change the size of the monitoring window.
When the heavy writers finish, blk-wb quickly snaps back to its
stable state of a zero scale count.
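A stand-alone sketch of that window sizing rule (the divisor and the
names here are illustrative assumptions, not the lib/wbt.c code):
positive steps shrink the monitoring window, zero or negative steps
leave it at its configured size.

#include <stdio.h>

static unsigned long long window_nsec(unsigned long long win_nsec,
				      int scale_step)
{
	/* only steps in the "shrink depth" direction shrink the window */
	if (scale_step <= 0)
		return win_nsec;

	return win_nsec / (scale_step + 1);
}

int main(void)
{
	int step;

	for (step = -2; step <= 2; step++)
		printf("scale_step=%d -> window=%llu nsec\n",
		       step, window_nsec(100000000ULL, step));
	return 0;
}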
The patch registers two sysfs entries. The first one, 'wb_window_usec',
defines the window of monitoring. The second one, 'wb_lat_usec',
sets the latency target for the window. It defaults to 2 msec for
non-rotational storage, and 75 msec for rotational storage. Setting
this value to '0' disables blk-wb. Generally, a user would not have
to touch these settings.
We don't enable WBT on devices that are managed with CFQ, and have
a non-root block cgroup attached. If we have a proportional share setup
on this particular disk, then the wbt throttling will interfere with
that. We don't have a strong need for wbt for that case, since we will
rely on CFQ doing that for us.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
Documentation/block/queue-sysfs.txt | 13 ++++
block/Kconfig | 1 +
block/blk-core.c | 20 +++++-
block/blk-mq.c | 30 ++++++++-
block/blk-settings.c | 3 +
block/blk-stat.c | 5 +-
block/blk-sysfs.c | 125 ++++++++++++++++++++++++++++++++++++
block/cfq-iosched.c | 12 ++++
include/linux/blkdev.h | 6 +-
9 files changed, 206 insertions(+), 9 deletions(-)
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 2a3904030dea..2847219ebd8c 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -169,5 +169,18 @@ This is the number of bytes the device can write in a single write-same
command. A value of '0' means write-same is not supported by this
device.
+wb_lat_usec (RW)
+----------------
+If the device is registered for writeback throttling, then this file shows
+the target minimum read latency. If this latency is exceeded in a given
+window of time (see wb_window_usec), then the writeback throttling will start
+scaling back writes.
+
+wb_window_usec (RW)
+-------------------
+If the device is registered for writeback throttling, then this file shows
+the value of the monitoring window in which we'll look at the target
+latency. See wb_lat_usec.
+
Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/block/Kconfig b/block/Kconfig
index 161491d0a879..6da79e670709 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -4,6 +4,7 @@
menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
+ select WBT
help
Provide block layer support for the kernel.
diff --git a/block/blk-core.c b/block/blk-core.c
index 4075cbeb720e..4f4ce050290c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,6 +33,7 @@
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
+#include <linux/wbt.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -882,6 +883,8 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
fail:
blk_free_flush_queue(q->fq);
+ wbt_exit(q->rq_wb);
+ q->rq_wb = NULL;
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1346,6 +1349,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
+ wbt_requeue(q->rq_wb, &rq->wb_stat);
if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
@@ -1436,6 +1440,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
+ wbt_done(q->rq_wb, &req->wb_stat);
+
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -1667,6 +1673,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
+ unsigned int wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -1719,6 +1726,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, q->queue_lock);
+
/*
* This sync check and mask will be re-done in init_request_from_bio(),
* but we need to set it earlier to expose the sync flag to the
@@ -1738,11 +1747,15 @@ get_rq:
*/
req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) {
+ if (wb_acct & WBT_TRACKED)
+ __wbt_done(q->rq_wb);
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}
+ wbt_track(&req->wb_stat, wb_acct);
+
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
@@ -2475,7 +2488,7 @@ void blk_start_request(struct request *req)
{
blk_dequeue_request(req);
- req->issue_time = ktime_to_ns(ktime_get());
+ wbt_issue(req->q->rq_wb, &req->wb_stat);
/*
* We are now handing the request to the hardware, initialize
@@ -2713,9 +2726,10 @@ void blk_finish_request(struct request *req, int error)
blk_account_io_done(req);
- if (req->end_io)
+ if (req->end_io) {
+ wbt_done(req->q->rq_wb, &req->wb_stat);
req->end_io(req, error);
- else {
+ } else {
if (blk_bidi_rq(req))
__blk_put_request(req->next_rq->q, req->next_rq);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 712f141a6f1a..511289a4626a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -22,6 +22,7 @@
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
+#include <linux/wbt.h>
#include <trace/events/block.h>
@@ -319,6 +320,8 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
if (rq->cmd_flags & REQ_MQ_INFLIGHT)
atomic_dec(&hctx->nr_active);
+
+ wbt_done(q->rq_wb, &rq->wb_stat);
rq->cmd_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
@@ -351,6 +354,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
blk_account_io_done(rq);
if (rq->end_io) {
+ wbt_done(rq->q->rq_wb, &rq->wb_stat);
rq->end_io(rq, error);
} else {
if (unlikely(blk_bidi_rq(rq)))
@@ -457,7 +461,7 @@ void blk_mq_start_request(struct request *rq)
if (unlikely(blk_bidi_rq(rq)))
rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
- rq->issue_time = ktime_to_ns(ktime_get());
+ wbt_issue(q->rq_wb, &rq->wb_stat);
blk_add_timer(rq);
@@ -494,6 +498,7 @@ static void __blk_mq_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
trace_block_rq_requeue(q, rq);
+ wbt_requeue(q->rq_wb, &rq->wb_stat);
if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -1312,6 +1317,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
+ unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1326,9 +1332,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct & WBT_TRACKED)
+ __wbt_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ wbt_track(&rq->wb_stat, wb_acct);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1405,6 +1418,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
struct blk_map_ctx data;
struct request *rq;
blk_qc_t cookie;
+ unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1421,9 +1435,16 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
} else
request_count = blk_plug_queued_count(q);
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct & WBT_TRACKED)
+ __wbt_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ wbt_track(&rq->wb_stat, wb_acct);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -2147,6 +2168,9 @@ void blk_mq_free_queue(struct request_queue *q)
list_del_init(&q->all_q_node);
mutex_unlock(&all_q_mutex);
+ wbt_exit(q->rq_wb);
+ q->rq_wb = NULL;
+
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f7e122e717e8..746dc9fee1ac 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -840,6 +840,7 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
q->queue_depth = depth;
+ wbt_set_queue_depth(q->rq_wb, depth);
}
EXPORT_SYMBOL(blk_set_queue_depth);
@@ -863,6 +864,8 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
else
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
+
+ wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 3965e8a258c8..bdb16d84b914 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -178,15 +178,16 @@ bool blk_stat_is_current(struct blk_rq_stat *stat)
void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
{
s64 now, value;
+ u64 rq_time = wbt_issue_stat_get_time(&rq->wb_stat);
now = ktime_to_ns(ktime_get());
- if (now < rq->issue_time)
+ if (now < rq_time)
return;
if (!__blk_stat_is_current(stat, now))
__blk_stat_init(stat, now);
- value = now - rq->issue_time;
+ value = now - rq_time;
if (value > stat->max)
stat->max = value;
if (value < stat->min)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0b9e435fec97..85c3dc22307b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -10,6 +10,7 @@
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
+#include <linux/wbt.h>
#include "blk.h"
#include "blk-mq.h"
@@ -41,6 +42,19 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
+static ssize_t queue_var_store64(u64 *var, const char *page)
+{
+ int err;
+ u64 v;
+
+ err = kstrtou64(page, 10, &v);
+ if (err < 0)
+ return err;
+
+ *var = v;
+ return 0;
+}
+
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
return queue_var_show(q->nr_requests, (page));
@@ -347,6 +361,58 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_wb_win_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "%llu\n", div_u64(q->rq_wb->win_nsec, 1000));
+}
+
+static ssize_t queue_wb_win_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ ssize_t ret;
+ u64 val;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ ret = queue_var_store64(&val, page);
+ if (ret < 0)
+ return ret;
+
+ q->rq_wb->win_nsec = val * 1000ULL;
+ wbt_update_limits(q->rq_wb);
+ return count;
+}
+
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ ssize_t ret;
+ u64 val;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ ret = queue_var_store64(&val, page);
+ if (ret < 0)
+ return ret;
+
+ q->rq_wb->min_lat_nsec = val * 1000ULL;
+ wbt_update_limits(q->rq_wb);
+ return count;
+}
+
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -551,6 +617,18 @@ static struct queue_sysfs_entry queue_stats_entry = {
.show = queue_stats_show,
};
+static struct queue_sysfs_entry queue_wb_lat_entry = {
+ .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_lat_show,
+ .store = queue_wb_lat_store,
+};
+
+static struct queue_sysfs_entry queue_wb_win_entry = {
+ .attr = {.name = "wbt_window_usec", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_win_show,
+ .store = queue_wb_win_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -579,6 +657,8 @@ static struct attribute *default_attrs[] = {
&queue_wc_entry.attr,
&queue_dax_entry.attr,
&queue_stats_entry.attr,
+ &queue_wb_lat_entry.attr,
+ &queue_wb_win_entry.attr,
NULL,
};
@@ -693,6 +773,49 @@ struct kobj_type blk_queue_ktype = {
.release = blk_release_queue,
};
+static void blk_wb_stat_get(void *data, struct blk_rq_stat *stat)
+{
+ blk_queue_stat_get(data, stat);
+}
+
+static void blk_wb_stat_clear(void *data)
+{
+ blk_stat_clear(data);
+}
+
+static bool blk_wb_stat_is_current(struct blk_rq_stat *stat)
+{
+ return blk_stat_is_current(stat);
+}
+
+static struct wb_stat_ops wb_stat_ops = {
+ .get = blk_wb_stat_get,
+ .is_current = blk_wb_stat_is_current,
+ .clear = blk_wb_stat_clear,
+};
+
+static void blk_wb_init(struct request_queue *q)
+{
+ struct rq_wb *rwb;
+
+ rwb = wbt_init(&q->backing_dev_info, &wb_stat_ops, q);
+
+ /*
+ * If this fails, we don't get throttling
+ */
+ if (IS_ERR(rwb))
+ return;
+
+ if (blk_queue_nonrot(q))
+ rwb->min_lat_nsec = 2000000ULL;
+ else
+ rwb->min_lat_nsec = 75000000ULL;
+
+ wbt_set_queue_depth(rwb, blk_queue_depth(q));
+ wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+ q->rq_wb = rwb;
+}
+
int blk_register_queue(struct gendisk *disk)
{
int ret;
@@ -732,6 +855,8 @@ int blk_register_queue(struct gendisk *disk)
if (q->mq_ops)
blk_mq_register_disk(disk);
+ blk_wb_init(q);
+
if (!q->request_fn)
return 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cc2f6dbd4303..ef61bda76317 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3777,6 +3777,18 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
return;
/*
+ * If we have a non-root cgroup, we can depend on that to
+ * do proper throttling of writes. Turn off wbt for that
+ * case.
+ */
+ if (bio_blkcg(bio) != &blkcg_root) {
+ struct request_queue *q = cfqd->queue;
+
+ if (q->rq_wb)
+ wbt_disable(q->rq_wb);
+ }
+
+ /*
* Drop reference to queues. New queues will be assigned in new
* group upon arrival of fresh requests.
*/
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 259eba88f991..45256d75c4b7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -24,6 +24,7 @@
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
+#include <linux/wbt.h>
struct module;
struct scsi_ioctl_command;
@@ -37,6 +38,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
+struct rq_wb;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -151,7 +153,7 @@ struct request {
struct gendisk *rq_disk;
struct hd_struct *part;
unsigned long start_time;
- s64 issue_time;
+ struct wb_issue_stat wb_stat;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
@@ -303,6 +305,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ struct rq_wb *rq_wb;
+
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
* is used, root blkg allocates from @q->root_rl and all other
--
2.7.4
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [PATCH 8/8] writeback: throttle buffered writeback
2016-04-26 15:55 [PATCHSET v5] Make background writeback great again for the first time Jens Axboe
@ 2016-04-26 15:55 ` Jens Axboe
0 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-04-26 15:55 UTC (permalink / raw)
To: linux-kernel, linux-fsdevel, linux-block
Cc: jack, dchinner, sedat.dilek, Jens Axboe
Test patch that throttles buffered writeback to make it a lot
smoother, with far less impact on other system activity.
Background writeback should be, by definition, background
activity. The fact that we flush huge bundles of it at a time
means that it potentially has heavy impacts on foreground workloads,
which isn't ideal. We can't easily limit the sizes of writes that
we do, since that would impact file system layout in the presence
of delayed allocation. So just throttle back buffered writeback,
unless someone is waiting for it.
The algorithm for when to throttle takes its inspiration from the
CoDel networking scheduling algorithm. Like CoDel, blk-wb monitors
the minimum latencies of requests over a window of time. In that
window of time, if the minimum latency of any request exceeds a
given target, then a scale count is incremented and the queue depth
is shrunk. The next monitoring window is shrunk accordingly. Unlike
CoDel, if we hit a window that exhibits good behavior, then we
simply increment the scale count and re-calculate the limits for that
scale value. This prevents us from oscillating between a
close-to-ideal value and max all the time, instead remaining in the
windows where we get good behavior.
The patch registers two sysfs entries. The first one, 'wb_window_usec',
defines the window of monitoring. The second one, 'wb_lat_usec',
sets the latency target for the window. It defaults to 2 msec for
non-rotational storage, and 75 msec for rotational storage. Setting
this value to '0' disables blk-wb. Generally, a user would not have
to touch these settings.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
Documentation/block/queue-sysfs.txt | 13 ++++
block/Kconfig | 1 +
block/blk-core.c | 21 ++++++-
block/blk-mq.c | 32 +++++++++-
block/blk-settings.c | 3 +
block/blk-stat.c | 5 +-
block/blk-sysfs.c | 119 ++++++++++++++++++++++++++++++++++++
include/linux/blkdev.h | 6 +-
8 files changed, 191 insertions(+), 9 deletions(-)
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index dce25d848d92..9bc990abef4d 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -151,5 +151,18 @@ device state. This means that it might not be safe to toggle the
setting from "write back" to "write through", since that will also
eliminate cache flushes issued by the kernel.
+wb_lat_usec (RW)
+----------------
+If the device is registered for writeback throttling, then this file shows
+the target minimum read latency. If this latency is exceeded in a given
+window of time (see wb_window_usec), then the writeback throttling will start
+scaling back writes.
+
+wb_window_usec (RW)
+-------------------
+If the device is registered for writeback throttling, then this file shows
+the value of the monitoring window in which we'll look at the target
+latency. See wb_lat_usec.
+
Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/block/Kconfig b/block/Kconfig
index 0363cd731320..d4c2ff4b9b2c 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -4,6 +4,7 @@
menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
+ select WBT
help
Provide block layer support for the kernel.
diff --git a/block/blk-core.c b/block/blk-core.c
index 40b57bf4852c..c166d46a09d1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,6 +33,7 @@
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
+#include <linux/wbt.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -880,6 +881,8 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
fail:
blk_free_flush_queue(q->fq);
+ wbt_exit(q->rq_wb);
+ q->rq_wb = NULL;
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1395,6 +1398,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
+ wbt_requeue(q->rq_wb, &rq->wb_stat);
if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
@@ -1485,6 +1489,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
+ wbt_done(q->rq_wb, &req->wb_stat);
+
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -1714,6 +1720,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
+ bool wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -1766,6 +1773,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_rw, q->queue_lock);
+
/*
* This sync check and mask will be re-done in init_request_from_bio(),
* but we need to set it earlier to expose the sync flag to the
@@ -1781,11 +1790,16 @@ get_rq:
*/
req = get_request(q, rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) {
+ if (wb_acct)
+ __wbt_done(q->rq_wb);
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}
+ if (wb_acct)
+ wbt_mark_tracked(&req->wb_stat);
+
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
@@ -2514,7 +2528,7 @@ void blk_start_request(struct request *req)
{
blk_dequeue_request(req);
- req->issue_time = ktime_to_ns(ktime_get());
+ wbt_issue(req->q->rq_wb, &req->wb_stat);
/*
* We are now handing the request to the hardware, initialize
@@ -2752,9 +2766,10 @@ void blk_finish_request(struct request *req, int error)
blk_account_io_done(req);
- if (req->end_io)
+ if (req->end_io) {
+ wbt_done(req->q->rq_wb, &req->wb_stat);
req->end_io(req, error);
- else {
+ } else {
if (blk_bidi_rq(req))
__blk_put_request(req->next_rq->q, req->next_rq);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 71b4a13fbf94..556229e4da92 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -22,6 +22,7 @@
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
+#include <linux/wbt.h>
#include <trace/events/block.h>
@@ -275,6 +276,8 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
if (rq->cmd_flags & REQ_MQ_INFLIGHT)
atomic_dec(&hctx->nr_active);
+
+ wbt_done(q->rq_wb, &rq->wb_stat);
rq->cmd_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
@@ -307,6 +310,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
blk_account_io_done(rq);
if (rq->end_io) {
+ wbt_done(rq->q->rq_wb, &rq->wb_stat);
rq->end_io(rq, error);
} else {
if (unlikely(blk_bidi_rq(rq)))
@@ -413,7 +417,7 @@ void blk_mq_start_request(struct request *rq)
if (unlikely(blk_bidi_rq(rq)))
rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
- rq->issue_time = ktime_to_ns(ktime_get());
+ wbt_issue(q->rq_wb, &rq->wb_stat);
blk_add_timer(rq);
@@ -450,6 +454,7 @@ static void __blk_mq_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
trace_block_rq_requeue(q, rq);
+ wbt_requeue(q->rq_wb, &rq->wb_stat);
if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -1265,6 +1270,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
+ bool wb_acct;
blk_queue_bounce(q, &bio);
@@ -1282,9 +1288,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
} else
request_count = blk_plug_queued_count(q);
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_rw, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct)
+ __wbt_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ if (wb_acct)
+ wbt_mark_tracked(&rq->wb_stat);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1361,6 +1375,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
struct blk_map_ctx data;
struct request *rq;
blk_qc_t cookie;
+ bool wb_acct;
blk_queue_bounce(q, &bio);
@@ -1375,9 +1390,17 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_rw, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct)
+ __wbt_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ if (wb_acct)
+ wbt_mark_tracked(&rq->wb_stat);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -2111,6 +2134,9 @@ void blk_mq_free_queue(struct request_queue *q)
list_del_init(&q->all_q_node);
mutex_unlock(&all_q_mutex);
+ wbt_exit(q->rq_wb);
+ q->rq_wb = NULL;
+
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f7e122e717e8..746dc9fee1ac 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -840,6 +840,7 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
q->queue_depth = depth;
+ wbt_set_queue_depth(q->rq_wb, depth);
}
EXPORT_SYMBOL(blk_set_queue_depth);
@@ -863,6 +864,8 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
else
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
+
+ wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
diff --git a/block/blk-stat.c b/block/blk-stat.c
index b38776a83173..8e3974d87c1f 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -143,15 +143,16 @@ void blk_stat_init(struct blk_rq_stat *stat)
void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
{
s64 delta, now, value;
+ u64 rq_time = wbt_issue_stat_get_time(&rq->wb_stat);
now = ktime_to_ns(ktime_get());
- if (now < rq->issue_time)
+ if (now < rq_time)
return;
if ((now & BLK_STAT_MASK) != (stat->time & BLK_STAT_MASK))
__blk_stat_init(stat, now);
- value = now - rq->issue_time;
+ value = now - rq_time;
if (value > stat->max)
stat->max = value;
if (value < stat->min)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6e516cc0d3d0..df194bf93598 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -10,6 +10,7 @@
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
+#include <linux/wbt.h>
#include "blk.h"
#include "blk-mq.h"
@@ -41,6 +42,19 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
+static ssize_t queue_var_store64(u64 *var, const char *page)
+{
+ int err;
+ u64 v;
+
+ err = kstrtou64(page, 10, &v);
+ if (err < 0)
+ return err;
+
+ *var = v;
+ return 0;
+}
+
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
return queue_var_show(q->nr_requests, (page));
@@ -347,6 +361,58 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_wb_win_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "%llu\n", div_u64(q->rq_wb->win_nsec, 1000));
+}
+
+static ssize_t queue_wb_win_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ ssize_t ret;
+ u64 val;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ ret = queue_var_store64(&val, page);
+ if (ret < 0)
+ return ret;
+
+ q->rq_wb->win_nsec = val * 1000ULL;
+ wbt_update_limits(q->rq_wb);
+ return count;
+}
+
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ ssize_t ret;
+ u64 val;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ ret = queue_var_store64(&val, page);
+ if (ret < 0)
+ return ret;
+
+ q->rq_wb->min_lat_nsec = val * 1000ULL;
+ wbt_update_limits(q->rq_wb);
+ return count;
+}
+
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -541,6 +607,18 @@ static struct queue_sysfs_entry queue_stats_entry = {
.show = queue_stats_show,
};
+static struct queue_sysfs_entry queue_wb_lat_entry = {
+ .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_lat_show,
+ .store = queue_wb_lat_store,
+};
+
+static struct queue_sysfs_entry queue_wb_win_entry = {
+ .attr = {.name = "wbt_window_usec", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_win_show,
+ .store = queue_wb_win_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -568,6 +646,8 @@ static struct attribute *default_attrs[] = {
&queue_poll_entry.attr,
&queue_wc_entry.attr,
&queue_stats_entry.attr,
+ &queue_wb_lat_entry.attr,
+ &queue_wb_win_entry.attr,
NULL,
};
@@ -682,6 +762,43 @@ struct kobj_type blk_queue_ktype = {
.release = blk_release_queue,
};
+static void blk_wb_stat_get(void *data, struct blk_rq_stat *stat)
+{
+ blk_queue_stat_get(data, stat);
+}
+
+static void blk_wb_stat_clear(void *data)
+{
+ blk_stat_clear(data);
+}
+
+static struct wb_stat_ops wb_stat_ops = {
+ .get = blk_wb_stat_get,
+ .clear = blk_wb_stat_clear,
+};
+
+static void blk_wb_init(struct request_queue *q)
+{
+ struct rq_wb *rwb;
+
+ rwb = wbt_init(&q->backing_dev_info, &wb_stat_ops, q);
+
+ /*
+ * If this fails, we don't get throttling
+ */
+ if (IS_ERR(rwb))
+ return;
+
+ if (blk_queue_nonrot(q))
+ rwb->min_lat_nsec = 2000000ULL;
+ else
+ rwb->min_lat_nsec = 75000000ULL;
+
+ wbt_set_queue_depth(rwb, blk_queue_depth(q));
+ wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+ q->rq_wb = rwb;
+}
+
int blk_register_queue(struct gendisk *disk)
{
int ret;
@@ -721,6 +838,8 @@ int blk_register_queue(struct gendisk *disk)
if (q->mq_ops)
blk_mq_register_disk(disk);
+ blk_wb_init(q);
+
if (!q->request_fn)
return 0;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 87f6703ced71..a89f46c58d5f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -24,6 +24,7 @@
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
+#include <linux/wbt.h>
struct module;
struct scsi_ioctl_command;
@@ -37,6 +38,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
+struct rq_wb;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -153,7 +155,7 @@ struct request {
struct gendisk *rq_disk;
struct hd_struct *part;
unsigned long start_time;
- s64 issue_time;
+ struct wb_issue_stat wb_stat;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
@@ -291,6 +293,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ struct rq_wb *rq_wb;
+
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
* is used, root blkg allocates from @q->root_rl and all other
--
2.8.0.rc4.6.g7e4ba36
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [PATCH 8/8] writeback: throttle buffered writeback
2016-04-18 4:24 [PATCHSET v4 0/8] Make background writeback not suck Jens Axboe
@ 2016-04-18 4:24 ` Jens Axboe
2016-04-23 8:21 ` xiakaixu
0 siblings, 1 reply; 23+ messages in thread
From: Jens Axboe @ 2016-04-18 4:24 UTC (permalink / raw)
To: linux-kernel, linux-fsdevel, linux-block; +Cc: jack, dchinner, Jens Axboe
Test patch that throttles buffered writeback to make it a lot
smoother, with far less impact on other system activity.
Background writeback should be, by definition, background
activity. The fact that we flush huge bundles of it at a time
means that it potentially has heavy impacts on foreground workloads,
which isn't ideal. We can't easily limit the sizes of writes that
we do, since that would impact file system layout in the presence
of delayed allocation. So just throttle back buffered writeback,
unless someone is waiting for it.
The algorithm for when to throttle takes its inspiration from the
CoDel networking scheduling algorithm. Like CoDel, blk-wb monitors
the minimum latencies of requests over a window of time. In that
window of time, if the minimum latency of any request exceeds a
given target, then a scale count is incremented and the queue depth
is shrunk. The next monitoring window is shrunk accordingly. Unlike
CoDel, if we hit a window that exhibits good behavior, then we
simply increment the scale count and re-calculate the limits for that
scale value. This prevents us from oscillating between a
close-to-ideal value and max all the time, instead remaining in the
windows where we get good behavior.
The patch registers two sysfs entries. The first one, 'wb_lat_usec',
sets the latency target for the window. It defaults to 2 msec for
non-rotational storage, and 75 msec for rotational storage. Setting
this value to '0' disables blk-wb.
The second entry, 'wb_stats', is a debug entry that simply shows the
current internal state of the throttling machine:
$ cat /sys/block/nvme0n1/queue/wb_stats
background=16, normal=32, max=64, inflight=0, wait=0, bdp_wait=0
'background' denotes how many requests we will allow in-flight for
idle background buffered writeback, 'normal' for higher priority
writeback, and 'max' for when it's urgent we clean pages.
'inflight' shows how many requests are currently in-flight for
buffered writeback, 'wait' shows if anyone is currently waiting for
access, and 'bdp_wait' shows if someone is currently throttled on this
device in balance_dirty_pages().
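Going by the numbers above, the three limits scale together: 'normal' is
half of 'max' and 'background' a quarter of it. A stand-alone sketch of
that relationship (the helper name and rounding are illustrative, not
lifted from blk-wb.c):

#include <stdio.h>

static void wb_limits(unsigned int max, unsigned int *normal,
		      unsigned int *background)
{
	*normal = (max + 1) / 2;
	*background = (max + 3) / 4;
}

int main(void)
{
	unsigned int normal, background;

	/* reproduces the wb_stats example: background=16, normal=32, max=64 */
	wb_limits(64, &normal, &background);
	printf("background=%u, normal=%u, max=%u\n", background, normal, 64);
	return 0;
}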
blk-wb also registers a few trace events that can be used to monitor
the state changes:
block_wb_lat: Latency 2446318
block_wb_stat: read lat: mean=2446318, min=2446318, max=2446318, samples=1,
write lat: mean=518866, min=15522, max=5330353, samples=57
block_wb_step: step down: step=1, background=8, normal=16, max=32
'block_wb_lat' logs a violation in sync issue latency, 'block_wb_stat'
logs a window violation of latencies and dumps the stats that led to
it, and finally, 'block_wb_step' logs a step up/down and the new
limits associated with that state.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
block/Makefile | 2 +-
block/blk-core.c | 15 ++
block/blk-mq.c | 31 ++-
block/blk-settings.c | 4 +
block/blk-sysfs.c | 57 +++++
block/blk-wb.c | 495 +++++++++++++++++++++++++++++++++++++++++++
block/blk-wb.h | 42 ++++
include/linux/blk_types.h | 2 +
include/linux/blkdev.h | 3 +
include/trace/events/block.h | 98 +++++++++
10 files changed, 746 insertions(+), 3 deletions(-)
create mode 100644 block/blk-wb.c
create mode 100644 block/blk-wb.h
diff --git a/block/Makefile b/block/Makefile
index 3446e0472df0..7e4be7a56a59 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
+ blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o blk-wb.o \
blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
badblocks.o partitions/
diff --git a/block/blk-core.c b/block/blk-core.c
index 40b57bf4852c..d941f69dfb4b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -39,6 +39,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-wb.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -880,6 +881,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
fail:
blk_free_flush_queue(q->fq);
+ blk_wb_exit(q);
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1395,6 +1397,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
+ blk_wb_requeue(q->rq_wb, rq);
if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
@@ -1485,6 +1488,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
+ blk_wb_done(q->rq_wb, req);
+
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -1714,6 +1719,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
+ bool wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -1766,6 +1772,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
+ wb_acct = blk_wb_wait(q->rq_wb, bio, q->queue_lock);
+
/*
* This sync check and mask will be re-done in init_request_from_bio(),
* but we need to set it earlier to expose the sync flag to the
@@ -1781,11 +1789,16 @@ get_rq:
*/
req = get_request(q, rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) {
+ if (wb_acct)
+ __blk_wb_done(q->rq_wb);
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}
+ if (wb_acct)
+ req->cmd_flags |= REQ_BUF_INFLIGHT;
+
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
@@ -2515,6 +2528,7 @@ void blk_start_request(struct request *req)
blk_dequeue_request(req);
req->issue_time = ktime_to_ns(ktime_get());
+ blk_wb_issue(req->q->rq_wb, req);
/*
* We are now handing the request to the hardware, initialize
@@ -2751,6 +2765,7 @@ void blk_finish_request(struct request *req, int error)
blk_unprep_request(req);
blk_account_io_done(req);
+ blk_wb_done(req->q->rq_wb, req);
if (req->end_io)
req->end_io(req, error);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 71b4a13fbf94..c0c5207fe7fd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -30,6 +30,7 @@
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
+#include "blk-wb.h"
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
@@ -275,6 +276,9 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
if (rq->cmd_flags & REQ_MQ_INFLIGHT)
atomic_dec(&hctx->nr_active);
+
+ blk_wb_done(q->rq_wb, rq);
+
rq->cmd_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
@@ -305,6 +309,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, int error)
{
blk_account_io_done(rq);
+ blk_wb_done(rq->q->rq_wb, rq);
if (rq->end_io) {
rq->end_io(rq, error);
@@ -414,6 +419,7 @@ void blk_mq_start_request(struct request *rq)
rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
rq->issue_time = ktime_to_ns(ktime_get());
+ blk_wb_issue(q->rq_wb, rq);
blk_add_timer(rq);
@@ -450,6 +456,7 @@ static void __blk_mq_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
trace_block_rq_requeue(q, rq);
+ blk_wb_requeue(q->rq_wb, rq);
if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -1265,6 +1272,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
+ bool wb_acct;
blk_queue_bounce(q, &bio);
@@ -1282,9 +1290,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
} else
request_count = blk_plug_queued_count(q);
+ wb_acct = blk_wb_wait(q->rq_wb, bio, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct)
+ __blk_wb_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ if (wb_acct)
+ rq->cmd_flags |= REQ_BUF_INFLIGHT;
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1361,6 +1377,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
struct blk_map_ctx data;
struct request *rq;
blk_qc_t cookie;
+ bool wb_acct;
blk_queue_bounce(q, &bio);
@@ -1375,9 +1392,17 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
+ wb_acct = blk_wb_wait(q->rq_wb, bio, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct)
+ __blk_wb_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ if (wb_acct)
+ rq->cmd_flags |= REQ_BUF_INFLIGHT;
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -2111,6 +2136,8 @@ void blk_mq_free_queue(struct request_queue *q)
list_del_init(&q->all_q_node);
mutex_unlock(&all_q_mutex);
+ blk_wb_exit(q);
+
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f7e122e717e8..84bcfc22e020 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -13,6 +13,7 @@
#include <linux/gfp.h>
#include "blk.h"
+#include "blk-wb.h"
unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);
@@ -840,6 +841,9 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
q->queue_depth = depth;
+
+ if (q->rq_wb)
+ blk_wb_update_limits(q->rq_wb);
}
EXPORT_SYMBOL(blk_set_queue_depth);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6e516cc0d3d0..13f325deffa1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -13,6 +13,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-wb.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -347,6 +348,47 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_wb_stats_show(struct request_queue *q, char *page)
+{
+ struct rq_wb *rwb = q->rq_wb;
+
+ if (!rwb)
+ return -EINVAL;
+
+ return sprintf(page, "background=%d, normal=%d, max=%d, inflight=%d,"
+ " wait=%d, bdp_wait=%d\n", rwb->wb_background,
+ rwb->wb_normal, rwb->wb_max,
+ atomic_read(&rwb->inflight),
+ waitqueue_active(&rwb->wait),
+ atomic_read(rwb->bdp_wait));
+}
+
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "%llu\n", q->rq_wb->min_lat_nsec / 1000ULL);
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ u64 val;
+ int err;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ err = kstrtou64(page, 10, &val);
+ if (err < 0)
+ return err;
+
+ q->rq_wb->min_lat_nsec = val * 1000ULL;
+ blk_wb_update_limits(q->rq_wb);
+ return count;
+}
+
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -541,6 +583,17 @@ static struct queue_sysfs_entry queue_stats_entry = {
.show = queue_stats_show,
};
+static struct queue_sysfs_entry queue_wb_stats_entry = {
+ .attr = {.name = "wb_stats", .mode = S_IRUGO },
+ .show = queue_wb_stats_show,
+};
+
+static struct queue_sysfs_entry queue_wb_lat_entry = {
+ .attr = {.name = "wb_lat_usec", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_lat_show,
+ .store = queue_wb_lat_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -568,6 +621,8 @@ static struct attribute *default_attrs[] = {
&queue_poll_entry.attr,
&queue_wc_entry.attr,
&queue_stats_entry.attr,
+ &queue_wb_stats_entry.attr,
+ &queue_wb_lat_entry.attr,
NULL,
};
@@ -721,6 +776,8 @@ int blk_register_queue(struct gendisk *disk)
if (q->mq_ops)
blk_mq_register_disk(disk);
+ blk_wb_init(q);
+
if (!q->request_fn)
return 0;
diff --git a/block/blk-wb.c b/block/blk-wb.c
new file mode 100644
index 000000000000..1b1d80876930
--- /dev/null
+++ b/block/blk-wb.c
@@ -0,0 +1,495 @@
+/*
+ * buffered writeback throttling. loosely based on CoDel. We can't drop
+ * packets for IO scheduling, so the logic is something like this:
+ *
+ * - Monitor latencies in a defined window of time.
+ * - If the minimum latency in the above window exceeds some target, increment
+ * scaling step and scale down queue depth by a factor of 2x. The monitoring
+ * window is then shrunk to 100 / sqrt(scaling step + 1).
+ * - For any window where we don't have solid data on what the latencies
+ * look like, retain status quo.
+ * - If latencies look good, decrement scaling step.
+ *
+ * Copyright (C) 2016 Jens Axboe
+ *
+ * Things that (may) need changing:
+ *
+ * - Different scaling of background/normal/high priority writeback.
+ * We may have to violate guarantees for max.
+ * - We can have mismatches between the stat window and our window.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <trace/events/block.h>
+
+#include "blk.h"
+#include "blk-wb.h"
+#include "blk-stat.h"
+
+enum {
+ /*
+ * Might need to be higher
+ */
+ RWB_MAX_DEPTH = 64,
+
+ /*
+ * 100msec window
+ */
+ RWB_WINDOW_NSEC = 100 * 1000 * 1000ULL,
+
+ /*
+ * Disregard stats, if we don't meet these minimums
+ */
+ RWB_MIN_WRITE_SAMPLES = 3,
+ RWB_MIN_READ_SAMPLES = 1,
+
+ /*
+ * Target min latencies, in nsecs
+ */
+ RWB_ROT_LAT = 75000000ULL, /* 75 msec */
+ RWB_NONROT_LAT = 2000000ULL, /* 2 msec */
+};
+
+static inline bool rwb_enabled(struct rq_wb *rwb)
+{
+ return rwb && rwb->wb_normal != 0;
+}
+
+/*
+ * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
+ * false if 'v' + 1 would be bigger than 'below'.
+ */
+static bool atomic_inc_below(atomic_t *v, int below)
+{
+ int cur = atomic_read(v);
+
+ for (;;) {
+ int old;
+
+ if (cur >= below)
+ return false;
+ old = atomic_cmpxchg(v, cur, cur + 1);
+ if (old == cur)
+ break;
+ cur = old;
+ }
+
+ return true;
+}
+
+static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
+{
+ if (rwb_enabled(rwb)) {
+ const unsigned long cur = jiffies;
+
+ if (cur != *var)
+ *var = cur;
+ }
+}
+
+void __blk_wb_done(struct rq_wb *rwb)
+{
+ int inflight, limit = rwb->wb_normal;
+
+ /*
+ * If the device does write back caching, drop further down
+ * before we wake people up.
+ */
+ if (test_bit(QUEUE_FLAG_WC, &rwb->q->queue_flags) &&
+ !atomic_read(rwb->bdp_wait))
+ limit = 0;
+ else
+ limit = rwb->wb_normal;
+
+ /*
+ * Don't wake anyone up if we are above the normal limit. If
+ * throttling got disabled (limit == 0) with waiters, ensure
+ * that we wake them up.
+ */
+ inflight = atomic_dec_return(&rwb->inflight);
+ if (limit && inflight >= limit) {
+ if (!rwb->wb_max)
+ wake_up_all(&rwb->wait);
+ return;
+ }
+
+ if (waitqueue_active(&rwb->wait)) {
+ int diff = limit - inflight;
+
+ if (!inflight || diff >= rwb->wb_background / 2)
+ wake_up_nr(&rwb->wait, 1);
+ }
+}
+
+/*
+ * Called on completion of a request. Note that it's also called when
+ * a request is merged, when the request gets freed.
+ */
+void blk_wb_done(struct rq_wb *rwb, struct request *rq)
+{
+ if (!rwb)
+ return;
+
+ if (!(rq->cmd_flags & REQ_BUF_INFLIGHT)) {
+ if (rwb->sync_cookie == rq) {
+ rwb->sync_issue = 0;
+ rwb->sync_cookie = NULL;
+ }
+
+ wb_timestamp(rwb, &rwb->last_comp);
+ } else {
+ WARN_ON_ONCE(rq == rwb->sync_cookie);
+ __blk_wb_done(rwb);
+ rq->cmd_flags &= ~REQ_BUF_INFLIGHT;
+ }
+}
+
+static void calc_wb_limits(struct rq_wb *rwb)
+{
+ unsigned int depth;
+
+ if (!rwb->min_lat_nsec) {
+ rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
+ return;
+ }
+
+ depth = min_t(unsigned int, RWB_MAX_DEPTH, blk_queue_depth(rwb->q));
+
+ /*
+ * Reduce max depth by 50%, and re-calculate normal/bg based on that
+ */
+ rwb->wb_max = 1 + ((depth - 1) >> min(31U, rwb->scale_step));
+ rwb->wb_normal = (rwb->wb_max + 1) / 2;
+ rwb->wb_background = (rwb->wb_max + 3) / 4;
+}
+
+static bool inline stat_sample_valid(struct blk_rq_stat *stat)
+{
+ /*
+ * We need at least one read sample, and a minimum of
+ * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
+ * that it's writes impacting us, and not just some sole read on
+ * a device that is in a lower power state.
+ */
+ return stat[0].nr_samples >= 1 &&
+ stat[1].nr_samples >= RWB_MIN_WRITE_SAMPLES;
+}
+
+static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
+{
+ u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
+
+ if (!issue || !rwb->sync_cookie)
+ return 0;
+
+ now = ktime_to_ns(ktime_get());
+ return now - issue;
+}
+
+enum {
+ LAT_OK,
+ LAT_UNKNOWN,
+ LAT_EXCEEDED,
+};
+
+static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
+{
+ u64 thislat;
+
+ if (!stat_sample_valid(stat))
+ return LAT_UNKNOWN;
+
+ /*
+ * If the 'min' latency exceeds our target, step down.
+ */
+ if (stat[0].min > rwb->min_lat_nsec) {
+ trace_block_wb_lat(stat[0].min);
+ trace_block_wb_stat(stat);
+ return LAT_EXCEEDED;
+ }
+
+ /*
+ * If our stored sync issue exceeds the window size, or it
+ * exceeds our min target AND we haven't logged any entries,
+ * flag the latency as exceeded.
+ */
+ thislat = rwb_sync_issue_lat(rwb);
+ if (thislat > rwb->win_nsec ||
+ (thislat > rwb->min_lat_nsec && !stat[0].nr_samples)) {
+ trace_block_wb_lat(thislat);
+ return LAT_EXCEEDED;
+ }
+
+ if (rwb->scale_step)
+ trace_block_wb_stat(stat);
+
+ return LAT_OK;
+}
+
+static int latency_exceeded(struct rq_wb *rwb)
+{
+ struct blk_rq_stat stat[2];
+
+ blk_queue_stat_get(rwb->q, stat);
+
+ return __latency_exceeded(rwb, stat);
+}
+
+static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
+{
+ trace_block_wb_step(msg, rwb->scale_step, rwb->wb_background,
+ rwb->wb_normal, rwb->wb_max);
+}
+
+static void scale_up(struct rq_wb *rwb)
+{
+ /*
+ * If we're at 0, we can't go lower.
+ */
+ if (!rwb->scale_step)
+ return;
+
+ rwb->scale_step--;
+ calc_wb_limits(rwb);
+
+ if (waitqueue_active(&rwb->wait))
+ wake_up_all(&rwb->wait);
+
+ rwb_trace_step(rwb, "step up");
+}
+
+static void scale_down(struct rq_wb *rwb)
+{
+ /*
+ * Stop scaling down when we've hit the limit. This also prevents
+ * ->scale_step from going to crazy values, if the device can't
+ * keep up.
+ */
+ if (rwb->wb_max == 1)
+ return;
+
+ rwb->scale_step++;
+ blk_stat_clear(rwb->q);
+ calc_wb_limits(rwb);
+ rwb_trace_step(rwb, "step down");
+}
+
+static void rwb_arm_timer(struct rq_wb *rwb)
+{
+ unsigned long expires;
+
+ rwb->win_nsec = 1000000000ULL / int_sqrt((rwb->scale_step + 1) * 100);
+ expires = jiffies + nsecs_to_jiffies(rwb->win_nsec);
+ mod_timer(&rwb->window_timer, expires);
+}
+
+static void blk_wb_timer_fn(unsigned long data)
+{
+ struct rq_wb *rwb = (struct rq_wb *) data;
+ int status;
+
+ /*
+ * If we exceeded the latency target, step down. If we did not,
+ * step one level up. If we don't know enough to say either exceeded
+ * or ok, then don't do anything.
+ */
+ status = latency_exceeded(rwb);
+ switch (status) {
+ case LAT_EXCEEDED:
+ scale_down(rwb);
+ break;
+ case LAT_OK:
+ scale_up(rwb);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Re-arm timer, if we have IO in flight
+ */
+ if (rwb->scale_step || atomic_read(&rwb->inflight))
+ rwb_arm_timer(rwb);
+}
+
+void blk_wb_update_limits(struct rq_wb *rwb)
+{
+ rwb->scale_step = 0;
+ calc_wb_limits(rwb);
+
+ if (waitqueue_active(&rwb->wait))
+ wake_up_all(&rwb->wait);
+}
+
+static bool close_io(struct rq_wb *rwb)
+{
+ const unsigned long now = jiffies;
+
+ return time_before(now, rwb->last_issue + HZ / 10) ||
+ time_before(now, rwb->last_comp + HZ / 10);
+}
+
+#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)
+
+static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
+{
+ unsigned int limit;
+
+ /*
+ * At this point we know it's a buffered write. If REQ_SYNC is
+ * set, then it's WB_SYNC_ALL writeback, and we'll use the max
+ * limit for that. If the write is marked as a background write,
+ * then use the idle limit, or go to normal if we haven't had
+ * competing IO for a bit.
+ */
+ if ((rw & REQ_HIPRIO) || atomic_read(rwb->bdp_wait))
+ limit = rwb->wb_max;
+ else if ((rw & REQ_BG) || close_io(rwb)) {
+ /*
+ * If less than 100ms since we completed unrelated IO,
+ * limit us to half the depth for background writeback.
+ */
+ limit = rwb->wb_background;
+ } else
+ limit = rwb->wb_normal;
+
+ return limit;
+}
+
+static inline bool may_queue(struct rq_wb *rwb, unsigned long rw)
+{
+ /*
+ * inc it here even if disabled, since we'll dec it at completion.
+ * this only happens if the task was sleeping in __blk_wb_wait(),
+ * and someone turned it off at the same time.
+ */
+ if (!rwb_enabled(rwb)) {
+ atomic_inc(&rwb->inflight);
+ return true;
+ }
+
+ return atomic_inc_below(&rwb->inflight, get_limit(rwb, rw));
+}
+
+/*
+ * Block if we will exceed our limit, or if we are currently waiting for
+ * the timer to kick off queuing again.
+ */
+static void __blk_wb_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+{
+ DEFINE_WAIT(wait);
+
+ if (may_queue(rwb, rw))
+ return;
+
+ do {
+ prepare_to_wait_exclusive(&rwb->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ if (may_queue(rwb, rw))
+ break;
+
+ if (lock)
+ spin_unlock_irq(lock);
+
+ io_schedule();
+
+ if (lock)
+ spin_lock_irq(lock);
+ } while (1);
+
+ finish_wait(&rwb->wait, &wait);
+}
+
+/*
+ * Returns true if the IO request should be accounted, false if not.
+ * May sleep, if we have exceeded the writeback limits. Caller can pass
+ * in an irq held spinlock, if it holds one when calling this function.
+ * If we do sleep, we'll release and re-grab it.
+ */
+bool blk_wb_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+{
+ /*
+ * If disabled, or not a WRITE (or a discard), do nothing
+ */
+ if (!rwb_enabled(rwb) || !(bio->bi_rw & REQ_WRITE) ||
+ (bio->bi_rw & REQ_DISCARD))
+ goto no_q;
+
+ /*
+ * Don't throttle WRITE_ODIRECT
+ */
+ if ((bio->bi_rw & (REQ_SYNC | REQ_NOIDLE)) == REQ_SYNC)
+ goto no_q;
+
+ __blk_wb_wait(rwb, bio->bi_rw, lock);
+
+ if (!timer_pending(&rwb->window_timer))
+ rwb_arm_timer(rwb);
+
+ return true;
+
+no_q:
+ wb_timestamp(rwb, &rwb->last_issue);
+ return false;
+}
+
+void blk_wb_issue(struct rq_wb *rwb, struct request *rq)
+{
+ if (!rwb_enabled(rwb))
+ return;
+ if (!(rq->cmd_flags & REQ_BUF_INFLIGHT) && !rwb->sync_issue) {
+ rwb->sync_cookie = rq;
+ rwb->sync_issue = rq->issue_time;
+ }
+}
+
+void blk_wb_requeue(struct rq_wb *rwb, struct request *rq)
+{
+ if (!rwb_enabled(rwb))
+ return;
+ if (rq == rwb->sync_cookie) {
+ rwb->sync_issue = 0;
+ rwb->sync_cookie = NULL;
+ }
+}
+
+void blk_wb_init(struct request_queue *q)
+{
+ struct rq_wb *rwb;
+
+ /*
+ * If this fails, we don't get throttling
+ */
+ rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
+ if (!rwb)
+ return;
+
+ atomic_set(&rwb->inflight, 0);
+ init_waitqueue_head(&rwb->wait);
+ setup_timer(&rwb->window_timer, blk_wb_timer_fn, (unsigned long) rwb);
+ rwb->last_comp = rwb->last_issue = jiffies;
+ rwb->bdp_wait = &q->backing_dev_info.wb.dirty_sleeping;
+ rwb->q = q;
+
+ if (blk_queue_nonrot(q))
+ rwb->min_lat_nsec = RWB_NONROT_LAT;
+ else
+ rwb->min_lat_nsec = RWB_ROT_LAT;
+
+ blk_wb_update_limits(rwb);
+ q->rq_wb = rwb;
+}
+
+void blk_wb_exit(struct request_queue *q)
+{
+ struct rq_wb *rwb = q->rq_wb;
+
+ if (rwb) {
+ del_timer_sync(&rwb->window_timer);
+ kfree(q->rq_wb);
+ q->rq_wb = NULL;
+ }
+}
diff --git a/block/blk-wb.h b/block/blk-wb.h
new file mode 100644
index 000000000000..6ad47195bc87
--- /dev/null
+++ b/block/blk-wb.h
@@ -0,0 +1,42 @@
+#ifndef BLK_WB_H
+#define BLK_WB_H
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+
+struct rq_wb {
+ /*
+ * Settings that govern how we throttle
+ */
+ unsigned int wb_background; /* background writeback */
+ unsigned int wb_normal; /* normal writeback */
+ unsigned int wb_max; /* max throughput writeback */
+ unsigned int scale_step;
+
+ u64 win_nsec;
+
+ struct timer_list window_timer;
+
+ s64 sync_issue;
+ void *sync_cookie;
+
+ unsigned long last_issue; /* last non-throttled issue */
+ unsigned long last_comp; /* last non-throttled comp */
+ unsigned long min_lat_nsec;
+ atomic_t *bdp_wait;
+ struct request_queue *q;
+ atomic_t inflight;
+ wait_queue_head_t wait;
+};
+
+void __blk_wb_done(struct rq_wb *);
+void blk_wb_done(struct rq_wb *, struct request *);
+bool blk_wb_wait(struct rq_wb *, struct bio *, spinlock_t *);
+void blk_wb_init(struct request_queue *);
+void blk_wb_exit(struct request_queue *);
+void blk_wb_update_limits(struct rq_wb *);
+void blk_wb_requeue(struct rq_wb *, struct request *);
+void blk_wb_issue(struct rq_wb *, struct request *);
+
+#endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 2b4414fb4d8e..c41f8a303804 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -189,6 +189,7 @@ enum rq_flag_bits {
__REQ_PM, /* runtime pm request */
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
+ __REQ_BUF_INFLIGHT, /* track inflight for buffered */
__REQ_NR_BITS, /* stops here */
};
@@ -243,6 +244,7 @@ enum rq_flag_bits {
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_BUF_INFLIGHT (1ULL << __REQ_BUF_INFLIGHT)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 87f6703ced71..230c55dc95ae 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -37,6 +37,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
+struct rq_wb;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -291,6 +292,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ struct rq_wb *rq_wb;
+
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
* is used, root blkg allocates from @q->root_rl and all other
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index e8a5eca1dbe5..8ae9f47d5287 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -667,6 +667,104 @@ TRACE_EVENT(block_rq_remap,
(unsigned long long)__entry->old_sector, __entry->nr_bios)
);
+/**
+ * block_wb_stat - trace stats for blk_wb
+ * @stat: array of read/write stats
+ */
+TRACE_EVENT(block_wb_stat,
+
+ TP_PROTO(struct blk_rq_stat *stat),
+
+ TP_ARGS(stat),
+
+ TP_STRUCT__entry(
+ __field( s64, rmean )
+ __field( u64, rmin )
+ __field( u64, rmax )
+ __field( s64, rnr_samples )
+ __field( s64, rtime )
+ __field( s64, wmean )
+ __field( u64, wmin )
+ __field( u64, wmax )
+ __field( s64, wnr_samples )
+ __field( s64, wtime )
+ ),
+
+ TP_fast_assign(
+ __entry->rmean = stat[0].mean;
+ __entry->rmin = stat[0].min;
+ __entry->rmax = stat[0].max;
+ __entry->rnr_samples = stat[0].nr_samples;
+ __entry->wmean = stat[1].mean;
+ __entry->wmin = stat[1].min;
+ __entry->wmax = stat[1].max;
+ __entry->wnr_samples = stat[1].nr_samples;
+ ),
+
+ TP_printk("read lat: mean=%llu, min=%llu, max=%llu, samples=%llu,"
+ "write lat: mean=%llu, min=%llu, max=%llu, samples=%llu\n",
+ __entry->rmean, __entry->rmin, __entry->rmax,
+ __entry->rnr_samples, __entry->wmean, __entry->wmin,
+ __entry->wmax, __entry->wnr_samples)
+);
+
+/**
+ * block_wb_lat - trace latency event
+ * @lat: latency trigger
+ */
+TRACE_EVENT(block_wb_lat,
+
+ TP_PROTO(unsigned long lat),
+
+ TP_ARGS(lat),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, lat )
+ ),
+
+ TP_fast_assign(
+ __entry->lat = lat;
+ ),
+
+ TP_printk("Latency %llu\n", (unsigned long long) __entry->lat)
+);
+
+/**
+ * block_wb_step - trace wb event step
+ * @msg: context message
+ * @step: the current scale step count
+ * @bg: the current background queue limit
+ * @normal: the current normal writeback limit
+ * @max: the current max throughput writeback limit
+ */
+TRACE_EVENT(block_wb_step,
+
+ TP_PROTO(const char *msg, unsigned int step, unsigned int bg,
+ unsigned int normal, unsigned int max),
+
+ TP_ARGS(msg, step, bg, normal, max),
+
+ TP_STRUCT__entry(
+ __field( const char *, msg )
+ __field( unsigned int, step )
+ __field( unsigned int, bg )
+ __field( unsigned int, normal )
+ __field( unsigned int, max )
+ ),
+
+ TP_fast_assign(
+ __entry->msg = msg;
+ __entry->step = step;
+ __entry->bg = bg;
+ __entry->normal = normal;
+ __entry->max = max;
+ ),
+
+ TP_printk("%s: step=%u, background=%u, normal=%u, max=%u\n",
+ __entry->msg, __entry->step, __entry->bg, __entry->normal,
+ __entry->max)
+);
+
#endif /* _TRACE_BLOCK_H */
/* This part must be outside protection */
--
2.8.0.rc4.6.g7e4ba36
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [PATCH 8/8] writeback: throttle buffered writeback
2016-04-18 4:24 ` [PATCH 8/8] writeback: throttle buffered writeback Jens Axboe
@ 2016-04-23 8:21 ` xiakaixu
2016-04-23 21:37 ` Jens Axboe
0 siblings, 1 reply; 23+ messages in thread
From: xiakaixu @ 2016-04-23 8:21 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-kernel, linux-fsdevel, linux-block, jack, dchinner,
miaoxie (A),
Bintian, Huxinwei, Xia Kaixu
> diff --git a/block/blk-core.c b/block/blk-core.c
> index 40b57bf4852c..d941f69dfb4b 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -39,6 +39,7 @@
>
> #include "blk.h"
> #include "blk-mq.h"
> +#include "blk-wb.h"
>
> EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
> EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
> @@ -880,6 +881,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
>
> fail:
> blk_free_flush_queue(q->fq);
> + blk_wb_exit(q);
> return NULL;
> }
> EXPORT_SYMBOL(blk_init_allocated_queue);
> @@ -1395,6 +1397,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
> blk_delete_timer(rq);
> blk_clear_rq_complete(rq);
> trace_block_rq_requeue(q, rq);
> + blk_wb_requeue(q->rq_wb, rq);
>
> if (rq->cmd_flags & REQ_QUEUED)
> blk_queue_end_tag(q, rq);
> @@ -1485,6 +1488,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
> /* this is a bio leak */
> WARN_ON(req->bio != NULL);
>
> + blk_wb_done(q->rq_wb, req);
> +
> /*
> * Request may not have originated from ll_rw_blk. if not,
> * it didn't come out of our reserved rq pools
> @@ -1714,6 +1719,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
> int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
> struct request *req;
> unsigned int request_count = 0;
> + bool wb_acct;
>
> /*
> * low level driver can indicate that it wants pages above a
> @@ -1766,6 +1772,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
> }
>
> get_rq:
> + wb_acct = blk_wb_wait(q->rq_wb, bio, q->queue_lock);
> +
> /*
> * This sync check and mask will be re-done in init_request_from_bio(),
> * but we need to set it earlier to expose the sync flag to the
> @@ -1781,11 +1789,16 @@ get_rq:
> */
> req = get_request(q, rw_flags, bio, GFP_NOIO);
> if (IS_ERR(req)) {
> + if (wb_acct)
> + __blk_wb_done(q->rq_wb);
> bio->bi_error = PTR_ERR(req);
> bio_endio(bio);
> goto out_unlock;
> }
>
> + if (wb_acct)
> + req->cmd_flags |= REQ_BUF_INFLIGHT;
> +
> /*
> * After dropping the lock and possibly sleeping here, our request
> * may now be mergeable after it had proven unmergeable (above).
> @@ -2515,6 +2528,7 @@ void blk_start_request(struct request *req)
> blk_dequeue_request(req);
>
> req->issue_time = ktime_to_ns(ktime_get());
> + blk_wb_issue(req->q->rq_wb, req);
>
> /*
> * We are now handing the request to the hardware, initialize
> @@ -2751,6 +2765,7 @@ void blk_finish_request(struct request *req, int error)
> blk_unprep_request(req);
>
> blk_account_io_done(req);
> + blk_wb_done(req->q->rq_wb, req);
Hi Jens,
Seems the function blk_wb_done() will be executed twice even if the end_io
callback is set.
Maybe the same thing would happen in blk-mq.c.
>
> if (req->end_io)
> req->end_io(req, error);
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 71b4a13fbf94..c0c5207fe7fd 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -30,6 +30,7 @@
> #include "blk-mq.h"
> #include "blk-mq-tag.h"
> #include "blk-stat.h"
> +#include "blk-wb.h"
>
> static DEFINE_MUTEX(all_q_mutex);
> static LIST_HEAD(all_q_list);
> @@ -275,6 +276,9 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
>
> if (rq->cmd_flags & REQ_MQ_INFLIGHT)
> atomic_dec(&hctx->nr_active);
> +
> + blk_wb_done(q->rq_wb, rq);
> +
> rq->cmd_flags = 0;
>
> clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
> @@ -305,6 +309,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request);
> inline void __blk_mq_end_request(struct request *rq, int error)
> {
> blk_account_io_done(rq);
> + blk_wb_done(rq->q->rq_wb, rq);
>
> if (rq->end_io) {
> rq->end_io(rq, error);
> @@ -414,6 +419,7 @@ void blk_mq_start_request(struct request *rq)
> rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
>
> rq->issue_time = ktime_to_ns(ktime_get());
> + blk_wb_issue(q->rq_wb, rq);
>
> blk_add_timer(rq);
>
> @@ -450,6 +456,7 @@ static void __blk_mq_requeue_request(struct request *rq)
> struct request_queue *q = rq->q;
>
> trace_block_rq_requeue(q, rq);
> + blk_wb_requeue(q->rq_wb, rq);
>
> if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
> if (q->dma_drain_size && blk_rq_bytes(rq))
> @@ -1265,6 +1272,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
> struct blk_plug *plug;
> struct request *same_queue_rq = NULL;
> blk_qc_t cookie;
> + bool wb_acct;
>
> blk_queue_bounce(q, &bio);
>
> @@ -1282,9 +1290,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
> } else
> request_count = blk_plug_queued_count(q);
>
> + wb_acct = blk_wb_wait(q->rq_wb, bio, NULL);
> +
> rq = blk_mq_map_request(q, bio, &data);
> - if (unlikely(!rq))
> + if (unlikely(!rq)) {
> + if (wb_acct)
> + __blk_wb_done(q->rq_wb);
> return BLK_QC_T_NONE;
> + }
> +
> + if (wb_acct)
> + rq->cmd_flags |= REQ_BUF_INFLIGHT;
>
> cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
>
> @@ -1361,6 +1377,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
> struct blk_map_ctx data;
> struct request *rq;
> blk_qc_t cookie;
> + bool wb_acct;
>
> blk_queue_bounce(q, &bio);
>
> @@ -1375,9 +1392,17 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
> blk_attempt_plug_merge(q, bio, &request_count, NULL))
> return BLK_QC_T_NONE;
>
> + wb_acct = blk_wb_wait(q->rq_wb, bio, NULL);
> +
> rq = blk_mq_map_request(q, bio, &data);
> - if (unlikely(!rq))
> + if (unlikely(!rq)) {
> + if (wb_acct)
> + __blk_wb_done(q->rq_wb);
> return BLK_QC_T_NONE;
> + }
> +
> + if (wb_acct)
> + rq->cmd_flags |= REQ_BUF_INFLIGHT;
>
> cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
>
> @@ -2111,6 +2136,8 @@ void blk_mq_free_queue(struct request_queue *q)
> list_del_init(&q->all_q_node);
--
Regards
Kaixu Xia
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH 8/8] writeback: throttle buffered writeback
2016-04-23 8:21 ` xiakaixu
@ 2016-04-23 21:37 ` Jens Axboe
2016-04-25 11:41 ` xiakaixu
0 siblings, 1 reply; 23+ messages in thread
From: Jens Axboe @ 2016-04-23 21:37 UTC (permalink / raw)
To: xiakaixu
Cc: linux-kernel, linux-fsdevel, linux-block, jack, dchinner,
miaoxie (A),
Bintian, Huxinwei
On 04/23/2016 02:21 AM, xiakaixu wrote:
>> diff --git a/block/blk-core.c b/block/blk-core.c
>> index 40b57bf4852c..d941f69dfb4b 100644
>> --- a/block/blk-core.c
>> +++ b/block/blk-core.c
>> @@ -39,6 +39,7 @@
>>
>> #include "blk.h"
>> #include "blk-mq.h"
>> +#include "blk-wb.h"
>>
>> EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
>> EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
>> @@ -880,6 +881,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
>>
>> fail:
>> blk_free_flush_queue(q->fq);
>> + blk_wb_exit(q);
>> return NULL;
>> }
>> EXPORT_SYMBOL(blk_init_allocated_queue);
>> @@ -1395,6 +1397,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
>> blk_delete_timer(rq);
>> blk_clear_rq_complete(rq);
>> trace_block_rq_requeue(q, rq);
>> + blk_wb_requeue(q->rq_wb, rq);
>>
>> if (rq->cmd_flags & REQ_QUEUED)
>> blk_queue_end_tag(q, rq);
>> @@ -1485,6 +1488,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
>> /* this is a bio leak */
>> WARN_ON(req->bio != NULL);
>>
>> + blk_wb_done(q->rq_wb, req);
>> +
>> /*
>> * Request may not have originated from ll_rw_blk. if not,
>> * it didn't come out of our reserved rq pools
>> @@ -1714,6 +1719,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
>> int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
>> struct request *req;
>> unsigned int request_count = 0;
>> + bool wb_acct;
>>
>> /*
>> * low level driver can indicate that it wants pages above a
>> @@ -1766,6 +1772,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
>> }
>>
>> get_rq:
>> + wb_acct = blk_wb_wait(q->rq_wb, bio, q->queue_lock);
>> +
>> /*
>> * This sync check and mask will be re-done in init_request_from_bio(),
>> * but we need to set it earlier to expose the sync flag to the
>> @@ -1781,11 +1789,16 @@ get_rq:
>> */
>> req = get_request(q, rw_flags, bio, GFP_NOIO);
>> if (IS_ERR(req)) {
>> + if (wb_acct)
>> + __blk_wb_done(q->rq_wb);
>> bio->bi_error = PTR_ERR(req);
>> bio_endio(bio);
>> goto out_unlock;
>> }
>>
>> + if (wb_acct)
>> + req->cmd_flags |= REQ_BUF_INFLIGHT;
>> +
>> /*
>> * After dropping the lock and possibly sleeping here, our request
>> * may now be mergeable after it had proven unmergeable (above).
>> @@ -2515,6 +2528,7 @@ void blk_start_request(struct request *req)
>> blk_dequeue_request(req);
>>
>> req->issue_time = ktime_to_ns(ktime_get());
>> + blk_wb_issue(req->q->rq_wb, req);
>>
>> /*
>> * We are now handing the request to the hardware, initialize
>> @@ -2751,6 +2765,7 @@ void blk_finish_request(struct request *req, int error)
>> blk_unprep_request(req);
>>
>> blk_account_io_done(req);
>> + blk_wb_done(req->q->rq_wb, req);
>
> Hi Jens,
>
> Seems the function blk_wb_done() will be executed twice even if the end_io
> callback is set.
> Maybe the same thing would happen in blk-mq.c.
Yeah, that was a mistake, the current version has it fixed. It was
inadvertently added when I discovered that the flush request didn't work
properly. Now it just duplicates the call inside the check for if it has
an ->end_io() defined, since we don't use the normal path for that.
--
Jens Axboe
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH 8/8] writeback: throttle buffered writeback
2016-04-23 21:37 ` Jens Axboe
@ 2016-04-25 11:41 ` xiakaixu
2016-04-25 14:37 ` Jens Axboe
0 siblings, 1 reply; 23+ messages in thread
From: xiakaixu @ 2016-04-25 11:41 UTC (permalink / raw)
To: Jens Axboe
Cc: linux-kernel, linux-fsdevel, linux-block, jack, dchinner,
miaoxie (A),
Bintian, Huxinwei
On 2016/4/24 5:37, Jens Axboe wrote:
> On 04/23/2016 02:21 AM, xiakaixu wrote:
>>> diff --git a/block/blk-core.c b/block/blk-core.c
>>> index 40b57bf4852c..d941f69dfb4b 100644
>>> --- a/block/blk-core.c
>>> +++ b/block/blk-core.c
>>> @@ -39,6 +39,7 @@
>>>
>>> #include "blk.h"
>>> #include "blk-mq.h"
>>> +#include "blk-wb.h"
>>>
>>> EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
>>> EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
>>> @@ -880,6 +881,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
>>>
>>> fail:
>>> blk_free_flush_queue(q->fq);
>>> + blk_wb_exit(q);
>>> return NULL;
>>> }
>>> EXPORT_SYMBOL(blk_init_allocated_queue);
>>> @@ -1395,6 +1397,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
>>> blk_delete_timer(rq);
>>> blk_clear_rq_complete(rq);
>>> trace_block_rq_requeue(q, rq);
>>> + blk_wb_requeue(q->rq_wb, rq);
>>>
>>> if (rq->cmd_flags & REQ_QUEUED)
>>> blk_queue_end_tag(q, rq);
>>> @@ -1485,6 +1488,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
>>> /* this is a bio leak */
>>> WARN_ON(req->bio != NULL);
>>>
>>> + blk_wb_done(q->rq_wb, req);
>>> +
>>> /*
>>> * Request may not have originated from ll_rw_blk. if not,
>>> * it didn't come out of our reserved rq pools
>>> @@ -1714,6 +1719,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
>>> int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
>>> struct request *req;
>>> unsigned int request_count = 0;
>>> + bool wb_acct;
>>>
>>> /*
>>> * low level driver can indicate that it wants pages above a
>>> @@ -1766,6 +1772,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
>>> }
>>>
>>> get_rq:
>>> + wb_acct = blk_wb_wait(q->rq_wb, bio, q->queue_lock);
>>> +
>>> /*
>>> * This sync check and mask will be re-done in init_request_from_bio(),
>>> * but we need to set it earlier to expose the sync flag to the
>>> @@ -1781,11 +1789,16 @@ get_rq:
>>> */
>>> req = get_request(q, rw_flags, bio, GFP_NOIO);
>>> if (IS_ERR(req)) {
>>> + if (wb_acct)
>>> + __blk_wb_done(q->rq_wb);
>>> bio->bi_error = PTR_ERR(req);
>>> bio_endio(bio);
>>> goto out_unlock;
>>> }
>>>
>>> + if (wb_acct)
>>> + req->cmd_flags |= REQ_BUF_INFLIGHT;
>>> +
>>> /*
>>> * After dropping the lock and possibly sleeping here, our request
>>> * may now be mergeable after it had proven unmergeable (above).
>>> @@ -2515,6 +2528,7 @@ void blk_start_request(struct request *req)
>>> blk_dequeue_request(req);
>>>
>>> req->issue_time = ktime_to_ns(ktime_get());
>>> + blk_wb_issue(req->q->rq_wb, req);
>>>
>>> /*
>>> * We are now handing the request to the hardware, initialize
>>> @@ -2751,6 +2765,7 @@ void blk_finish_request(struct request *req, int error)
>>> blk_unprep_request(req);
>>>
>>> blk_account_io_done(req);
>>> + blk_wb_done(req->q->rq_wb, req);
>>
>> Hi Jens,
>>
>> Seems the function blk_wb_done() will be executed twice even if the end_io
>> callback is set.
>> Maybe the same thing would happen in blk-mq.c.
>
> Yeah, that was a mistake, the current version has it fixed. It was inadvertently added when I discovered that the flush request didn't work properly. Now it just duplicates the call inside the check for if it has an ->end_io() defined, since we don't use the normal path for that.
>
Hi Jens,
I have checked the wb-buf-throttle branch in your block git repo. I am not sure it is the final version.
It seems the problem is only fixed in blk-mq.c. The function blk_wb_done() would still be executed twice in blk-core.c
(in both blk_finish_request() and __blk_put_request()).
Maybe we can add a flag to mark whether blk_wb_done() has been done or not.
--
Regards
Kaixu Xia
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH 8/8] writeback: throttle buffered writeback
2016-04-25 11:41 ` xiakaixu
@ 2016-04-25 14:37 ` Jens Axboe
0 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-04-25 14:37 UTC (permalink / raw)
To: xiakaixu
Cc: linux-kernel, linux-fsdevel, linux-block, jack, dchinner,
miaoxie (A),
Bintian, Huxinwei
On 04/25/2016 05:41 AM, xiakaixu wrote:
> On 2016/4/24 5:37, Jens Axboe wrote:
>> On 04/23/2016 02:21 AM, xiakaixu wrote:
>>>> diff --git a/block/blk-core.c b/block/blk-core.c
>>>> index 40b57bf4852c..d941f69dfb4b 100644
>>>> --- a/block/blk-core.c
>>>> +++ b/block/blk-core.c
>>>> @@ -39,6 +39,7 @@
>>>>
>>>> #include "blk.h"
>>>> #include "blk-mq.h"
>>>> +#include "blk-wb.h"
>>>>
>>>> EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
>>>> EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
>>>> @@ -880,6 +881,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
>>>>
>>>> fail:
>>>> blk_free_flush_queue(q->fq);
>>>> + blk_wb_exit(q);
>>>> return NULL;
>>>> }
>>>> EXPORT_SYMBOL(blk_init_allocated_queue);
>>>> @@ -1395,6 +1397,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
>>>> blk_delete_timer(rq);
>>>> blk_clear_rq_complete(rq);
>>>> trace_block_rq_requeue(q, rq);
>>>> + blk_wb_requeue(q->rq_wb, rq);
>>>>
>>>> if (rq->cmd_flags & REQ_QUEUED)
>>>> blk_queue_end_tag(q, rq);
>>>> @@ -1485,6 +1488,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
>>>> /* this is a bio leak */
>>>> WARN_ON(req->bio != NULL);
>>>>
>>>> + blk_wb_done(q->rq_wb, req);
>>>> +
>>>> /*
>>>> * Request may not have originated from ll_rw_blk. if not,
>>>> * it didn't come out of our reserved rq pools
>>>> @@ -1714,6 +1719,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
>>>> int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
>>>> struct request *req;
>>>> unsigned int request_count = 0;
>>>> + bool wb_acct;
>>>>
>>>> /*
>>>> * low level driver can indicate that it wants pages above a
>>>> @@ -1766,6 +1772,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
>>>> }
>>>>
>>>> get_rq:
>>>> + wb_acct = blk_wb_wait(q->rq_wb, bio, q->queue_lock);
>>>> +
>>>> /*
>>>> * This sync check and mask will be re-done in init_request_from_bio(),
>>>> * but we need to set it earlier to expose the sync flag to the
>>>> @@ -1781,11 +1789,16 @@ get_rq:
>>>> */
>>>> req = get_request(q, rw_flags, bio, GFP_NOIO);
>>>> if (IS_ERR(req)) {
>>>> + if (wb_acct)
>>>> + __blk_wb_done(q->rq_wb);
>>>> bio->bi_error = PTR_ERR(req);
>>>> bio_endio(bio);
>>>> goto out_unlock;
>>>> }
>>>>
>>>> + if (wb_acct)
>>>> + req->cmd_flags |= REQ_BUF_INFLIGHT;
>>>> +
>>>> /*
>>>> * After dropping the lock and possibly sleeping here, our request
>>>> * may now be mergeable after it had proven unmergeable (above).
>>>> @@ -2515,6 +2528,7 @@ void blk_start_request(struct request *req)
>>>> blk_dequeue_request(req);
>>>>
>>>> req->issue_time = ktime_to_ns(ktime_get());
>>>> + blk_wb_issue(req->q->rq_wb, req);
>>>>
>>>> /*
>>>> * We are now handing the request to the hardware, initialize
>>>> @@ -2751,6 +2765,7 @@ void blk_finish_request(struct request *req, int error)
>>>> blk_unprep_request(req);
>>>>
>>>> blk_account_io_done(req);
>>>> + blk_wb_done(req->q->rq_wb, req);
>>>
>>> Hi Jens,
>>>
>>> Seems the function blk_wb_done() will be executed twice even if the end_io
>>> callback is set.
>>> Maybe the same thing would happen in blk-mq.c.
>>
>> Yeah, that was a mistake, the current version has it fixed. It was inadvertently added when I discovered that the flush request didn't work properly. Now it just duplicates the call inside the check for if it has an ->end_io() defined, since we don't use the normal path for that.
>>
> Hi Jens,
>
> I have checked the wb-buf-throttle branch in your block git repo. I am not sure it is the completed version.
> Seems only the problem is fixed in blk-mq.c. The function blk_wb_done() still would be executed twice in blk-core.c.
> (the functions blk_finish_request() and __blk_put_request())
> Maybe we can add a flag to mark whether blk_wb_done() has been done or not.
Good catch, looks like I only patched up the mq bits. It's still not
perfect, since we could potentially double account a request that has a
private end_io(), if it was allocated through the normal block rq
allocator. It'll skew the unrelated-io-timestamp a bit, but it's not a
big deal. The count for inflight will be consistent, which is the
important part.
We currently have just 1 bit to tell if the request is tracked or not,
so we don't know if it was tracked but already seen.
I'll fix up the blk-core part to be identical to the blk-mq fix.
--
Jens Axboe
^ permalink raw reply [flat|nested] 23+ messages in thread
* [PATCH 8/8] writeback: throttle buffered writeback
2016-03-23 15:25 Jens Axboe
@ 2016-03-23 15:25 ` Jens Axboe
0 siblings, 0 replies; 23+ messages in thread
From: Jens Axboe @ 2016-03-23 15:25 UTC (permalink / raw)
To: linux-kernel, linux-fsdevel, linux-block; +Cc: Jens Axboe
Test patch that throttles buffered writeback to make it a lot
smoother, so it has way less impact on other system activity.
Background writeback should be, by definition, background
activity. The fact that we flush huge bundles of it at a time
means that it potentially has a heavy impact on foreground workloads,
which isn't ideal. We can't easily limit the sizes of writes that
we do, since that would impact file system layout in the presence
of delayed allocation. So just throttle back buffered writeback,
unless someone is waiting for it.
This would likely need dynamic adaptation to the current device; this
one has only been tested on NVMe. But it brings background
activity impact down from 1-2s to tens of milliseconds.
This is just a test patch, and as such, it registers queue sysfs
entries to monitor the current state and tweak the throttling:
$ cat /sys/block/nvme0n1/queue/wb_stats
limit=4, batch=2, inflight=0, wait=0, timer=0
'limit' denotes how many requests we will allow inflight for buffered
writeback; this setting can be tweaked by writing to the
'wb_depth' file. Writing '0' turns this off completely. 'inflight' shows
how many requests are currently inflight for buffered writeback, 'wait'
shows if anyone is currently waiting for access, and 'timer' shows
if we have processes being deferred by the write back cache timeout.
Background buffered writeback will be throttled at depth 'wb_depth',
and even lower (QD=1) if the device recently completed "competing" IO.
If we are doing reclaim or otherwise sync buffered writeback, the limit
is increased 4x to achieve full device bandwidth.
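As a worked example with the default wb_depth of 4 (see
blk_buffered_writeback_wait() below): WB_SYNC_ALL writeback gets a
depth of 16, normal background writeback gets a depth of 2 (or the full
4 if someone is waiting in balance_dirty_pages()), and just 1 if the
device completed competing IO within the last 100 msec.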
Finally, if the device has write back caching, 'wb_cache_delay' delays
by this amount of usecs when a write completes before allowing more.
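For example (the cache delay is exposed as the 'wb_cache_usecs' file;
the device name is just for illustration):
$ echo 8 > /sys/block/nvme0n1/queue/wb_depth
$ echo 5000 > /sys/block/nvme0n1/queue/wb_cache_usecs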
Signed-off-by: Jens Axboe <axboe@fb.com>
---
block/Makefile | 2 +-
block/blk-core.c | 15 ++++
block/blk-mq.c | 32 ++++++-
block/blk-sysfs.c | 84 ++++++++++++++++++
block/blk-wb.c | 219 ++++++++++++++++++++++++++++++++++++++++++++++
block/blk-wb.h | 27 ++++++
include/linux/blk_types.h | 2 +
include/linux/blkdev.h | 3 +
8 files changed, 381 insertions(+), 3 deletions(-)
create mode 100644 block/blk-wb.c
create mode 100644 block/blk-wb.h
diff --git a/block/Makefile b/block/Makefile
index 9eda2322b2d4..9df911a3b569 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-lib.o blk-mq.o blk-mq-tag.o \
+ blk-lib.o blk-mq.o blk-mq-tag.o blk-wb.o \
blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
badblocks.o partitions/
diff --git a/block/blk-core.c b/block/blk-core.c
index 827f8badd143..887a9e64c6ef 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -39,6 +39,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-wb.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -848,6 +849,9 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
goto fail;
+ if (blk_buffered_writeback_init(q))
+ goto fail;
+
INIT_WORK(&q->timeout_work, blk_timeout_work);
q->request_fn = rfn;
q->prep_rq_fn = NULL;
@@ -880,6 +884,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
fail:
blk_free_flush_queue(q->fq);
+ blk_buffered_writeback_exit(q);
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1485,6 +1490,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
+ blk_buffered_writeback_done(q->rq_wb, req);
+
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -1714,6 +1721,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
+ bool wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -1766,6 +1774,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
+ wb_acct = blk_buffered_writeback_wait(q->rq_wb, bio, q->queue_lock);
+
/*
* This sync check and mask will be re-done in init_request_from_bio(),
* but we need to set it earlier to expose the sync flag to the
@@ -1781,11 +1791,16 @@ get_rq:
*/
req = get_request(q, rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) {
+ if (wb_acct)
+ __blk_buffered_writeback_done(q->rq_wb);
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}
+ if (wb_acct)
+ req->cmd_flags |= REQ_BUF_INFLIGHT;
+
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 050f7a13021b..55aace97fd35 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -29,6 +29,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
+#include "blk-wb.h"
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
@@ -274,6 +275,9 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
if (rq->cmd_flags & REQ_MQ_INFLIGHT)
atomic_dec(&hctx->nr_active);
+
+ blk_buffered_writeback_done(q->rq_wb, rq);
+
rq->cmd_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
@@ -1253,6 +1257,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
+ bool wb_acct;
blk_queue_bounce(q, &bio);
@@ -1270,9 +1275,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
} else
request_count = blk_plug_queued_count(q);
+ wb_acct = blk_buffered_writeback_wait(q->rq_wb, bio, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct)
+ __blk_buffered_writeback_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ if (wb_acct)
+ rq->cmd_flags |= REQ_BUF_INFLIGHT;
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1349,6 +1362,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
struct blk_map_ctx data;
struct request *rq;
blk_qc_t cookie;
+ bool wb_acct;
blk_queue_bounce(q, &bio);
@@ -1363,9 +1377,17 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
+ wb_acct = blk_buffered_writeback_wait(q->rq_wb, bio, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ if (wb_acct)
+ __blk_buffered_writeback_done(q->rq_wb);
return BLK_QC_T_NONE;
+ }
+
+ if (wb_acct)
+ rq->cmd_flags |= REQ_BUF_INFLIGHT;
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -2018,6 +2040,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/* mark the queue as mq asap */
q->mq_ops = set->ops;
+ if (blk_buffered_writeback_init(q))
+ return ERR_PTR(-ENOMEM);
+
q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
if (!q->queue_ctx)
return ERR_PTR(-ENOMEM);
@@ -2084,6 +2109,7 @@ err_map:
kfree(q->queue_hw_ctx);
err_percpu:
free_percpu(q->queue_ctx);
+ blk_buffered_writeback_exit(q);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);
@@ -2096,6 +2122,8 @@ void blk_mq_free_queue(struct request_queue *q)
list_del_init(&q->all_q_node);
mutex_unlock(&all_q_mutex);
+ blk_buffered_writeback_exit(q);
+
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 954e510452d7..9ac9be23e700 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -13,6 +13,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-wb.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -347,6 +348,71 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_wb_stats_show(struct request_queue *q, char *page)
+{
+ struct rq_wb *wb = q->rq_wb;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "limit=%d, batch=%d, inflight=%d, wait=%d, timer=%d\n",
+ wb->limit, wb->batch, atomic_read(&wb->inflight),
+ waitqueue_active(&wb->wait), timer_pending(&wb->timer));
+}
+
+static ssize_t queue_wb_depth_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return queue_var_show(q->rq_wb->limit, page);
+}
+
+static ssize_t queue_wb_depth_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long var;
+ ssize_t ret;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ ret = queue_var_store(&var, page, count);
+ if (ret < 0)
+ return ret;
+ if (var != (unsigned int) var)
+ return -EINVAL;
+
+ blk_update_wb_limit(q->rq_wb, var);
+ return ret;
+}
+
+static ssize_t queue_wb_cache_delay_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return queue_var_show(q->rq_wb->cache_delay_usecs, page);
+}
+
+static ssize_t queue_wb_cache_delay_store(struct request_queue *q,
+ const char *page, size_t count)
+{
+ unsigned long var;
+ ssize_t ret;
+
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ ret = queue_var_store(&var, page, count);
+ if (ret < 0)
+ return ret;
+
+ q->rq_wb->cache_delay_usecs = var;
+ q->rq_wb->cache_delay = usecs_to_jiffies(var);
+ return ret;
+}
+
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -516,6 +582,21 @@ static struct queue_sysfs_entry queue_wc_entry = {
.store = queue_wc_store,
};
+static struct queue_sysfs_entry queue_wb_stats_entry = {
+ .attr = {.name = "wb_stats", .mode = S_IRUGO },
+ .show = queue_wb_stats_show,
+};
+static struct queue_sysfs_entry queue_wb_cache_delay_entry = {
+ .attr = {.name = "wb_cache_usecs", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_cache_delay_show,
+ .store = queue_wb_cache_delay_store,
+};
+static struct queue_sysfs_entry queue_wb_depth_entry = {
+ .attr = {.name = "wb_depth", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_depth_show,
+ .store = queue_wb_depth_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -542,6 +623,9 @@ static struct attribute *default_attrs[] = {
&queue_random_entry.attr,
&queue_poll_entry.attr,
&queue_wc_entry.attr,
+ &queue_wb_stats_entry.attr,
+ &queue_wb_cache_delay_entry.attr,
+ &queue_wb_depth_entry.attr,
NULL,
};
diff --git a/block/blk-wb.c b/block/blk-wb.c
new file mode 100644
index 000000000000..2aa3753a8e1e
--- /dev/null
+++ b/block/blk-wb.c
@@ -0,0 +1,219 @@
+/*
+ * buffered writeback throttling
+ *
+ * Copyright (C) 2016 Jens Axboe
+ *
+ * Things that need changing:
+ *
+ * - Auto-detection of most of this, no tunables. Cache type we can get,
+ * and most other settings we can tweak/gather based on time.
+ * - Better solution for rwb->bdp_wait?
+ * - Higher depth for WB_SYNC_ALL?
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+
+#include "blk.h"
+#include "blk-wb.h"
+
+void __blk_buffered_writeback_done(struct rq_wb *rwb)
+{
+ int inflight;
+
+ inflight = atomic_dec_return(&rwb->inflight);
+ if (inflight >= rwb->limit)
+ return;
+
+ /*
+ * If the device does caching, we can still flood it with IO
+ * even at a low depth. If caching is on, delay a bit before
+ * submitting the next, if we're still purely background
+ * activity.
+ */
+ if (test_bit(QUEUE_FLAG_WC, &rwb->q->queue_flags) && !*rwb->bdp_wait &&
+ time_before(jiffies, rwb->last_comp + rwb->cache_delay)) {
+ if (!timer_pending(&rwb->timer))
+ mod_timer(&rwb->timer, jiffies + rwb->cache_delay);
+ return;
+ }
+
+ if (waitqueue_active(&rwb->wait)) {
+ int diff = rwb->limit - inflight;
+
+ if (diff >= rwb->batch)
+ wake_up_nr(&rwb->wait, 1);
+ }
+}
+
+/*
+ * Called on completion of a request. Note that it's also called when
+ * a request is merged, when the request gets freed.
+ */
+void blk_buffered_writeback_done(struct rq_wb *rwb, struct request *rq)
+{
+ if (!(rq->cmd_flags & REQ_BUF_INFLIGHT)) {
+ const unsigned long cur = jiffies;
+
+ if (rwb->limit && cur != rwb->last_comp)
+ rwb->last_comp = cur;
+ } else
+ __blk_buffered_writeback_done(rwb);
+}
+
+/*
+ * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
+ * false if 'v' + 1 would be bigger than 'below'.
+ */
+static bool atomic_inc_below(atomic_t *v, int below)
+{
+ int cur = atomic_read(v);
+
+ for (;;) {
+ int old;
+
+ if (cur >= below)
+ return false;
+ old = atomic_cmpxchg(v, cur, cur + 1);
+ if (old == cur)
+ break;
+ cur = old;
+ }
+
+ return true;
+}
+
+/*
+ * Block if we will exceed our limit, or if we are currently waiting for
+ * the timer to kick off queuing again.
+ */
+static void __blk_buffered_writeback_wait(struct rq_wb *rwb, unsigned int limit,
+ spinlock_t *lock)
+{
+ DEFINE_WAIT(wait);
+
+ if (!timer_pending(&rwb->timer) &&
+ atomic_inc_below(&rwb->inflight, limit))
+ return;
+
+ do {
+ prepare_to_wait_exclusive(&rwb->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ if (!timer_pending(&rwb->timer) &&
+ atomic_inc_below(&rwb->inflight, limit))
+ break;
+
+ if (lock)
+ spin_unlock_irq(lock);
+
+ io_schedule();
+
+ if (lock)
+ spin_lock_irq(lock);
+ } while (1);
+
+ finish_wait(&rwb->wait, &wait);
+}
+
+/*
+ * Returns true if the IO request should be accounted, false if not.
+ * May sleep if we have exceeded the writeback limit. The caller can pass
+ * in the irq-held spinlock it holds when calling this function; if we do
+ * sleep, we'll release and re-grab it.
+ */
+bool blk_buffered_writeback_wait(struct rq_wb *rwb, struct bio *bio,
+ spinlock_t *lock)
+{
+ unsigned int limit;
+
+ /*
+ * If disabled, or if this is not a WRITE or is a discard, do nothing
+ */
+ if (!rwb->limit || !(bio->bi_rw & REQ_WRITE) ||
+ (bio->bi_rw & REQ_DISCARD))
+ return false;
+
+ /*
+ * Don't throttle WRITE_ODIRECT
+ */
+ if ((bio->bi_rw & (REQ_SYNC | REQ_NOIDLE)) == REQ_SYNC)
+ return false;
+
+ /*
+ * At this point we know it's a buffered write. If REQ_SYNC is
+ * set, then it's WB_SYNC_ALL writeback. Bump the limit 4x for
+ * those, since someone is (or will be) waiting on that.
+ */
+ limit = rwb->limit;
+ if (bio->bi_rw & REQ_SYNC)
+ limit <<= 2;
+ else if (limit != 1) {
+ /*
+ * If less than 100ms since we completed unrelated IO,
+ * limit us to a depth of 1 for background writeback.
+ */
+ if (time_before(jiffies, rwb->last_comp + HZ / 10))
+ limit = 1;
+ else if (!*rwb->bdp_wait)
+ limit >>= 1;
+ }
+
+ __blk_buffered_writeback_wait(rwb, limit, lock);
+ return true;
+}
+
+void blk_update_wb_limit(struct rq_wb *rwb, unsigned int limit)
+{
+ rwb->limit = limit;
+ rwb->batch = rwb->limit / 2;
+ if (!rwb->batch && rwb->limit)
+ rwb->batch = 1;
+ else if (rwb->batch > 4)
+ rwb->batch = 4;
+
+ wake_up_all(&rwb->wait);
+}
+
+static void blk_buffered_writeback_timer(unsigned long data)
+{
+ struct rq_wb *rwb = (struct rq_wb *) data;
+
+ if (waitqueue_active(&rwb->wait))
+ wake_up_nr(&rwb->wait, 1);
+}
+
+#define DEF_WB_LIMIT 4
+#define DEF_WB_CACHE_DELAY 10000
+
+int blk_buffered_writeback_init(struct request_queue *q)
+{
+ struct rq_wb *rwb;
+
+ rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
+ if (!rwb)
+ return -ENOMEM;
+
+ atomic_set(&rwb->inflight, 0);
+ init_waitqueue_head(&rwb->wait);
+ rwb->last_comp = jiffies;
+ rwb->bdp_wait = &q->backing_dev_info.wb.dirty_sleeping;
+ setup_timer(&rwb->timer, blk_buffered_writeback_timer,
+ (unsigned long) rwb);
+ rwb->cache_delay_usecs = DEF_WB_CACHE_DELAY;
+ rwb->cache_delay = usecs_to_jiffies(rwb->cache_delay_usecs);
+ rwb->q = q;
+ blk_update_wb_limit(rwb, DEF_WB_LIMIT);
+ q->rq_wb = rwb;
+ return 0;
+}
+
+void blk_buffered_writeback_exit(struct request_queue *q)
+{
+ if (q->rq_wb)
+ del_timer_sync(&q->rq_wb->timer);
+
+ kfree(q->rq_wb);
+ q->rq_wb = NULL;
+}
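
As a rough illustration of how the two hooks above are meant to be consumed (the real call sites live elsewhere in the patchset; the function names below are hypothetical stand-ins, not code from this patch):

/*
 * Hypothetical caller sketch, not part of this patch: throttle buffered
 * writeback at submission time and credit it back at completion time.
 */
static void example_submit(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	/*
	 * May sleep if we are over the current limit. The caller is
	 * assumed to hold q->queue_lock with irqs disabled; passing it
	 * lets the sleep drop and re-grab it. A true return means the
	 * IO is tracked, so mark the request for the completion side.
	 */
	if (blk_buffered_writeback_wait(q->rq_wb, bio, q->queue_lock))
		rq->cmd_flags |= REQ_BUF_INFLIGHT;
}

static void example_complete(struct request_queue *q, struct request *rq)
{
	/*
	 * Drops the inflight count for tracked requests (or just refreshes
	 * last_comp for untracked ones) and wakes the next throttled
	 * writer once enough slots have freed up.
	 */
	blk_buffered_writeback_done(q->rq_wb, rq);
}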
diff --git a/block/blk-wb.h b/block/blk-wb.h
new file mode 100644
index 000000000000..f3b4cd139815
--- /dev/null
+++ b/block/blk-wb.h
@@ -0,0 +1,27 @@
+#ifndef BLK_WB_H
+#define BLK_WB_H
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+
+struct rq_wb {
+ unsigned int limit; /* max tracked writeback requests in flight */
+ unsigned int batch; /* free slots needed before waking a waiter */
+ unsigned int cache_delay; /* write cache delay, in jiffies */
+ unsigned int cache_delay_usecs; /* write cache delay, in usecs (sysfs) */
+ unsigned long last_comp; /* time of last untracked completion, jiffies */
+ unsigned int *bdp_wait; /* nonzero while a task waits in balance_dirty_pages() */
+ struct request_queue *q; /* owning queue */
+ atomic_t inflight; /* tracked writeback requests in flight */
+ wait_queue_head_t wait; /* throttled submitters sleep here */
+ struct timer_list timer; /* delayed wakeup when write caching is enabled */
+};
+
+void __blk_buffered_writeback_done(struct rq_wb *);
+void blk_buffered_writeback_done(struct rq_wb *, struct request *);
+bool blk_buffered_writeback_wait(struct rq_wb *, struct bio *, spinlock_t *);
+int blk_buffered_writeback_init(struct request_queue *);
+void blk_buffered_writeback_exit(struct request_queue *);
+void blk_update_wb_limit(struct rq_wb *, unsigned int);
+
+#endif
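
A matching lifecycle sketch, again with hypothetical function names standing in for wherever queue allocation and release end up calling these:

/*
 * Hypothetical lifecycle sketch, not part of this patch.
 */
static int example_queue_setup_wb(struct request_queue *q)
{
	/* Allocates q->rq_wb and arms the defaults: depth 4, 10ms cache delay */
	return blk_buffered_writeback_init(q);
}

static void example_queue_teardown_wb(struct request_queue *q)
{
	/* Kills any pending delay timer and frees q->rq_wb */
	blk_buffered_writeback_exit(q);
}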
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 86a38ea1823f..6f2a174b771c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -188,6 +188,7 @@ enum rq_flag_bits {
__REQ_PM, /* runtime pm request */
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
+ __REQ_BUF_INFLIGHT, /* track inflight for buffered */
__REQ_NR_BITS, /* stops here */
};
@@ -241,6 +242,7 @@ enum rq_flag_bits {
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_BUF_INFLIGHT (1ULL << __REQ_BUF_INFLIGHT)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 76e875159e52..8586685bf7b2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -37,6 +37,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
+struct rq_wb;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -290,6 +291,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ struct rq_wb *rq_wb;
+
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
* is used, root blkg allocates from @q->root_rl and all other
--
2.4.1.168.g1ea28e1
^ permalink raw reply related [flat|nested] 23+ messages in thread