* [PATCH 1/2] blk-cgroup: refactor blkcg_print_stat
@ 2021-08-10 15:26 Christoph Hellwig
2021-08-10 15:26 ` Christoph Hellwig
` (3 more replies)
0 siblings, 4 replies; 13+ messages in thread
From: Christoph Hellwig @ 2021-08-10 15:26 UTC (permalink / raw)
To: axboe; +Cc: tj, cgroups, linux-block
Factor out a helper to deal with a single blkcg_gq to make the code a
little bit easier to follow.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-cgroup.c | 148 ++++++++++++++++++++++-----------------------
1 file changed, 74 insertions(+), 74 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index db034e35ae20..52aa0540ccaf 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -870,97 +870,97 @@ static void blkcg_fill_root_iostats(void)
}
}
-static int blkcg_print_stat(struct seq_file *sf, void *v)
+static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
- struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
- struct blkcg_gq *blkg;
-
- if (!seq_css(sf)->parent)
- blkcg_fill_root_iostats();
- else
- cgroup_rstat_flush(blkcg->css.cgroup);
-
- rcu_read_lock();
+ struct blkg_iostat_set *bis = &blkg->iostat;
+ u64 rbytes, wbytes, rios, wios, dbytes, dios;
+ bool has_stats = false;
+ const char *dname;
+ unsigned seq;
+ char *buf;
+ size_t size = seq_get_buf(s, &buf), off = 0;
+ int i;
- hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
- struct blkg_iostat_set *bis = &blkg->iostat;
- const char *dname;
- char *buf;
- u64 rbytes, wbytes, rios, wios, dbytes, dios;
- size_t size = seq_get_buf(sf, &buf), off = 0;
- int i;
- bool has_stats = false;
- unsigned seq;
+ if (!blkg->online)
+ return;
- spin_lock_irq(&blkg->q->queue_lock);
+ dname = blkg_dev_name(blkg);
+ if (!dname)
+ return;
- if (!blkg->online)
- goto skip;
+ /*
+ * Hooray string manipulation, count is the size written NOT
+ * INCLUDING THE \0, so size is now count+1 less than what we
+ * had before, but we want to start writing the next bit from
+ * the \0 so we only add count to buf.
+ */
+ off += scnprintf(buf+off, size-off, "%s ", dname);
- dname = blkg_dev_name(blkg);
- if (!dname)
- goto skip;
+ do {
+ seq = u64_stats_fetch_begin(&bis->sync);
+
+ rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
+ wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
+ dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
+ rios = bis->cur.ios[BLKG_IOSTAT_READ];
+ wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
+ dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
+ } while (u64_stats_fetch_retry(&bis->sync, seq));
+
+ if (rbytes || wbytes || rios || wios) {
+ has_stats = true;
+ off += scnprintf(buf+off, size-off,
+ "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
+ rbytes, wbytes, rios, wios,
+ dbytes, dios);
+ }
- /*
- * Hooray string manipulation, count is the size written NOT
- * INCLUDING THE \0, so size is now count+1 less than what we
- * had before, but we want to start writing the next bit from
- * the \0 so we only add count to buf.
- */
- off += scnprintf(buf+off, size-off, "%s ", dname);
+ if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
+ has_stats = true;
+ off += scnprintf(buf+off, size-off, " use_delay=%d delay_nsec=%llu",
+ atomic_read(&blkg->use_delay),
+ atomic64_read(&blkg->delay_nsec));
+ }
- do {
- seq = u64_stats_fetch_begin(&bis->sync);
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ size_t written;
- rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
- wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
- dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
- rios = bis->cur.ios[BLKG_IOSTAT_READ];
- wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
- dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
- } while (u64_stats_fetch_retry(&bis->sync, seq));
+ if (!blkg->pd[i] || !pol->pd_stat_fn)
+ continue;
- if (rbytes || wbytes || rios || wios) {
+ written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
+ if (written)
has_stats = true;
- off += scnprintf(buf+off, size-off,
- "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
- rbytes, wbytes, rios, wios,
- dbytes, dios);
- }
+ off += written;
+ }
- if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
- has_stats = true;
- off += scnprintf(buf+off, size-off,
- " use_delay=%d delay_nsec=%llu",
- atomic_read(&blkg->use_delay),
- (unsigned long long)atomic64_read(&blkg->delay_nsec));
+ if (has_stats) {
+ if (off < size - 1) {
+ off += scnprintf(buf+off, size-off, "\n");
+ seq_commit(s, off);
+ } else {
+ seq_commit(s, -1);
}
+ }
+}
- for (i = 0; i < BLKCG_MAX_POLS; i++) {
- struct blkcg_policy *pol = blkcg_policy[i];
- size_t written;
-
- if (!blkg->pd[i] || !pol->pd_stat_fn)
- continue;
+static int blkcg_print_stat(struct seq_file *sf, void *v)
+{
+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+ struct blkcg_gq *blkg;
- written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
- if (written)
- has_stats = true;
- off += written;
- }
+ if (!seq_css(sf)->parent)
+ blkcg_fill_root_iostats();
+ else
+ cgroup_rstat_flush(blkcg->css.cgroup);
- if (has_stats) {
- if (off < size - 1) {
- off += scnprintf(buf+off, size-off, "\n");
- seq_commit(sf, off);
- } else {
- seq_commit(sf, -1);
- }
- }
- skip:
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ spin_lock_irq(&blkg->q->queue_lock);
+ blkcg_print_one_stat(blkg, sf);
spin_unlock_irq(&blkg->q->queue_lock);
}
-
rcu_read_unlock();
return 0;
}
--
2.30.2
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 2/2] blk-cgroup: stop using seq_get_buf
@ 2021-08-10 15:26 ` Christoph Hellwig
0 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2021-08-10 15:26 UTC (permalink / raw)
To: axboe; +Cc: tj, cgroups, linux-block
seq_get_buf is a crutch that undoes all the memory safety of the
seq_file interface. Use the normal seq_printf interfaces instead.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-cgroup.c | 30 ++++++------------------------
block/blk-iocost.c | 23 +++++++++--------------
block/blk-iolatency.c | 38 +++++++++++++++++++-------------------
block/mq-deadline-cgroup.c | 8 +++-----
include/linux/blk-cgroup.h | 4 ++--
5 files changed, 39 insertions(+), 64 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 52aa0540ccaf..b8ec47dcce42 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -877,8 +877,6 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
bool has_stats = false;
const char *dname;
unsigned seq;
- char *buf;
- size_t size = seq_get_buf(s, &buf), off = 0;
int i;
if (!blkg->online)
@@ -888,13 +886,7 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
if (!dname)
return;
- /*
- * Hooray string manipulation, count is the size written NOT
- * INCLUDING THE \0, so size is now count+1 less than what we
- * had before, but we want to start writing the next bit from
- * the \0 so we only add count to buf.
- */
- off += scnprintf(buf+off, size-off, "%s ", dname);
+ seq_printf(s, "%s ", dname);
do {
seq = u64_stats_fetch_begin(&bis->sync);
@@ -909,40 +901,30 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
if (rbytes || wbytes || rios || wios) {
has_stats = true;
- off += scnprintf(buf+off, size-off,
- "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
+ seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
rbytes, wbytes, rios, wios,
dbytes, dios);
}
if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
has_stats = true;
- off += scnprintf(buf+off, size-off, " use_delay=%d delay_nsec=%llu",
+ seq_printf(s, " use_delay=%d delay_nsec=%llu",
atomic_read(&blkg->use_delay),
atomic64_read(&blkg->delay_nsec));
}
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
- size_t written;
if (!blkg->pd[i] || !pol->pd_stat_fn)
continue;
- written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
- if (written)
+ if (pol->pd_stat_fn(blkg->pd[i], s))
has_stats = true;
- off += written;
}
- if (has_stats) {
- if (off < size - 1) {
- off += scnprintf(buf+off, size-off, "\n");
- seq_commit(s, off);
- } else {
- seq_commit(s, -1);
- }
- }
+ if (has_stats)
+ seq_printf(s, "\n");
}
static int blkcg_print_stat(struct seq_file *sf, void *v)
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 5fac3757e6e0..89b21a360b2c 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2988,34 +2988,29 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
kfree(iocg);
}
-static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
+static bool ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
struct ioc_gq *iocg = pd_to_iocg(pd);
struct ioc *ioc = iocg->ioc;
- size_t pos = 0;
if (!ioc->enabled)
- return 0;
+ return false;
if (iocg->level == 0) {
unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
ioc->vtime_base_rate * 10000,
VTIME_PER_USEC);
- pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
- vp10k / 100, vp10k % 100);
+ seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
}
- pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
- iocg->last_stat.usage_us);
+ seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
if (blkcg_debug_stats)
- pos += scnprintf(buf + pos, size - pos,
- " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
- iocg->last_stat.wait_us,
- iocg->last_stat.indebt_us,
- iocg->last_stat.indelay_us);
-
- return pos;
+ seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
+ iocg->last_stat.wait_us,
+ iocg->last_stat.indebt_us,
+ iocg->last_stat.indelay_us);
+ return true;
}
static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index d8b0d8bd132b..c0545f9da549 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -890,8 +890,7 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
return 0;
}
-static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
- size_t size)
+static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
{
struct latency_stat stat;
int cpu;
@@ -906,39 +905,40 @@ static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
preempt_enable();
if (iolat->rq_depth.max_depth == UINT_MAX)
- return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
- (unsigned long long)stat.ps.missed,
- (unsigned long long)stat.ps.total);
- return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
- (unsigned long long)stat.ps.missed,
- (unsigned long long)stat.ps.total,
- iolat->rq_depth.max_depth);
+ seq_printf(s, " missed=%llu total=%llu depth=max",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total);
+ else
+ seq_printf(s, " missed=%llu total=%llu depth=%u",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total,
+ iolat->rq_depth.max_depth);
+ return true;
}
-static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
- size_t size)
+static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
unsigned long long avg_lat;
unsigned long long cur_win;
if (!blkcg_debug_stats)
- return 0;
+ return false;
if (iolat->ssd)
- return iolatency_ssd_stat(iolat, buf, size);
+ return iolatency_ssd_stat(iolat, s);
avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
if (iolat->rq_depth.max_depth == UINT_MAX)
- return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
- avg_lat, cur_win);
-
- return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
- iolat->rq_depth.max_depth, avg_lat, cur_win);
+ seq_printf(s, " depth=max avg_lat=%llu win=%llu",
+ avg_lat, cur_win);
+ else
+ seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
+ iolat->rq_depth.max_depth, avg_lat, cur_win);
+ return true;
}
-
static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
struct request_queue *q,
struct blkcg *blkcg)
diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c
index 3b4bfddec39f..b48a4b962f90 100644
--- a/block/mq-deadline-cgroup.c
+++ b/block/mq-deadline-cgroup.c
@@ -52,7 +52,7 @@ struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
return dd_blkcg_from_pd(pd);
}
-static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
+static bool dd_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
static const char *const prio_class_name[] = {
[IOPRIO_CLASS_NONE] = "NONE",
@@ -61,12 +61,10 @@ static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
[IOPRIO_CLASS_IDLE] = "IDLE",
};
struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
- int res = 0;
u8 prio;
for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
- res += scnprintf(buf + res, size - res,
- " [%s] dispatched=%u inserted=%u merged=%u",
+ seq_printf(s, " [%s] dispatched=%u inserted=%u merged=%u",
prio_class_name[prio],
ddcg_sum(blkcg, dispatched, prio) +
ddcg_sum(blkcg, merged, prio) -
@@ -75,7 +73,7 @@ static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
ddcg_sum(blkcg, completed, prio),
ddcg_sum(blkcg, merged, prio));
- return res;
+ return true;
}
static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 37048438872c..b4de2010fba5 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -152,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
- size_t size);
+typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
+ struct seq_file *s);
struct blkcg_policy {
int plid;
--
2.30.2
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 2/2] blk-cgroup: stop using seq_get_buf
@ 2021-08-10 15:26 ` Christoph Hellwig
0 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2021-08-10 15:26 UTC (permalink / raw)
To: axboe-tSWWG44O7X1aa/9Udqfwiw
Cc: tj-DgEjT+Ai2ygdnm+yROfE0A, cgroups-u79uwXL29TY76Z2rM5mHXA,
linux-block-u79uwXL29TY76Z2rM5mHXA
seq_get_buf is a crutch that undoes all the memory safety of the
seq_file interface. Use the normal seq_printf interfaces instead.
Signed-off-by: Christoph Hellwig <hch-jcswGhMUV9g@public.gmane.org>
---
block/blk-cgroup.c | 30 ++++++------------------------
block/blk-iocost.c | 23 +++++++++--------------
block/blk-iolatency.c | 38 +++++++++++++++++++-------------------
block/mq-deadline-cgroup.c | 8 +++-----
include/linux/blk-cgroup.h | 4 ++--
5 files changed, 39 insertions(+), 64 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 52aa0540ccaf..b8ec47dcce42 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -877,8 +877,6 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
bool has_stats = false;
const char *dname;
unsigned seq;
- char *buf;
- size_t size = seq_get_buf(s, &buf), off = 0;
int i;
if (!blkg->online)
@@ -888,13 +886,7 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
if (!dname)
return;
- /*
- * Hooray string manipulation, count is the size written NOT
- * INCLUDING THE \0, so size is now count+1 less than what we
- * had before, but we want to start writing the next bit from
- * the \0 so we only add count to buf.
- */
- off += scnprintf(buf+off, size-off, "%s ", dname);
+ seq_printf(s, "%s ", dname);
do {
seq = u64_stats_fetch_begin(&bis->sync);
@@ -909,40 +901,30 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
if (rbytes || wbytes || rios || wios) {
has_stats = true;
- off += scnprintf(buf+off, size-off,
- "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
+ seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
rbytes, wbytes, rios, wios,
dbytes, dios);
}
if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
has_stats = true;
- off += scnprintf(buf+off, size-off, " use_delay=%d delay_nsec=%llu",
+ seq_printf(s, " use_delay=%d delay_nsec=%llu",
atomic_read(&blkg->use_delay),
atomic64_read(&blkg->delay_nsec));
}
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
- size_t written;
if (!blkg->pd[i] || !pol->pd_stat_fn)
continue;
- written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
- if (written)
+ if (pol->pd_stat_fn(blkg->pd[i], s))
has_stats = true;
- off += written;
}
- if (has_stats) {
- if (off < size - 1) {
- off += scnprintf(buf+off, size-off, "\n");
- seq_commit(s, off);
- } else {
- seq_commit(s, -1);
- }
- }
+ if (has_stats)
+ seq_printf(s, "\n");
}
static int blkcg_print_stat(struct seq_file *sf, void *v)
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 5fac3757e6e0..89b21a360b2c 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2988,34 +2988,29 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
kfree(iocg);
}
-static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
+static bool ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
struct ioc_gq *iocg = pd_to_iocg(pd);
struct ioc *ioc = iocg->ioc;
- size_t pos = 0;
if (!ioc->enabled)
- return 0;
+ return false;
if (iocg->level == 0) {
unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
ioc->vtime_base_rate * 10000,
VTIME_PER_USEC);
- pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
- vp10k / 100, vp10k % 100);
+ seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
}
- pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
- iocg->last_stat.usage_us);
+ seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
if (blkcg_debug_stats)
- pos += scnprintf(buf + pos, size - pos,
- " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
- iocg->last_stat.wait_us,
- iocg->last_stat.indebt_us,
- iocg->last_stat.indelay_us);
-
- return pos;
+ seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
+ iocg->last_stat.wait_us,
+ iocg->last_stat.indebt_us,
+ iocg->last_stat.indelay_us);
+ return true;
}
static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index d8b0d8bd132b..c0545f9da549 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -890,8 +890,7 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
return 0;
}
-static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
- size_t size)
+static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
{
struct latency_stat stat;
int cpu;
@@ -906,39 +905,40 @@ static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
preempt_enable();
if (iolat->rq_depth.max_depth == UINT_MAX)
- return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
- (unsigned long long)stat.ps.missed,
- (unsigned long long)stat.ps.total);
- return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
- (unsigned long long)stat.ps.missed,
- (unsigned long long)stat.ps.total,
- iolat->rq_depth.max_depth);
+ seq_printf(s, " missed=%llu total=%llu depth=max",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total);
+ else
+ seq_printf(s, " missed=%llu total=%llu depth=%u",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total,
+ iolat->rq_depth.max_depth);
+ return true;
}
-static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
- size_t size)
+static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
unsigned long long avg_lat;
unsigned long long cur_win;
if (!blkcg_debug_stats)
- return 0;
+ return false;
if (iolat->ssd)
- return iolatency_ssd_stat(iolat, buf, size);
+ return iolatency_ssd_stat(iolat, s);
avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
if (iolat->rq_depth.max_depth == UINT_MAX)
- return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
- avg_lat, cur_win);
-
- return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
- iolat->rq_depth.max_depth, avg_lat, cur_win);
+ seq_printf(s, " depth=max avg_lat=%llu win=%llu",
+ avg_lat, cur_win);
+ else
+ seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
+ iolat->rq_depth.max_depth, avg_lat, cur_win);
+ return true;
}
-
static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
struct request_queue *q,
struct blkcg *blkcg)
diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c
index 3b4bfddec39f..b48a4b962f90 100644
--- a/block/mq-deadline-cgroup.c
+++ b/block/mq-deadline-cgroup.c
@@ -52,7 +52,7 @@ struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
return dd_blkcg_from_pd(pd);
}
-static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
+static bool dd_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
static const char *const prio_class_name[] = {
[IOPRIO_CLASS_NONE] = "NONE",
@@ -61,12 +61,10 @@ static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
[IOPRIO_CLASS_IDLE] = "IDLE",
};
struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
- int res = 0;
u8 prio;
for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
- res += scnprintf(buf + res, size - res,
- " [%s] dispatched=%u inserted=%u merged=%u",
+ seq_printf(s, " [%s] dispatched=%u inserted=%u merged=%u",
prio_class_name[prio],
ddcg_sum(blkcg, dispatched, prio) +
ddcg_sum(blkcg, merged, prio) -
@@ -75,7 +73,7 @@ static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
ddcg_sum(blkcg, completed, prio),
ddcg_sum(blkcg, merged, prio));
- return res;
+ return true;
}
static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 37048438872c..b4de2010fba5 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -152,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
- size_t size);
+typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
+ struct seq_file *s);
struct blkcg_policy {
int plid;
--
2.30.2
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH 1/2] blk-cgroup: refactor blkcg_print_stat
@ 2021-08-11 17:56 ` Tejun Heo
0 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2021-08-11 17:56 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: axboe, cgroups, linux-block
On Tue, Aug 10, 2021 at 05:26:22PM +0200, Christoph Hellwig wrote:
> Factor out a helper to deal with a single blkcg_gq to make the code a
> little bit easier to follow.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Tejun Heo <tj@kernel.org>
Thanks.
--
tejun
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/2] blk-cgroup: refactor blkcg_print_stat
@ 2021-08-11 17:56 ` Tejun Heo
0 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2021-08-11 17:56 UTC (permalink / raw)
To: Christoph Hellwig
Cc: axboe-tSWWG44O7X1aa/9Udqfwiw, cgroups-u79uwXL29TY76Z2rM5mHXA,
linux-block-u79uwXL29TY76Z2rM5mHXA
On Tue, Aug 10, 2021 at 05:26:22PM +0200, Christoph Hellwig wrote:
> Factor out a helper to deal with a single blkcg_gq to make the code a
> little bit easier to follow.
>
> Signed-off-by: Christoph Hellwig <hch-jcswGhMUV9g@public.gmane.org>
Acked-by: Tejun Heo <tj-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
Thanks.
--
tejun
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 2/2] blk-cgroup: stop using seq_get_buf
2021-08-10 15:26 ` Christoph Hellwig
(?)
@ 2021-08-11 17:56 ` Tejun Heo
-1 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2021-08-11 17:56 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: axboe, cgroups, linux-block
On Tue, Aug 10, 2021 at 05:26:23PM +0200, Christoph Hellwig wrote:
> seq_get_buf is a crutch that undoes all the memory safety of the
> seq_file interface. Use the normal seq_printf interfaces instead.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Tejun Heo <tj@kernel.org>
Thanks.
--
tejun
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/2] blk-cgroup: refactor blkcg_print_stat
@ 2021-08-16 12:39 ` Christoph Hellwig
0 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2021-08-16 12:39 UTC (permalink / raw)
To: axboe; +Cc: tj, cgroups, linux-block
ping.
On Tue, Aug 10, 2021 at 05:26:22PM +0200, Christoph Hellwig wrote:
> Factor out a helper to deal with a single blkcg_gq to make the code a
> little bit easier to follow.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> block/blk-cgroup.c | 148 ++++++++++++++++++++++-----------------------
> 1 file changed, 74 insertions(+), 74 deletions(-)
>
> diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
> index db034e35ae20..52aa0540ccaf 100644
> --- a/block/blk-cgroup.c
> +++ b/block/blk-cgroup.c
> @@ -870,97 +870,97 @@ static void blkcg_fill_root_iostats(void)
> }
> }
>
> -static int blkcg_print_stat(struct seq_file *sf, void *v)
> +static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
> {
> - struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
> - struct blkcg_gq *blkg;
> -
> - if (!seq_css(sf)->parent)
> - blkcg_fill_root_iostats();
> - else
> - cgroup_rstat_flush(blkcg->css.cgroup);
> -
> - rcu_read_lock();
> + struct blkg_iostat_set *bis = &blkg->iostat;
> + u64 rbytes, wbytes, rios, wios, dbytes, dios;
> + bool has_stats = false;
> + const char *dname;
> + unsigned seq;
> + char *buf;
> + size_t size = seq_get_buf(s, &buf), off = 0;
> + int i;
>
> - hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
> - struct blkg_iostat_set *bis = &blkg->iostat;
> - const char *dname;
> - char *buf;
> - u64 rbytes, wbytes, rios, wios, dbytes, dios;
> - size_t size = seq_get_buf(sf, &buf), off = 0;
> - int i;
> - bool has_stats = false;
> - unsigned seq;
> + if (!blkg->online)
> + return;
>
> - spin_lock_irq(&blkg->q->queue_lock);
> + dname = blkg_dev_name(blkg);
> + if (!dname)
> + return;
>
> - if (!blkg->online)
> - goto skip;
> + /*
> + * Hooray string manipulation, count is the size written NOT
> + * INCLUDING THE \0, so size is now count+1 less than what we
> + * had before, but we want to start writing the next bit from
> + * the \0 so we only add count to buf.
> + */
> + off += scnprintf(buf+off, size-off, "%s ", dname);
>
> - dname = blkg_dev_name(blkg);
> - if (!dname)
> - goto skip;
> + do {
> + seq = u64_stats_fetch_begin(&bis->sync);
> +
> + rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
> + wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
> + dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
> + rios = bis->cur.ios[BLKG_IOSTAT_READ];
> + wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
> + dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
> + } while (u64_stats_fetch_retry(&bis->sync, seq));
> +
> + if (rbytes || wbytes || rios || wios) {
> + has_stats = true;
> + off += scnprintf(buf+off, size-off,
> + "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
> + rbytes, wbytes, rios, wios,
> + dbytes, dios);
> + }
>
> - /*
> - * Hooray string manipulation, count is the size written NOT
> - * INCLUDING THE \0, so size is now count+1 less than what we
> - * had before, but we want to start writing the next bit from
> - * the \0 so we only add count to buf.
> - */
> - off += scnprintf(buf+off, size-off, "%s ", dname);
> + if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
> + has_stats = true;
> + off += scnprintf(buf+off, size-off, " use_delay=%d delay_nsec=%llu",
> + atomic_read(&blkg->use_delay),
> + atomic64_read(&blkg->delay_nsec));
> + }
>
> - do {
> - seq = u64_stats_fetch_begin(&bis->sync);
> + for (i = 0; i < BLKCG_MAX_POLS; i++) {
> + struct blkcg_policy *pol = blkcg_policy[i];
> + size_t written;
>
> - rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
> - wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
> - dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
> - rios = bis->cur.ios[BLKG_IOSTAT_READ];
> - wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
> - dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
> - } while (u64_stats_fetch_retry(&bis->sync, seq));
> + if (!blkg->pd[i] || !pol->pd_stat_fn)
> + continue;
>
> - if (rbytes || wbytes || rios || wios) {
> + written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
> + if (written)
> has_stats = true;
> - off += scnprintf(buf+off, size-off,
> - "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
> - rbytes, wbytes, rios, wios,
> - dbytes, dios);
> - }
> + off += written;
> + }
>
> - if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
> - has_stats = true;
> - off += scnprintf(buf+off, size-off,
> - " use_delay=%d delay_nsec=%llu",
> - atomic_read(&blkg->use_delay),
> - (unsigned long long)atomic64_read(&blkg->delay_nsec));
> + if (has_stats) {
> + if (off < size - 1) {
> + off += scnprintf(buf+off, size-off, "\n");
> + seq_commit(s, off);
> + } else {
> + seq_commit(s, -1);
> }
> + }
> +}
>
> - for (i = 0; i < BLKCG_MAX_POLS; i++) {
> - struct blkcg_policy *pol = blkcg_policy[i];
> - size_t written;
> -
> - if (!blkg->pd[i] || !pol->pd_stat_fn)
> - continue;
> +static int blkcg_print_stat(struct seq_file *sf, void *v)
> +{
> + struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
> + struct blkcg_gq *blkg;
>
> - written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
> - if (written)
> - has_stats = true;
> - off += written;
> - }
> + if (!seq_css(sf)->parent)
> + blkcg_fill_root_iostats();
> + else
> + cgroup_rstat_flush(blkcg->css.cgroup);
>
> - if (has_stats) {
> - if (off < size - 1) {
> - off += scnprintf(buf+off, size-off, "\n");
> - seq_commit(sf, off);
> - } else {
> - seq_commit(sf, -1);
> - }
> - }
> - skip:
> + rcu_read_lock();
> + hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
> + spin_lock_irq(&blkg->q->queue_lock);
> + blkcg_print_one_stat(blkg, sf);
> spin_unlock_irq(&blkg->q->queue_lock);
> }
> -
> rcu_read_unlock();
> return 0;
> }
> --
> 2.30.2
---end quoted text---
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/2] blk-cgroup: refactor blkcg_print_stat
@ 2021-08-16 12:39 ` Christoph Hellwig
0 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2021-08-16 12:39 UTC (permalink / raw)
To: axboe-tSWWG44O7X1aa/9Udqfwiw
Cc: tj-DgEjT+Ai2ygdnm+yROfE0A, cgroups-u79uwXL29TY76Z2rM5mHXA,
linux-block-u79uwXL29TY76Z2rM5mHXA
ping.
On Tue, Aug 10, 2021 at 05:26:22PM +0200, Christoph Hellwig wrote:
> Factor out a helper to deal with a single blkcg_gq to make the code a
> little bit easier to follow.
>
> Signed-off-by: Christoph Hellwig <hch-jcswGhMUV9g@public.gmane.org>
> ---
> block/blk-cgroup.c | 148 ++++++++++++++++++++++-----------------------
> 1 file changed, 74 insertions(+), 74 deletions(-)
>
> diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
> index db034e35ae20..52aa0540ccaf 100644
> --- a/block/blk-cgroup.c
> +++ b/block/blk-cgroup.c
> @@ -870,97 +870,97 @@ static void blkcg_fill_root_iostats(void)
> }
> }
>
> -static int blkcg_print_stat(struct seq_file *sf, void *v)
> +static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
> {
> - struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
> - struct blkcg_gq *blkg;
> -
> - if (!seq_css(sf)->parent)
> - blkcg_fill_root_iostats();
> - else
> - cgroup_rstat_flush(blkcg->css.cgroup);
> -
> - rcu_read_lock();
> + struct blkg_iostat_set *bis = &blkg->iostat;
> + u64 rbytes, wbytes, rios, wios, dbytes, dios;
> + bool has_stats = false;
> + const char *dname;
> + unsigned seq;
> + char *buf;
> + size_t size = seq_get_buf(s, &buf), off = 0;
> + int i;
>
> - hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
> - struct blkg_iostat_set *bis = &blkg->iostat;
> - const char *dname;
> - char *buf;
> - u64 rbytes, wbytes, rios, wios, dbytes, dios;
> - size_t size = seq_get_buf(sf, &buf), off = 0;
> - int i;
> - bool has_stats = false;
> - unsigned seq;
> + if (!blkg->online)
> + return;
>
> - spin_lock_irq(&blkg->q->queue_lock);
> + dname = blkg_dev_name(blkg);
> + if (!dname)
> + return;
>
> - if (!blkg->online)
> - goto skip;
> + /*
> + * Hooray string manipulation, count is the size written NOT
> + * INCLUDING THE \0, so size is now count+1 less than what we
> + * had before, but we want to start writing the next bit from
> + * the \0 so we only add count to buf.
> + */
> + off += scnprintf(buf+off, size-off, "%s ", dname);
>
> - dname = blkg_dev_name(blkg);
> - if (!dname)
> - goto skip;
> + do {
> + seq = u64_stats_fetch_begin(&bis->sync);
> +
> + rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
> + wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
> + dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
> + rios = bis->cur.ios[BLKG_IOSTAT_READ];
> + wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
> + dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
> + } while (u64_stats_fetch_retry(&bis->sync, seq));
> +
> + if (rbytes || wbytes || rios || wios) {
> + has_stats = true;
> + off += scnprintf(buf+off, size-off,
> + "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
> + rbytes, wbytes, rios, wios,
> + dbytes, dios);
> + }
>
> - /*
> - * Hooray string manipulation, count is the size written NOT
> - * INCLUDING THE \0, so size is now count+1 less than what we
> - * had before, but we want to start writing the next bit from
> - * the \0 so we only add count to buf.
> - */
> - off += scnprintf(buf+off, size-off, "%s ", dname);
> + if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
> + has_stats = true;
> + off += scnprintf(buf+off, size-off, " use_delay=%d delay_nsec=%llu",
> + atomic_read(&blkg->use_delay),
> + atomic64_read(&blkg->delay_nsec));
> + }
>
> - do {
> - seq = u64_stats_fetch_begin(&bis->sync);
> + for (i = 0; i < BLKCG_MAX_POLS; i++) {
> + struct blkcg_policy *pol = blkcg_policy[i];
> + size_t written;
>
> - rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
> - wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
> - dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
> - rios = bis->cur.ios[BLKG_IOSTAT_READ];
> - wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
> - dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
> - } while (u64_stats_fetch_retry(&bis->sync, seq));
> + if (!blkg->pd[i] || !pol->pd_stat_fn)
> + continue;
>
> - if (rbytes || wbytes || rios || wios) {
> + written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
> + if (written)
> has_stats = true;
> - off += scnprintf(buf+off, size-off,
> - "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
> - rbytes, wbytes, rios, wios,
> - dbytes, dios);
> - }
> + off += written;
> + }
>
> - if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
> - has_stats = true;
> - off += scnprintf(buf+off, size-off,
> - " use_delay=%d delay_nsec=%llu",
> - atomic_read(&blkg->use_delay),
> - (unsigned long long)atomic64_read(&blkg->delay_nsec));
> + if (has_stats) {
> + if (off < size - 1) {
> + off += scnprintf(buf+off, size-off, "\n");
> + seq_commit(s, off);
> + } else {
> + seq_commit(s, -1);
> }
> + }
> +}
>
> - for (i = 0; i < BLKCG_MAX_POLS; i++) {
> - struct blkcg_policy *pol = blkcg_policy[i];
> - size_t written;
> -
> - if (!blkg->pd[i] || !pol->pd_stat_fn)
> - continue;
> +static int blkcg_print_stat(struct seq_file *sf, void *v)
> +{
> + struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
> + struct blkcg_gq *blkg;
>
> - written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
> - if (written)
> - has_stats = true;
> - off += written;
> - }
> + if (!seq_css(sf)->parent)
> + blkcg_fill_root_iostats();
> + else
> + cgroup_rstat_flush(blkcg->css.cgroup);
>
> - if (has_stats) {
> - if (off < size - 1) {
> - off += scnprintf(buf+off, size-off, "\n");
> - seq_commit(sf, off);
> - } else {
> - seq_commit(sf, -1);
> - }
> - }
> - skip:
> + rcu_read_lock();
> + hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
> + spin_lock_irq(&blkg->q->queue_lock);
> + blkcg_print_one_stat(blkg, sf);
> spin_unlock_irq(&blkg->q->queue_lock);
> }
> -
> rcu_read_unlock();
> return 0;
> }
> --
> 2.30.2
---end quoted text---
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/2] blk-cgroup: refactor blkcg_print_stat
@ 2021-08-16 16:53 ` Jens Axboe
0 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2021-08-16 16:53 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: tj, cgroups, linux-block
On 8/10/21 9:26 AM, Christoph Hellwig wrote:
> Factor out a helper to deal with a single blkcg_gq to make the code a
> little bit easier to follow.
Applied 1-2, thanks.
--
Jens Axboe
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/2] blk-cgroup: refactor blkcg_print_stat
@ 2021-08-16 16:53 ` Jens Axboe
0 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2021-08-16 16:53 UTC (permalink / raw)
To: Christoph Hellwig
Cc: tj-DgEjT+Ai2ygdnm+yROfE0A, cgroups-u79uwXL29TY76Z2rM5mHXA,
linux-block-u79uwXL29TY76Z2rM5mHXA
On 8/10/21 9:26 AM, Christoph Hellwig wrote:
> Factor out a helper to deal with a single blkcg_gq to make the code a
> little bit easier to follow.
Applied 1-2, thanks.
--
Jens Axboe
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 2/2] blk-cgroup: stop using seq_get_buf
2021-08-10 15:26 ` Christoph Hellwig
(?)
(?)
@ 2022-01-07 9:20 ` Wolfgang Bumiller
2022-01-10 16:39 ` Christoph Hellwig
-1 siblings, 1 reply; 13+ messages in thread
From: Wolfgang Bumiller @ 2022-01-07 9:20 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: axboe, tj, cgroups, linux-block
Sorry for the late noise, but now when there are no stats, the name is
still printed out without a trailing newline, which doesn't seem to
be in line with the documented 'nested keyed' file format.
In particular, I'm now seeing this line with 2 keys:
253:10 253:5 rbytes=0 wbytes=0 rios=0 wios=1 dbytes=0 dios=0
^~~~~~ ^~~~~
I'm not sure whether a separate temporary buffer would make more sense,
or whether we should switch back to seq_get_buf?
Figuring out `has_stats` first doesn't seem to be trivial due to the
`pd_stat_fn` calls.
Or of course, just include the newlines unconditionally?
Unless seq_file has/gets another way to roll back the buffer?
On Tue, Aug 10, 2021 at 05:26:23PM +0200, Christoph Hellwig wrote:
> seq_get_buf is a crutch that undoes all the memory safety of the
> seq_file interface. Use the normal seq_printf interfaces instead.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> block/blk-cgroup.c | 30 ++++++------------------------
> block/blk-iocost.c | 23 +++++++++--------------
> block/blk-iolatency.c | 38 +++++++++++++++++++-------------------
> block/mq-deadline-cgroup.c | 8 +++-----
> include/linux/blk-cgroup.h | 4 ++--
> 5 files changed, 39 insertions(+), 64 deletions(-)
>
> diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
> index 52aa0540ccaf..b8ec47dcce42 100644
> --- a/block/blk-cgroup.c
> +++ b/block/blk-cgroup.c
> @@ -877,8 +877,6 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
> bool has_stats = false;
> const char *dname;
> unsigned seq;
> - char *buf;
> - size_t size = seq_get_buf(s, &buf), off = 0;
> int i;
>
> if (!blkg->online)
> @@ -888,13 +886,7 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
> if (!dname)
> return;
>
> - /*
> - * Hooray string manipulation, count is the size written NOT
> - * INCLUDING THE \0, so size is now count+1 less than what we
> - * had before, but we want to start writing the next bit from
> - * the \0 so we only add count to buf.
> - */
> - off += scnprintf(buf+off, size-off, "%s ", dname);
> + seq_printf(s, "%s ", dname);
>
> do {
> seq = u64_stats_fetch_begin(&bis->sync);
> @@ -909,40 +901,30 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
>
> if (rbytes || wbytes || rios || wios) {
> has_stats = true;
> - off += scnprintf(buf+off, size-off,
> - "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
> + seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
> rbytes, wbytes, rios, wios,
> dbytes, dios);
> }
>
> if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
> has_stats = true;
> - off += scnprintf(buf+off, size-off, " use_delay=%d delay_nsec=%llu",
> + seq_printf(s, " use_delay=%d delay_nsec=%llu",
> atomic_read(&blkg->use_delay),
> atomic64_read(&blkg->delay_nsec));
> }
>
> for (i = 0; i < BLKCG_MAX_POLS; i++) {
> struct blkcg_policy *pol = blkcg_policy[i];
> - size_t written;
>
> if (!blkg->pd[i] || !pol->pd_stat_fn)
> continue;
>
> - written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
> - if (written)
> + if (pol->pd_stat_fn(blkg->pd[i], s))
> has_stats = true;
> - off += written;
> }
>
> - if (has_stats) {
> - if (off < size - 1) {
> - off += scnprintf(buf+off, size-off, "\n");
> - seq_commit(s, off);
> - } else {
> - seq_commit(s, -1);
> - }
> - }
> + if (has_stats)
> + seq_printf(s, "\n");
> }
>
> static int blkcg_print_stat(struct seq_file *sf, void *v)
> diff --git a/block/blk-iocost.c b/block/blk-iocost.c
> index 5fac3757e6e0..89b21a360b2c 100644
> --- a/block/blk-iocost.c
> +++ b/block/blk-iocost.c
> @@ -2988,34 +2988,29 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
> kfree(iocg);
> }
>
> -static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
> +static bool ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
> {
> struct ioc_gq *iocg = pd_to_iocg(pd);
> struct ioc *ioc = iocg->ioc;
> - size_t pos = 0;
>
> if (!ioc->enabled)
> - return 0;
> + return false;
>
> if (iocg->level == 0) {
> unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
> ioc->vtime_base_rate * 10000,
> VTIME_PER_USEC);
> - pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
> - vp10k / 100, vp10k % 100);
> + seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
> }
>
> - pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
> - iocg->last_stat.usage_us);
> + seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
>
> if (blkcg_debug_stats)
> - pos += scnprintf(buf + pos, size - pos,
> - " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
> - iocg->last_stat.wait_us,
> - iocg->last_stat.indebt_us,
> - iocg->last_stat.indelay_us);
> -
> - return pos;
> + seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
> + iocg->last_stat.wait_us,
> + iocg->last_stat.indebt_us,
> + iocg->last_stat.indelay_us);
> + return true;
> }
>
> static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
> diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
> index d8b0d8bd132b..c0545f9da549 100644
> --- a/block/blk-iolatency.c
> +++ b/block/blk-iolatency.c
> @@ -890,8 +890,7 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
> return 0;
> }
>
> -static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
> - size_t size)
> +static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
> {
> struct latency_stat stat;
> int cpu;
> @@ -906,39 +905,40 @@ static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
> preempt_enable();
>
> if (iolat->rq_depth.max_depth == UINT_MAX)
> - return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
> - (unsigned long long)stat.ps.missed,
> - (unsigned long long)stat.ps.total);
> - return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
> - (unsigned long long)stat.ps.missed,
> - (unsigned long long)stat.ps.total,
> - iolat->rq_depth.max_depth);
> + seq_printf(s, " missed=%llu total=%llu depth=max",
> + (unsigned long long)stat.ps.missed,
> + (unsigned long long)stat.ps.total);
> + else
> + seq_printf(s, " missed=%llu total=%llu depth=%u",
> + (unsigned long long)stat.ps.missed,
> + (unsigned long long)stat.ps.total,
> + iolat->rq_depth.max_depth);
> + return true;
> }
>
> -static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
> - size_t size)
> +static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
> {
> struct iolatency_grp *iolat = pd_to_lat(pd);
> unsigned long long avg_lat;
> unsigned long long cur_win;
>
> if (!blkcg_debug_stats)
> - return 0;
> + return false;
>
> if (iolat->ssd)
> - return iolatency_ssd_stat(iolat, buf, size);
> + return iolatency_ssd_stat(iolat, s);
>
> avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
> cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
> if (iolat->rq_depth.max_depth == UINT_MAX)
> - return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
> - avg_lat, cur_win);
> -
> - return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
> - iolat->rq_depth.max_depth, avg_lat, cur_win);
> + seq_printf(s, " depth=max avg_lat=%llu win=%llu",
> + avg_lat, cur_win);
> + else
> + seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
> + iolat->rq_depth.max_depth, avg_lat, cur_win);
> + return true;
> }
>
> -
> static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
> struct request_queue *q,
> struct blkcg *blkcg)
> diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c
> index 3b4bfddec39f..b48a4b962f90 100644
> --- a/block/mq-deadline-cgroup.c
> +++ b/block/mq-deadline-cgroup.c
> @@ -52,7 +52,7 @@ struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
> return dd_blkcg_from_pd(pd);
> }
>
> -static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
> +static bool dd_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
> {
> static const char *const prio_class_name[] = {
> [IOPRIO_CLASS_NONE] = "NONE",
> @@ -61,12 +61,10 @@ static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
> [IOPRIO_CLASS_IDLE] = "IDLE",
> };
> struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
> - int res = 0;
> u8 prio;
>
> for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
> - res += scnprintf(buf + res, size - res,
> - " [%s] dispatched=%u inserted=%u merged=%u",
> + seq_printf(s, " [%s] dispatched=%u inserted=%u merged=%u",
> prio_class_name[prio],
> ddcg_sum(blkcg, dispatched, prio) +
> ddcg_sum(blkcg, merged, prio) -
> @@ -75,7 +73,7 @@ static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
> ddcg_sum(blkcg, completed, prio),
> ddcg_sum(blkcg, merged, prio));
>
> - return res;
> + return true;
> }
>
> static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
> diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
> index 37048438872c..b4de2010fba5 100644
> --- a/include/linux/blk-cgroup.h
> +++ b/include/linux/blk-cgroup.h
> @@ -152,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
> typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
> typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
> typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
> -typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
> - size_t size);
> +typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
> + struct seq_file *s);
>
> struct blkcg_policy {
> int plid;
> --
> 2.30.2
>
>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 2/2] blk-cgroup: stop using seq_get_buf
@ 2022-01-10 16:39 ` Christoph Hellwig
0 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2022-01-10 16:39 UTC (permalink / raw)
To: Wolfgang Bumiller; +Cc: Christoph Hellwig, axboe, tj, cgroups, linux-block
On Fri, Jan 07, 2022 at 10:20:23AM +0100, Wolfgang Bumiller wrote:
> 253:10 253:5 rbytes=0 wbytes=0 rios=0 wios=1 dbytes=0 dios=0
> ^~~~~~ ^~~~~
>
> I'm not sure if a separate temporary buffer would make more sense or
> switching back to seq_get_buf?
> Figuring out `has_stats` first doesn't seem to be trivial due to the
> `pd_stat_fn` calls.
>
> Or of course, just include the newlines unconditionally?
>
> Unless seq_file has/gets another way to roll back the buffer?
No, there is no good way to roll back the buffer.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 2/2] blk-cgroup: stop using seq_get_buf
@ 2022-01-10 16:39 ` Christoph Hellwig
0 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2022-01-10 16:39 UTC (permalink / raw)
To: Wolfgang Bumiller
Cc: Christoph Hellwig, axboe-tSWWG44O7X1aa/9Udqfwiw,
tj-DgEjT+Ai2ygdnm+yROfE0A, cgroups-u79uwXL29TY76Z2rM5mHXA,
linux-block-u79uwXL29TY76Z2rM5mHXA
On Fri, Jan 07, 2022 at 10:20:23AM +0100, Wolfgang Bumiller wrote:
> 253:10 253:5 rbytes=0 wbytes=0 rios=0 wios=1 dbytes=0 dios=0
> ^~~~~~ ^~~~~
>
> I'm not sure if a separate temporary buffer would make more sense or
> switching back to seq_get_buf?
> Figuring out `has_stats` first doesn't seem to be trivial due to the
> `pd_stat_fn` calls.
>
> Or of course, just include the newlines unconditionally?
>
> Unless seq_file has/gets another way to roll back the buffer?
No, there is no good way to roll back the buffer.
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2022-01-10 16:39 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-10 15:26 [PATCH 1/2] blk-cgroup: refactor blkcg_print_stat Christoph Hellwig
2021-08-10 15:26 ` [PATCH 2/2] blk-cgroup: stop using seq_get_buf Christoph Hellwig
2021-08-10 15:26 ` Christoph Hellwig
2021-08-11 17:56 ` Tejun Heo
2022-01-07 9:20 ` Wolfgang Bumiller
2022-01-10 16:39 ` Christoph Hellwig
2022-01-10 16:39 ` Christoph Hellwig
2021-08-11 17:56 ` [PATCH 1/2] blk-cgroup: refactor blkcg_print_stat Tejun Heo
2021-08-11 17:56 ` Tejun Heo
2021-08-16 12:39 ` Christoph Hellwig
2021-08-16 12:39 ` Christoph Hellwig
2021-08-16 16:53 ` Jens Axboe
2021-08-16 16:53 ` Jens Axboe
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.