* [PATCH] fio: add for_each_rw_ddir() macro
@ 2020-08-12 15:26 Alexey Dobriyan
  2020-08-12 22:01 ` Elliott, Robert (Servers)
  0 siblings, 1 reply; 5+ messages in thread
From: Alexey Dobriyan @ 2020-08-12 15:26 UTC (permalink / raw)
  To: axboe; +Cc: fio

Make it slightly easier to add DDIR_APPEND as a fully fledged I/O type.

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
---

 backend.c |   16 +++++++---------
 eta.c     |   12 +++++-------
 init.c    |   62 ++++++++++++++++++++++++++------------------------------------
 io_ddir.h |    2 ++
 stat.c    |   16 +++++++---------
 5 files changed, 47 insertions(+), 61 deletions(-)

--- a/backend.c
+++ b/backend.c
@@ -223,12 +223,10 @@ static bool check_min_rate(struct thread_data *td, struct timespec *now)
 {
 	bool ret = false;
 
-	if (td->bytes_done[DDIR_READ])
-		ret |= __check_min_rate(td, now, DDIR_READ);
-	if (td->bytes_done[DDIR_WRITE])
-		ret |= __check_min_rate(td, now, DDIR_WRITE);
-	if (td->bytes_done[DDIR_TRIM])
-		ret |= __check_min_rate(td, now, DDIR_TRIM);
+	for_each_rw_ddir(ddir) {
+		if (td->bytes_done[ddir])
+			ret |= __check_min_rate(td, now, ddir);
+	}
 
 	return ret;
 }
@@ -1876,9 +1874,9 @@ static void *thread_main(void *data)
 
 	update_rusage_stat(td);
 	td->ts.total_run_time = mtime_since_now(&td->epoch);
-	td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
-	td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
-	td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+	for_each_rw_ddir(ddir) {
+		td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+	}
 
 	if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
 	    (td->o.verify != VERIFY_NONE && td_write(td)))
--- a/eta.c
+++ b/eta.c
@@ -383,8 +383,8 @@ bool calc_thread_status(struct jobs_eta *je, int force)
 	struct thread_data *td;
 	int i, unified_rw_rep;
 	uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
-	unsigned long long io_bytes[DDIR_RWDIR_CNT];
-	unsigned long long io_iops[DDIR_RWDIR_CNT];
+	unsigned long long io_bytes[DDIR_RWDIR_CNT] = {};
+	unsigned long long io_iops[DDIR_RWDIR_CNT] = {};
 	struct timespec now;
 
 	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
@@ -413,8 +413,6 @@ bool calc_thread_status(struct jobs_eta *je, int force)
 
 	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;
 
-	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
-	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
 	bw_avg_time = ULONG_MAX;
 	unified_rw_rep = 0;
 	for_each_td(td, i) {
@@ -509,9 +507,9 @@ bool calc_thread_status(struct jobs_eta *je, int force)
 		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
 				je->rate);
 		memcpy(&rate_prev_time, &now, sizeof(now));
-		add_agg_sample(sample_val(je->rate[DDIR_READ]), DDIR_READ, 0, 0);
-		add_agg_sample(sample_val(je->rate[DDIR_WRITE]), DDIR_WRITE, 0, 0);
-		add_agg_sample(sample_val(je->rate[DDIR_TRIM]), DDIR_TRIM, 0, 0);
+		for_each_rw_ddir(ddir) {
+			add_agg_sample(sample_val(je->rate[ddir]), ddir, 0, 0);
+		}
 	}
 
 	disp_time = mtime_since(&disp_prev_time, &now);
--- a/init.c
+++ b/init.c
@@ -564,13 +564,11 @@ static int setup_rate(struct thread_data *td)
 {
 	int ret = 0;
 
-	if (td->o.rate[DDIR_READ] || td->o.rate_iops[DDIR_READ])
-		ret = __setup_rate(td, DDIR_READ);
-	if (td->o.rate[DDIR_WRITE] || td->o.rate_iops[DDIR_WRITE])
-		ret |= __setup_rate(td, DDIR_WRITE);
-	if (td->o.rate[DDIR_TRIM] || td->o.rate_iops[DDIR_TRIM])
-		ret |= __setup_rate(td, DDIR_TRIM);
-
+	for_each_rw_ddir(ddir) {
+		if (td->o.rate[ddir] || td->o.rate_iops[ddir]) {
+			ret |= __setup_rate(td, ddir);
+		}
+	}
 	return ret;
 }
 
@@ -662,31 +660,25 @@ static int fixup_options(struct thread_data *td)
 	if (td_read(td))
 		o->overwrite = 1;
 
-	if (!o->min_bs[DDIR_READ])
-		o->min_bs[DDIR_READ] = o->bs[DDIR_READ];
-	if (!o->max_bs[DDIR_READ])
-		o->max_bs[DDIR_READ] = o->bs[DDIR_READ];
-	if (!o->min_bs[DDIR_WRITE])
-		o->min_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
-	if (!o->max_bs[DDIR_WRITE])
-		o->max_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
-	if (!o->min_bs[DDIR_TRIM])
-		o->min_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
-	if (!o->max_bs[DDIR_TRIM])
-		o->max_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
-
-	o->rw_min_bs = min(o->min_bs[DDIR_READ], o->min_bs[DDIR_WRITE]);
-	o->rw_min_bs = min(o->min_bs[DDIR_TRIM], o->rw_min_bs);
+	for_each_rw_ddir(ddir) {
+		if (!o->min_bs[ddir])
+			o->min_bs[ddir] = o->bs[ddir];
+		if (!o->max_bs[ddir])
+			o->max_bs[ddir] = o->bs[ddir];
+	}
+
+	o->rw_min_bs = -1;
+	for_each_rw_ddir(ddir) {
+		o->rw_min_bs = min(o->rw_min_bs, o->min_bs[ddir]);
+	}
 
 	/*
 	 * For random IO, allow blockalign offset other than min_bs.
 	 */
-	if (!o->ba[DDIR_READ] || !td_random(td))
-		o->ba[DDIR_READ] = o->min_bs[DDIR_READ];
-	if (!o->ba[DDIR_WRITE] || !td_random(td))
-		o->ba[DDIR_WRITE] = o->min_bs[DDIR_WRITE];
-	if (!o->ba[DDIR_TRIM] || !td_random(td))
-		o->ba[DDIR_TRIM] = o->min_bs[DDIR_TRIM];
+	for_each_rw_ddir(ddir) {
+		if (!o->ba[ddir] || !td_random(td))
+			o->ba[ddir] = o->min_bs[ddir];
+	}
 
 	if ((o->ba[DDIR_READ] != o->min_bs[DDIR_READ] ||
 	    o->ba[DDIR_WRITE] != o->min_bs[DDIR_WRITE] ||
@@ -765,14 +757,12 @@ static int fixup_options(struct thread_data *td)
 		log_err("fio: rate and rate_iops are mutually exclusive\n");
 		ret |= 1;
 	}
-	if ((o->rate[DDIR_READ] && (o->rate[DDIR_READ] < o->ratemin[DDIR_READ])) ||
-	    (o->rate[DDIR_WRITE] && (o->rate[DDIR_WRITE] < o->ratemin[DDIR_WRITE])) ||
-	    (o->rate[DDIR_TRIM] && (o->rate[DDIR_TRIM] < o->ratemin[DDIR_TRIM])) ||
-	    (o->rate_iops[DDIR_READ] && (o->rate_iops[DDIR_READ] < o->rate_iops_min[DDIR_READ])) ||
-	    (o->rate_iops[DDIR_WRITE] && (o->rate_iops[DDIR_WRITE] < o->rate_iops_min[DDIR_WRITE])) ||
-	    (o->rate_iops[DDIR_TRIM] && (o->rate_iops[DDIR_TRIM] < o->rate_iops_min[DDIR_TRIM]))) {
-		log_err("fio: minimum rate exceeds rate\n");
-		ret |= 1;
+	for_each_rw_ddir(ddir) {
+		if ((o->rate[ddir] && (o->rate[ddir] < o->ratemin[ddir])) ||
+		    (o->rate_iops[ddir] && (o->rate_iops[ddir] < o->rate_iops_min[ddir]))) {
+			log_err("fio: minimum rate exceeds rate\n");
+			ret |= 1;
+		}
 	}
 
 	if (!o->timeout && o->time_based) {
--- a/io_ddir.h
+++ b/io_ddir.h
@@ -16,6 +16,8 @@ enum fio_ddir {
 	DDIR_RWDIR_SYNC_CNT = 4,
 };
 
+#define for_each_rw_ddir(ddir)	for (enum fio_ddir ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
+
 static inline const char *io_ddir_name(enum fio_ddir ddir)
 {
 	static const char *name[] = { "read", "write", "trim", "sync",
--- a/stat.c
+++ b/stat.c
@@ -1078,12 +1078,10 @@ static void show_thread_status_normal(struct thread_stat *ts,
 	if (strlen(ts->description))
 		log_buf(out, "  Description  : [%s]\n", ts->description);
 
-	if (ts->io_bytes[DDIR_READ])
-		show_ddir_status(rs, ts, DDIR_READ, out);
-	if (ts->io_bytes[DDIR_WRITE])
-		show_ddir_status(rs, ts, DDIR_WRITE, out);
-	if (ts->io_bytes[DDIR_TRIM])
-		show_ddir_status(rs, ts, DDIR_TRIM, out);
+	for_each_rw_ddir(ddir) {
+		if (ts->io_bytes[ddir])
+			show_ddir_status(rs, ts, ddir, out);
+	}
 
 	show_latencies(ts, out);
 
@@ -2315,9 +2313,9 @@ void __show_running_run_stats(void)
 
 	for_each_td(td, i) {
 		td->update_rusage = 1;
-		td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
-		td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
-		td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+		for_each_rw_ddir(ddir) {
+			td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+		}
 		td->ts.total_run_time = mtime_since(&td->epoch, &ts);
 
 		rt[i] = mtime_since(&td->start, &ts);



* RE: [PATCH] fio: add for_each_rw_ddir() macro
  2020-08-12 15:26 [PATCH] fio: add for_each_rw_ddir() macro Alexey Dobriyan
@ 2020-08-12 22:01 ` Elliott, Robert (Servers)
  2020-08-13 10:36   ` Alexey Dobriyan
  2020-08-13 16:33   ` [PATCH v2] " Alexey Dobriyan
  0 siblings, 2 replies; 5+ messages in thread
From: Elliott, Robert (Servers) @ 2020-08-12 22:01 UTC (permalink / raw)
  To: Alexey Dobriyan, axboe; +Cc: fio



> -----Original Message-----
> From: fio-owner@vger.kernel.org <fio-owner@vger.kernel.org> On Behalf
> Of Alexey Dobriyan
> Sent: Wednesday, August 12, 2020 10:26 AM
> To: axboe@kernel.dk
> Cc: fio@vger.kernel.org
> Subject: [PATCH] fio: add for_each_rw_ddir() macro
> 
> Make it slightly easier to add DDIR_APPEND as a fully fledged I/O type.
> 
> Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
> ---
> 
...

> @@ -662,31 +660,25 @@ static int fixup_options(struct thread_data *td)
...
>  	/*
>  	 * For random IO, allow blockalign offset other than min_bs.
>  	 */
> -	if (!o->ba[DDIR_READ] || !td_random(td))
> -		o->ba[DDIR_READ] = o->min_bs[DDIR_READ];
> -	if (!o->ba[DDIR_WRITE] || !td_random(td))
> -		o->ba[DDIR_WRITE] = o->min_bs[DDIR_WRITE];
> -	if (!o->ba[DDIR_TRIM] || !td_random(td))
> -		o->ba[DDIR_TRIM] = o->min_bs[DDIR_TRIM];
> +	for_each_rw_ddir(ddir) {
> +		if (!o->ba[ddir] || !td_random(td))
> +			o->ba[ddir] = o->min_bs[ddir];
> +	}
> 
>  	if ((o->ba[DDIR_READ] != o->min_bs[DDIR_READ] ||
>  	    o->ba[DDIR_WRITE] != o->min_bs[DDIR_WRITE] ||

continues as:
            o->ba[DDIR_TRIM] != o->min_bs[DDIR_TRIM]) &&
            !o->norandommap) {

which should be updated too.
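
For example, an untested sketch that keeps the current single-message
behavior (ba_mismatch is just a throwaway local name here):

	bool ba_mismatch = false;

	for_each_rw_ddir(ddir) {
		if (o->ba[ddir] != o->min_bs[ddir])
			ba_mismatch = true;
	}
	if (ba_mismatch && !o->norandommap) {
		log_err("fio: Any use of blockalign= turns off randommap\n");
		o->norandommap = 1;
		ret |= warnings_fatal;
	}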


> @@ -765,14 +757,12 @@ static int fixup_options(struct thread_data *td)
>  		log_err("fio: rate and rate_iops are mutually exclusive\n");
>  		ret |= 1;
>  	}
> -	if ((o->rate[DDIR_READ] && (o->rate[DDIR_READ] < o->ratemin[DDIR_READ])) ||
> -	    (o->rate[DDIR_WRITE] && (o->rate[DDIR_WRITE] < o->ratemin[DDIR_WRITE])) ||
> -	    (o->rate[DDIR_TRIM] && (o->rate[DDIR_TRIM] < o->ratemin[DDIR_TRIM])) ||
> -	    (o->rate_iops[DDIR_READ] && (o->rate_iops[DDIR_READ] < o->rate_iops_min[DDIR_READ])) ||
> -	    (o->rate_iops[DDIR_WRITE] && (o->rate_iops[DDIR_WRITE] < o->rate_iops_min[DDIR_WRITE])) ||
> -	    (o->rate_iops[DDIR_TRIM] && (o->rate_iops[DDIR_TRIM] < o->rate_iops_min[DDIR_TRIM]))) {
> -		log_err("fio: minimum rate exceeds rate\n");
> -		ret |= 1;
> +	for_each_rw_ddir(ddir) {
> +		if ((o->rate[ddir] && (o->rate[ddir] < o->ratemin[ddir])) ||
> +		    (o->rate_iops[ddir] && (o->rate_iops[ddir] < o->rate_iops_min[ddir]))) {
> +			log_err("fio: minimum rate exceeds rate\n");
> +			ret |= 1;
> +		}
>  	}

That changes the behavior slightly - you can now get up to three of
those messages. Printing the value of ddir would help identify the
problem(s) and be more informative than the current single message.
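
Something like this would do (untested), reusing the existing
io_ddir_name() helper:

	log_err("fio: minimum rate exceeds rate (%s)\n", io_ddir_name(ddir));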



* Re: [PATCH] fio: add for_each_rw_ddir() macro
  2020-08-12 22:01 ` Elliott, Robert (Servers)
@ 2020-08-13 10:36   ` Alexey Dobriyan
  2020-08-13 16:33   ` [PATCH v2] " Alexey Dobriyan
  1 sibling, 0 replies; 5+ messages in thread
From: Alexey Dobriyan @ 2020-08-13 10:36 UTC (permalink / raw)
  To: Elliott, Robert (Servers); +Cc: axboe, fio

On Wed, Aug 12, 2020 at 10:01:56PM +0000, Elliott, Robert (Servers) wrote:
> > -----Original Message-----
> > From: fio-owner@vger.kernel.org <fio-owner@vger.kernel.org> On Behalf
> > Of Alexey Dobriyan
> > Sent: Wednesday, August 12, 2020 10:26 AM
> > To: axboe@kernel.dk
> > Cc: fio@vger.kernel.org
> > Subject: [PATCH] fio: add for_each_rw_ddir() macro
> > 
> > Make it slightly easier to add DDIR_APPEND as a fully fledged I/O type.

> >  	/*
> >  	 * For random IO, allow blockalign offset other than min_bs.
> >  	 */
> > -	if (!o->ba[DDIR_READ] || !td_random(td))
> > -		o->ba[DDIR_READ] = o->min_bs[DDIR_READ];
> > -	if (!o->ba[DDIR_WRITE] || !td_random(td))
> > -		o->ba[DDIR_WRITE] = o->min_bs[DDIR_WRITE];
> > -	if (!o->ba[DDIR_TRIM] || !td_random(td))
> > -		o->ba[DDIR_TRIM] = o->min_bs[DDIR_TRIM];
> > +	for_each_rw_ddir(ddir) {
> > +		if (!o->ba[ddir] || !td_random(td))
> > +			o->ba[ddir] = o->min_bs[ddir];
> > +	}
> > 
> >  	if ((o->ba[DDIR_READ] != o->min_bs[DDIR_READ] ||
> >  	    o->ba[DDIR_WRITE] != o->min_bs[DDIR_WRITE] ||
> 
> continues as:
>             o->ba[DDIR_TRIM] != o->min_bs[DDIR_TRIM]) &&
>             !o->norandommap) {
> 
> which should be updated too.

Actually it should not. This error message doesn't make sense for Append:

        if ((o->ba[DDIR_READ] != o->min_bs[DDIR_READ] ||
            o->ba[DDIR_WRITE] != o->min_bs[DDIR_WRITE] ||
            o->ba[DDIR_TRIM] != o->min_bs[DDIR_TRIM]) &&
            !o->norandommap) {
                log_err("fio: Any use of blockalign= turns off randommap\n");
                o->norandommap = 1;
                ret |= warnings_fatal;
        }

ba= should be ignored for Append (maybe with a non-fatal warning message).
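
Roughly along these lines, perhaps (untested, and DDIR_APPEND doesn't
exist yet, so this only sketches the intent):

	if (o->ba[DDIR_APPEND] != o->min_bs[DDIR_APPEND]) {
		log_info("fio: blockalign= is ignored for append\n");
		o->ba[DDIR_APPEND] = o->min_bs[DDIR_APPEND];
	}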

> > @@ -765,14 +757,12 @@ static int fixup_options(struct thread_data *td)
> >  		log_err("fio: rate and rate_iops are mutually exclusive\n");
> >  		ret |= 1;
> >  	}
> > -	if ((o->rate[DDIR_READ] && (o->rate[DDIR_READ] < o->ratemin[DDIR_READ])) ||
> > -	    (o->rate[DDIR_WRITE] && (o->rate[DDIR_WRITE] < o->ratemin[DDIR_WRITE])) ||
> > -	    (o->rate[DDIR_TRIM] && (o->rate[DDIR_TRIM] < o->ratemin[DDIR_TRIM])) ||
> > -	    (o->rate_iops[DDIR_READ] && (o->rate_iops[DDIR_READ] < o->rate_iops_min[DDIR_READ])) ||
> > -	    (o->rate_iops[DDIR_WRITE] && (o->rate_iops[DDIR_WRITE] < o->rate_iops_min[DDIR_WRITE])) ||
> > -	    (o->rate_iops[DDIR_TRIM] && (o->rate_iops[DDIR_TRIM] < o->rate_iops_min[DDIR_TRIM]))) {
> > -		log_err("fio: minimum rate exceeds rate\n");
> > -		ret |= 1;
> > +	for_each_rw_ddir(ddir) {
> > +		if ((o->rate[ddir] && (o->rate[ddir] < o->ratemin[ddir])) ||
> > +		    (o->rate_iops[ddir] && (o->rate_iops[ddir] < o->rate_iops_min[ddir]))) {
> > +			log_err("fio: minimum rate exceeds rate\n");
> > +			ret |= 1;
> > +		}
> >  	}
> 
> That changes the behavior slightly - you can now get up to three of
> those messages. Printing the value of ddir would help identify the
> problem(s) and be more informative than the current single message.

OK.



* [PATCH v2] fio: add for_each_rw_ddir() macro
  2020-08-12 22:01 ` Elliott, Robert (Servers)
  2020-08-13 10:36   ` Alexey Dobriyan
@ 2020-08-13 16:33   ` Alexey Dobriyan
  2020-08-17  4:01     ` Jens Axboe
  1 sibling, 1 reply; 5+ messages in thread
From: Alexey Dobriyan @ 2020-08-13 16:33 UTC (permalink / raw)
  To: axboe; +Cc: fio, elliott

Make it slightly easier to add Zone Append as a fully fledged I/O type.

Signed-off-by: Alexey Dobriyan (SK hynix) <adobriyan@gmail.com>
---

 backend.c |   16 +++++++---------
 eta.c     |   12 +++++-------
 init.c    |   62 ++++++++++++++++++++++++++------------------------------------
 io_ddir.h |    2 ++
 stat.c    |   16 +++++++---------
 5 files changed, 47 insertions(+), 61 deletions(-)

--- a/backend.c
+++ b/backend.c
@@ -223,12 +223,10 @@ static bool check_min_rate(struct thread_data *td, struct timespec *now)
 {
 	bool ret = false;
 
-	if (td->bytes_done[DDIR_READ])
-		ret |= __check_min_rate(td, now, DDIR_READ);
-	if (td->bytes_done[DDIR_WRITE])
-		ret |= __check_min_rate(td, now, DDIR_WRITE);
-	if (td->bytes_done[DDIR_TRIM])
-		ret |= __check_min_rate(td, now, DDIR_TRIM);
+	for_each_rw_ddir(ddir) {
+		if (td->bytes_done[ddir])
+			ret |= __check_min_rate(td, now, ddir);
+	}
 
 	return ret;
 }
@@ -1876,9 +1874,9 @@ static void *thread_main(void *data)
 
 	update_rusage_stat(td);
 	td->ts.total_run_time = mtime_since_now(&td->epoch);
-	td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
-	td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
-	td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+	for_each_rw_ddir(ddir) {
+		td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+	}
 
 	if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
 	    (td->o.verify != VERIFY_NONE && td_write(td)))
--- a/eta.c
+++ b/eta.c
@@ -383,8 +383,8 @@ bool calc_thread_status(struct jobs_eta *je, int force)
 	struct thread_data *td;
 	int i, unified_rw_rep;
 	uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
-	unsigned long long io_bytes[DDIR_RWDIR_CNT];
-	unsigned long long io_iops[DDIR_RWDIR_CNT];
+	unsigned long long io_bytes[DDIR_RWDIR_CNT] = {};
+	unsigned long long io_iops[DDIR_RWDIR_CNT] = {};
 	struct timespec now;
 
 	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
@@ -413,8 +413,6 @@ bool calc_thread_status(struct jobs_eta *je, int force)
 
 	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;
 
-	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
-	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
 	bw_avg_time = ULONG_MAX;
 	unified_rw_rep = 0;
 	for_each_td(td, i) {
@@ -509,9 +507,9 @@ bool calc_thread_status(struct jobs_eta *je, int force)
 		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
 				je->rate);
 		memcpy(&rate_prev_time, &now, sizeof(now));
-		add_agg_sample(sample_val(je->rate[DDIR_READ]), DDIR_READ, 0, 0);
-		add_agg_sample(sample_val(je->rate[DDIR_WRITE]), DDIR_WRITE, 0, 0);
-		add_agg_sample(sample_val(je->rate[DDIR_TRIM]), DDIR_TRIM, 0, 0);
+		for_each_rw_ddir(ddir) {
+			add_agg_sample(sample_val(je->rate[ddir]), ddir, 0, 0);
+		}
 	}
 
 	disp_time = mtime_since(&disp_prev_time, &now);
--- a/init.c
+++ b/init.c
@@ -564,13 +564,11 @@ static int setup_rate(struct thread_data *td)
 {
 	int ret = 0;
 
-	if (td->o.rate[DDIR_READ] || td->o.rate_iops[DDIR_READ])
-		ret = __setup_rate(td, DDIR_READ);
-	if (td->o.rate[DDIR_WRITE] || td->o.rate_iops[DDIR_WRITE])
-		ret |= __setup_rate(td, DDIR_WRITE);
-	if (td->o.rate[DDIR_TRIM] || td->o.rate_iops[DDIR_TRIM])
-		ret |= __setup_rate(td, DDIR_TRIM);
-
+	for_each_rw_ddir(ddir) {
+		if (td->o.rate[ddir] || td->o.rate_iops[ddir]) {
+			ret |= __setup_rate(td, ddir);
+		}
+	}
 	return ret;
 }
 
@@ -662,31 +660,25 @@ static int fixup_options(struct thread_data *td)
 	if (td_read(td))
 		o->overwrite = 1;
 
-	if (!o->min_bs[DDIR_READ])
-		o->min_bs[DDIR_READ] = o->bs[DDIR_READ];
-	if (!o->max_bs[DDIR_READ])
-		o->max_bs[DDIR_READ] = o->bs[DDIR_READ];
-	if (!o->min_bs[DDIR_WRITE])
-		o->min_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
-	if (!o->max_bs[DDIR_WRITE])
-		o->max_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
-	if (!o->min_bs[DDIR_TRIM])
-		o->min_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
-	if (!o->max_bs[DDIR_TRIM])
-		o->max_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
-
-	o->rw_min_bs = min(o->min_bs[DDIR_READ], o->min_bs[DDIR_WRITE]);
-	o->rw_min_bs = min(o->min_bs[DDIR_TRIM], o->rw_min_bs);
+	for_each_rw_ddir(ddir) {
+		if (!o->min_bs[ddir])
+			o->min_bs[ddir] = o->bs[ddir];
+		if (!o->max_bs[ddir])
+			o->max_bs[ddir] = o->bs[ddir];
+	}
+
+	o->rw_min_bs = -1;
+	for_each_rw_ddir(ddir) {
+		o->rw_min_bs = min(o->rw_min_bs, o->min_bs[ddir]);
+	}
 
 	/*
 	 * For random IO, allow blockalign offset other than min_bs.
 	 */
-	if (!o->ba[DDIR_READ] || !td_random(td))
-		o->ba[DDIR_READ] = o->min_bs[DDIR_READ];
-	if (!o->ba[DDIR_WRITE] || !td_random(td))
-		o->ba[DDIR_WRITE] = o->min_bs[DDIR_WRITE];
-	if (!o->ba[DDIR_TRIM] || !td_random(td))
-		o->ba[DDIR_TRIM] = o->min_bs[DDIR_TRIM];
+	for_each_rw_ddir(ddir) {
+		if (!o->ba[ddir] || !td_random(td))
+			o->ba[ddir] = o->min_bs[ddir];
+	}
 
 	if ((o->ba[DDIR_READ] != o->min_bs[DDIR_READ] ||
 	    o->ba[DDIR_WRITE] != o->min_bs[DDIR_WRITE] ||
@@ -765,14 +757,12 @@ static int fixup_options(struct thread_data *td)
 		log_err("fio: rate and rate_iops are mutually exclusive\n");
 		ret |= 1;
 	}
-	if ((o->rate[DDIR_READ] && (o->rate[DDIR_READ] < o->ratemin[DDIR_READ])) ||
-	    (o->rate[DDIR_WRITE] && (o->rate[DDIR_WRITE] < o->ratemin[DDIR_WRITE])) ||
-	    (o->rate[DDIR_TRIM] && (o->rate[DDIR_TRIM] < o->ratemin[DDIR_TRIM])) ||
-	    (o->rate_iops[DDIR_READ] && (o->rate_iops[DDIR_READ] < o->rate_iops_min[DDIR_READ])) ||
-	    (o->rate_iops[DDIR_WRITE] && (o->rate_iops[DDIR_WRITE] < o->rate_iops_min[DDIR_WRITE])) ||
-	    (o->rate_iops[DDIR_TRIM] && (o->rate_iops[DDIR_TRIM] < o->rate_iops_min[DDIR_TRIM]))) {
-		log_err("fio: minimum rate exceeds rate\n");
-		ret |= 1;
+	for_each_rw_ddir(ddir) {
+		if ((o->rate[ddir] && (o->rate[ddir] < o->ratemin[ddir])) ||
+		    (o->rate_iops[ddir] && (o->rate_iops[ddir] < o->rate_iops_min[ddir]))) {
+			log_err("fio: minimum rate exceeds rate, ddir %d\n", +ddir);
+			ret |= 1;
+		}
 	}
 
 	if (!o->timeout && o->time_based) {
--- a/io_ddir.h
+++ b/io_ddir.h
@@ -16,6 +16,8 @@ enum fio_ddir {
 	DDIR_RWDIR_SYNC_CNT = 4,
 };
 
+#define for_each_rw_ddir(ddir)	for (enum fio_ddir ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
+
 static inline const char *io_ddir_name(enum fio_ddir ddir)
 {
 	static const char *name[] = { "read", "write", "trim", "sync",
--- a/stat.c
+++ b/stat.c
@@ -1078,12 +1078,10 @@ static void show_thread_status_normal(struct thread_stat *ts,
 	if (strlen(ts->description))
 		log_buf(out, "  Description  : [%s]\n", ts->description);
 
-	if (ts->io_bytes[DDIR_READ])
-		show_ddir_status(rs, ts, DDIR_READ, out);
-	if (ts->io_bytes[DDIR_WRITE])
-		show_ddir_status(rs, ts, DDIR_WRITE, out);
-	if (ts->io_bytes[DDIR_TRIM])
-		show_ddir_status(rs, ts, DDIR_TRIM, out);
+	for_each_rw_ddir(ddir) {
+		if (ts->io_bytes[ddir])
+			show_ddir_status(rs, ts, ddir, out);
+	}
 
 	show_latencies(ts, out);
 
@@ -2315,9 +2313,9 @@ void __show_running_run_stats(void)
 
 	for_each_td(td, i) {
 		td->update_rusage = 1;
-		td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
-		td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
-		td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+		for_each_rw_ddir(ddir) {
+			td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+		}
 		td->ts.total_run_time = mtime_since(&td->epoch, &ts);
 
 		rt[i] = mtime_since(&td->start, &ts);



* Re: [PATCH v2] fio: add for_each_rw_ddir() macro
  2020-08-13 16:33   ` [PATCH v2] " Alexey Dobriyan
@ 2020-08-17  4:01     ` Jens Axboe
  0 siblings, 0 replies; 5+ messages in thread
From: Jens Axboe @ 2020-08-17  4:01 UTC (permalink / raw)
  To: Alexey Dobriyan; +Cc: fio, elliott

On 8/13/20 9:33 AM, Alexey Dobriyan wrote:
> Make it slightly easier to add Zone Append as a fully fledged I/O type.

Applied, thanks.

-- 
Jens Axboe



