All of lore.kernel.org
 help / color / mirror / Atom feed
* [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test
@ 2021-03-05 15:51 Richard Palethorpe
  2021-03-05 15:51 ` [LTP] [PATCH 1/6] fzsync: Add self test Richard Palethorpe
                   ` (6 more replies)
  0 siblings, 7 replies; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-05 15:51 UTC (permalink / raw)
  To: ltp

Hello,

As suggested by Cyril, I have created a validation test so that we do
not have to reproduce a range of different kernel bugs for each change
to Fuzzy Sync.

Also I have built on Leo Yu-Chi Liang's patch to introduce yield. It
seemed to work mostly fine as it was. However I added some extra bits
to be safe (hopefully).

Leo Yu-Chi Liang (1):
  fzsync: Add sched_yield for single core machine

Richard Palethorpe (5):
  fzsync: Add self test
  fzsync: Reset delay bias
  fzsync: Correctly print positive lower delay range bound
  fzsync: Move yield check out of loop and add yield to delay
  fzsync: Check processor affinity

 include/tst_fuzzy_sync.h            | 119 +++++++++++---
 lib/newlib_tests/.gitignore         |   1 +
 lib/newlib_tests/test16.c           |   2 +
 lib/newlib_tests/tst_fuzzy_sync01.c | 233 ++++++++++++++++++++++++++++
 4 files changed, 331 insertions(+), 24 deletions(-)
 create mode 100644 lib/newlib_tests/tst_fuzzy_sync01.c

-- 
2.30.1


^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 1/6] fzsync: Add self test
  2021-03-05 15:51 [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Richard Palethorpe
@ 2021-03-05 15:51 ` Richard Palethorpe
  2021-03-08 15:30   ` Cyril Hrubis
  2021-03-05 15:51 ` [LTP] [PATCH 2/6] fzsync: Reset delay bias Richard Palethorpe
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-05 15:51 UTC (permalink / raw)
  To: ltp

Add a validation test for the Fuzzy Sync library. It has a range of
data races which Fuzzy Sync must reproduce. This is much easier to
setup and run than reproducing real kernel bugs.

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
Suggested-by: Cyril Hrubis <chrubis@suse.cz>
---
 lib/newlib_tests/.gitignore         |   1 +
 lib/newlib_tests/test16.c           |   2 +
 lib/newlib_tests/tst_fuzzy_sync01.c | 233 ++++++++++++++++++++++++++++
 3 files changed, 236 insertions(+)
 create mode 100644 lib/newlib_tests/tst_fuzzy_sync01.c

diff --git a/lib/newlib_tests/.gitignore b/lib/newlib_tests/.gitignore
index 6c2612259..bb235f433 100644
--- a/lib/newlib_tests/.gitignore
+++ b/lib/newlib_tests/.gitignore
@@ -40,3 +40,4 @@ tst_bool_expr
 test_macros01
 test_macros02
 test_macros03
+tst_fuzzy_sync01
\ No newline at end of file
diff --git a/lib/newlib_tests/test16.c b/lib/newlib_tests/test16.c
index 0d74e1eae..7a9b5f20e 100644
--- a/lib/newlib_tests/test16.c
+++ b/lib/newlib_tests/test16.c
@@ -9,6 +9,8 @@
  * which they should take it in turns to update.
  */
 
+#define _GNU_SOURCE
+
 #include <stdlib.h>
 #include "tst_test.h"
 #include "tst_safe_pthread.h"
diff --git a/lib/newlib_tests/tst_fuzzy_sync01.c b/lib/newlib_tests/tst_fuzzy_sync01.c
new file mode 100644
index 000000000..22b78977a
--- /dev/null
+++ b/lib/newlib_tests/tst_fuzzy_sync01.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021 Richard Palethorpe <rpalethorpe@suse.com>
+ */
+#define _GNU_SOURCE
+
+#include "tst_test.h"
+#include "tst_fuzzy_sync.h"
+
+enum race_window_state {
+	/* We are in a race window which will cause behaviour that has
+	 * very different timings to the target. We must avoid these
+	 * race windows. */
+	TOO_EARLY,
+	TOO_LATE,
+	/* We are not in the target race window, but the timings of
+	 * this behaviour are similar to those of the target. */
+	EARLY_OR_LATE,
+	/* We are in the target race window */
+	CRITICAL,
+};
+
+enum wait_with {
+	SPIN,
+	SLEEP,
+};
+
+struct window {
+	const enum wait_with type;
+	const int time;
+};
+
+struct race_shape {
+	const struct window too_early;
+	const struct window early;
+	const struct window critical;
+	const struct window late;
+	const struct window too_late;
+};
+
+struct scenario {
+	const char *const name;
+	const struct race_shape a;
+	const struct race_shape b;
+};
+
+static struct tst_fzsync_pair pair;
+
+static volatile enum race_window_state state;
+
+static const struct scenario races[] = {
+	{ "No delay",
+	  {{SPIN, 0 }, {SPIN, 0 }, {SPIN, 100 }, {SPIN, 0 }, {SPIN, 0 }},
+	  {{SPIN, 0 }, {SPIN, 0 }, {SPIN, 100 }, {SPIN, 0 }, {SPIN, 0 }},
+	},
+	{ "Exit aligned",
+	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
+	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 1000 }, {SPIN, 10 }, {SPIN, 0 }},
+	},
+	{ "Entry aligned",
+	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SLEEP, 10 }, {SPIN, 0 }},
+	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SLEEP, 1 }, {SPIN, 0 }},
+	},
+	{ "delay A entry",
+	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
+	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SPIN, 1 }, {SPIN, 0 }},
+	},
+	{ "delay B entry",
+	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
+	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 1000 }, {SPIN, 1 }, {SPIN, 0 }},
+	},
+	{ "delay A exit",
+	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SLEEP, 10 }, {SPIN, 0 }},
+	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SPIN, 1 }, {SPIN, 0 }},
+	},
+	{ "delay B exit",
+	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
+	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SLEEP, 1 }, {SPIN, 0 }},
+	},
+	{ "too early",
+	  {{SPIN, 10 }, {SLEEP, 1 }, {SPIN, 100 }, {SPIN, 100 }, {SPIN, 0 }},
+	  {{SLEEP, 1 }, {SPIN, 1000 }, {SPIN, 1000 }, {SPIN, 10 }, {SPIN, 0 }},
+	},
+	{ "too late",
+	  {{SPIN, 10000 }, {SLEEP, 1 }, {SPIN, 100 }, {SPIN, 100 }, {SPIN, 10 }},
+	  {{SPIN, 0 }, {SPIN, 1000 }, {SPIN, 1000 }, {SPIN, 1000 }, {SPIN, 10 }},
+	},
+};
+
+static void cleanup(void)
+{
+	tst_fzsync_pair_cleanup(&pair);
+}
+
+static void setup(void)
+{
+	pair.min_samples = 10000;
+
+	tst_fzsync_pair_init(&pair);
+}
+
+static void delay(struct window w)
+{
+	struct timespec ts = { 0, w.time };
+	volatile int time = w.time;
+
+	if (pair.yield_in_wait)
+		time /= 100;
+
+	switch(w.type) {
+	case SPIN:
+		while (time--) {
+			if (pair.yield_in_wait)
+				sched_yield();
+		}
+		break;
+	case SLEEP:
+		nanosleep(&ts, NULL);
+		break;
+	}
+}
+
+static void *worker(void *v)
+{
+	unsigned int i = *(unsigned int *)v;
+	const struct race_shape *r = &races[i].b;
+
+	while (tst_fzsync_run_b(&pair)) {
+		state = EARLY_OR_LATE;
+
+		tst_fzsync_start_race_b(&pair);
+		if (r->too_early.time) {
+			state = TOO_EARLY;
+			delay(r->too_early);
+		}
+
+		state = EARLY_OR_LATE;
+		delay(r->early);
+
+		state = CRITICAL;
+		delay(r->critical);
+
+		state = EARLY_OR_LATE;
+		delay(r->late);
+
+		if (r->too_late.time) {
+			state = TOO_LATE;
+			delay(r->too_late);
+		}
+		tst_fzsync_end_race_b(&pair);
+	}
+
+	return NULL;
+}
+
+static void run(unsigned int i)
+{
+	const struct race_shape *r = &races[i].a;
+	struct tst_fzsync_run_thread wrap_run_b = {
+		.func = worker,
+		.arg = &i,
+	};
+	int too_early = 0, early_or_late = 0, critical = 0, too_late = 0;
+	enum race_window_state state_copy;
+
+
+	tst_fzsync_pair_reset(&pair, NULL);
+	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, tst_fzsync_thread_wrapper,
+			    &wrap_run_b);
+
+	while (tst_fzsync_run_a(&pair)) {
+		tst_fzsync_start_race_a(&pair);
+
+		delay(r->too_early);
+		state_copy = state;
+
+		switch(state_copy) {
+		case TOO_EARLY:
+		case TOO_LATE:
+			break;
+		default:
+			delay(r->early);
+			state_copy = state;
+		}
+
+		switch(state_copy) {
+		case TOO_EARLY:
+			too_early++;
+			break;
+		case EARLY_OR_LATE:
+			early_or_late++;
+			delay(r->late);
+			break;
+		case CRITICAL:
+			critical++;
+			delay(r->critical);
+			break;
+		case TOO_LATE:
+			too_late++;
+			delay(r->too_late);
+			break;
+		default:
+			tst_brk(TBROK, "enum is not atomic?");
+		}
+		tst_fzsync_end_race_a(&pair);
+
+		switch(state_copy) {
+		case TOO_EARLY:
+			tst_fzsync_pair_add_bias(&pair, -10);
+			break;
+		case TOO_LATE:
+			tst_fzsync_pair_add_bias(&pair, 10);
+			break;
+		default:
+			;
+		}
+
+		if (critical > 100) {
+			tst_fzsync_pair_cleanup(&pair);
+			break;
+		}
+	}
+
+	tst_res(critical ? TPASS : TFAIL, "%20s) =%-4d ~%-4d -%-4d +%-4d",
+		races[i].name, critical, early_or_late, too_early, too_late);
+}
+
+static struct tst_test test = {
+	.tcnt = ARRAY_SIZE(races),
+	.test = run,
+	.setup = setup,
+	.cleanup = cleanup,
+};
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 2/6] fzsync: Reset delay bias
  2021-03-05 15:51 [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Richard Palethorpe
  2021-03-05 15:51 ` [LTP] [PATCH 1/6] fzsync: Add self test Richard Palethorpe
@ 2021-03-05 15:51 ` Richard Palethorpe
  2021-03-08 14:16   ` Cyril Hrubis
  2021-03-05 15:51 ` [LTP] [PATCH 3/6] fzsync: Correctly print positive lower delay range bound Richard Palethorpe
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-05 15:51 UTC (permalink / raw)
  To: ltp

If the delay bias a preserved then it breaks tests which have multiple
scenarios and/or use -i. The test author could reset it manually in
this case, but it's likely to be error prone.

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 include/tst_fuzzy_sync.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
index 4141f5c64..b8841d96d 100644
--- a/include/tst_fuzzy_sync.h
+++ b/include/tst_fuzzy_sync.h
@@ -289,6 +289,7 @@ static void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
 	tst_init_stat(&pair->diff_ab);
 	tst_init_stat(&pair->spins_avg);
 	pair->delay = 0;
+	pair->delay_bias = 0;
 	pair->sampling = pair->min_samples;
 
 	pair->exec_loop = 0;
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 3/6] fzsync: Correctly print positive lower delay range bound
  2021-03-05 15:51 [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Richard Palethorpe
  2021-03-05 15:51 ` [LTP] [PATCH 1/6] fzsync: Add self test Richard Palethorpe
  2021-03-05 15:51 ` [LTP] [PATCH 2/6] fzsync: Reset delay bias Richard Palethorpe
@ 2021-03-05 15:51 ` Richard Palethorpe
  2021-03-08 14:18   ` Cyril Hrubis
  2021-03-05 15:51 ` [LTP] [PATCH 4/6] fzsync: Add sched_yield for single core machine Richard Palethorpe
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-05 15:51 UTC (permalink / raw)
  To: ltp

If the magnitude of delay_bias is large enough then it can turn the
lower bound positive.

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 include/tst_fuzzy_sync.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
index b8841d96d..4063e95cb 100644
--- a/include/tst_fuzzy_sync.h
+++ b/include/tst_fuzzy_sync.h
@@ -518,9 +518,9 @@ static void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
 			tst_res(TINFO,
 				"Reached deviation ratios < %.2f, introducing randomness",
 				pair->max_dev_ratio);
-			tst_res(TINFO, "Delay range is [-%d, %d]",
-				(int)(pair->diff_sb.avg / per_spin_time) + pair->delay_bias,
-				(int)(pair->diff_sa.avg / per_spin_time) - pair->delay_bias);
+			tst_res(TINFO, "Delay range is [%d, %d]",
+				-(int)(pair->diff_sb.avg / per_spin_time) + pair->delay_bias,
+				(int)(pair->diff_sa.avg / per_spin_time) + pair->delay_bias);
 			tst_fzsync_pair_info(pair);
 			pair->sampling = -1;
 		}
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 4/6] fzsync: Add sched_yield for single core machine
  2021-03-05 15:51 [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Richard Palethorpe
                   ` (2 preceding siblings ...)
  2021-03-05 15:51 ` [LTP] [PATCH 3/6] fzsync: Correctly print positive lower delay range bound Richard Palethorpe
@ 2021-03-05 15:51 ` Richard Palethorpe
  2021-03-05 15:51 ` [LTP] [PATCH 5/6] fzsync: Move yield check out of loop and add yield to delay Richard Palethorpe
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-05 15:51 UTC (permalink / raw)
  To: ltp

From: Leo Yu-Chi Liang <ycliang@andestech.com>

Fuzzy sync library uses spin waiting mechanism to implement thread
barrier behavior, which would cause this test to be time-consuming
on a single core machine.

Fix this by adding sched_yield in the spin waiting loop, so that the
thread yields cpu as soon as it enters the waiting loop.

Signed-off-by: Leo Yu-Chi Liang <ycliang@andestech.com>
---
 include/tst_fuzzy_sync.h | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
index 4063e95cb..5474f81e3 100644
--- a/include/tst_fuzzy_sync.h
+++ b/include/tst_fuzzy_sync.h
@@ -59,12 +59,15 @@
  * @sa tst_fzsync_pair
  */
 
-#include <sys/time.h>
-#include <time.h>
 #include <math.h>
-#include <stdlib.h>
 #include <pthread.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <time.h>
 #include "tst_atomic.h"
+#include "tst_cpu.h"
 #include "tst_timer.h"
 #include "tst_safe_pthread.h"
 
@@ -180,6 +183,15 @@ struct tst_fzsync_pair {
 	int exec_loop;
 	/** Internal; The second thread or 0 */
 	pthread_t thread_b;
+	/** 
+	 * Internal; The flag indicates single core machines or not
+	 * 
+	 * If running on single core machines, it would take considerable
+	 * amount of time to run fuzzy sync library.
+	 * Thus call sched_yield to give up cpu to decrease the test time.
+	 */
+	bool yield_in_wait;
+
 };
 
 #define CHK(param, low, hi, def) do {					      \
@@ -206,6 +218,7 @@ static void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
 	CHK(max_dev_ratio, 0, 1, 0.1);
 	CHK(exec_time_p, 0, 1, 0.5);
 	CHK(exec_loops, 20, INT_MAX, 3000000);
+	CHK(yield_in_wait, 0, 1, (tst_ncpus() <= 1));
 }
 #undef CHK
 
@@ -551,7 +564,8 @@ static void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
  */
 static inline void tst_fzsync_pair_wait(int *our_cntr,
 					int *other_cntr,
-					int *spins)
+					int *spins,
+					bool yield_in_wait)
 {
 	if (tst_atomic_inc(other_cntr) == INT_MAX) {
 		/*
@@ -565,6 +579,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
 		       && tst_atomic_load(our_cntr) < INT_MAX) {
 			if (spins)
 				(*spins)++;
+			if(yield_in_wait)
+				sched_yield();
 		}
 
 		tst_atomic_store(0, other_cntr);
@@ -582,6 +598,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
 		while (tst_atomic_load(our_cntr) < tst_atomic_load(other_cntr)) {
 			if (spins)
 				(*spins)++;
+			if(yield_in_wait)
+				sched_yield();
 		}
 	}
 }
@@ -594,7 +612,7 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
  */
 static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
 {
-	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, NULL);
+	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, NULL, pair->yield_in_wait);
 }
 
 /**
@@ -605,7 +623,7 @@ static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
  */
 static inline void tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
 {
-	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, NULL);
+	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, NULL, pair->yield_in_wait);
 }
 
 /**
@@ -710,7 +728,7 @@ static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
 static inline void tst_fzsync_end_race_a(struct tst_fzsync_pair *pair)
 {
 	tst_fzsync_time(&pair->a_end);
-	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, &pair->spins);
+	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, &pair->spins, pair->yield_in_wait);
 }
 
 /**
@@ -741,7 +759,7 @@ static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
 static inline void tst_fzsync_end_race_b(struct tst_fzsync_pair *pair)
 {
 	tst_fzsync_time(&pair->b_end);
-	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, &pair->spins);
+	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, &pair->spins, pair->yield_in_wait);
 }
 
 /**
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 5/6] fzsync: Move yield check out of loop and add yield to delay
  2021-03-05 15:51 [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Richard Palethorpe
                   ` (3 preceding siblings ...)
  2021-03-05 15:51 ` [LTP] [PATCH 4/6] fzsync: Add sched_yield for single core machine Richard Palethorpe
@ 2021-03-05 15:51 ` Richard Palethorpe
  2021-03-08 11:32   ` Leo Liang
  2021-03-08 14:48   ` Cyril Hrubis
  2021-03-05 15:51 ` [LTP] [PATCH 6/6] fzsync: Check processor affinity Richard Palethorpe
  2021-03-09 13:45 ` [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Petr Vorel
  6 siblings, 2 replies; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-05 15:51 UTC (permalink / raw)
  To: ltp

During my testing I found no difference between having the branch
inside the loop and outside. However looking at the generated
assembly, it definitely does perform the branch inside the loop. This
could have an effect on some platform with worse branch prediction. So
I have moved the branch outside of the loop.

Also I have added sched_yield to the delay loop. If we only have one
CPU then it is not delaying anything unless the other process can
progress.

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 include/tst_fuzzy_sync.h | 72 ++++++++++++++++++++++++++++++----------
 1 file changed, 54 insertions(+), 18 deletions(-)

diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
index 5474f81e3..36a604e13 100644
--- a/include/tst_fuzzy_sync.h
+++ b/include/tst_fuzzy_sync.h
@@ -183,9 +183,9 @@ struct tst_fzsync_pair {
 	int exec_loop;
 	/** Internal; The second thread or 0 */
 	pthread_t thread_b;
-	/** 
-	 * Internal; The flag indicates single core machines or not
-	 * 
+	/**
+	 * The flag indicates single core machines or not
+	 *
 	 * If running on single core machines, it would take considerable
 	 * amount of time to run fuzzy sync library.
 	 * Thus call sched_yield to give up cpu to decrease the test time.
@@ -575,31 +575,53 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
 		 * line above before doing that. If we are in rear position
 		 * then our counter may already have been set to zero.
 		 */
-		while (tst_atomic_load(our_cntr) > 0
-		       && tst_atomic_load(our_cntr) < INT_MAX) {
-			if (spins)
-				(*spins)++;
-			if(yield_in_wait)
+		if (yield_in_wait) {
+			while (tst_atomic_load(our_cntr) > 0
+			       && tst_atomic_load(our_cntr) < INT_MAX) {
+				if (spins)
+					(*spins)++;
+
 				sched_yield();
+			}
+		} else {
+			while (tst_atomic_load(our_cntr) > 0
+			       && tst_atomic_load(our_cntr) < INT_MAX) {
+				if (spins)
+					(*spins)++;
+			}
 		}
 
+
 		tst_atomic_store(0, other_cntr);
 		/*
 		 * Once both counters have been set to zero the invariant
 		 * is restored and we can continue.
 		 */
-		while (tst_atomic_load(our_cntr) > 1)
-			;
+		if (yield_in_wait) {
+			while (tst_atomic_load(our_cntr) > 1)
+				sched_yield();
+		} else {
+			while (tst_atomic_load(our_cntr) > 1)
+				;
+		}
 	} else {
 		/*
 		 * If our counter is less than the other thread's we are ahead
 		 * of it and need to wait.
 		 */
-		while (tst_atomic_load(our_cntr) < tst_atomic_load(other_cntr)) {
-			if (spins)
-				(*spins)++;
-			if(yield_in_wait)
+		if (yield_in_wait) {
+			while (tst_atomic_load(our_cntr) <
+			       tst_atomic_load(other_cntr)) {
+				if (spins)
+					(*spins)++;
 				sched_yield();
+			}
+		} else {
+			while (tst_atomic_load(our_cntr) <
+			       tst_atomic_load(other_cntr)) {
+				if (spins)
+					(*spins)++;
+			}
 		}
 	}
 }
@@ -713,8 +735,15 @@ static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
 	tst_fzsync_wait_a(pair);
 
 	delay = pair->delay;
-	while (delay < 0)
-		delay++;
+	if (pair->yield_in_wait) {
+		while (delay < 0) {
+			sched_yield();
+			delay++;
+		}
+	} else {
+		while (delay < 0)
+			delay++;
+	}
 
 	tst_fzsync_time(&pair->a_start);
 }
@@ -744,8 +773,15 @@ static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
 	tst_fzsync_wait_b(pair);
 
 	delay = pair->delay;
-	while (delay > 0)
-		delay--;
+	if (pair->yield_in_wait) {
+		while (delay > 0) {
+			sched_yield();
+			delay--;
+		}
+	} else {
+		while (delay > 0)
+			delay--;
+	}
 
 	tst_fzsync_time(&pair->b_start);
 }
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 6/6] fzsync: Check processor affinity
  2021-03-05 15:51 [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Richard Palethorpe
                   ` (4 preceding siblings ...)
  2021-03-05 15:51 ` [LTP] [PATCH 5/6] fzsync: Move yield check out of loop and add yield to delay Richard Palethorpe
@ 2021-03-05 15:51 ` Richard Palethorpe
  2021-03-08 11:33   ` Leo Liang
  2021-03-08 14:53   ` Cyril Hrubis
  2021-03-09 13:45 ` [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Petr Vorel
  6 siblings, 2 replies; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-05 15:51 UTC (permalink / raw)
  To: ltp

It is useful for testing Fuzzy Sync itself to set the CPU affinity to
a single core. The current process's affinity does not affect
tst_ncpus(), but we can get the affinity separately.

Note that checking this still does not guarantee we will use yield
when restricted to only one core. We would have to periodically probe
which CPUs threads are running on until we detect more than one CPU.

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 include/tst_fuzzy_sync.h | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
index 36a604e13..ea356ab44 100644
--- a/include/tst_fuzzy_sync.h
+++ b/include/tst_fuzzy_sync.h
@@ -59,9 +59,11 @@
  * @sa tst_fzsync_pair
  */
 
+#define _GNU_SOURCE
+
 #include <math.h>
 #include <pthread.h>
-#include <sched.h>
+#include "lapi/cpuset.h"
 #include <stdbool.h>
 #include <stdlib.h>
 #include <sys/time.h>
@@ -213,12 +215,26 @@ struct tst_fzsync_pair {
  */
 static void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
 {
+	long ncpus = tst_ncpus();
+#ifdef CPU_COUNT
+	size_t cpusz = CPU_ALLOC_SIZE(ncpus);
+	cpu_set_t *cpus = CPU_ALLOC(ncpus);
+
+	if (sched_getaffinity(0, cpusz, cpus)) {
+		tst_res(TWARN | TERRNO, "sched_getaffinity(0, %zu, %zx)",
+			cpusz, (size_t)cpus);
+	} else {
+		ncpus = CPU_COUNT(cpus);
+	}
+	free(cpus);
+#endif
+
 	CHK(avg_alpha, 0, 1, 0.25);
 	CHK(min_samples, 20, INT_MAX, 1024);
 	CHK(max_dev_ratio, 0, 1, 0.1);
 	CHK(exec_time_p, 0, 1, 0.5);
 	CHK(exec_loops, 20, INT_MAX, 3000000);
-	CHK(yield_in_wait, 0, 1, (tst_ncpus() <= 1));
+	CHK(yield_in_wait, 0, 1, (ncpus <= 1));
 }
 #undef CHK
 
-- 
2.30.1


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 5/6] fzsync: Move yield check out of loop and add yield to delay
  2021-03-05 15:51 ` [LTP] [PATCH 5/6] fzsync: Move yield check out of loop and add yield to delay Richard Palethorpe
@ 2021-03-08 11:32   ` Leo Liang
  2021-03-08 14:48   ` Cyril Hrubis
  1 sibling, 0 replies; 18+ messages in thread
From: Leo Liang @ 2021-03-08 11:32 UTC (permalink / raw)
  To: ltp

On Fri, Mar 05, 2021 at 11:51:22PM +0800, Richard Palethorpe wrote:
> During my testing I found no difference between having the branch
> inside the loop and outside. However looking at the generated
> assembly, it definitely does perform the branch inside the loop. This
> could have an effect on some platform with worse branch prediction. So
> I have moved the branch outside of the loop.
> 
> Also I have added sched_yield to the delay loop. If we only have one
> CPU then it is not delaying anything unless the other process can
> progress.
> 
> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
> ---
>  include/tst_fuzzy_sync.h | 72 ++++++++++++++++++++++++++++++----------
>  1 file changed, 54 insertions(+), 18 deletions(-)
> 
> diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
> index 5474f81e3..36a604e13 100644
> --- a/include/tst_fuzzy_sync.h
> +++ b/include/tst_fuzzy_sync.h
> @@ -183,9 +183,9 @@ struct tst_fzsync_pair {
>  	int exec_loop;
>  	/** Internal; The second thread or 0 */
>  	pthread_t thread_b;
> -	/** 
> -	 * Internal; The flag indicates single core machines or not
> -	 * 
> +	/**
> +	 * The flag indicates single core machines or not
> +	 *
>  	 * If running on single core machines, it would take considerable
>  	 * amount of time to run fuzzy sync library.
>  	 * Thus call sched_yield to give up cpu to decrease the test time.
> @@ -575,31 +575,53 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
>  		 * line above before doing that. If we are in rear position
>  		 * then our counter may already have been set to zero.
>  		 */
> -		while (tst_atomic_load(our_cntr) > 0
> -		       && tst_atomic_load(our_cntr) < INT_MAX) {
> -			if (spins)
> -				(*spins)++;
> -			if(yield_in_wait)
> +		if (yield_in_wait) {
> +			while (tst_atomic_load(our_cntr) > 0
> +			       && tst_atomic_load(our_cntr) < INT_MAX) {
> +				if (spins)
> +					(*spins)++;
> +
>  				sched_yield();
> +			}
> +		} else {
> +			while (tst_atomic_load(our_cntr) > 0
> +			       && tst_atomic_load(our_cntr) < INT_MAX) {
> +				if (spins)
> +					(*spins)++;
> +			}
>  		}
>  
> +
>  		tst_atomic_store(0, other_cntr);
>  		/*
>  		 * Once both counters have been set to zero the invariant
>  		 * is restored and we can continue.
>  		 */
> -		while (tst_atomic_load(our_cntr) > 1)
> -			;
> +		if (yield_in_wait) {
> +			while (tst_atomic_load(our_cntr) > 1)
> +				sched_yield();
> +		} else {
> +			while (tst_atomic_load(our_cntr) > 1)
> +				;
> +		}
>  	} else {
>  		/*
>  		 * If our counter is less than the other thread's we are ahead
>  		 * of it and need to wait.
>  		 */
> -		while (tst_atomic_load(our_cntr) < tst_atomic_load(other_cntr)) {
> -			if (spins)
> -				(*spins)++;
> -			if(yield_in_wait)
> +		if (yield_in_wait) {
> +			while (tst_atomic_load(our_cntr) <
> +			       tst_atomic_load(other_cntr)) {
> +				if (spins)
> +					(*spins)++;
>  				sched_yield();
> +			}
> +		} else {
> +			while (tst_atomic_load(our_cntr) <
> +			       tst_atomic_load(other_cntr)) {
> +				if (spins)
> +					(*spins)++;
> +			}
>  		}
>  	}
>  }
> @@ -713,8 +735,15 @@ static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
>  	tst_fzsync_wait_a(pair);
>  
>  	delay = pair->delay;
> -	while (delay < 0)
> -		delay++;
> +	if (pair->yield_in_wait) {
> +		while (delay < 0) {
> +			sched_yield();
> +			delay++;
> +		}
> +	} else {
> +		while (delay < 0)
> +			delay++;
> +	}
>  
>  	tst_fzsync_time(&pair->a_start);
>  }
> @@ -744,8 +773,15 @@ static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
>  	tst_fzsync_wait_b(pair);
>  
>  	delay = pair->delay;
> -	while (delay > 0)
> -		delay--;
> +	if (pair->yield_in_wait) {
> +		while (delay > 0) {
> +			sched_yield();
> +			delay--;
> +		}
> +	} else {
> +		while (delay > 0)
> +			delay--;
> +	}
>  
>  	tst_fzsync_time(&pair->b_start);
>  }
> -- 
> 2.30.1
>

Reviewed-by: Leo Yu-Chi Liang <ycliang@andestech.com>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 6/6] fzsync: Check processor affinity
  2021-03-05 15:51 ` [LTP] [PATCH 6/6] fzsync: Check processor affinity Richard Palethorpe
@ 2021-03-08 11:33   ` Leo Liang
  2021-03-08 14:53   ` Cyril Hrubis
  1 sibling, 0 replies; 18+ messages in thread
From: Leo Liang @ 2021-03-08 11:33 UTC (permalink / raw)
  To: ltp

On Fri, Mar 05, 2021 at 11:51:23PM +0800, Richard Palethorpe wrote:
> It is useful for testing Fuzzy Sync itself to set the CPU affinity to
> a single core. The current process's affinity does not affect
> tst_ncpus(), but we can get the affinity separately.
> 
> Note that checking this still does not guarantee we will use yield
> when restricted to only one core. We would have to periodically probe
> which CPUs threads are running on until we detect more than one CPU.
> 
> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
> ---
>  include/tst_fuzzy_sync.h | 20 ++++++++++++++++++--
>  1 file changed, 18 insertions(+), 2 deletions(-)
> 
> diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
> index 36a604e13..ea356ab44 100644
> --- a/include/tst_fuzzy_sync.h
> +++ b/include/tst_fuzzy_sync.h
> @@ -59,9 +59,11 @@
>   * @sa tst_fzsync_pair
>   */
>  
> +#define _GNU_SOURCE
> +
>  #include <math.h>
>  #include <pthread.h>
> -#include <sched.h>
> +#include "lapi/cpuset.h"
>  #include <stdbool.h>
>  #include <stdlib.h>
>  #include <sys/time.h>
> @@ -213,12 +215,26 @@ struct tst_fzsync_pair {
>   */
>  static void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
>  {
> +	long ncpus = tst_ncpus();
> +#ifdef CPU_COUNT
> +	size_t cpusz = CPU_ALLOC_SIZE(ncpus);
> +	cpu_set_t *cpus = CPU_ALLOC(ncpus);
> +
> +	if (sched_getaffinity(0, cpusz, cpus)) {
> +		tst_res(TWARN | TERRNO, "sched_getaffinity(0, %zu, %zx)",
> +			cpusz, (size_t)cpus);
> +	} else {
> +		ncpus = CPU_COUNT(cpus);
> +	}
> +	free(cpus);
> +#endif
> +
>  	CHK(avg_alpha, 0, 1, 0.25);
>  	CHK(min_samples, 20, INT_MAX, 1024);
>  	CHK(max_dev_ratio, 0, 1, 0.1);
>  	CHK(exec_time_p, 0, 1, 0.5);
>  	CHK(exec_loops, 20, INT_MAX, 3000000);
> -	CHK(yield_in_wait, 0, 1, (tst_ncpus() <= 1));
> +	CHK(yield_in_wait, 0, 1, (ncpus <= 1));
>  }
>  #undef CHK
>  
> -- 
> 2.30.1
>

Reviewed-by: Leo Yu-Chi Liang <ycliang@andestech.com>

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 2/6] fzsync: Reset delay bias
  2021-03-05 15:51 ` [LTP] [PATCH 2/6] fzsync: Reset delay bias Richard Palethorpe
@ 2021-03-08 14:16   ` Cyril Hrubis
  2021-03-08 14:50     ` Richard Palethorpe
  0 siblings, 1 reply; 18+ messages in thread
From: Cyril Hrubis @ 2021-03-08 14:16 UTC (permalink / raw)
  To: ltp

Hi!
> If the delay bias a preserved then it breaks tests which have multiple
                    ^
		    is?

> scenarios and/or use -i. The test author could reset it manually in
> this case, but it's likely to be error prone.
>
> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>

Other than that this looks OK.

Reviewed-by: Cyril Hrubis <chrubis@suse.cz>

> ---
>  include/tst_fuzzy_sync.h | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
> index 4141f5c64..b8841d96d 100644
> --- a/include/tst_fuzzy_sync.h
> +++ b/include/tst_fuzzy_sync.h
> @@ -289,6 +289,7 @@ static void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
>  	tst_init_stat(&pair->diff_ab);
>  	tst_init_stat(&pair->spins_avg);
>  	pair->delay = 0;
> +	pair->delay_bias = 0;
>  	pair->sampling = pair->min_samples;
>  
>  	pair->exec_loop = 0;
> -- 
> 2.30.1
> 
> 
> -- 
> Mailing list info: https://lists.linux.it/listinfo/ltp

-- 
Cyril Hrubis
chrubis@suse.cz

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 3/6] fzsync: Correctly print positive lower delay range bound
  2021-03-05 15:51 ` [LTP] [PATCH 3/6] fzsync: Correctly print positive lower delay range bound Richard Palethorpe
@ 2021-03-08 14:18   ` Cyril Hrubis
  0 siblings, 0 replies; 18+ messages in thread
From: Cyril Hrubis @ 2021-03-08 14:18 UTC (permalink / raw)
  To: ltp

Hi!
> If the magnitude of delay_bias is large enough then it can turn the
> lower bound positive.
> 
> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>

Reviewed-by: Cyril Hrubis <chrubis@suse.cz>

-- 
Cyril Hrubis
chrubis@suse.cz

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 5/6] fzsync: Move yield check out of loop and add yield to delay
  2021-03-05 15:51 ` [LTP] [PATCH 5/6] fzsync: Move yield check out of loop and add yield to delay Richard Palethorpe
  2021-03-08 11:32   ` Leo Liang
@ 2021-03-08 14:48   ` Cyril Hrubis
  1 sibling, 0 replies; 18+ messages in thread
From: Cyril Hrubis @ 2021-03-08 14:48 UTC (permalink / raw)
  To: ltp

Reviewed-by: Cyril Hrubis <chrubis@suse.cz>

-- 
Cyril Hrubis
chrubis@suse.cz

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 2/6] fzsync: Reset delay bias
  2021-03-08 14:16   ` Cyril Hrubis
@ 2021-03-08 14:50     ` Richard Palethorpe
  0 siblings, 0 replies; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-08 14:50 UTC (permalink / raw)
  To: ltp

Hello,

Cyril Hrubis <chrubis@suse.cz> writes:

> Hi!
>> If the delay bias a preserved then it breaks tests which have multiple
>                     ^
> 		    is?

Yup, typo.

>
>> scenarios and/or use -i. The test author could reset it manually in
>> this case, but it's likely to be error prone.
>>
>> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
>
> Other than that this looks OK.
>
> Reviewed-by: Cyril Hrubis <chrubis@suse.cz>
>
>> ---
>>  include/tst_fuzzy_sync.h | 1 +
>>  1 file changed, 1 insertion(+)
>> 
>> diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
>> index 4141f5c64..b8841d96d 100644
>> --- a/include/tst_fuzzy_sync.h
>> +++ b/include/tst_fuzzy_sync.h
>> @@ -289,6 +289,7 @@ static void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
>>  	tst_init_stat(&pair->diff_ab);
>>  	tst_init_stat(&pair->spins_avg);
>>  	pair->delay = 0;
>> +	pair->delay_bias = 0;
>>  	pair->sampling = pair->min_samples;
>>  
>>  	pair->exec_loop = 0;
>> -- 
>> 2.30.1
>> 
>> 
>> -- 
>> Mailing list info: https://lists.linux.it/listinfo/ltp


-- 
Thank you,
Richard.

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 6/6] fzsync: Check processor affinity
  2021-03-05 15:51 ` [LTP] [PATCH 6/6] fzsync: Check processor affinity Richard Palethorpe
  2021-03-08 11:33   ` Leo Liang
@ 2021-03-08 14:53   ` Cyril Hrubis
  2021-03-08 15:30     ` Richard Palethorpe
  1 sibling, 1 reply; 18+ messages in thread
From: Cyril Hrubis @ 2021-03-08 14:53 UTC (permalink / raw)
  To: ltp

Hi!
> It is useful for testing Fuzzy Sync itself to set the CPU affinity to
> a single core. The current processes affinity does not effect
                                                           ^
							 affect?
> tst_ncpus(), but we can get the affinity separately.
> 
> Note that checking this still does not guarantee we will use yield
> when restricted to only one core. We would have to periodically probe
> which CPUs threads are running on until we detect more than one CPU.
> 
> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
> ---
>  include/tst_fuzzy_sync.h | 20 ++++++++++++++++++--
>  1 file changed, 18 insertions(+), 2 deletions(-)
> 
> diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
> index 36a604e13..ea356ab44 100644
> --- a/include/tst_fuzzy_sync.h
> +++ b/include/tst_fuzzy_sync.h
> @@ -59,9 +59,11 @@
>   * @sa tst_fzsync_pair
>   */
>  
> +#define _GNU_SOURCE
> +
>  #include <math.h>
>  #include <pthread.h>
> -#include <sched.h>
> +#include "lapi/cpuset.h"
>  #include <stdbool.h>
>  #include <stdlib.h>
>  #include <sys/time.h>
> @@ -213,12 +215,26 @@ struct tst_fzsync_pair {
>   */
>  static void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
>  {
> +	long ncpus = tst_ncpus();
> +#ifdef CPU_COUNT
> +	size_t cpusz = CPU_ALLOC_SIZE(ncpus);
> +	cpu_set_t *cpus = CPU_ALLOC(ncpus);
> +
> +	if (sched_getaffinity(0, cpusz, cpus)) {
> +		tst_res(TWARN | TERRNO, "sched_getaffinity(0, %zu, %zx)",
> +			cpusz, (size_t)cpus);
> +	} else {
> +		ncpus = CPU_COUNT(cpus);
> +	}
> +	free(cpus);
> +#endif

Can we instead put this into the lib/tst_cpu.c and call it
tst_allowed_cpus() or something like this?

>  	CHK(avg_alpha, 0, 1, 0.25);
>  	CHK(min_samples, 20, INT_MAX, 1024);
>  	CHK(max_dev_ratio, 0, 1, 0.1);
>  	CHK(exec_time_p, 0, 1, 0.5);
>  	CHK(exec_loops, 20, INT_MAX, 3000000);
> -	CHK(yield_in_wait, 0, 1, (tst_ncpus() <= 1));
> +	CHK(yield_in_wait, 0, 1, (ncpus <= 1));
>  }
>  #undef CHK
>  
> -- 
> 2.30.1
> 
> 
> -- 
> Mailing list info: https://lists.linux.it/listinfo/ltp

-- 
Cyril Hrubis
chrubis@suse.cz

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 6/6] fzsync: Check processor affinity
  2021-03-08 14:53   ` Cyril Hrubis
@ 2021-03-08 15:30     ` Richard Palethorpe
  0 siblings, 0 replies; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-08 15:30 UTC (permalink / raw)
  To: ltp

Hello,

Cyril Hrubis <chrubis@suse.cz> writes:

> Hi!
>> It is useful for testing Fuzzy Sync itself to set the CPU affinity to
>> a single core. The current processes affinity does not effect
>                                                            ^
> 							 affect?

Yup.

>> tst_ncpus(), but we can get the affinity separately.
>> 
>> Note that checking this still does not guarantee we will use yield
>> when restricted to only one core. We would have to periodically probe
>> which CPUs threads are running on until we detect more than one CPU.
>> 
>> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
>> ---
>>  include/tst_fuzzy_sync.h | 20 ++++++++++++++++++--
>>  1 file changed, 18 insertions(+), 2 deletions(-)
>> 
>> diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
>> index 36a604e13..ea356ab44 100644
>> --- a/include/tst_fuzzy_sync.h
>> +++ b/include/tst_fuzzy_sync.h
>> @@ -59,9 +59,11 @@
>>   * @sa tst_fzsync_pair
>>   */
>>  
>> +#define _GNU_SOURCE
>> +
>>  #include <math.h>
>>  #include <pthread.h>
>> -#include <sched.h>
>> +#include "lapi/cpuset.h"
>>  #include <stdbool.h>
>>  #include <stdlib.h>
>>  #include <sys/time.h>
>> @@ -213,12 +215,26 @@ struct tst_fzsync_pair {
>>   */
>>  static void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
>>  {
>> +	long ncpus = tst_ncpus();
>> +#ifdef CPU_COUNT
>> +	size_t cpusz = CPU_ALLOC_SIZE(ncpus);
>> +	cpu_set_t *cpus = CPU_ALLOC(ncpus);
>> +
>> +	if (sched_getaffinity(0, cpusz, cpus)) {
>> +		tst_res(TWARN | TERRNO, "sched_getaffinity(0, %zu, %zx)",
>> +			cpusz, (size_t)cpus);
>> +	} else {
>> +		ncpus = CPU_COUNT(cpus);
>> +	}
>> +	free(cpus);
>> +#endif
>
> Can we instead put this into the lib/tst_cpu.c and call it
> tst_allowed_cpus() or something like this?

Yeah sure, I guess this will allow me to handle the GNU feature test
macro better as well.

>
>>  	CHK(avg_alpha, 0, 1, 0.25);
>>  	CHK(min_samples, 20, INT_MAX, 1024);
>>  	CHK(max_dev_ratio, 0, 1, 0.1);
>>  	CHK(exec_time_p, 0, 1, 0.5);
>>  	CHK(exec_loops, 20, INT_MAX, 3000000);
>> -	CHK(yield_in_wait, 0, 1, (tst_ncpus() <= 1));
>> +	CHK(yield_in_wait, 0, 1, (ncpus <= 1));
>>  }
>>  #undef CHK
>>  
>> -- 
>> 2.30.1
>> 
>> 
>> -- 
>> Mailing list info: https://lists.linux.it/listinfo/ltp


-- 
Thank you,
Richard.

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 1/6] fzsync: Add self test
  2021-03-05 15:51 ` [LTP] [PATCH 1/6] fzsync: Add self test Richard Palethorpe
@ 2021-03-08 15:30   ` Cyril Hrubis
  2021-03-08 16:18     ` Richard Palethorpe
  0 siblings, 1 reply; 18+ messages in thread
From: Cyril Hrubis @ 2021-03-08 15:30 UTC (permalink / raw)
  To: ltp

Hi!
> Add a validation test for the Fuzzy Sync library. It has a range of
> data races which Fuzzy Sync must reproduce. This is much easier to
> setup and run than reproducing real kernel bugs.
> 
> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
> Suggested-by: Cyril Hrubis <chrubis@suse.cz>
> ---
>  lib/newlib_tests/.gitignore         |   1 +
>  lib/newlib_tests/test16.c           |   2 +
>  lib/newlib_tests/tst_fuzzy_sync01.c | 233 ++++++++++++++++++++++++++++
>  3 files changed, 236 insertions(+)
>  create mode 100644 lib/newlib_tests/tst_fuzzy_sync01.c
> 
> diff --git a/lib/newlib_tests/.gitignore b/lib/newlib_tests/.gitignore
> index 6c2612259..bb235f433 100644
> --- a/lib/newlib_tests/.gitignore
> +++ b/lib/newlib_tests/.gitignore
> @@ -40,3 +40,4 @@ tst_bool_expr
>  test_macros01
>  test_macros02
>  test_macros03
> +tst_fuzzy_sync01
> \ No newline at end of file
 ^
 Missing newline...

> diff --git a/lib/newlib_tests/test16.c b/lib/newlib_tests/test16.c
> index 0d74e1eae..7a9b5f20e 100644
> --- a/lib/newlib_tests/test16.c
> +++ b/lib/newlib_tests/test16.c
> @@ -9,6 +9,8 @@
>   * which they should take it in turns to update.
>   */
>  
  ^
  Trailing whitespaces.

Can you please setup your $EDITOR to fix these two?

> +#define _GNU_SOURCE

Also I guess that we need that for sched_getaffinity(), in this case it
should have been part of the patch that adds it.

Also I would prefer to move to the sched_getaffinity() code to move to
the test library.

>  #include <stdlib.h>
>  #include "tst_test.h"
>  #include "tst_safe_pthread.h"
> diff --git a/lib/newlib_tests/tst_fuzzy_sync01.c b/lib/newlib_tests/tst_fuzzy_sync01.c
> new file mode 100644
> index 000000000..22b78977a
> --- /dev/null
> +++ b/lib/newlib_tests/tst_fuzzy_sync01.c
> @@ -0,0 +1,233 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/*
> + * Copyright (c) 2021 Richard Palethorpe <rpalethorpe@suse.com>
> + */
> +#define _GNU_SOURCE
> +
> +#include "tst_test.h"
> +#include "tst_fuzzy_sync.h"
> +
> +enum race_window_state {
> +	/* We are in a race window which will cause behaviour that has
> +	 * very different timings to the target. We must avoid these
> +	 * race windows. */
> +	TOO_EARLY,
> +	TOO_LATE,
> +	/* We are not in the target race window, but the timings of
> +	 * this behaviour are similar to those of the target. */
> +	EARLY_OR_LATE,
> +	/* We are in the target race window */
> +	CRITICAL,
> +};
> +
> +enum wait_with {
> +	SPIN,
> +	SLEEP,
> +};
> +
> +struct window {
> +	const enum wait_with type;
> +	const int time;
> +};
> +
> +struct race_shape {
> +	const struct window too_early;
> +	const struct window early;
> +	const struct window critical;
> +	const struct window late;
> +	const struct window too_late;
> +};
> +
> +struct scenario {
> +	const char *const name;
> +	const struct race_shape a;
> +	const struct race_shape b;
> +};
> +
> +static struct tst_fzsync_pair pair;
> +
> +static volatile enum race_window_state state;
> +
> +static const struct scenario races[] = {
> +	{ "No delay",
> +	  {{SPIN, 0 }, {SPIN, 0 }, {SPIN, 100 }, {SPIN, 0 }, {SPIN, 0 }},
> +	  {{SPIN, 0 }, {SPIN, 0 }, {SPIN, 100 }, {SPIN, 0 }, {SPIN, 0 }},
> +	},
> +	{ "Exit aligned",
> +	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
> +	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 1000 }, {SPIN, 10 }, {SPIN, 0 }},
> +	},
> +	{ "Entry aligned",
> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SLEEP, 10 }, {SPIN, 0 }},
> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SLEEP, 1 }, {SPIN, 0 }},
> +	},
> +	{ "delay A entry",
> +	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SPIN, 1 }, {SPIN, 0 }},
> +	},
> +	{ "delay B entry",
> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
> +	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 1000 }, {SPIN, 1 }, {SPIN, 0 }},
> +	},
> +	{ "delay A exit",
> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SLEEP, 10 }, {SPIN, 0 }},
> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SPIN, 1 }, {SPIN, 0 }},
> +	},
> +	{ "delay B exit",
> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SLEEP, 1 }, {SPIN, 0 }},
> +	},
> +	{ "too early",
> +	  {{SPIN, 10 }, {SLEEP, 1 }, {SPIN, 100 }, {SPIN, 100 }, {SPIN, 0 }},
> +	  {{SLEEP, 1 }, {SPIN, 1000 }, {SPIN, 1000 }, {SPIN, 10 }, {SPIN, 0 }},
> +	},
> +	{ "too late",
> +	  {{SPIN, 10000 }, {SLEEP, 1 }, {SPIN, 100 }, {SPIN, 100 }, {SPIN, 10 }},
> +	  {{SPIN, 0 }, {SPIN, 1000 }, {SPIN, 1000 }, {SPIN, 1000 }, {SPIN, 10 }},
> +	},
> +};
> +
> +static void cleanup(void)
> +{
> +	tst_fzsync_pair_cleanup(&pair);
> +}
> +
> +static void setup(void)
> +{
> +	pair.min_samples = 10000;
> +
> +	tst_fzsync_pair_init(&pair);
> +}
> +
> +static void delay(struct window w)
> +{
> +	struct timespec ts = { 0, w.time };
> +	volatile int time = w.time;
> +
> +	if (pair.yield_in_wait)
> +		time /= 100;
> +
> +	switch(w.type) {
> +	case SPIN:
> +		while (time--) {
> +			if (pair.yield_in_wait)
> +				sched_yield();
> +		}

Why do we yield here? Isn't the library supposed to do so now?

> +		break;
> +	case SLEEP:
> +		nanosleep(&ts, NULL);
> +		break;
> +	}
> +}
> +
> +static void *worker(void *v)
> +{
> +	unsigned int i = *(unsigned int *)v;
> +	const struct race_shape *r = &races[i].b;
> +
> +	while (tst_fzsync_run_b(&pair)) {
> +		state = EARLY_OR_LATE;
> +
> +		tst_fzsync_start_race_b(&pair);
> +		if (r->too_early.time) {
> +			state = TOO_EARLY;
> +			delay(r->too_early);
> +		}
> +
> +		state = EARLY_OR_LATE;
> +		delay(r->early);
> +
> +		state = CRITICAL;
> +		delay(r->critical);
> +
> +		state = EARLY_OR_LATE;
> +		delay(r->late);
> +
> +		if (r->too_late.time) {
> +			state = TOO_LATE;
> +			delay(r->too_late);
> +		}
> +		tst_fzsync_end_race_b(&pair);
> +	}

I guess that I get this part, the thread B is marching always the same,
changing the state and doing short sleeps.

> +	return NULL;
> +}
> +
> +static void run(unsigned int i)
> +{
> +	const struct race_shape *r = &races[i].a;
> +	struct tst_fzsync_run_thread wrap_run_b = {
> +		.func = worker,
> +		.arg = &i,
> +	};
> +	int too_early = 0, early_or_late = 0, critical = 0, too_late = 0;
> +	enum race_window_state state_copy;
> +
> +
> +	tst_fzsync_pair_reset(&pair, NULL);
> +	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, tst_fzsync_thread_wrapper,
> +			    &wrap_run_b);
> +
> +	while (tst_fzsync_run_a(&pair)) {
> +		tst_fzsync_start_race_a(&pair);
> +
> +		delay(r->too_early);
> +		state_copy = state;
> +
> +		switch(state_copy) {
> +		case TOO_EARLY:
> +		case TOO_LATE:
> +			break;
> +		default:
> +			delay(r->early);
> +			state_copy = state;
> +		}
> +
> +		switch(state_copy) {
> +		case TOO_EARLY:
> +			too_early++;
> +			break;
> +		case EARLY_OR_LATE:
> +			early_or_late++;
> +			delay(r->late);
> +			break;
> +		case CRITICAL:
> +			critical++;
> +			delay(r->critical);
> +			break;
> +		case TOO_LATE:
> +			too_late++;
> +			delay(r->too_late);
> +			break;
> +		default:
> +			tst_brk(TBROK, "enum is not atomic?");
> +		}
> +		tst_fzsync_end_race_a(&pair);

But here we do depend on the state as it has been set by thread B, I
guess that we depend on the fact that we are synchronized at
start_race_*() functions and we exit as soon as we know the result,
right?

Still I do wonder if this changes things and if it wouldn't be better to
march with the delays unconditionally and checking the state only at the
critical section?

> +		switch(state_copy) {
> +		case TOO_EARLY:
> +			tst_fzsync_pair_add_bias(&pair, -10);
> +			break;
> +		case TOO_LATE:
> +			tst_fzsync_pair_add_bias(&pair, 10);
> +			break;
> +		default:
> +			;
> +		}

And here we add bias unconditionaly, wouldn't that synchronize even if
fuzzy sync was partially broken? Shouldn't we add bias only for cases
that require it?

> +		if (critical > 100) {
> +			tst_fzsync_pair_cleanup(&pair);
> +			break;
> +		}
> +	}
> +
> +	tst_res(critical ? TPASS : TFAIL, "%20s) =%-4d ~%-4d -%-4d +%-4d",
> +		races[i].name, critical, early_or_late, too_early, too_late);
> +}
> +
> +static struct tst_test test = {
> +	.tcnt = ARRAY_SIZE(races),
> +	.test = run,
> +	.setup = setup,
> +	.cleanup = cleanup,
> +};
> -- 
> 2.30.1
> 

-- 
Cyril Hrubis
chrubis@suse.cz

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 1/6] fzsync: Add self test
  2021-03-08 15:30   ` Cyril Hrubis
@ 2021-03-08 16:18     ` Richard Palethorpe
  0 siblings, 0 replies; 18+ messages in thread
From: Richard Palethorpe @ 2021-03-08 16:18 UTC (permalink / raw)
  To: ltp


Cyril Hrubis <chrubis@suse.cz> writes:

> Hi!
>> Add a validation test for the Fuzzy Sync library. It has a range of
>> data races which Fuzzy Sync must reproduce. This is much easier to
>> setup and run than reproducing real kernel bugs.
>> 
>> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
>> Suggested-by: Cyril Hrubis <chrubis@suse.cz>
>> ---
>>  lib/newlib_tests/.gitignore         |   1 +
>>  lib/newlib_tests/test16.c           |   2 +
>>  lib/newlib_tests/tst_fuzzy_sync01.c | 233 ++++++++++++++++++++++++++++
>>  3 files changed, 236 insertions(+)
>>  create mode 100644 lib/newlib_tests/tst_fuzzy_sync01.c
>> 
>> diff --git a/lib/newlib_tests/.gitignore b/lib/newlib_tests/.gitignore
>> index 6c2612259..bb235f433 100644
>> --- a/lib/newlib_tests/.gitignore
>> +++ b/lib/newlib_tests/.gitignore
>> @@ -40,3 +40,4 @@ tst_bool_expr
>>  test_macros01
>>  test_macros02
>>  test_macros03
>> +tst_fuzzy_sync01
>> \ No newline at end of file
>  ^
>  Missing newline...

Ah sorry, I thought I had fixed the new line at EOF thing.

>
>> diff --git a/lib/newlib_tests/test16.c b/lib/newlib_tests/test16.c
>> index 0d74e1eae..7a9b5f20e 100644
>> --- a/lib/newlib_tests/test16.c
>> +++ b/lib/newlib_tests/test16.c
>> @@ -9,6 +9,8 @@
>>   * which they should take it in turns to update.
>>   */
>>  
>   ^
>   Trailing whitespaces.

I think this is just a context line which is not part of the diff. There
is no new line in the source file and it seems git-format-patch produces
it.

>
> Can you please setup your $EDITOR to fix these two?
>
>> +#define _GNU_SOURCE
>
> Also I guess that we need that for sched_getaffinity(), in this case it
> should have been part of the patch that adds it.
>
> Also I would prefer to move to the sched_getaffinity() code to move to
> the test library.
>
>>  #include <stdlib.h>
>>  #include "tst_test.h"
>>  #include "tst_safe_pthread.h"
>> diff --git a/lib/newlib_tests/tst_fuzzy_sync01.c b/lib/newlib_tests/tst_fuzzy_sync01.c
>> new file mode 100644
>> index 000000000..22b78977a
>> --- /dev/null
>> +++ b/lib/newlib_tests/tst_fuzzy_sync01.c
>> @@ -0,0 +1,233 @@
>> +// SPDX-License-Identifier: GPL-2.0-or-later
>> +/*
>> + * Copyright (c) 2021 Richard Palethorpe <rpalethorpe@suse.com>
>> + */
>> +#define _GNU_SOURCE
>> +
>> +#include "tst_test.h"
>> +#include "tst_fuzzy_sync.h"
>> +
>> +enum race_window_state {
>> +	/* We are in a race window which will cause behaviour that has
>> +	 * very different timings to the target. We must avoid these
>> +	 * race windows. */
>> +	TOO_EARLY,
>> +	TOO_LATE,
>> +	/* We are not in the target race window, but the timings of
>> +	 * this behaviour are similar to those of the target. */
>> +	EARLY_OR_LATE,
>> +	/* We are in the target race window */
>> +	CRITICAL,
>> +};
>> +
>> +enum wait_with {
>> +	SPIN,
>> +	SLEEP,
>> +};
>> +
>> +struct window {
>> +	const enum wait_with type;
>> +	const int time;
>> +};
>> +
>> +struct race_shape {
>> +	const struct window too_early;
>> +	const struct window early;
>> +	const struct window critical;
>> +	const struct window late;
>> +	const struct window too_late;
>> +};
>> +
>> +struct scenario {
>> +	const char *const name;
>> +	const struct race_shape a;
>> +	const struct race_shape b;
>> +};
>> +
>> +static struct tst_fzsync_pair pair;
>> +
>> +static volatile enum race_window_state state;
>> +
>> +static const struct scenario races[] = {
>> +	{ "No delay",
>> +	  {{SPIN, 0 }, {SPIN, 0 }, {SPIN, 100 }, {SPIN, 0 }, {SPIN, 0 }},
>> +	  {{SPIN, 0 }, {SPIN, 0 }, {SPIN, 100 }, {SPIN, 0 }, {SPIN, 0 }},
>> +	},
>> +	{ "Exit aligned",
>> +	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
>> +	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 1000 }, {SPIN, 10 }, {SPIN, 0 }},
>> +	},
>> +	{ "Entry aligned",
>> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SLEEP, 10 }, {SPIN, 0 }},
>> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SLEEP, 1 }, {SPIN, 0 }},
>> +	},
>> +	{ "delay A entry",
>> +	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
>> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SPIN, 1 }, {SPIN, 0 }},
>> +	},
>> +	{ "delay B entry",
>> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
>> +	  {{SPIN, 0 }, {SLEEP, 1 }, {SPIN, 1000 }, {SPIN, 1 }, {SPIN, 0 }},
>> +	},
>> +	{ "delay A exit",
>> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SLEEP, 10 }, {SPIN, 0 }},
>> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SPIN, 1 }, {SPIN, 0 }},
>> +	},
>> +	{ "delay B exit",
>> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 10 }, {SPIN, 10 }, {SPIN, 0 }},
>> +	  {{SPIN, 0 }, {SPIN, 1 }, {SPIN, 1000 }, {SLEEP, 1 }, {SPIN, 0 }},
>> +	},
>> +	{ "too early",
>> +	  {{SPIN, 10 }, {SLEEP, 1 }, {SPIN, 100 }, {SPIN, 100 }, {SPIN, 0 }},
>> +	  {{SLEEP, 1 }, {SPIN, 1000 }, {SPIN, 1000 }, {SPIN, 10 }, {SPIN, 0 }},
>> +	},
>> +	{ "too late",
>> +	  {{SPIN, 10000 }, {SLEEP, 1 }, {SPIN, 100 }, {SPIN, 100 }, {SPIN, 10 }},
>> +	  {{SPIN, 0 }, {SPIN, 1000 }, {SPIN, 1000 }, {SPIN, 1000 }, {SPIN, 10 }},
>> +	},
>> +};
>> +
>> +static void cleanup(void)
>> +{
>> +	tst_fzsync_pair_cleanup(&pair);
>> +}
>> +
>> +static void setup(void)
>> +{
>> +	pair.min_samples = 10000;
>> +
>> +	tst_fzsync_pair_init(&pair);
>> +}
>> +
>> +static void delay(struct window w)
>> +{
>> +	struct timespec ts = { 0, w.time };
>> +	volatile int time = w.time;
>> +
>> +	if (pair.yield_in_wait)
>> +		time /= 100;
>> +
>> +	switch(w.type) {
>> +	case SPIN:
>> +		while (time--) {
>> +			if (pair.yield_in_wait)
>> +				sched_yield();
>> +		}
>
> Why do we yield here? Isn't the library supposed to do so now?

Because on a single CPU spinning without yield is just like pausing
time. Time only moves when the "observer" has chance to update as
well. In this case the "observer" being the other thread.

>
>> +		break;
>> +	case SLEEP:
>> +		nanosleep(&ts, NULL);
>> +		break;
>> +	}
>> +}
>> +
>> +static void *worker(void *v)
>> +{
>> +	unsigned int i = *(unsigned int *)v;
>> +	const struct race_shape *r = &races[i].b;
>> +
>> +	while (tst_fzsync_run_b(&pair)) {
>> +		state = EARLY_OR_LATE;
>> +
>> +		tst_fzsync_start_race_b(&pair);
>> +		if (r->too_early.time) {
>> +			state = TOO_EARLY;
>> +			delay(r->too_early);
>> +		}
>> +
>> +		state = EARLY_OR_LATE;
>> +		delay(r->early);
>> +
>> +		state = CRITICAL;
>> +		delay(r->critical);
>> +
>> +		state = EARLY_OR_LATE;
>> +		delay(r->late);
>> +
>> +		if (r->too_late.time) {
>> +			state = TOO_LATE;
>> +			delay(r->too_late);
>> +		}
>> +		tst_fzsync_end_race_b(&pair);
>> +	}
>
> I guess that I get this part, the thread B is marching always the same,
> changing the state and doing short sleeps.
>
>> +	return NULL;
>> +}
>> +
>> +static void run(unsigned int i)
>> +{
>> +	const struct race_shape *r = &races[i].a;
>> +	struct tst_fzsync_run_thread wrap_run_b = {
>> +		.func = worker,
>> +		.arg = &i,
>> +	};
>> +	int too_early = 0, early_or_late = 0, critical = 0, too_late = 0;
>> +	enum race_window_state state_copy;
>> +
>> +
>> +	tst_fzsync_pair_reset(&pair, NULL);
>> +	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, tst_fzsync_thread_wrapper,
>> +			    &wrap_run_b);
>> +
>> +	while (tst_fzsync_run_a(&pair)) {
>> +		tst_fzsync_start_race_a(&pair);
>> +
>> +		delay(r->too_early);
>> +		state_copy = state;
>> +
>> +		switch(state_copy) {
>> +		case TOO_EARLY:
>> +		case TOO_LATE:
>> +			break;
>> +		default:
>> +			delay(r->early);
>> +			state_copy = state;
>> +		}
>> +
>> +		switch(state_copy) {
>> +		case TOO_EARLY:
>> +			too_early++;
>> +			break;
>> +		case EARLY_OR_LATE:
>> +			early_or_late++;
>> +			delay(r->late);
>> +			break;
>> +		case CRITICAL:
>> +			critical++;
>> +			delay(r->critical);
>> +			break;
>> +		case TOO_LATE:
>> +			too_late++;
>> +			delay(r->too_late);
>> +			break;
>> +		default:
>> +			tst_brk(TBROK, "enum is not atomic?");
>> +		}
>> +		tst_fzsync_end_race_a(&pair);
>
> But here we do depend on the state as it has been set by thread B, I
> guess that we depend on the fact that we are synchronized at
> start_race_*() functions and we exit as soon as we know the result,
> right?

The state is read twice, once to check for the special TOO_EARLY or
TOO_LATE states. Then we check if we hit the critical section or not.

The TOO_EARLY and TOO_LATE states change our behavior, so that we delay
for a shorter period of time. This ruins the timing measurements for A
and is what add_bias is supposed to fix.

I probably should split this into two separate tests. One for races that
don't require a bias and another for one which do. Also I will try to
make it clear what the test is doing. It probably needs thinking through
more.

>
> Still I do wonder if this changes things and if it wouldn't be better to
> march with the delays unconditionally and checking the state only at the
> critical section?
>
>> +		switch(state_copy) {
>> +		case TOO_EARLY:
>> +			tst_fzsync_pair_add_bias(&pair, -10);
>> +			break;
>> +		case TOO_LATE:
>> +			tst_fzsync_pair_add_bias(&pair, 10);
>> +			break;
>> +		default:
>> +			;
>> +		}
>
> And here we add bias unconditionaly, wouldn't that synchronize even if
> fuzzy sync was partially broken? Shouldn't we add bias only for cases
> that require it?

Only some tests produce the TOO_EARLY and TOO_LATE states.

>
>> +		if (critical > 100) {
>> +			tst_fzsync_pair_cleanup(&pair);
>> +			break;
>> +		}
>> +	}
>> +
>> +	tst_res(critical ? TPASS : TFAIL, "%20s) =%-4d ~%-4d -%-4d +%-4d",
>> +		races[i].name, critical, early_or_late, too_early, too_late);
>> +}
>> +
>> +static struct tst_test test = {
>> +	.tcnt = ARRAY_SIZE(races),
>> +	.test = run,
>> +	.setup = setup,
>> +	.cleanup = cleanup,
>> +};
>> -- 
>> 2.30.1
>> 


-- 
Thank you,
Richard.

^ permalink raw reply	[flat|nested] 18+ messages in thread

* [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test
  2021-03-05 15:51 [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Richard Palethorpe
                   ` (5 preceding siblings ...)
  2021-03-05 15:51 ` [LTP] [PATCH 6/6] fzsync: Check processor affinity Richard Palethorpe
@ 2021-03-09 13:45 ` Petr Vorel
  6 siblings, 0 replies; 18+ messages in thread
From: Petr Vorel @ 2021-03-09 13:45 UTC (permalink / raw)
  To: ltp

Hi Richie,

1st patch has error:
tst_fuzzy_sync01.c: In function ‘delay’:
tst_fuzzy_sync01.c:107:10: error: ‘struct tst_fzsync_pair’ has no member named ‘yield_in_wait’
  107 |  if (pair.yield_in_wait)
      |          ^
tst_fuzzy_sync01.c:113:12: error: ‘struct tst_fzsync_pair’ has no member named ‘yield_in_wait’
  113 |    if (pair.yield_in_wait)
      |            ^
make[1]: *** [../../include/mk/rules.mk:37: tst_fuzzy_sync01] Error 1

4th patch (Leo Yu-Chi Liang's) patch has warnings:
In file included from tst_fuzzy_sync01.c:8:
../../include/tst_fuzzy_sync.h: In function ‘tst_fzsync_pair_init’:
../../include/tst_fuzzy_sync.h:199:18: warning: comparison of constant ‘0’ with boolean expression is always false [-Wbool-compare]
  199 |  if (pair->param < low)            \
      |                  ^
../../include/tst_fuzzy_sync.h:221:2: note: in expansion of macro ‘CHK’
  221 |  CHK(yield_in_wait, 0, 1, (tst_ncpus() <= 1));
      |  ^~~
../../include/tst_fuzzy_sync.h:201:18: warning: comparison of constant ‘1’ with boolean expression is always false [-Wbool-compare]
  201 |  if (pair->param > hi)            \
      |                  ^
../../include/tst_fuzzy_sync.h:221:2: note: in expansion of macro ‘CHK’
  221 |  CHK(yield_in_wait, 0, 1, (tst_ncpus() <= 1));


Apart from that whole patchset LGTM, agree with Cyril's suggestions.

Kind regards,
Petr

^ permalink raw reply	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2021-03-09 13:45 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-03-05 15:51 [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Richard Palethorpe
2021-03-05 15:51 ` [LTP] [PATCH 1/6] fzsync: Add self test Richard Palethorpe
2021-03-08 15:30   ` Cyril Hrubis
2021-03-08 16:18     ` Richard Palethorpe
2021-03-05 15:51 ` [LTP] [PATCH 2/6] fzsync: Reset delay bias Richard Palethorpe
2021-03-08 14:16   ` Cyril Hrubis
2021-03-08 14:50     ` Richard Palethorpe
2021-03-05 15:51 ` [LTP] [PATCH 3/6] fzsync: Correctly print positive lower delay range bound Richard Palethorpe
2021-03-08 14:18   ` Cyril Hrubis
2021-03-05 15:51 ` [LTP] [PATCH 4/6] fzsync: Add sched_yield for single core machine Richard Palethorpe
2021-03-05 15:51 ` [LTP] [PATCH 5/6] fzsync: Move yield check out of loop and add yield to delay Richard Palethorpe
2021-03-08 11:32   ` Leo Liang
2021-03-08 14:48   ` Cyril Hrubis
2021-03-05 15:51 ` [LTP] [PATCH 6/6] fzsync: Check processor affinity Richard Palethorpe
2021-03-08 11:33   ` Leo Liang
2021-03-08 14:53   ` Cyril Hrubis
2021-03-08 15:30     ` Richard Palethorpe
2021-03-09 13:45 ` [LTP] [PATCH 0/6] Fuzzy Sync yielding and validation test Petr Vorel

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.