All of lore.kernel.org
 help / color / mirror / Atom feed
* [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel
@ 2022-04-05 17:06 Edward Liaw via ltp
  2022-04-06  5:39 ` Petr Vorel
  2022-04-07  7:06 ` Li Wang
  0 siblings, 2 replies; 8+ messages in thread
From: Edward Liaw via ltp @ 2022-04-05 17:06 UTC (permalink / raw)
  To: ltp; +Cc: kernel-team

Hi, I'm working to get fzsync working with the Android kernel, which
does not have pthread_cancel available.

In the absence of pthread_cancel, when thread A exits due to a break,
thread B will get stuck in an infinite loop while waiting for thread A
to progress.

Instead of cancelling thread B, we can use the exit flag to break out of
thread B's loop.  This should also remove the need for the wrapper
around the thread.

Signed-off-by: Edward Liaw <edliaw@google.com>
---
 include/tst_fuzzy_sync.h            | 68 +++++++++++------------------
 lib/newlib_tests/tst_fuzzy_sync01.c |  7 +--
 lib/newlib_tests/tst_fuzzy_sync02.c |  7 +--
 3 files changed, 27 insertions(+), 55 deletions(-)

diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
index bc3450294..2c120f077 100644
--- a/include/tst_fuzzy_sync.h
+++ b/include/tst_fuzzy_sync.h
@@ -60,7 +60,6 @@
  */
 
 #include <math.h>
-#include <pthread.h>
 #include <stdbool.h>
 #include <stdlib.h>
 #include <sys/time.h>
@@ -233,36 +232,12 @@ static inline void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
 static inline void tst_fzsync_pair_cleanup(struct tst_fzsync_pair *pair)
 {
 	if (pair->thread_b) {
-		/* Revoke thread B if parent hits accidental break */
-		if (!pair->exit) {
-			tst_atomic_store(1, &pair->exit);
-			usleep(100000);
-			pthread_cancel(pair->thread_b);
-		}
+		tst_atomic_store(1, &pair->exit);
 		SAFE_PTHREAD_JOIN(pair->thread_b, NULL);
 		pair->thread_b = 0;
 	}
 }
 
-/** To store the run_b pointer and pass to tst_fzsync_thread_wrapper */
-struct tst_fzsync_run_thread {
-	void *(*func)(void *);
-	void *arg;
-};
-
-/**
- * Wrap run_b for tst_fzsync_pair_reset to enable pthread cancel
- * at the start of the thread B.
- */
-static inline void *tst_fzsync_thread_wrapper(void *run_thread)
-{
-       struct tst_fzsync_run_thread t = *(struct tst_fzsync_run_thread *)run_thread;
-
-       pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
-       pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
-       return t.func(t.arg);
-}
-
 /**
  * Zero some stat fields
  *
@@ -311,13 +286,8 @@ static inline void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
 	pair->a_cntr = 0;
 	pair->b_cntr = 0;
 	pair->exit = 0;
-	if (run_b) {
-		static struct tst_fzsync_run_thread wrap_run_b;
-
-		wrap_run_b.func = run_b;
-		wrap_run_b.arg = NULL;
-		SAFE_PTHREAD_CREATE(&pair->thread_b, 0, tst_fzsync_thread_wrapper, &wrap_run_b);
-	}
+	if (run_b)
+		SAFE_PTHREAD_CREATE(&pair->thread_b, 0, run_b, 0);
 
 	pair->exec_time_start = (float)tst_timeout_remaining();
 }
@@ -554,6 +524,7 @@ static inline void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
  * @param our_cntr The counter for the thread we are on
  * @param other_cntr The counter for the thread we are synchronising with
  * @param spins A pointer to the spin counter or NULL
+ * @param exit Exit flag when we need to break out of the wait loop
  *
  * Used by tst_fzsync_pair_wait_a(), tst_fzsync_pair_wait_b(),
  * tst_fzsync_start_race_a(), etc. If the calling thread is ahead of the other
@@ -566,6 +537,7 @@ static inline void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
 static inline void tst_fzsync_pair_wait(int *our_cntr,
 					int *other_cntr,
 					int *spins,
+					int *exit,
 					bool yield_in_wait)
 {
 	if (tst_atomic_inc(other_cntr) == INT_MAX) {
@@ -578,7 +550,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
 		 */
 		if (yield_in_wait) {
 			while (tst_atomic_load(our_cntr) > 0
-			       && tst_atomic_load(our_cntr) < INT_MAX) {
+			       && tst_atomic_load(our_cntr) < INT_MAX
+			       && !tst_atomic_load(exit)) {
 				if (spins)
 					(*spins)++;
 
@@ -586,7 +559,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
 			}
 		} else {
 			while (tst_atomic_load(our_cntr) > 0
-			       && tst_atomic_load(our_cntr) < INT_MAX) {
+			       && tst_atomic_load(our_cntr) < INT_MAX
+			       && !tst_atomic_load(exit)) {
 				if (spins)
 					(*spins)++;
 			}
@@ -599,10 +573,12 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
 		 * is restored and we can continue.
 		 */
 		if (yield_in_wait) {
-			while (tst_atomic_load(our_cntr) > 1)
+			while (tst_atomic_load(our_cntr) > 1
+			       && !tst_atomic_load(exit))
 				sched_yield();
 		} else {
-			while (tst_atomic_load(our_cntr) > 1)
+			while (tst_atomic_load(our_cntr) > 1
+			       && !tst_atomic_load(exit))
 				;
 		}
 	} else {
@@ -612,14 +588,16 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
 		 */
 		if (yield_in_wait) {
 			while (tst_atomic_load(our_cntr) <
-			       tst_atomic_load(other_cntr)) {
+			       tst_atomic_load(other_cntr)
+			       && !tst_atomic_load(exit)) {
 				if (spins)
 					(*spins)++;
 				sched_yield();
 			}
 		} else {
 			while (tst_atomic_load(our_cntr) <
-			       tst_atomic_load(other_cntr)) {
+			       tst_atomic_load(other_cntr)
+			       && !tst_atomic_load(exit)) {
 				if (spins)
 					(*spins)++;
 			}
@@ -635,7 +613,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
  */
 static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
 {
-	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, NULL, pair->yield_in_wait);
+	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr,
+	                     NULL, &pair->exit, pair->yield_in_wait);
 }
 
 /**
@@ -646,7 +625,8 @@ static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
  */
 static inline void tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
 {
-	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, NULL, pair->yield_in_wait);
+	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr,
+	                     NULL, &pair->exit, pair->yield_in_wait);
 }
 
 /**
@@ -758,7 +738,8 @@ static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
 static inline void tst_fzsync_end_race_a(struct tst_fzsync_pair *pair)
 {
 	tst_fzsync_time(&pair->a_end);
-	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, &pair->spins, pair->yield_in_wait);
+	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr,
+	                     &pair->spins, &pair->exit, pair->yield_in_wait);
 }
 
 /**
@@ -796,7 +777,8 @@ static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
 static inline void tst_fzsync_end_race_b(struct tst_fzsync_pair *pair)
 {
 	tst_fzsync_time(&pair->b_end);
-	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, &pair->spins, pair->yield_in_wait);
+	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr,
+	                     &pair->spins, &pair->exit, pair->yield_in_wait);
 }
 
 /**
diff --git a/lib/newlib_tests/tst_fuzzy_sync01.c b/lib/newlib_tests/tst_fuzzy_sync01.c
index ae3ea4e09..5f23a085b 100644
--- a/lib/newlib_tests/tst_fuzzy_sync01.c
+++ b/lib/newlib_tests/tst_fuzzy_sync01.c
@@ -182,15 +182,10 @@ static void *worker(void *v)
 static void run(unsigned int i)
 {
 	const struct window a = races[i].a;
-	struct tst_fzsync_run_thread wrap_run_b = {
-		.func = worker,
-		.arg = &i,
-	};
 	int cs, ct, r, too_early = 0, critical = 0, too_late = 0;
 
 	tst_fzsync_pair_reset(&pair, NULL);
-	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, tst_fzsync_thread_wrapper,
-			    &wrap_run_b);
+	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, worker, &i);
 
 	while (tst_fzsync_run_a(&pair)) {
 
diff --git a/lib/newlib_tests/tst_fuzzy_sync02.c b/lib/newlib_tests/tst_fuzzy_sync02.c
index 51075f3c3..c1c2a5327 100644
--- a/lib/newlib_tests/tst_fuzzy_sync02.c
+++ b/lib/newlib_tests/tst_fuzzy_sync02.c
@@ -125,16 +125,11 @@ static void run(unsigned int i)
 {
 	const struct window a = to_abs(races[i].a);
 	const struct window ad = to_abs(races[i].ad);
-	struct tst_fzsync_run_thread wrap_run_b = {
-		.func = worker,
-		.arg = &i,
-	};
 	int critical = 0;
 	int now, fin;
 
 	tst_fzsync_pair_reset(&pair, NULL);
-	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, tst_fzsync_thread_wrapper,
-			    &wrap_run_b);
+	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, worker, &i);
 
 	while (tst_fzsync_run_a(&pair)) {
 		c = 0;
-- 
2.35.1.1094.g7c7d902a7c-goog


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel
  2022-04-05 17:06 [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel Edward Liaw via ltp
@ 2022-04-06  5:39 ` Petr Vorel
  2022-04-07  7:06 ` Li Wang
  1 sibling, 0 replies; 8+ messages in thread
From: Petr Vorel @ 2022-04-06  5:39 UTC (permalink / raw)
  To: Edward Liaw, Richard Palethorpe; +Cc: kernel-team, ltp

Hi Richard,

could you please have look into this one?
A bug in it could severely affect CVE reproducibility.

Kind regards,
Petr

> Hi, I'm working to get fzsync working with the Android kernel, which
> does not have pthread_cancel available.

> In the absence of pthread_cancel, when thread A exits due to a break,
> thread B will get stuck in an infinite loop while waiting for thread A
> to progress.

> Instead of cancelling thread B, we can use the exit flag to break out of
> thread B's loop.  This should also remove the need for the wrapper
> around the thread.

> Signed-off-by: Edward Liaw <edliaw@google.com>
> ---
>  include/tst_fuzzy_sync.h            | 68 +++++++++++------------------
>  lib/newlib_tests/tst_fuzzy_sync01.c |  7 +--
>  lib/newlib_tests/tst_fuzzy_sync02.c |  7 +--
>  3 files changed, 27 insertions(+), 55 deletions(-)

> diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
> index bc3450294..2c120f077 100644
> --- a/include/tst_fuzzy_sync.h
> +++ b/include/tst_fuzzy_sync.h
> @@ -60,7 +60,6 @@
>   */

>  #include <math.h>
> -#include <pthread.h>
>  #include <stdbool.h>
>  #include <stdlib.h>
>  #include <sys/time.h>
> @@ -233,36 +232,12 @@ static inline void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
>  static inline void tst_fzsync_pair_cleanup(struct tst_fzsync_pair *pair)
>  {
>  	if (pair->thread_b) {
> -		/* Revoke thread B if parent hits accidental break */
> -		if (!pair->exit) {
> -			tst_atomic_store(1, &pair->exit);
> -			usleep(100000);
> -			pthread_cancel(pair->thread_b);
> -		}
> +		tst_atomic_store(1, &pair->exit);
>  		SAFE_PTHREAD_JOIN(pair->thread_b, NULL);
>  		pair->thread_b = 0;
>  	}
>  }

> -/** To store the run_b pointer and pass to tst_fzsync_thread_wrapper */
> -struct tst_fzsync_run_thread {
> -	void *(*func)(void *);
> -	void *arg;
> -};
> -
> -/**
> - * Wrap run_b for tst_fzsync_pair_reset to enable pthread cancel
> - * at the start of the thread B.
> - */
> -static inline void *tst_fzsync_thread_wrapper(void *run_thread)
> -{
> -       struct tst_fzsync_run_thread t = *(struct tst_fzsync_run_thread *)run_thread;
> -
> -       pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
> -       pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
> -       return t.func(t.arg);
> -}
> -
>  /**
>   * Zero some stat fields
>   *
> @@ -311,13 +286,8 @@ static inline void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
>  	pair->a_cntr = 0;
>  	pair->b_cntr = 0;
>  	pair->exit = 0;
> -	if (run_b) {
> -		static struct tst_fzsync_run_thread wrap_run_b;
> -
> -		wrap_run_b.func = run_b;
> -		wrap_run_b.arg = NULL;
> -		SAFE_PTHREAD_CREATE(&pair->thread_b, 0, tst_fzsync_thread_wrapper, &wrap_run_b);
> -	}
> +	if (run_b)
> +		SAFE_PTHREAD_CREATE(&pair->thread_b, 0, run_b, 0);

>  	pair->exec_time_start = (float)tst_timeout_remaining();
>  }
> @@ -554,6 +524,7 @@ static inline void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
>   * @param our_cntr The counter for the thread we are on
>   * @param other_cntr The counter for the thread we are synchronising with
>   * @param spins A pointer to the spin counter or NULL
> + * @param exit Exit flag when we need to break out of the wait loop
>   *
>   * Used by tst_fzsync_pair_wait_a(), tst_fzsync_pair_wait_b(),
>   * tst_fzsync_start_race_a(), etc. If the calling thread is ahead of the other
> @@ -566,6 +537,7 @@ static inline void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
>  static inline void tst_fzsync_pair_wait(int *our_cntr,
>  					int *other_cntr,
>  					int *spins,
> +					int *exit,
>  					bool yield_in_wait)
>  {
>  	if (tst_atomic_inc(other_cntr) == INT_MAX) {
> @@ -578,7 +550,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
>  		 */
>  		if (yield_in_wait) {
>  			while (tst_atomic_load(our_cntr) > 0
> -			       && tst_atomic_load(our_cntr) < INT_MAX) {
> +			       && tst_atomic_load(our_cntr) < INT_MAX
> +			       && !tst_atomic_load(exit)) {
>  				if (spins)
>  					(*spins)++;

> @@ -586,7 +559,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
>  			}
>  		} else {
>  			while (tst_atomic_load(our_cntr) > 0
> -			       && tst_atomic_load(our_cntr) < INT_MAX) {
> +			       && tst_atomic_load(our_cntr) < INT_MAX
> +			       && !tst_atomic_load(exit)) {
>  				if (spins)
>  					(*spins)++;
>  			}
> @@ -599,10 +573,12 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
>  		 * is restored and we can continue.
>  		 */
>  		if (yield_in_wait) {
> -			while (tst_atomic_load(our_cntr) > 1)
> +			while (tst_atomic_load(our_cntr) > 1
> +			       && !tst_atomic_load(exit))
>  				sched_yield();
>  		} else {
> -			while (tst_atomic_load(our_cntr) > 1)
> +			while (tst_atomic_load(our_cntr) > 1
> +			       && !tst_atomic_load(exit))
>  				;
>  		}
>  	} else {
> @@ -612,14 +588,16 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
>  		 */
>  		if (yield_in_wait) {
>  			while (tst_atomic_load(our_cntr) <
> -			       tst_atomic_load(other_cntr)) {
> +			       tst_atomic_load(other_cntr)
> +			       && !tst_atomic_load(exit)) {
>  				if (spins)
>  					(*spins)++;
>  				sched_yield();
>  			}
>  		} else {
>  			while (tst_atomic_load(our_cntr) <
> -			       tst_atomic_load(other_cntr)) {
> +			       tst_atomic_load(other_cntr)
> +			       && !tst_atomic_load(exit)) {
>  				if (spins)
>  					(*spins)++;
>  			}
> @@ -635,7 +613,8 @@ static inline void tst_fzsync_pair_wait(int *our_cntr,
>   */
>  static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
>  {
> -	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, NULL, pair->yield_in_wait);
> +	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr,
> +	                     NULL, &pair->exit, pair->yield_in_wait);
>  }

>  /**
> @@ -646,7 +625,8 @@ static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
>   */
>  static inline void tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
>  {
> -	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, NULL, pair->yield_in_wait);
> +	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr,
> +	                     NULL, &pair->exit, pair->yield_in_wait);
>  }

>  /**
> @@ -758,7 +738,8 @@ static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
>  static inline void tst_fzsync_end_race_a(struct tst_fzsync_pair *pair)
>  {
>  	tst_fzsync_time(&pair->a_end);
> -	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, &pair->spins, pair->yield_in_wait);
> +	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr,
> +	                     &pair->spins, &pair->exit, pair->yield_in_wait);
>  }

>  /**
> @@ -796,7 +777,8 @@ static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
>  static inline void tst_fzsync_end_race_b(struct tst_fzsync_pair *pair)
>  {
>  	tst_fzsync_time(&pair->b_end);
> -	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, &pair->spins, pair->yield_in_wait);
> +	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr,
> +	                     &pair->spins, &pair->exit, pair->yield_in_wait);
>  }

>  /**
> diff --git a/lib/newlib_tests/tst_fuzzy_sync01.c b/lib/newlib_tests/tst_fuzzy_sync01.c
> index ae3ea4e09..5f23a085b 100644
> --- a/lib/newlib_tests/tst_fuzzy_sync01.c
> +++ b/lib/newlib_tests/tst_fuzzy_sync01.c
> @@ -182,15 +182,10 @@ static void *worker(void *v)
>  static void run(unsigned int i)
>  {
>  	const struct window a = races[i].a;
> -	struct tst_fzsync_run_thread wrap_run_b = {
> -		.func = worker,
> -		.arg = &i,
> -	};
>  	int cs, ct, r, too_early = 0, critical = 0, too_late = 0;

>  	tst_fzsync_pair_reset(&pair, NULL);
> -	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, tst_fzsync_thread_wrapper,
> -			    &wrap_run_b);
> +	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, worker, &i);

>  	while (tst_fzsync_run_a(&pair)) {

> diff --git a/lib/newlib_tests/tst_fuzzy_sync02.c b/lib/newlib_tests/tst_fuzzy_sync02.c
> index 51075f3c3..c1c2a5327 100644
> --- a/lib/newlib_tests/tst_fuzzy_sync02.c
> +++ b/lib/newlib_tests/tst_fuzzy_sync02.c
> @@ -125,16 +125,11 @@ static void run(unsigned int i)
>  {
>  	const struct window a = to_abs(races[i].a);
>  	const struct window ad = to_abs(races[i].ad);
> -	struct tst_fzsync_run_thread wrap_run_b = {
> -		.func = worker,
> -		.arg = &i,
> -	};
>  	int critical = 0;
>  	int now, fin;

>  	tst_fzsync_pair_reset(&pair, NULL);
> -	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, tst_fzsync_thread_wrapper,
> -			    &wrap_run_b);
> +	SAFE_PTHREAD_CREATE(&pair.thread_b, 0, worker, &i);

>  	while (tst_fzsync_run_a(&pair)) {
>  		c = 0;

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel
  2022-04-05 17:06 [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel Edward Liaw via ltp
  2022-04-06  5:39 ` Petr Vorel
@ 2022-04-07  7:06 ` Li Wang
  2022-04-11  7:51   ` Richard Palethorpe
  1 sibling, 1 reply; 8+ messages in thread
From: Li Wang @ 2022-04-07  7:06 UTC (permalink / raw)
  To: Edward Liaw; +Cc: kernel-team, LTP List


[-- Attachment #1.1: Type: text/plain, Size: 2811 bytes --]

Hi Edward,

On Wed, Apr 6, 2022 at 1:06 AM Edward Liaw via ltp <ltp@lists.linux.it>
wrote:

> Hi, I'm working to get fzsync working with the Android kernel, which
> does not have pthread_cancel available.
>
> In the absence of pthread_cancel, when thread A exits due to a break,
> thread B will get stuck in an infinite loop while waiting for thread A
> to progress.
>
> Instead of cancelling thread B, we can use the exit flag to break out of
> thread B's loop.  This should also remove the need for the wrapper
> around the thread.
>


This method is more graceful, but involves a new potential issue.

Looking at tst_fzsync_run_a: if anything goes wrong elsewhere (in thread_b)
and it breaks out after setting 'pair->exit' to 1 to end the loop,
that doesn't work for thread_a, because tst_atomic_store(exit, &pair->exit)
will reset the flag back to 0 (since the local 'int exit = 0').

Another suggestion is to distinguish the abnormal invocation of
tst_fzsync_pair_cleanup; since that is a situation we rarely
encounter, there is no need to reset pair->exit again.

So better to have this improvement:

--- a/include/tst_fuzzy_sync.h
+++ b/include/tst_fuzzy_sync.h
@@ -232,7 +232,11 @@ static inline void tst_fzsync_pair_init(struct
tst_fzsync_pair *pair)
 static inline void tst_fzsync_pair_cleanup(struct tst_fzsync_pair *pair)
 {
        if (pair->thread_b) {
-               tst_atomic_store(1, &pair->exit);
+               /* Terminate thread B if parent hits accidental break */
+               if (!pair->exit) {
+                       tst_atomic_store(1, &pair->exit);
+                       usleep(100000);
+               }
                SAFE_PTHREAD_JOIN(pair->thread_b, NULL);
                pair->thread_b = 0;
        }
@@ -642,7 +646,6 @@ static inline void tst_fzsync_wait_b(struct
tst_fzsync_pair *pair)
  */
 static inline int tst_fzsync_run_a(struct tst_fzsync_pair *pair)
 {
-       int exit = 0;
        float rem_p = 1 - tst_timeout_remaining() / pair->exec_time_start;

        if ((pair->exec_time_p * SAMPLING_SLICE < rem_p)
@@ -657,19 +660,18 @@ static inline int tst_fzsync_run_a(struct
tst_fzsync_pair *pair)
        if (pair->exec_time_p < rem_p) {
                tst_res(TINFO,
                        "Exceeded execution time, requesting exit");
-               exit = 1;
+               tst_atomic_store(1, &pair->exit);
        }

        if (++pair->exec_loop > pair->exec_loops) {
                tst_res(TINFO,
                        "Exceeded execution loops, requesting exit");
-               exit = 1;
+               tst_atomic_store(1, &pair->exit);
        }

-       tst_atomic_store(exit, &pair->exit);
        tst_fzsync_wait_a(pair);

-       if (exit) {
+       if (pair->exit) {
                tst_fzsync_pair_cleanup(pair);
                return 0;
        }


-- 
Regards,
Li Wang

[-- Attachment #1.2: Type: text/html, Size: 4687 bytes --]

[-- Attachment #2: Type: text/plain, Size: 60 bytes --]


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel
  2022-04-07  7:06 ` Li Wang
@ 2022-04-11  7:51   ` Richard Palethorpe
  2022-04-11  9:01     ` Li Wang
  0 siblings, 1 reply; 8+ messages in thread
From: Richard Palethorpe @ 2022-04-11  7:51 UTC (permalink / raw)
  To: Li Wang; +Cc: kernel-team, ltp

Hello Li and Edward,

Sorry for slow response I was AFK.

Li Wang <liwang@redhat.com> writes:

> Hi Edward,
>
> On Wed, Apr 6, 2022 at 1:06 AM Edward Liaw via ltp <ltp@lists.linux.it> wrote:
>
>  Hi, I'm working to get fzsync working with the Android kernel, which
>  does not have pthread_cancel available.
>
>  In the absence of pthread_cancel, when thread A exits due to a break,
>  thread B will get stuck in an infinite loop while waiting for thread A
>  to progress.
>
>  Instead of cancelling thread B, we can use the exit flag to break out of
>  thread B's loop.  This should also remove the need for the wrapper
>  around the thread.
>
> This method is more graceful, but involves a new potential issue.
>
> Looking at tst_fzsync_run_a, if anything goes wrong in other places
> (thread_b) and break with setting 'pair->exit' to 1 to end the looping. 
> It doesn't work for thread_a because tst_atomic_store(exit, &pair->exit)
> will reset it back to 0 (int exit = 0).

I don't think we have ever handled thread B breaking gracefully?

If B breaks and it calls tst_fzsync_pair_cleanup then it will try to
join to itself and we will probably get EDEADLK.

In most cases though thread B is very simple and doesn't even use SAFE_
functions.

>
> Another suggestion is to distinguish the abnormal invoke for
> tst_fzsync_pair_cleanup, because that is rarely a situation we
> encounter, no need to reset pair->exit over again.
>
> So better to have this improvement:
>
> --- a/include/tst_fuzzy_sync.h
> +++ b/include/tst_fuzzy_sync.h
> @@ -232,7 +232,11 @@ static inline void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
>  static inline void tst_fzsync_pair_cleanup(struct tst_fzsync_pair *pair)
>  {
>         if (pair->thread_b) {
> -               tst_atomic_store(1, &pair->exit);
> +               /* Terminate thread B if parent hits accidental break */
> +               if (!pair->exit) {
> +                       tst_atomic_store(1, &pair->exit);
> +                       usleep(100000);

Why do we need usleep?

> +               }
>                 SAFE_PTHREAD_JOIN(pair->thread_b, NULL);
>                 pair->thread_b = 0;
>         }
> @@ -642,7 +646,6 @@ static inline void tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
>   */
>  static inline int tst_fzsync_run_a(struct tst_fzsync_pair *pair)
>  {
> -       int exit = 0;
>         float rem_p = 1 - tst_timeout_remaining() / pair->exec_time_start;
>  
>         if ((pair->exec_time_p * SAMPLING_SLICE < rem_p)
> @@ -657,19 +660,18 @@ static inline int tst_fzsync_run_a(struct tst_fzsync_pair *pair)
>         if (pair->exec_time_p < rem_p) {
>                 tst_res(TINFO,
>                         "Exceeded execution time, requesting exit");
> -               exit = 1;
> +               tst_atomic_store(1, &pair->exit);
>         }
>  
>         if (++pair->exec_loop > pair->exec_loops) {
>                 tst_res(TINFO,
>                         "Exceeded execution loops, requesting exit");
> -               exit = 1;
> +               tst_atomic_store(1, &pair->exit);
>         }
>  
> -       tst_atomic_store(exit, &pair->exit);
>         tst_fzsync_wait_a(pair);
>  
> -       if (exit) {
> +       if (pair->exit) {
>                 tst_fzsync_pair_cleanup(pair);
>                 return 0;
>         }

I think this part is reasonable however. Even if we don't handle B
breaking fully gracefully it is better to avoid setting exit back to
zero unecessarily and risking deadlock.

>
> -- 
> Regards,
> Li Wang


-- 
Thank you,
Richard.

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel
  2022-04-11  7:51   ` Richard Palethorpe
@ 2022-04-11  9:01     ` Li Wang
  2022-04-11  9:17       ` Richard Palethorpe
  0 siblings, 1 reply; 8+ messages in thread
From: Li Wang @ 2022-04-11  9:01 UTC (permalink / raw)
  To: Richard Palethorpe; +Cc: kernel-team, LTP List


[-- Attachment #1.1: Type: text/plain, Size: 916 bytes --]

Hi Richard, Edward,

On Mon, Apr 11, 2022 at 4:33 PM Richard Palethorpe <rpalethorpe@suse.de>
wrote:


> > Looking at tst_fzsync_run_a, if anything goes wrong in other places
> > (thread_b) and break with setting 'pair->exit' to 1 to end the looping.
> > It doesn't work for thread_a because tst_atomic_store(exit, &pair->exit)
> > will reset it back to 0 (int exit = 0).
>
> I don't think we have ever handled thread B breaking gracefully?
>

Right, that exist before Edward's patch :).



>
> If B breaks and it calls tst_fzsync_pair_cleanup then it will try to
> join to itself and we will probably get EDEADLK.
>

Exactly; maybe we could do something to make tst_fzsync_pair_cleanup
avoid joining to itself when the invocation comes from B?



> > +                       tst_atomic_store(1, &pair->exit);
> > +                       usleep(100000);
>
> Why do we need usleep?
>

Seems not very needed.


-- 
Regards,
Li Wang

[-- Attachment #1.2: Type: text/html, Size: 2128 bytes --]

[-- Attachment #2: Type: text/plain, Size: 60 bytes --]


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel
  2022-04-11  9:01     ` Li Wang
@ 2022-04-11  9:17       ` Richard Palethorpe
  2022-04-11  9:35         ` Li Wang
  0 siblings, 1 reply; 8+ messages in thread
From: Richard Palethorpe @ 2022-04-11  9:17 UTC (permalink / raw)
  To: Li Wang; +Cc: kernel-team, LTP List

Hello Li,

Li Wang <liwang@redhat.com> writes:

> Hi Richard, Edward,
>
> On Mon, Apr 11, 2022 at 4:33 PM Richard Palethorpe <rpalethorpe@suse.de> wrote:
>  
>  > Looking at tst_fzsync_run_a, if anything goes wrong in other places
>  > (thread_b) and break with setting 'pair->exit' to 1 to end the looping. 
>  > It doesn't work for thread_a because tst_atomic_store(exit, &pair->exit)
>  > will reset it back to 0 (int exit = 0).
>
>  I don't think we have ever handled thread B breaking gracefully?
>
> Right, that exist before Edward's patch :).
>
>  
>  
>  If B breaks and it calls tst_fzsync_pair_cleanup then it will try to
>  join to itself and we will probably get EDEADLK.
>
> Exactly, maybe do something to make tst_fzsync_pair_cleanup
> avoid joining to itself when the invoke come from B?

I suppose we could save thread A or B's TID and then check it. I think
that should be in a separate patch.

>
>   
>  > +                       tst_atomic_store(1, &pair->exit);
>  > +                       usleep(100000);
>
>  Why do we need usleep?
>
> Seems not very needed. 

+1

-- 
Thank you,
Richard.

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel
  2022-04-11  9:17       ` Richard Palethorpe
@ 2022-04-11  9:35         ` Li Wang
  2022-04-11 15:13           ` Edward Liaw via ltp
  0 siblings, 1 reply; 8+ messages in thread
From: Li Wang @ 2022-04-11  9:35 UTC (permalink / raw)
  To: Richard Palethorpe, Edward Liaw; +Cc: kernel-team, LTP List


[-- Attachment #1.1: Type: text/plain, Size: 690 bytes --]

On Mon, Apr 11, 2022 at 5:27 PM Richard Palethorpe <rpalethorpe@suse.de>
wrote:


> > Exactly, maybe do something to make tst_fzsync_pair_cleanup
> > avoid joining to itself when the invoke come from B?
>
> I suppose we could save thread A or B's TID and then check it. I think
> that should be in a seperate patch.
>

Agreed, feel free to send a patch, Richard.

>  > +                       tst_atomic_store(1, &pair->exit);
> >  > +                       usleep(100000);
> >
> >  Why do we need usleep?
> >
> > Seems not very needed.
>
> +1
>

@Edward,  It'd be appreciated if you could send a patch v2 based on
the comments above. Or do you have different thoughts?

-- 
Regards,
Li Wang

[-- Attachment #1.2: Type: text/html, Size: 1675 bytes --]

[-- Attachment #2: Type: text/plain, Size: 60 bytes --]


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel
  2022-04-11  9:35         ` Li Wang
@ 2022-04-11 15:13           ` Edward Liaw via ltp
  0 siblings, 0 replies; 8+ messages in thread
From: Edward Liaw via ltp @ 2022-04-11 15:13 UTC (permalink / raw)
  To: Li Wang; +Cc: kernel-team, LTP List


[-- Attachment #1.1: Type: text/plain, Size: 604 bytes --]

Hi Li, Richard,

On Mon, Apr 11, 2022, 2:36 AM Li Wang <liwang@redhat.com> wrote:

>
>
> On Mon, Apr 11, 2022 at 5:27 PM Richard Palethorpe <rpalethorpe@suse.de>
> wrote:
>
>> >  > +                       tst_atomic_store(1, &pair->exit);
>> >  > +                       usleep(100000);
>> >
>> >  Why do we need usleep?
>> >
>> > Seems not very needed.
>>
>> +1
>>
>
> @Edward,  It'd be appreciated if you can send a patch V2 base on
> the comments above. Or, do you have different thoughts?
>

Ok, will do!  I'm still new to using the mailing list so I wasn't sure how
to proceed :P

Thanks,
Edward

>

[-- Attachment #1.2: Type: text/html, Size: 1801 bytes --]

[-- Attachment #2: Type: text/plain, Size: 60 bytes --]


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2022-04-11 15:14 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-04-05 17:06 [LTP] [PATCH v1] fzsync: break inf loop with flag vs pthread_cancel Edward Liaw via ltp
2022-04-06  5:39 ` Petr Vorel
2022-04-07  7:06 ` Li Wang
2022-04-11  7:51   ` Richard Palethorpe
2022-04-11  9:01     ` Li Wang
2022-04-11  9:17       ` Richard Palethorpe
2022-04-11  9:35         ` Li Wang
2022-04-11 15:13           ` Edward Liaw via ltp

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.