* [LTP] [PATCH v3] futex_cmp_requeue01: cope with spurious wakeups
From: Jan Stancek @ 2019-12-11 12:37 UTC
  To: ltp

There are a couple of problems:

1. Keeping the same uaddr1 value across the requeue has a side-effect.
If there is a signal or spurious wakeup, the futex_wait() operation can
be restarted (by the kernel) with the same input parameters, which means
that a process already requeued to uaddr2 goes back to queueing on
uaddr1. This use case is currently not valid: uaddr1 is expected to
change its value, so that any late waiter notices the change and returns
with -EWOULDBLOCK (-EAGAIN). See the sketch after the list.

2. The test doesn't account for the possibility of spurious wakeups,
which can affect how many processes end up on futex0/futex1.
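
As a rough illustration of the FUTEX_WAIT contract the fix relies on
(a hypothetical sketch, not part of this patch; wait_on() is a made-up
helper): the kernel atomically compares *uaddr with the expected value
and refuses to sleep if they no longer match, so once uaddr1 has been
changed, any wait restarted with the stale expected value comes back to
userspace with EAGAIN instead of silently queueing on uaddr1 again.

  #include <errno.h>
  #include <time.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/futex.h>

  /* Hypothetical helper, not part of the patch or of futextest.h. */
  static int wait_on(int *uaddr, int expected, const struct timespec *timeout)
  {
          if (syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, timeout, NULL, 0) == -1) {
                  if (errno == EAGAIN)
                          return 1;  /* *uaddr != expected: waiter is bounced back */
                  return -1;         /* ETIMEDOUT, EINTR, ... */
          }
          return 0;                  /* woken by FUTEX_WAKE, or requeued and then woken */
  }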

Change the futex value before the requeue, so that child processes can
observe spurious wakeups.

Change the child process so that it always sleeps, guaranteeing that
TST_PROCESS_STATE_WAIT() always succeeds in finding it.

Change the default timeout to 5 seconds. Spawning 1000 processes on
slower systems was getting close to 1 second. This doesn't affect the
runtime in the PASS-ing case.

Count spurious wakeups and relax the test conditions they can affect.

Signed-off-by: Jan Stancek <jstancek@redhat.com>
---
 .../kernel/syscalls/futex/futex_cmp_requeue01.c    | 128 ++++++++++++++++-----
 1 file changed, 100 insertions(+), 28 deletions(-)

Changes in v3:
- following the discussion, also check the lower limits of requeued
  and woken-up processes.
  Expected range for num_requeues (inclusive):
    (tc->set_requeues - spurious, tc->set_requeues)
  Expected range for woken-up processes (inclusive):
    (tc->set_wakes - spurious, tc->set_wakes + spurious)
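
As a compact restatement of these bounds (illustration only, not part of
the diff below; the variable names match the patch and both ranges are
inclusive):

  int woken_up = TST_RET - num_requeues;

  /* spurious wakeups can only lower the number of requeued waiters */
  int requeues_ok = (num_requeues >= tc->set_requeues - spurious) &&
                    (num_requeues <= tc->set_requeues);

  /* ...but can shift the woken-up count in either direction */
  int wakes_ok = (woken_up >= tc->set_wakes - spurious) &&
                 (woken_up <= tc->set_wakes + spurious);

For example (hypothetical numbers), with set_requeues=900, set_wakes=100
and 3 spurious wakeups observed, 897..900 requeued and 97..103 woken-up
waiters would pass.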

diff --git a/testcases/kernel/syscalls/futex/futex_cmp_requeue01.c b/testcases/kernel/syscalls/futex/futex_cmp_requeue01.c
index f5264c2ba26f..a2e899b8d441 100644
--- a/testcases/kernel/syscalls/futex/futex_cmp_requeue01.c
+++ b/testcases/kernel/syscalls/futex/futex_cmp_requeue01.c
@@ -19,7 +19,14 @@
 #include "tst_test.h"
 #include "futextest.h"
 
-static futex_t *futexes;
+struct shared_data {
+	futex_t futexes[2];
+	int spurious;
+	int test_done;
+};
+
+static struct shared_data *sd;
+static int max_sleep_ms;
 
 static struct tcase {
 	int num_waiters;
@@ -37,24 +44,42 @@ static struct tcase {
 
 static void do_child(void)
 {
-	struct timespec usec = tst_ms_to_timespec(2000);
+	int slept_for_ms = 0;
+	struct timespec usec = tst_ms_to_timespec(max_sleep_ms);
 	int pid = getpid();
+	int ret = 0;
+
+	if (futex_wait(&sd->futexes[0], sd->futexes[0], &usec, 0) == -1) {
+		if (errno == EAGAIN) {
+			/* spurious wakeup or signal */
+			tst_atomic_inc(&sd->spurious);
+		} else {
+			tst_res(TFAIL | TERRNO, "process %d wasn't woken up",
+				pid);
+			ret = 1;
+		}
+	}
 
-	if (!futex_wait(&futexes[0], futexes[0], &usec, 0))
-		exit(0);
+	/* make sure TST_PROCESS_STATE_WAIT() can always succeed */
+	while (!tst_atomic_load(&sd->test_done)
+		&& (slept_for_ms < max_sleep_ms)) {
+		usleep(50000);
+		slept_for_ms += 50;
+	}
 
-	tst_res(TINFO | TERRNO, "process %d wasn't woken up", pid);
-	exit(1);
+	exit(ret);
 }
 
 static void verify_futex_cmp_requeue(unsigned int n)
 {
 	int num_requeues = 0, num_waits = 0, num_total = 0;
-	int i, status;
+	int i, status, spurious, woken_up;
 	struct tcase *tc = &tcases[n];
 	int pid[tc->num_waiters];
 	int exp_ret = tc->set_wakes + tc->set_requeues;
 
+	tst_atomic_store(0, &sd->spurious);
+	tst_atomic_store(0, &sd->test_done);
 	for (i = 0; i < tc->num_waiters; i++) {
 		pid[i] = SAFE_FORK();
 		if (!pid[i])
@@ -64,61 +89,108 @@ static void verify_futex_cmp_requeue(unsigned int n)
 	for (i = 0; i < tc->num_waiters; i++)
 		TST_PROCESS_STATE_WAIT(pid[i], 'S');
 
-	TEST(futex_cmp_requeue(&futexes[0], futexes[0], &futexes[1],
-	     tc->set_wakes, tc->set_requeues, 0));
-	if (TST_RET != exp_ret) {
-		tst_res(TFAIL, "futex_cmp_requeue() returned %ld, expected %d",
+	tst_res(TINFO, "Test %d: waiters: %d, wakes: %d, requeues: %d",
+		n, tc->num_waiters, tc->set_wakes, tc->set_requeues);
+
+	/*
+	 * change futex value, so any spurious wakeups or signals after
+	 * this point get bounced back to userspace.
+	 */
+	sd->futexes[0]++;
+	sd->futexes[1]++;
+
+	/*
+	 * Wakes up a maximum of tc->set_wakes waiters. tc->set_requeues
+	 * specifies an upper limit on the number of waiters that are requeued.
+	 * Returns the total number of waiters that were woken up or requeued.
+	 */
+	TEST(futex_cmp_requeue(&sd->futexes[0], sd->futexes[0], &sd->futexes[1],
+		tc->set_wakes, tc->set_requeues, 0));
+
+	/* Fail if more than requested wakes + requeues were returned */
+	if (TST_RET > exp_ret) {
+		tst_res(TFAIL, "futex_cmp_requeue() returned %ld, expected <= %d",
 			TST_RET, exp_ret);
+	} else {
+		tst_res(TINFO, "futex_cmp_requeue() returned %ld", TST_RET);
 	}
 
-	num_requeues = futex_wake(&futexes[1], tc->num_waiters, 0);
-	num_waits = futex_wake(&futexes[0], tc->num_waiters, 0);
+	num_requeues = futex_wake(&sd->futexes[1], tc->num_waiters, 0);
+	num_waits = futex_wake(&sd->futexes[0], tc->num_waiters, 0);
 
+	tst_atomic_store(1, &sd->test_done);
 	for (i = 0; i < tc->num_waiters; i++) {
 		SAFE_WAITPID(pid[i], &status, 0);
 		if (WIFEXITED(status) && !WEXITSTATUS(status))
 			num_total++;
 	}
 
+	spurious = tst_atomic_load(&sd->spurious);
+	tst_res(TINFO, "children woken, futex0: %d, futex1: %d, "
+		"spurious wakeups: %d",
+		num_waits, num_requeues, spurious);
+
+	/* Fail if any waiter timed out */
 	if (num_total != tc->num_waiters) {
 		tst_res(TFAIL, "%d waiters were not woken up normally",
 			tc->num_waiters - num_total);
 		return;
 	}
 
-	if (num_requeues != tc->set_requeues) {
+	/*
+	 * num_requeues should be in range:
+	 *     (tc->set_requeues - spurious, tc->set_requeues)
+	 *
+	 * Fewer processes than requested can be requeued at futex1
+	 * if some woke up spuriously. Finding more processes than
+	 * requested at futex1 is always a failure.
+	 */
+	if ((num_requeues < tc->set_requeues - spurious)
+		|| (num_requeues > tc->set_requeues)) {
 		tst_res(TFAIL,
-			"futex_cmp_requeue() requeues %d waiters, expected %d",
-			num_requeues, tc->set_requeues);
+			"requeued %d waiters, expected range: (%d, %d)",
+			num_requeues, tc->set_requeues - spurious,
+			tc->set_requeues);
 		return;
 	}
 
-	if (tc->num_waiters - num_requeues - num_waits != tc->set_wakes) {
+	/*
+	 * woken_up = (TST_RET - num_requeues) should be in range:
+	 *     (tc->set_wakes - spurious, tc->set_wakes + spurious)
+	 *
+	 * Fewer processes than requested can be woken up, if some of
+	 * them woke up spuriously before requeue. More processes than
+	 * requested may appear to be woken up, if some woke up
+	 * spuriously after requeue.
+	 */
+	woken_up = TST_RET - num_requeues;
+	if ((woken_up < tc->set_wakes - spurious)
+		|| (woken_up > tc->set_wakes + spurious)) {
 		tst_res(TFAIL,
-			"futex_cmp_requeue() woke up %d waiters, expected %d",
-			tc->num_waiters - num_requeues - num_waits,
-			tc->set_wakes);
+			"woken up %d, expected range (%d, %d)",
+			woken_up, tc->set_wakes - spurious,
+			tc->set_wakes + spurious);
 		return;
 	}
 
-	tst_res(TPASS,
-		"futex_cmp_requeue() woke up %d waiters and requeued %d waiters",
-		tc->set_wakes, tc->set_requeues);
+	tst_res(TPASS, "futex_cmp_requeue()");
 }
 
 static void setup(void)
 {
-	futexes = SAFE_MMAP(NULL, sizeof(futex_t) * 2, PROT_READ | PROT_WRITE,
+	max_sleep_ms = tst_multiply_timeout(5000);
+
+	sd = SAFE_MMAP(NULL, sizeof(*sd), PROT_READ | PROT_WRITE,
 			    MAP_ANONYMOUS | MAP_SHARED, -1, 0);
 
-	futexes[0] = FUTEX_INITIALIZER;
-	futexes[1] = FUTEX_INITIALIZER + 1;
+	sd->futexes[0] = FUTEX_INITIALIZER;
+	sd->futexes[1] = FUTEX_INITIALIZER + 1000;
 }
 
 static void cleanup(void)
 {
-	if (futexes)
-		SAFE_MUNMAP((void *)futexes, sizeof(futex_t) * 2);
+	if (sd)
+		SAFE_MUNMAP((void *)sd, sizeof(*sd));
 }
 
 static struct tst_test test = {
-- 
1.8.3.1


* [LTP] [PATCH v3] futex_cmp_requeue01: cope with spurious wakeups
From: Cyril Hrubis @ 2019-12-13 15:08 UTC
  To: ltp

Hi!
Reviewed-by: Cyril Hrubis <chrubis@suse.cz>

-- 
Cyril Hrubis
chrubis@suse.cz


* [LTP] [PATCH v3] futex_cmp_requeue01: cope with spurious wakeups
From: Jan Stancek @ 2019-12-16 12:53 UTC
  To: ltp



----- Original Message -----
> Hi!
> Reviewed-by: Cyril Hrubis <chrubis@suse.cz>

Pushed.

