* [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins
@ 2017-08-28 11:02 Richard Palethorpe
  2017-08-28 11:02 ` [LTP] [PATCH v2 2/7] tst_atomic: Add atomic store and load tests Richard Palethorpe
                   ` (6 more replies)
  0 siblings, 7 replies; 10+ messages in thread
From: Richard Palethorpe @ 2017-08-28 11:02 UTC (permalink / raw)
  To: ltp

Also add more fallback inline assembly for the existing architectures, add
support for SPARC64, and use the __atomic_* compiler intrinsics when available.
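
For illustration, the new helpers are intended to be used roughly like
this (a minimal sketch; the flag variable and both functions are
hypothetical examples, not part of the patch):

#include "tst_atomic.h"

static int flag;

/* Writer: raise the flag; prior writes become globally visible with it */
static void publish(void)
{
	tst_atomic_store(1, &flag);
}

/* Reader: spin until the writer's store is visible */
static void consume(void)
{
	while (!tst_atomic_load(&flag))
		;
}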

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 configure.ac             |   1 +
 include/tst_atomic.h     | 213 ++++++++++++++++++++++++++++++++++++++++-------
 include/tst_fuzzy_sync.h |   1 +
 m4/ltp-atomic.m4         |  34 ++++++++
 4 files changed, 220 insertions(+), 29 deletions(-)
 create mode 100644 m4/ltp-atomic.m4

diff --git a/configure.ac b/configure.ac
index 23e583dd8..e4f26fe88 100644
--- a/configure.ac
+++ b/configure.ac
@@ -194,5 +194,6 @@ LTP_CHECK_BUILTIN_CLEAR_CACHE
 LTP_CHECK_MMSGHDR
 LTP_CHECK_UNAME_DOMAINNAME
 LTP_CHECK_X_TABLES
+LTP_CHECK_ATOMIC_MEMORY_MODEL
 
 AC_OUTPUT
diff --git a/include/tst_atomic.h b/include/tst_atomic.h
index 35a3b3411..bd4205153 100644
--- a/include/tst_atomic.h
+++ b/include/tst_atomic.h
@@ -20,12 +20,45 @@
 
 #include "config.h"
 
-#if HAVE_SYNC_ADD_AND_FETCH == 1
+#if HAVE_ATOMIC_MEMORY_MODEL == 1
+static inline int tst_atomic_add_return(int i, int *v)
+{
+	return __atomic_add_fetch(v, i, __ATOMIC_SEQ_CST);
+}
+
+static inline int tst_atomic_load(int *v)
+{
+	return __atomic_load_n(v, __ATOMIC_SEQ_CST);
+}
+
+static inline void tst_atomic_store(int i, int *v)
+{
+	__atomic_store_n(v, i, __ATOMIC_SEQ_CST);
+}
+
+#elif HAVE_SYNC_ADD_AND_FETCH == 1
 static inline int tst_atomic_add_return(int i, int *v)
 {
 	return __sync_add_and_fetch(v, i);
 }
 
+static inline int tst_atomic_load(int *v)
+{
+	int ret;
+
+	__sync_synchronize();
+	ret = *v;
+	__sync_synchronize();
+	return ret;
+}
+
+static inline void tst_atomic_store(int i, int *v)
+{
+	__sync_synchronize();
+	*v = i;
+	__sync_synchronize();
+}
+
 #elif defined(__i386__) || defined(__x86_64__)
 static inline int tst_atomic_add_return(int i, int *v)
 {
@@ -33,36 +66,31 @@ static inline int tst_atomic_add_return(int i, int *v)
 
 	/*
 	 * taken from arch/x86/include/asm/cmpxchg.h
-	 * Since we always pass int sized parameter, we can simplify it
-	 * and cherry-pick only that specific case.
-	 *
-	switch (sizeof(*v)) {
-	case 1:
-		asm volatile ("lock; xaddb %b0, %1\n"
-			: "+q" (__ret), "+m" (*v) : : "memory", "cc");
-		break;
-	case 2:
-		asm volatile ("lock; xaddw %w0, %1\n"
-			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
-		break;
-	case 4:
-		asm volatile ("lock; xaddl %0, %1\n"
-			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
-		break;
-	case 8:
-		asm volatile ("lock; xaddq %q0, %1\n"
-			: "+r" (__ret), "+m" (*v) : : "memory", "cc");
-		break;
-	default:
-		__xadd_wrong_size();
-	}
-	*/
+	 */
 	asm volatile ("lock; xaddl %0, %1\n"
 		: "+r" (__ret), "+m" (*v) : : "memory", "cc");
 
 	return i + __ret;
 }
 
+static inline int tst_atomic_load(int *v)
+{
+	int ret;
+
+	asm volatile("" : : : "memory");
+	ret = *v;
+	asm volatile("" : : : "memory");
+
+	return ret;
+}
+
+static inline void tst_atomic_store(int i, int *v)
+{
+	asm volatile("" : : : "memory");
+	*v = i;
+	asm volatile("" : : : "memory");
+}
+
 #elif defined(__powerpc__) || defined(__powerpc64__)
 static inline int tst_atomic_add_return(int i, int *v)
 {
@@ -83,7 +111,26 @@ static inline int tst_atomic_add_return(int i, int *v)
 	return t;
 }
 
+static inline int tst_atomic_load(int *v)
+{
+	int ret;
+
+	asm volatile("sync\n" : : : "memory");
+	ret = *v;
+	asm volatile("sync\n" : : : "memory");
+
+	return ret;
+}
+
+static inline void tst_atomic_store(int i, int *v)
+{
+	asm volatile("sync\n" : : : "memory");
+	*v = i;
+	asm volatile("sync\n" : : : "memory");
+}
+
 #elif defined(__s390__) || defined(__s390x__)
+
 static inline int tst_atomic_add_return(int i, int *v)
 {
 	int old_val, new_val;
@@ -102,11 +149,29 @@ static inline int tst_atomic_add_return(int i, int *v)
 	return old_val + i;
 }
 
+static inline int tst_atomic_load(int *v)
+{
+	int ret;
+
+	asm volatile("" : : : "memory");
+	ret = *v;
+	asm volatile("" : : : "memory");
+
+	return ret;
+}
+
+static inline void tst_atomic_store(int i, int *v)
+{
+	asm volatile("" : : : "memory");
+	*v = i;
+	asm volatile("" : : : "memory");
+}
+
 #elif defined(__arc__)
 
 /*ARCv2 defines the smp barriers */
 #ifdef __ARC700__
-#define smp_mb()
+#define smp_mb()	asm volatile("" : : : "memory")
 #else
 #define smp_mb()	asm volatile("dmb 3\n" : : : "memory")
 #endif
@@ -132,6 +197,24 @@ static inline int tst_atomic_add_return(int i, int *v)
 	return val;
 }
 
+static inline int tst_atomic_load(int *v)
+{
+	int ret;
+
+	smp_mb();
+	ret = *v;
+	smp_mb();
+
+	return ret;
+}
+
+static inline void tst_atomic_store(int i, int *v)
+{
+	smp_mb();
+	*v = i;
+	smp_mb();
+}
+
 #elif defined (__aarch64__)
 static inline int tst_atomic_add_return(int i, int *v)
 {
@@ -140,7 +223,7 @@ static inline int tst_atomic_add_return(int i, int *v)
 
 	__asm__ __volatile__(
 "       prfm    pstl1strm, %2	\n"
-"1:     ldxr 	%w0, %2		\n"
+"1:     ldaxr	%w0, %2		\n"
 "       add	%w0, %w0, %w3	\n"
 "       stlxr	%w1, %w0, %2	\n"
 "       cbnz	%w1, 1b		\n"
@@ -152,9 +235,81 @@ static inline int tst_atomic_add_return(int i, int *v)
 	return result;
 }
 
+static inline int tst_atomic_load(int *v)
+{
+	int ret;
+	unsigned long tmp;
+
+	asm volatile("//atomic_load			\n"
+		"	prfm	pstl1strm,  %[v]	\n"
+		"1:	ldaxr	%w[ret], %[v]		\n"
+		"	stlxr   %w[tmp], %w[ret], %[v]\n"
+		"	cbnz    %w[tmp], 1b		\n"
+		"	dmb ish				\n"
+		: [tmp] "=&r" (tmp), [ret] "=&r" (ret), [v] "+Q" (*v)
+		: : "memory");
+
+	return ret;
+}
+
+static inline void tst_atomic_store(int i, int *v)
+{
+	unsigned long tmp;
+
+	asm volatile("//atomic_store			\n"
+		"	prfm	pstl1strm, %[v]		\n"
+		"1:	ldaxr	%w[tmp], %[v]		\n"
+		"	stlxr   %w[tmp], %w[i], %[v]	\n"
+		"	cbnz    %w[tmp], 1b		\n"
+		"	dmb ish				\n"
+		: [tmp] "=&r" (tmp), [v] "+Q" (*v)
+		: [i] "r" (i)
+		: "memory");
+}
+
+#elif defined(__sparc__) && defined(__arch64__)
+static inline int tst_atomic_add_return(int i, int *v)
+{
+	int ret, tmp;
+
+	/* Based on arch/sparc/lib/atomic_64.S */
+	asm volatile("/*atomic_add_return*/		\n"
+		"1:	ldsw	[%[v]], %[ret];		\n"
+		"	add	%[ret], %[i], %[tmp];	\n"
+		"	cas	[%[v]], %[ret], %[tmp];	\n"
+		"	cmp	%[ret], %[tmp];		\n"
+		"	bne,pn	%%icc, 1b;		\n"
+		"	nop;				\n"
+		"	add	%[ret], %[i], %[ret];	\n"
+		: [ret] "=&r" (ret), [tmp] "=&r" (tmp)
+		: [i] "r" (i), [v] "r" (v)
+		: "memory", "cc");
+
+	return ret;
+}
+
+static inline int tst_atomic_load(int *v)
+{
+	int ret;
+
+	/* See arch/sparc/include/asm/barrier_64.h */
+	asm volatile("" : : : "memory");
+	ret = *v;
+	asm volatile("" : : : "memory");
+
+	return ret;
+}
+
+static inline void tst_atomic_store(int i, int *v)
+{
+	asm volatile("" : : : "memory");
+	*v = i;
+	asm volatile("" : : : "memory");
+}
+
 #else /* HAVE_SYNC_ADD_AND_FETCH == 1 */
-# error Your compiler does not provide __sync_add_and_fetch and LTP\
-	implementation is missing for your architecture.
+# error Your compiler does not provide __atomic_add_fetch, __sync_add_and_fetch \
+        and an LTP implementation is missing for your architecture.
 #endif
 
 static inline int tst_atomic_inc(int *v)
diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
index 229217495..f97137c35 100644
--- a/include/tst_fuzzy_sync.h
+++ b/include/tst_fuzzy_sync.h
@@ -32,6 +32,7 @@
 
 #include <sys/time.h>
 #include <time.h>
+#include "tst_atomic.h"
 
 #ifndef CLOCK_MONOTONIC_RAW
 # define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
diff --git a/m4/ltp-atomic.m4 b/m4/ltp-atomic.m4
new file mode 100644
index 000000000..836f0a4fd
--- /dev/null
+++ b/m4/ltp-atomic.m4
@@ -0,0 +1,34 @@
+dnl
+dnl Copyright (c) Linux Test Project, 2016
+dnl
+dnl This program is free software;  you can redistribute it and/or modify
+dnl it under the terms of the GNU General Public License as published by
+dnl the Free Software Foundation; either version 2 of the License, or
+dnl (at your option) any later version.
+dnl
+dnl This program is distributed in the hope that it will be useful,
+dnl but WITHOUT ANY WARRANTY;  without even the implied warranty of
+dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+dnl the GNU General Public License for more details.
+dnl
+
+AC_DEFUN([LTP_CHECK_ATOMIC_MEMORY_MODEL],[dnl
+	AC_MSG_CHECKING([for __atomic_* compiler builtins])
+	AC_LINK_IFELSE([AC_LANG_SOURCE([
+int main(void) {
+	int i = 0, j = 0;
+	__atomic_add_fetch(&i, 1, __ATOMIC_ACQ_REL);
+	__atomic_load_n(&i, __ATOMIC_SEQ_CST);
+	__atomic_compare_exchange_n(&i, &j, 0, 0, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
+	__atomic_store_n(&i, 0, __ATOMIC_RELAXED);
+	return i;
+}])],[has_atomic_mm="yes"])
+
+if test "x$has_atomic_mm" = xyes; then
+	AC_DEFINE(HAVE_ATOMIC_MEMORY_MODEL,1,
+	          [Define to 1 if you have the __atomic_* compiler builtins])
+	AC_MSG_RESULT(yes)
+else
+	AC_MSG_RESULT(no)
+fi
+])
-- 
2.14.1



* [LTP] [PATCH v2 2/7] tst_atomic: Add atomic store and load tests
  2017-08-28 11:02 [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Richard Palethorpe
@ 2017-08-28 11:02 ` Richard Palethorpe
  2017-08-28 11:02 ` [LTP] [PATCH v2 3/7] fzsync: Add long running thread support and deviation stats Richard Palethorpe
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Richard Palethorpe @ 2017-08-28 11:02 UTC (permalink / raw)
  To: ltp

Add a basic regression test for tst_atomic_{load,store}, similar to test09.

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 lib/newlib_tests/.gitignore |   1 +
 lib/newlib_tests/Makefile   |   1 +
 lib/newlib_tests/test15.c   | 132 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 134 insertions(+)
 create mode 100644 lib/newlib_tests/test15.c

diff --git a/lib/newlib_tests/.gitignore b/lib/newlib_tests/.gitignore
index 8e120074e..7d47a6531 100644
--- a/lib/newlib_tests/.gitignore
+++ b/lib/newlib_tests/.gitignore
@@ -12,6 +12,7 @@ test11
 test12
 test13
 test14
+test15
 tst_device
 tst_safe_fileops
 tst_res_hexd
diff --git a/lib/newlib_tests/Makefile b/lib/newlib_tests/Makefile
index e9976e5ba..e86d8f23a 100644
--- a/lib/newlib_tests/Makefile
+++ b/lib/newlib_tests/Makefile
@@ -7,6 +7,7 @@ LDLIBS			+= -lltp
 
 test08: CFLAGS+=-pthread
 test09: CFLAGS+=-pthread
+test15: CFLAGS+=-pthread
 
 ifeq ($(ANDROID),1)
 FILTER_OUT_MAKE_TARGETS	+= test08
diff --git a/lib/newlib_tests/test15.c b/lib/newlib_tests/test15.c
new file mode 100644
index 000000000..ba3cc963f
--- /dev/null
+++ b/lib/newlib_tests/test15.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2017 Richard Palethorpe <rpalethorpe@suse.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * A basic regression test for tst_atomic_{load,store}. Also provides a
+ * limited check that atomic stores and loads order non-atomic memory
+ * accesses. That is, we are checking that they implement memory fences or
+ * barriers.
+ *
+ * Many architectures/machines will still pass the test even if you remove the
+ * atomic functions. x86 in particular has strong memory ordering by default,
+ * so it should always pass (if you use volatile). However, AArch64
+ * (Raspberry Pi 3 Model B) has been observed to fail without the atomic
+ * functions.
+ *
+ * A failure can occur if an update to seq_n is not made globally visible by
+ * the time the next thread needs to use it.
+ */
+
+#include <stdint.h>
+#include <pthread.h>
+#include "tst_test.h"
+#include "tst_atomic.h"
+
+#define THREADS 64
+#define FILLER (1 << 20)
+
+/* Uncomment these to see what happens without atomics. To prevent the compiler
+ * from removing/reordering atomic and seq_n, mark them as volatile.
+ */
+/* #define tst_atomic_load(v) (*(v)) */
+/* #define tst_atomic_store(i, v) *(v) = (i) */
+
+struct block {
+	int seq_n;
+	intptr_t id;
+	intptr_t filler[FILLER];
+};
+
+static int atomic;
+static int *seq_n;
+static struct block *m;
+
+static void *worker_load_store(void *_id)
+{
+	intptr_t id = (intptr_t)_id, i;
+
+	for (i = (intptr_t)tst_atomic_load(&atomic);
+	     i != id;
+	     i = (intptr_t)tst_atomic_load(&atomic))
+		;
+
+	(m + (*seq_n))->id = id;
+	*seq_n += 1;
+	tst_atomic_store((int)i + 1, &atomic);
+
+	return NULL;
+}
+
+static void *cache_ruiner(void *vp LTP_ATTRIBUTE_UNUSED)
+{
+	intptr_t i = 0, j;
+	struct block *cur = m;
+
+	tst_res(TINFO, "cache ruiner started");
+	while (tst_atomic_load(&atomic) > 0) {
+		for (j = 0; j < FILLER; j++)
+			cur->filler[j] = j;
+
+		if (i < THREADS - 1) {
+			cur = m + (++i);
+		} else {
+			i = 0;
+			cur = m;
+		}
+	}
+
+	return NULL;
+}
+
+static void do_test(void)
+{
+	intptr_t i, id;
+	pthread_t threads[THREADS + 1];
+
+	atomic = 0;
+	m = (struct block *)SAFE_MMAP(NULL, sizeof(*m) * THREADS,
+				      PROT_READ | PROT_WRITE,
+				      MAP_PRIVATE | MAP_ANONYMOUS,
+				      -1, 0);
+	seq_n = &((m + THREADS / 2)->seq_n);
+
+	pthread_create(threads+THREADS, NULL, cache_ruiner, NULL);
+	for (i = THREADS - 1; i >= 0; i--)
+		pthread_create(threads+i, NULL, worker_load_store, (void *)i);
+
+	for (i = 0; i < THREADS; i++) {
+		tst_res(TINFO, "Joining thread %li", i);
+		pthread_join(threads[i], NULL);
+	}
+	tst_atomic_store(-1, &atomic);
+	pthread_join(*(threads+THREADS), NULL);
+
+	tst_res(TINFO, "Expected\tFound");
+	for (i = 0; i < THREADS; i++) {
+		id = (m + i)->id;
+		if (id != i)
+			tst_res(TFAIL, "%d\t\t%d", (int)i, (int)id);
+		else
+			tst_res(TPASS, "%d\t\t%d", (int)i, (int)id);
+	}
+
+	SAFE_MUNMAP(m, sizeof(*m) * THREADS);
+}
+
+static struct tst_test test = {
+	.test_all = do_test,
+};
-- 
2.14.1



* [LTP] [PATCH v2 3/7] fzsync: Add long running thread support and deviation stats
  2017-08-28 11:02 [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Richard Palethorpe
  2017-08-28 11:02 ` [LTP] [PATCH v2 2/7] tst_atomic: Add atomic store and load tests Richard Palethorpe
@ 2017-08-28 11:02 ` Richard Palethorpe
  2017-08-28 11:02 ` [LTP] [PATCH v2 4/7] fzsync: Add functionality test for library Richard Palethorpe
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Richard Palethorpe @ 2017-08-28 11:02 UTC (permalink / raw)
  To: ltp

Introduce a new synchronisation method which causes two threads to stop and
wait for each other, continuing only when both threads have signalled their
readiness. Previously we would start a new thread on each iteration, which
creates a variable delay in either the parent or child.

Using two long running threads significantly reduces the time needed to
perform a single race and may also increase timing stability on some
architectures.

This does not replace the delay method of synchronisation which can be used in
addition to the new method.
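
The resulting usage pattern looks roughly as follows (a sketch based on
the conversions later in this series; the racing operation is a
placeholder):

#include "tst_fuzzy_sync.h"

static struct tst_fzsync_pair pair = TST_FZSYNC_PAIR_INIT;

static void *thread_b(void *arg)
{
	while (tst_fzsync_wait_update_b(&pair)) {
		tst_fzsync_delay_b(&pair);
		/* ... racing operation B ... */
		tst_fzsync_time_b(&pair);
		if (!tst_fzsync_wait_b(&pair))
			break;
	}
	return arg;
}

/* Thread A mirrors this with the _a variants and calls
 * tst_fzsync_pair_exit(&pair) during cleanup so the loop above
 * terminates. */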

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 include/tst_fuzzy_sync.h | 167 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 140 insertions(+), 27 deletions(-)

diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
index f97137c35..b4bba71cf 100644
--- a/include/tst_fuzzy_sync.h
+++ b/include/tst_fuzzy_sync.h
@@ -17,21 +17,28 @@
 /*
  * Fuzzy Synchronisation - abbreviated to fzsync
  *
- * This library is intended to help reproduce race conditions while running in
- * a loop. You can use it to measure the time at which two functions are
- * called in different threads. Then calculate the average time gap between
- * the function calls and introduce a delay in one thread to synchronise the
- * calls.
+ * This library is intended to help reproduce race conditions by providing two
+ * thread synchronisation mechanisms. The first is a 'checkpoint' system where
+ * each thread will wait indefinitely for the other to enter a checkpoint
+ * before continuing. This is achieved by calling tst_fzsync_wait() and/or
+ * tst_fzsync_wait_update() at the points you want to synchronise in each
+ * thread.
  *
- * It is called 'fuzzy' synchronisation because the time gap will naturally vary
- * due to environmental factors. It is not a 'hard' synchronisation mechanism
- * like lockstepping.
+ * The second takes the form of a delay which is calculated by measuring the
+ * time difference between two points in each thread and comparing it to the
+ * desired difference (default is zero). Using a delay allows you to
+ * synchronise the threads at a point outside of your direct control
+ * (e.g. inside the kernel) or just increase the accuracy for the first
+ * mechanism. It is achieved using tst_fzsync_delay_{a,b}(),
+ * tst_fzsync_time_{a,b}() and tst_fzsync[_wait_]update().
  *
- * For a usage example see testcases/cve/cve-2017-2671.c
+ * For a usage example see testcases/cve/cve-2016-7117.c or just run
+ * 'git grep tst_fuzzy_sync.h'
  */
 
 #include <sys/time.h>
 #include <time.h>
+#include <math.h>
 #include "tst_atomic.h"
 
 #ifndef CLOCK_MONOTONIC_RAW
@@ -40,30 +47,44 @@
 
 /**
  * struct tst_fzsync_pair - the state of a two way synchronisation
- * @avg_diff: The average time difference over multiple iterations
- * @avg_diff_trgt: The desired average time difference, defaults to 0
+ * @avg_diff: Internal; the average time difference over multiple iterations.
+ * @avg_diff_trgt: The desired average time difference, defaults to 0.
  * @avg_alpha: The rate at which old diff samples are forgotten,
- *             defaults to 0.25
- * @a: The time at which call site A was last passed
- * @b: The time at which call site B was last passed
- * @delay: The size of the delay, positive to delay A, negative to delay B
- * @delay_inc: The step size of a delay increment, defaults to 10
+ *             defaults to 0.25.
+ * @avg_dev: Internal; Absolute average deviation.
+ * @a: Internal; The time at which call site A was last passed.
+ * @b: Internal; The time at which call site B was last passed.
+ * @delay: Internal; The size of the delay, positive to delay A,
+ *         negative to delay B.
+ * @delay_inc: The step size of a delay increment, defaults to 1.
  * @update_gap: The number of iterations between recalculating the delay.
  *              Defaults to 0xF and must be of the form $2^n - 1$
+ * @info_gap: The number of iterations between printing some statistics.
+ *            Defaults to 0x7FFFF and must also be one less than a power of 2.
+ * @a_cntr: Internal; Atomic counter used by fzsync_pair_wait()
+ * @b_cntr: Internal; Atomic counter used by fzsync_pair_wait()
+ * @exit: Internal; Used by tst_fzsync_pair_exit() and fzsync_pair_wait()
  *
  * This contains all the necessary state for synchronising two points A and
  * B. Where A is the time of an event in one process and B is the time of an
  * event in another process.
+ *
+ * Internal fields should only be accessed by library functions.
  */
 struct tst_fzsync_pair {
 	double avg_diff;
 	double avg_diff_trgt;
 	double avg_alpha;
+	double avg_dev;
 	struct timespec a;
 	struct timespec b;
 	long delay;
 	long delay_inc;
 	int update_gap;
+	int info_gap;
+	int a_cntr;
+	int b_cntr;
+	int exit;
 };
 
 /**
@@ -71,14 +92,19 @@ struct tst_fzsync_pair {
  */
 #define TST_FZSYNC_PAIR_INIT {	\
 	.avg_alpha = 0.25,	\
-	.delay_inc = 10,	\
-	.update_gap = 0xF	\
+	.delay_inc = 1,	        \
+	.update_gap = 0xF,	\
+	.info_gap = 0x7FFFF     \
 }
 
+/**
+ * tst_fzsync_pair_info - Print some synchronisation statistics
+ */
 static void tst_fzsync_pair_info(struct tst_fzsync_pair *pair)
 {
-	tst_res(TINFO, "avg_diff = %.5gns, delay = %05ld loops",
-		pair->avg_diff, pair->delay);
+	tst_res(TINFO,
+		"avg_diff = %.0fns, avg_dev = %.0fns, delay = %05ld loops",
+		pair->avg_diff, pair->avg_dev, pair->delay);
 }
 
 /**
@@ -133,18 +159,15 @@ static inline void tst_fzsync_time_b(struct tst_fzsync_pair *pair)
 }
 
 /**
- * tst_exp_moving_avg - Exponential moving average
+ * TST_EXP_MOVING_AVG - Exponential moving average
  * @alpha: The preference for recent samples over old ones.
  * @sample: The current sample
  * @prev_avg: The average of the all the previous samples
  *
  * Returns average including the current sample.
  */
-static inline double tst_exp_moving_avg(double alpha, long sample,
-					double prev_avg)
-{
-	return alpha * sample + (1.0 - alpha) * prev_avg;
-}
+#define TST_EXP_MOVING_AVG(alpha, sample, prev_avg)\
+	(alpha * sample + (1.0 - alpha) * prev_avg)
 
 /**
  * tst_fzsync_pair_update - Recalculate the delay
@@ -169,8 +192,16 @@ static void tst_fzsync_pair_update(int loop_index, struct tst_fzsync_pair *pair)
 	double target = pair->avg_diff_trgt;
 	double avg = pair->avg_diff;
 
+	if (pair->a.tv_sec > pair->b.tv_sec)
+		pair->a.tv_nsec += 1000000000;
+	else if (pair->a.tv_sec < pair->b.tv_sec)
+		pair->b.tv_nsec += 1000000000;
+
 	diff = pair->a.tv_nsec - pair->b.tv_nsec;
-	avg = tst_exp_moving_avg(pair->avg_alpha, diff, avg);
+	avg = TST_EXP_MOVING_AVG(pair->avg_alpha, diff, avg);
+	pair->avg_dev = TST_EXP_MOVING_AVG(pair->avg_alpha,
+					   fabs(diff - avg),
+					   pair->avg_dev);
 
 	if (!(loop_index & pair->update_gap)) {
 		if (avg > target)
@@ -179,5 +210,87 @@ static void tst_fzsync_pair_update(int loop_index, struct tst_fzsync_pair *pair)
 			pair->delay += inc;
 	}
 
+	if (!(loop_index & pair->info_gap))
+		tst_fzsync_pair_info(pair);
+
 	pair->avg_diff = avg;
 }
+
+/**
+ * tst_fzsync_pair_wait - Wait for the other thread
+ * @our_cntr: The counter for the thread we are on
+ * @other_cntr: The counter for the thread we are synchronising with
+ *
+ * Use this (through tst_fzsync_pair_wait_a() and tst_fzsync_pair_wait_b()) if
+ * you need an additional synchronisation point in a thread or you do not want
+ * to use the delay facility (not recommended). See
+ * tst_fzsync_pair_wait_update().
+ *
+ * Returns a non-zero value if the thread should continue otherwise the
+ * calling thread should exit.
+ */
+static inline int tst_fzsync_pair_wait(struct tst_fzsync_pair *pair,
+				       int *our_cntr, int *other_cntr)
+{
+	tst_atomic_inc(other_cntr);
+	while (tst_atomic_load(our_cntr) < tst_atomic_load(other_cntr)
+	       && !tst_atomic_load(&pair->exit))
+		;
+
+	return !tst_atomic_load(&pair->exit);
+}
+
+static inline int tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
+{
+	return tst_fzsync_pair_wait(pair, &pair->a_cntr, &pair->b_cntr);
+}
+
+static inline int tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
+{
+	return tst_fzsync_pair_wait(pair, &pair->b_cntr, &pair->a_cntr);
+}
+
+/**
+ * tst_fzsync_pair_wait_update_{a,b} - Wait and then recalculate
+ *
+ * This allows you to have two long running threads which wait for each other
+ * every iteration. So each thread will exit this function at approximately
+ * the same time. It also updates the delay values in a thread safe manner.
+ *
+ * You must call this function in both threads the same number of times each
+ * iteration. So a call in one thread must match with a call in the
+ * other. Make sure that calls to tst_fzsync_pair_wait() and
+ * tst_fzsync_pair_wait_update() happen in the same order in each thread. That
+ * is, make sure that a call to tst_fzsync_pair_wait_update_a() in one thread
+ * corresponds to a call to tst_fzsync_pair_wait_update_b() in the other.
+ *
+ * Returns a non-zero value if the calling thread should continue to loop. If
+ * it returns zero then tst_fzsync_pair_exit() has been called and you must exit
+ * the thread.
+ */
+static inline int tst_fzsync_wait_update_a(struct tst_fzsync_pair *pair)
+{
+	static int loop_index;
+
+	tst_fzsync_pair_wait(pair, &pair->a_cntr, &pair->b_cntr);
+	loop_index++;
+	tst_fzsync_pair_update(loop_index, pair);
+	return tst_fzsync_pair_wait(pair, &pair->a_cntr, &pair->b_cntr);
+}
+
+static inline int tst_fzsync_wait_update_b(struct tst_fzsync_pair *pair)
+{
+	tst_fzsync_pair_wait(pair, &pair->b_cntr, &pair->a_cntr);
+	return tst_fzsync_pair_wait(pair, &pair->b_cntr, &pair->a_cntr);
+}
+
+/**
+ * tst_fzsync_pair_exit - Signal that the other thread should exit
+ *
+ * Causes tst_fzsync_pair_wait() and tst_fzsync_pair_wait_update() to return
+ * 0.
+ */
+static inline void tst_fzsync_pair_exit(struct tst_fzsync_pair *pair)
+{
+	tst_atomic_store(1, &pair->exit);
+}
-- 
2.14.1



* [LTP] [PATCH v2 4/7] fzsync: Add functionality test for library
  2017-08-28 11:02 [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Richard Palethorpe
  2017-08-28 11:02 ` [LTP] [PATCH v2 2/7] tst_atomic: Add atomic store and load tests Richard Palethorpe
  2017-08-28 11:02 ` [LTP] [PATCH v2 3/7] fzsync: Add long running thread support and deviation stats Richard Palethorpe
@ 2017-08-28 11:02 ` Richard Palethorpe
  2017-08-28 11:02 ` [LTP] [PATCH v2 5/7] Convert cve-2016-7117 test to use long running threads Richard Palethorpe
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Richard Palethorpe @ 2017-08-28 11:02 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 lib/newlib_tests/.gitignore |   1 +
 lib/newlib_tests/Makefile   |   2 +
 lib/newlib_tests/test16.c   | 106 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 109 insertions(+)
 create mode 100644 lib/newlib_tests/test16.c

diff --git a/lib/newlib_tests/.gitignore b/lib/newlib_tests/.gitignore
index 7d47a6531..d47a6ea12 100644
--- a/lib/newlib_tests/.gitignore
+++ b/lib/newlib_tests/.gitignore
@@ -13,6 +13,7 @@ test12
 test13
 test14
 test15
+test16
 tst_device
 tst_safe_fileops
 tst_res_hexd
diff --git a/lib/newlib_tests/Makefile b/lib/newlib_tests/Makefile
index e86d8f23a..afa09373e 100644
--- a/lib/newlib_tests/Makefile
+++ b/lib/newlib_tests/Makefile
@@ -8,6 +8,8 @@ LDLIBS			+= -lltp
 test08: CFLAGS+=-pthread
 test09: CFLAGS+=-pthread
 test15: CFLAGS+=-pthread
+test16: CFLAGS+=-pthread
+test16: LDLIBS+=-lrt
 
 ifeq ($(ANDROID),1)
 FILTER_OUT_MAKE_TARGETS	+= test08
diff --git a/lib/newlib_tests/test16.c b/lib/newlib_tests/test16.c
new file mode 100644
index 000000000..f4cc74e46
--- /dev/null
+++ b/lib/newlib_tests/test16.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017 Richard Palethorpe <rpalethorpe@suse.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+/* Basic functionality test for tst_fuzzy_sync.h similar to the atomic tests
+ * (test15.c). One thread writes to the odd indexes of an array while the
+ * other writes to the even. If the threads are not synchronised then they
+ * will probably write to the wrong indexes as they share an index variable
+ * which they should take it in turns to update.
+ */
+
+#include <stdlib.h>
+#include "tst_test.h"
+#include "tst_safe_pthread.h"
+#include "tst_fuzzy_sync.h"
+
+#define LOOPS 0xFFFFFF
+
+static pthread_t thrd;
+static volatile char seq[LOOPS * 2 + 1];
+static struct tst_fzsync_pair pair = TST_FZSYNC_PAIR_INIT;
+static volatile int seq_n;
+
+static void *worker(void *v LTP_ATTRIBUTE_UNUSED)
+{
+	int i;
+
+	for (i = 0; tst_fzsync_wait_update_b(&pair) && i < LOOPS; i++) {
+		tst_fzsync_delay_b(&pair);
+		tst_fzsync_time_b(&pair);
+		if (!tst_fzsync_wait_b(&pair))
+			break;
+		seq[seq_n] = 'B';
+		seq_n = (i + 1) * 2;
+	}
+
+	if (i > LOOPS)
+		tst_res(TWARN, "Worker performed too many iterations: %d > %d",
+			i, LOOPS);
+
+	return NULL;
+}
+
+static void setup(void)
+{
+	SAFE_PTHREAD_CREATE(&thrd, NULL, worker, NULL);
+}
+
+static void run(void)
+{
+	int i, j, fail = 0;
+
+	for (i = 0; tst_fzsync_wait_update_a(&pair) && i < LOOPS; i++) {
+		tst_fzsync_delay_a(&pair);
+		seq[seq_n] = 'A';
+		seq_n = i * 2 + 1;
+		tst_fzsync_time_a(&pair);
+		if (!tst_fzsync_wait_a(&pair))
+			break;
+	}
+
+	for (i = 0; i < LOOPS; i++) {
+		j = i * 2;
+		if (seq[j] != 'A') {
+			tst_res(TFAIL, "Expected A, but found %c at %d",
+				seq[j], j);
+			fail = 1;
+		}
+		j = i * 2 + 1;
+		if (seq[j] != 'B') {
+			tst_res(TFAIL, "Expected B, but found %c at %d",
+				seq[j], j);
+			fail = 1;
+		}
+	}
+
+	if (!fail)
+		tst_res(TPASS, "Sequence is correct");
+
+	if (labs(pair.delay) > 1000)
+		tst_res(TFAIL, "Delay is suspiciously large");
+}
+
+static void cleanup(void)
+{
+	tst_fzsync_pair_exit(&pair);
+	SAFE_PTHREAD_JOIN(thrd, NULL);
+}
+
+static struct tst_test test = {
+	.setup = setup,
+	.cleanup = cleanup,
+	.test_all = run,
+};
-- 
2.14.1



* [LTP] [PATCH v2 5/7] Convert cve-2016-7117 test to use long running threads
  2017-08-28 11:02 [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Richard Palethorpe
                   ` (2 preceding siblings ...)
  2017-08-28 11:02 ` [LTP] [PATCH v2 4/7] fzsync: Add functionality test for library Richard Palethorpe
@ 2017-08-28 11:02 ` Richard Palethorpe
  2017-08-28 11:02 ` [LTP] [PATCH v2 6/7] Convert cve-2014-0196 " Richard Palethorpe
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Richard Palethorpe @ 2017-08-28 11:02 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 testcases/cve/cve-2016-7117.c | 47 +++++++++++++++++++++++++------------------
 1 file changed, 27 insertions(+), 20 deletions(-)

diff --git a/testcases/cve/cve-2016-7117.c b/testcases/cve/cve-2016-7117.c
index deb667761..3cb7efcdf 100644
--- a/testcases/cve/cve-2016-7117.c
+++ b/testcases/cve/cve-2016-7117.c
@@ -71,7 +71,7 @@ struct mmsghdr {
 };
 #endif
 
-static int socket_fds[2];
+static volatile int socket_fds[2];
 static struct mmsghdr msghdrs[2] = {
 	{
 		.msg_hdr = {
@@ -94,40 +94,53 @@ static struct mmsghdr msghdrs[2] = {
 static char rbuf[sizeof(MSG)];
 static struct timespec timeout = { .tv_sec = RECV_TIMEOUT };
 static struct tst_fzsync_pair fzsync_pair = TST_FZSYNC_PAIR_INIT;
+static pthread_t pt_send;
+static void *send_and_close(void *);
+
+static void setup(void)
+{
+	SAFE_PTHREAD_CREATE(&pt_send, 0, send_and_close, 0);
+}
 
 static void cleanup(void)
 {
 	close(socket_fds[0]);
 	close(socket_fds[1]);
+
+	if (pt_send) {
+		tst_fzsync_pair_exit(&fzsync_pair);
+		SAFE_PTHREAD_JOIN(pt_send, 0);
+	}
 }
 
 static void *send_and_close(void *arg)
 {
-	send(socket_fds[0], MSG, sizeof(MSG), 0);
-	send(socket_fds[0], MSG, sizeof(MSG), 0);
-
-	tst_fzsync_delay_b(&fzsync_pair);
+	while (tst_fzsync_wait_update_b(&fzsync_pair)) {
+		send(socket_fds[0], MSG, sizeof(MSG), 0);
+		send(socket_fds[0], MSG, sizeof(MSG), 0);
 
-	close(socket_fds[0]);
-	close(socket_fds[1]);
-	tst_fzsync_time_b(&fzsync_pair);
+		tst_fzsync_delay_b(&fzsync_pair);
 
+		close(socket_fds[0]);
+		close(socket_fds[1]);
+		tst_fzsync_time_b(&fzsync_pair);
+		if (!tst_fzsync_wait_b(&fzsync_pair))
+			break;
+	}
 	return arg;
 }
 
 static void run(void)
 {
-	pthread_t pt_send;
 	int i, stat, too_early_count = 0;
 
 	msghdrs[0].msg_hdr.msg_iov->iov_base = (void *)&rbuf;
 
 	for (i = 1; i < ATTEMPTS; i++) {
-		if (socketpair(AF_LOCAL, SOCK_DGRAM, 0, socket_fds))
+		if (socketpair(AF_LOCAL, SOCK_DGRAM, 0, (int *)socket_fds))
 			tst_brk(TBROK | TERRNO, "Socket creation failed");
 
-		SAFE_PTHREAD_CREATE(&pt_send, 0, send_and_close, 0);
-
+		tst_fzsync_wait_update_a(&fzsync_pair);
 		tst_fzsync_delay_a(&fzsync_pair);
 
 		stat = tst_syscall(__NR_recvmmsg,
@@ -140,20 +153,14 @@ static void run(void)
 		else if (stat < 0)
 			tst_res(TWARN | TERRNO, "recvmmsg failed unexpectedly");
 
-		SAFE_PTHREAD_JOIN(pt_send, 0);
-
-		tst_fzsync_pair_update(i, &fzsync_pair);
-		if (!(i & 0x7FFFF)) {
-			tst_res(TINFO, "Too early: %.1f%%",
-				100 * too_early_count / (float)i);
-			tst_fzsync_pair_info(&fzsync_pair);
-		}
+		tst_fzsync_wait_a(&fzsync_pair);
 	}
 
 	tst_res(TPASS, "Nothing happened after %d attempts", ATTEMPTS);
 }
 
 static struct tst_test test = {
+	.setup = setup,
 	.test_all = run,
 	.cleanup = cleanup,
 	.min_kver = "2.6.33",
-- 
2.14.1



* [LTP] [PATCH v2 6/7] Convert cve-2014-0196 to use long running threads
  2017-08-28 11:02 [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Richard Palethorpe
                   ` (3 preceding siblings ...)
  2017-08-28 11:02 ` [LTP] [PATCH v2 5/7] Convert cve-2016-7117 test to use long running threads Richard Palethorpe
@ 2017-08-28 11:02 ` Richard Palethorpe
  2017-08-28 11:02 ` [LTP] [PATCH v2 7/7] Convert cve-2017-2671 " Richard Palethorpe
  2017-08-29 14:58 ` [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Jan Stancek
  6 siblings, 0 replies; 10+ messages in thread
From: Richard Palethorpe @ 2017-08-28 11:02 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 testcases/cve/cve-2014-0196.c | 50 ++++++++++++++++++++++++-------------------
 1 file changed, 28 insertions(+), 22 deletions(-)

diff --git a/testcases/cve/cve-2014-0196.c b/testcases/cve/cve-2014-0196.c
index 4e2b3f582..68302295e 100644
--- a/testcases/cve/cve-2014-0196.c
+++ b/testcases/cve/cve-2014-0196.c
@@ -51,11 +51,13 @@
 #define ATTEMPTS      0x7000
 #define BUFLEN        512
 
-static int master_fd, slave_fd;
+static volatile int master_fd, slave_fd;
 static int filler_ptys[ONEOFF_ALLOCS * 2];
 static int target_ptys[RUN_ALLOCS * 2];
 static char buf[BUFLEN];
 
+static pthread_t overwrite_thread;
+static void *overwrite_thread_fn(void *);
 static struct tst_fzsync_pair fzsync_pair = TST_FZSYNC_PAIR_INIT;
 
 static void create_pty(int *amaster, int *aslave)
@@ -68,35 +70,40 @@ static void setup(void)
 {
 	int i;
 
-	fzsync_pair.delay_inc = 100;
 	for (i = 0; i < ONEOFF_ALLOCS; i++) {
 		create_pty(&filler_ptys[i],
 			   &filler_ptys[i + ONEOFF_ALLOCS]);
 	}
+
+	fzsync_pair.info_gap = 0xFFF;
+	SAFE_PTHREAD_CREATE(&overwrite_thread, NULL,
+			    overwrite_thread_fn, NULL);
 }
 
-static void *overwrite_thread_fn(void *p)
+static void *overwrite_thread_fn(void *p LTP_ATTRIBUTE_UNUSED)
 {
-	tst_fzsync_delay_b(&fzsync_pair);
-	tst_fzsync_time_b(&fzsync_pair);
-
-	SAFE_WRITE(0, slave_fd, buf, BUFLEN - 1);
-	SAFE_WRITE(0, slave_fd, buf, BUFLEN - 1);
-	SAFE_WRITE(0, slave_fd, buf, BUFLEN);
-
-	return p;
+	while (tst_fzsync_wait_update_b(&fzsync_pair)) {
+		tst_fzsync_delay_b(&fzsync_pair);
+		tst_fzsync_time_b(&fzsync_pair);
+
+		SAFE_WRITE(0, slave_fd, buf, BUFLEN - 1);
+		SAFE_WRITE(0, slave_fd, buf, BUFLEN - 1);
+		SAFE_WRITE(0, slave_fd, buf, BUFLEN);
+		if (!tst_fzsync_wait_b(&fzsync_pair))
+			break;
+	}
+	return 0;
 }
 
 static void run(void)
 {
 	struct termios t;
-	pthread_t overwrite_thread;
 	int i, j;
 
 	tst_res(TINFO, "Attempting to overflow into a tty_struct...");
 
 	for (i = 0; i < ATTEMPTS; i++) {
-		create_pty(&master_fd, &slave_fd);
+		create_pty((int *)&master_fd, (int *)&slave_fd);
 
 		for (j = 0; j < RUN_ALLOCS; j++)
 			create_pty(&target_ptys[j],
@@ -111,20 +118,12 @@ static void run(void)
 		t.c_lflag |= ECHO;
 		tcsetattr(master_fd, TCSANOW, &t);
 
-		SAFE_PTHREAD_CREATE(&overwrite_thread, NULL,
-				    overwrite_thread_fn, NULL);
+		tst_fzsync_wait_update_a(&fzsync_pair);
 
 		tst_fzsync_delay_a(&fzsync_pair);
 		tst_fzsync_time_a(&fzsync_pair);
 		SAFE_WRITE(0, master_fd, "A", 1);
 
-		SAFE_PTHREAD_JOIN(overwrite_thread, NULL);
-
-		tst_fzsync_pair_update(i, &fzsync_pair);
-
-		if (!(i & 0x1FFF))
-			tst_fzsync_pair_info(&fzsync_pair);
-
 		for (j = 0; j < RUN_ALLOCS; j++) {
 			if (j == RUN_ALLOCS / 2)
 				continue;
@@ -139,6 +138,8 @@ static void run(void)
 		ioctl(slave_fd, 0xdeadbeef);
 		SAFE_CLOSE(master_fd);
 		SAFE_CLOSE(slave_fd);
+
+		tst_fzsync_wait_a(&fzsync_pair);
 	}
 
 	tst_res(TPASS, "Nothing bad happened, probably.");
@@ -148,6 +149,11 @@ static void cleanup(void)
 {
 	int i;
 
+	if (overwrite_thread) {
+		tst_fzsync_pair_exit(&fzsync_pair);
+		SAFE_PTHREAD_JOIN(overwrite_thread, NULL);
+	}
+
 	for (i = 0; i < ONEOFF_ALLOCS * 2; i++)
 		close(filler_ptys[i]);
 	close(master_fd);
-- 
2.14.1



* [LTP] [PATCH v2 7/7] Convert cve-2017-2671 to use long running threads
  2017-08-28 11:02 [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Richard Palethorpe
                   ` (4 preceding siblings ...)
  2017-08-28 11:02 ` [LTP] [PATCH v2 6/7] Convert cve-2014-0196 " Richard Palethorpe
@ 2017-08-28 11:02 ` Richard Palethorpe
  2017-08-29 14:58 ` [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Jan Stancek
  6 siblings, 0 replies; 10+ messages in thread
From: Richard Palethorpe @ 2017-08-28 11:02 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 testcases/cve/cve-2017-2671.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/testcases/cve/cve-2017-2671.c b/testcases/cve/cve-2017-2671.c
index 77744db42..b0471bfff 100644
--- a/testcases/cve/cve-2017-2671.c
+++ b/testcases/cve/cve-2017-2671.c
@@ -49,13 +49,15 @@
 
 #include "tst_fuzzy_sync.h"
 
-#define ATTEMPTS 0xFFFF
+#define ATTEMPTS 0x80000
 #define PING_SYSCTL_PATH "/proc/sys/net/ipv4/ping_group_range"
 
 static int sockfd;
 static unsigned int ping_min_grp, ping_max_grp;
 static struct tst_fzsync_pair fzsync_pair = TST_FZSYNC_PAIR_INIT;
 static struct sockaddr_in iaddr, uaddr;
+static pthread_t thrd;
+static void *connect_b(void *);
 
 static void setup(void)
 {
@@ -73,13 +75,20 @@ static void setup(void)
 	SAFE_FILE_PRINTF(PING_SYSCTL_PATH, "0 0");
 
 	sockfd = SAFE_SOCKET(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
+	SAFE_PTHREAD_CREATE(&thrd, 0, connect_b, 0);
 	tst_res(TINFO, "Created ping socket, attempting to race...");
 }
 
 static void cleanup(void)
 {
+	if (thrd) {
+		tst_fzsync_pair_exit(&fzsync_pair);
+		SAFE_PTHREAD_JOIN(thrd, NULL);
+	}
+
 	if (sockfd > 0)
 		SAFE_CLOSE(sockfd);
+
 	if (ping_min_grp | ping_max_grp)
 		SAFE_FILE_PRINTF(PING_SYSCTL_PATH, "%u %u",
 				 ping_min_grp, ping_max_grp);
@@ -87,32 +96,31 @@ static void cleanup(void)
 
 static void *connect_b(void * param LTP_ATTRIBUTE_UNUSED)
 {
-	tst_fzsync_delay_b(&fzsync_pair);
-	connect(sockfd, (struct sockaddr *)&uaddr, sizeof(uaddr));
-	tst_fzsync_time_b(&fzsync_pair);
+	while (tst_fzsync_wait_update_b(&fzsync_pair)) {
+		tst_fzsync_delay_b(&fzsync_pair);
+		connect(sockfd, (struct sockaddr *)&uaddr, sizeof(uaddr));
+		tst_fzsync_time_b(&fzsync_pair);
+		if (!tst_fzsync_wait_b(&fzsync_pair))
+			break;
+	}
 
 	return 0;
 }
 
 static void run(void)
 {
-	pthread_t thrd;
 	int i;
 
 	for (i = 0; i < ATTEMPTS; i++) {
 		SAFE_CONNECT(sockfd,
 			     (struct sockaddr *)&iaddr, sizeof(iaddr));
-		SAFE_PTHREAD_CREATE(&thrd, 0, connect_b, 0);
 
+		tst_fzsync_wait_update_a(&fzsync_pair);
 		tst_fzsync_delay_a(&fzsync_pair);
 		connect(sockfd, (struct sockaddr *)&uaddr, sizeof(uaddr));
 		tst_fzsync_time_a(&fzsync_pair);
 
-		SAFE_PTHREAD_JOIN(thrd, 0);
-		tst_fzsync_pair_update(i, &fzsync_pair);
-
-		if (!(i & 0x7FFF))
-			tst_fzsync_pair_info(&fzsync_pair);
+		tst_fzsync_wait_a(&fzsync_pair);
 	}
 
 	tst_res(TPASS, "We didn't crash");
-- 
2.14.1



* [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins
  2017-08-28 11:02 [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Richard Palethorpe
                   ` (5 preceding siblings ...)
  2017-08-28 11:02 ` [LTP] [PATCH v2 7/7] Convert cve-2017-2671 " Richard Palethorpe
@ 2017-08-29 14:58 ` Jan Stancek
  2017-08-30 12:27   ` Richard Palethorpe
  6 siblings, 1 reply; 10+ messages in thread
From: Jan Stancek @ 2017-08-29 14:58 UTC (permalink / raw)
  To: ltp

On 08/28/2017 01:02 PM, Richard Palethorpe wrote:
> Also add more fallback inline assembly for the existing architectures, add
> support for SPARC64, and use the __atomic_* compiler intrinsics when available.
> 
> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>

Hi,

I gave this patch a go on a number of old/new distros, x86_64, ppc64le
and s390, and I haven't run into issues. My review is mostly a
comparison against kernel sources, as much of the inline assembly is beyond me.

I think we could use a comment about what sort of ordering we expect
from tst_atomic.h. I always assumed we care only about compiler barriers,
and that when we use the __sync_* functions, it's more for convenience (not
because we wanted a full memory barrier).

I don't quite understand tst_atomic_load/store() for aarch64, because
when I look at the kernel's arch/arm64/include/asm/atomic.h,
these are a lot simpler:
#define atomic_read(v)                  READ_ONCE((v)->counter)
#define atomic_set(v, i)                WRITE_ONCE(((v)->counter), (i))
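
For reference, a simplified sketch of what those expand to, essentially
just volatile accesses with no hardware barrier (the real kernel
definitions are more elaborate):

#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))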

The rest of the fallback functions look good to me (at least for the
architectures I checked). I expect the fallbacks are used only by a very
small fringe of users, and their numbers will only go down in future.

Regards,
Jan



* [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins
  2017-08-29 14:58 ` [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins Jan Stancek
@ 2017-08-30 12:27   ` Richard Palethorpe
  2017-08-30 13:39     ` Cyril Hrubis
  0 siblings, 1 reply; 10+ messages in thread
From: Richard Palethorpe @ 2017-08-30 12:27 UTC (permalink / raw)
  To: ltp

Hi Jan,

As always, thanks for the review.

Jan Stancek writes:

> On 08/28/2017 01:02 PM, Richard Palethorpe wrote:
>> Also add more fallback inline assembly for the existing architectures, add
>> support for SPARC64, and use the __atomic_* compiler intrinsics when available.
>>
>> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
>
> Hi,
>
> I gave this patch a go on a number of old/new distros, x86_64, ppc64le
> and s390, and I haven't run into issues. My review is mostly a
> comparison against kernel sources, as much of the inline assembly is beyond me.
>
> I think we could use a comment about what sort of ordering we expect
> from tst_atomic.h. I always assumed we care only about compiler barriers,
> and that when we use the __sync_* functions, it's more for convenience (not
> because we wanted a full memory barrier).

I think that is a good idea; I will write some justification and
explanation in the header.

In my particular use case (tst_fuzzy_sync), I need at least an
"acquire-release" memory barrier to ensure that some non-atomic updates
to shared data between threads are globally visible after a given
point. I could probably remove them and it would work on most machines
most of the time. However, if a bug does occur due to stale data, I
think it would be extremely difficult for someone to diagnose.
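
To illustrate, the pattern I rely on is roughly the following (a sketch
with hypothetical names, using the __atomic builtins directly):

static int shared_data;
static int ready;

static void producer(void)
{
	shared_data = 42;	/* plain, non-atomic store */
	/* release: the store above is globally visible before ready is set */
	__atomic_store_n(&ready, 1, __ATOMIC_RELEASE);
}

static void consumer(void)
{
	/* acquire: pairs with the release store in producer() */
	while (!__atomic_load_n(&ready, __ATOMIC_ACQUIRE))
		;
	/* at this point shared_data is guaranteed to read as 42 */
}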

>
> I don't quite understand tst_atomic_load/store() for aarch64, because
> when I look at the kernel's arch/arm64/include/asm/atomic.h,
> these are a lot simpler:
> #define atomic_read(v)                  READ_ONCE((v)->counter)
> #define atomic_set(v, i)                WRITE_ONCE(((v)->counter), (i))

I think these are _just_ atomic load and store. There are (probably) no
common load or store functions in the kernel which are also memory
barriers. However, there are cmpxchg and add-load atomics which are also
memory barriers (they have acquire or release semantics).

I adapted the ARM load/store ASM from the cmpxchg code and I am pretty
sure they are overkill. However, ARM is the only architecture where I
managed to cause an error without memory barriers, but with these
functions no error occurs.

>
> The rest of the fallback functions look good to me (at least for the
> architectures I checked). I expect the fallbacks are used only by a very
> small fringe of users, and their numbers will only go down in future.
>
> Regards,
> Jan

Yes, I think so; GCC and Clang have had the __sync intrinsics for a long
time. If anyone is using a compiler without these intrinsics, it would
be interesting to know about it.

--
Thank you,
Richard.


* [LTP] [PATCH v2 1/7] tst_atomic: Add load, store and use __atomic builtins
  2017-08-30 12:27   ` Richard Palethorpe
@ 2017-08-30 13:39     ` Cyril Hrubis
  0 siblings, 0 replies; 10+ messages in thread
From: Cyril Hrubis @ 2017-08-30 13:39 UTC (permalink / raw)
  To: ltp

Hi!
> > I gave this patch a go on a number of old/new distros, x86_64, ppc64le
> > and s390, and I haven't run into issues. My review is mostly a
> > comparison against kernel sources, as much of the inline assembly is beyond me.
> >
> > I think we could use a comment about what sort of ordering we expect
> > from tst_atomic.h. I always assumed we care only about compiler barriers,
> > and that when we use the __sync_* functions, it's more for convenience (not
> > because we wanted a full memory barrier).
> 
> I think that is a good idea; I will write some justification and
> explanation in the header.
> 
> In my particular use case (tst_fuzzy_sync), I need at least an
> "acquire-release" memory barrier to ensure that some non-atomic updates
> to shared data between threads are globally visible after a given
> point. I could probably remove them and it would work on most machines
> most of the time. However, if a bug does occur due to stale data, I
> think it would be extremely difficult for someone to diagnose.

When I added the atomic add functions, I only cared about incrementing
the PASS/FAIL/etc. counters in the piece of shared memory used for test
results. So all I cared about was that the value is fetched, incremented
and written atomically.

I guess that we need more than compiler barriers then: since competing
threads may race on the increment, we need the value to be coherent
across different CPU caches as well.
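
For example (a sketch, not actual LTP code): with only a compiler
barrier, two threads can both load the same value and lose an update,
whereas a __sync read-modify-write cannot:

static int counter;

/* Not enough: the barriers only stop compiler reordering; the load,
 * increment and store remain three separate operations. */
static void inc_racy(void)
{
	asm volatile("" : : : "memory");
	counter++;
	asm volatile("" : : : "memory");
}

/* Safe: a single atomic read-modify-write, coherent across CPUs. */
static void inc_atomic(void)
{
	__sync_add_and_fetch(&counter, 1);
}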

> > The rest of fallback functions looks good to me (at least for architectures
> > I checked). I expect fallback is used only by very small fringe of users
> > and their numbers will only go down in future.
> >
> > Regards,
> > Jan
> 
> Yes I think so, GCC has had the sync intrinsics for a long time and
> Clang. If anyone is using a compiler without these intrinsics it would
> be interesting to know about it.

Just be wary that some less common architectures got the support much
later than x86.

-- 
Cyril Hrubis
chrubis@suse.cz

