From: Alex Bennée <alex.bennee@linaro.org>
Date: Fri, 26 Feb 2016 13:15:32 +0000
Message-Id: <1456492533-17171-11-git-send-email-alex.bennee@linaro.org>
In-Reply-To: <1456492533-17171-1-git-send-email-alex.bennee@linaro.org>
References: <1456492533-17171-1-git-send-email-alex.bennee@linaro.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Subject: [Qemu-devel] [RFC 10/11] arm/barrier-litmus-tests: add some litmus tests
To: mttcg@listserver.greensocs.com, mark.burton@greensocs.com,
    fred.konrad@greensocs.com, a.rigo@virtualopensystems.com
Cc: peter.maydell@linaro.org, drjones@redhat.com,
    a.spyridakis@virtualopensystems.com, claudio.fontana@huawei.com,
    qemu-devel@nongnu.org, will.deacon@arm.com, crosthwaitepeter@gmail.com,
    pbonzini@redhat.com, Alex Bennée <alex.bennee@linaro.org>,
    aurelien@aurel32.net, rth@twiddle.net

This adds a framework for running simple barrier litmus tests on ARM.
The litmus tests aren't as comprehensive as the academic exercises,
which attempt all sorts of things to keep the racing CPUs synced up.
These tests do honour the "sync" parameter as a poor man's equivalent.

I've imported a few more of the barrier primitives from the Linux
source tree so we consistently use macros.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>

---
 - add a unittest.cfg
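
As an aside (not part of the patch), the message-passing pattern these
tests exercise can be sketched on the host with plain C11 atomics. The
sketch below is purely illustrative: the file name, build line and use
of pthreads/stdatomic are assumptions for the example, not anything in
kvm-unit-tests. The release/acquire pair plays the same role as
smp_store_release()/smp_load_acquire() in the patch; make both orderings
relaxed and you get the plain "mp" case, where a weakly ordered machine
is allowed to show y == 1 while x == 0.

  /* mp-sketch.c: build with cc -std=c11 -pthread mp-sketch.c */
  #include <stdatomic.h>
  #include <pthread.h>
  #include <stdio.h>

  static atomic_int x;    /* the message data */
  static atomic_int y;    /* the "data is ready" flag */

  static void *writer(void *arg)
  {
          (void)arg;
          atomic_store_explicit(&x, 1, memory_order_relaxed); /* data */
          atomic_store_explicit(&y, 1, memory_order_release); /* flag */
          return NULL;
  }

  int main(void)
  {
          pthread_t t;

          pthread_create(&t, NULL, writer, NULL);

          /* spin until the flag is observed, with acquire semantics */
          while (!atomic_load_explicit(&y, memory_order_acquire))
                  ;

          /* release/acquire pairing guarantees the data is visible */
          printf("x = %d (never 0 once y has been seen)\n",
                 atomic_load_explicit(&x, memory_order_relaxed));

          pthread_join(t, NULL);
          return 0;
  }

The test below does the same thing across a large array of x/y pairs so
a single run gives the race many chances to show up.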
---
 arm/barrier-litmus-test.c    | 258 +++++++++++++++++++++++++++++++++++++++++++
 arm/unittests.cfg            |  21 ++++
 config/config-arm-common.mak |   2 +
 lib/arm/asm/barrier.h        |  63 ++++++++++-
 lib/arm64/asm/barrier.h      |  50 +++++++++
 5 files changed, 393 insertions(+), 1 deletion(-)
 create mode 100644 arm/barrier-litmus-test.c

diff --git a/arm/barrier-litmus-test.c b/arm/barrier-litmus-test.c
new file mode 100644
index 0000000..45cdcdc
--- /dev/null
+++ b/arm/barrier-litmus-test.c
@@ -0,0 +1,258 @@
+/*
+ * ARM Barrier Litmus Tests
+ *
+ * This test provides a framework for testing barrier conditions on
+ * the processor. It's simpler than the more involved barrier testing
+ * frameworks as we are looking for simple failures of QEMU's TCG, not
+ * weird edge cases the silicon gets wrong.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#define MAX_CPUS 8
+
+/* Array size and access controls */
+static int array_size = 100000;
+static int wait_if_ahead = 0;
+
+/*
+ * The test_array structures form a contiguous array modified by two
+ * or more competing CPUs. The padding is to ensure the variables do
+ * not share cache lines.
+ *
+ * All structures start zeroed.
+ */
+
+typedef struct test_array
+{
+        volatile unsigned int x;
+        uint8_t dummy[64];
+        volatile unsigned int y;
+        uint8_t dummy2[64];
+        volatile int z;
+} test_array;
+
+volatile test_array *array;
+
+/* Test definition structure
+ *
+ * The first function will always run on the primary CPU; it is
+ * usually the one that will detect any weirdness and trigger the
+ * failure of the test.
+ */
+
+typedef void (*test_fn)(void);
+
+typedef struct {
+        const char *test_name;
+        bool should_pass;
+        test_fn main_fn;
+        test_fn secondary_fns[MAX_CPUS-1];
+} test_descr_t;
+
+/* Litmus tests */
+
+/* Simple Message Passing
+ *
+ * x is the message data
+ * y is the flag to indicate the data is ready
+ *
+ * Reading x == 0 when y == 1 is a failure.
+ */
+
+void message_passing_write(void)
+{
+        int i;
+        for (i = 0; i < array_size; i++) {
+                volatile test_array *entry = &array[i];
+                entry->x = 1;
+                entry->y = 1;
+        }
+
+        halt();
+}
+
+void message_passing_read(void)
+{
+        int i;
+        int errors = 0, ready = 0;
+
+        for (i = 0; i < array_size; i++) {
+                volatile test_array *entry = &array[i];
+                unsigned int x, y;
+                y = entry->y;
+                x = entry->x;
+
+                if (y && !x)
+                        errors++;
+                ready += y;
+        }
+
+        report_xfail("mp: %d errors, %d ready", true, errors == 0, errors, ready);
+}
+
+/* Simple Message Passing with barriers */
+void message_passing_write_barrier(void)
+{
+        int i;
+        for (i = 0; i < array_size; i++) {
+                volatile test_array *entry = &array[i];
+                entry->x = 1;
+                smp_wmb();
+                entry->y = 1;
+        }
+
+        halt();
+}
+
+void message_passing_read_barrier(void)
+{
+        int i;
+        int errors = 0, ready = 0, not_ready = 0;
+
+        for (i = 0; i < array_size; i++) {
+                volatile test_array *entry = &array[i];
+                unsigned int x, y;
+                y = entry->y;
+                smp_rmb();
+                x = entry->x;
+
+                if (y && !x)
+                        errors++;
+
+                if (y) {
+                        ready++;
+                } else {
+                        not_ready++;
+
+                        if (not_ready > 2) {
+                                entry = &array[i+1];
+                                do {
+                                        not_ready = 0;
+                                } while (wait_if_ahead && !entry->y);
+                        }
+                }
+        }
+
+        report("mp barrier: %d errors, %d ready", errors == 0, errors, ready);
+}
+
+/* Simple Message Passing with Acquire/Release */
+void message_passing_write_release(void)
+{
+        int i;
+        for (i = 0; i < array_size; i++) {
+                volatile test_array *entry = &array[i];
+                entry->x = 1;
+                smp_store_release(&entry->y, 1);
+        }
+
+        halt();
+}
+
+void message_passing_read_acquire(void)
+{
+        int i;
+        int errors = 0, ready = 0, not_ready = 0;
+
+        for (i = 0; i < array_size; i++) {
+                volatile test_array *entry = &array[i];
+                unsigned int x, y;
+                y = smp_load_acquire(&entry->y);
+                x = entry->x;
+
+                if (y && !x)
+                        errors++;
+
+                if (y) {
+                        ready++;
+                } else {
+                        not_ready++;
+
+                        if (not_ready > 2) {
+                                entry = &array[i+1];
+                                do {
+                                        not_ready = 0;
+                                } while (wait_if_ahead && !entry->y);
+                        }
+                }
+        }
+
+        report("mp acqrel: %d errors, %d ready", errors == 0, errors, ready);
+}
+
+
+/* Test array */
+static test_descr_t tests[] = {
+
+        { "mp", false,
+          message_passing_read,
+          { message_passing_write }
+        },
+
+        { "mp_barrier", true,
+          message_passing_read_barrier,
+          { message_passing_write_barrier }
+        },
+
+        { "mp_acqrel", true,
+          message_passing_read_acquire,
+          { message_passing_write_release }
+        }
+};
+
+
+void setup_and_run_litmus(test_descr_t *test)
+{
+        array = calloc(array_size, sizeof(test_array));
+
+        if (array) {
+                int i = 0;
+                printf("Allocated test array @ %p\n", array);
+
+                while (test->secondary_fns[i]) {
+                        smp_boot_secondary(i+1, test->secondary_fns[i]);
+                        i++;
+                }
+
+                test->main_fn();
+        } else {
+                report("%s: failed to allocate memory", false, test->test_name);
+        }
+}
+
+int main(int argc, char **argv)
+{
+        int i;
+        unsigned int j;
+        test_descr_t *test = NULL;
+
+        for (i = 0; i

diff --git a/lib/arm/asm/barrier.h b/lib/arm/asm/barrier.h
 #define sev() asm volatile("sev" : : : "memory")
 #define wfe() asm volatile("wfe" : : : "memory")
 #define wfi() asm volatile("wfi" : : : "memory")
@@ -20,4 +22,63 @@
 #define smp_rmb() smp_mb()
 #define smp_wmb() dmb(ishst)
 
+extern void abort(void);
+
+static inline void __write_once_size(volatile void *p, void *res, int size)
+{
+        switch (size) {
+        case 1: *(volatile uint8_t *)p = *(uint8_t *)res; break;
+        case 2: *(volatile uint16_t *)p = *(uint16_t *)res; break;
+        case 4: *(volatile uint32_t *)p = *(uint32_t *)res; break;
+        case 8: *(volatile uint64_t *)p = *(uint64_t *)res; break;
+        default:
+                /* unhandled case */
+                abort();
+        }
+}
+
+#define WRITE_ONCE(x, val) \
+({ \
+        union { typeof(x) __val; char __c[1]; } __u = \
+                { .__val = (typeof(x)) (val) }; \
+        __write_once_size(&(x), __u.__c, sizeof(x)); \
+        __u.__val; \
+})
+
+#define smp_store_release(p, v) \
+do { \
+        smp_mb(); \
+        WRITE_ONCE(*p, v); \
+} while (0)
+
+
+static inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+        switch (size) {
+        case 1: *(uint8_t *)res = *(volatile uint8_t *)p; break;
+        case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
+        case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
+        case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
+        default:
+                /* unhandled case */
+                abort();
+        }
+}
+
+#define READ_ONCE(x) \
+({ \
+        union { typeof(x) __val; char __c[1]; } __u; \
+        __read_once_size(&(x), __u.__c, sizeof(x)); \
+        __u.__val; \
+})
+
+
+#define smp_load_acquire(p) \
+({ \
+        typeof(*p) ___p1 = READ_ONCE(*p); \
+        smp_mb(); \
+        ___p1; \
+})
+
 #endif /* _ASMARM_BARRIER_H_ */
diff --git a/lib/arm64/asm/barrier.h b/lib/arm64/asm/barrier.h
index dbdac9d..aafabdc 100644
--- a/lib/arm64/asm/barrier.h
+++ b/lib/arm64/asm/barrier.h
@@ -19,4 +19,54 @@
 #define smp_rmb() dmb(ishld)
 #define smp_wmb() dmb(ishst)
 
+#define smp_store_release(p, v) \
+do { \
+        switch (sizeof(*p)) { \
+        case 1: \
+                asm volatile ("stlrb %w1, %0" \
+                                : "=Q" (*p) : "r" (v) : "memory"); \
+                break; \
+        case 2: \
+                asm volatile ("stlrh %w1, %0" \
+                                : "=Q" (*p) : "r" (v) : "memory"); \
+                break; \
+        case 4: \
+                asm volatile ("stlr %w1, %0" \
+                                : "=Q" (*p) : "r" (v) : "memory"); \
+                break; \
+        case 8: \
+                asm volatile ("stlr %1, %0" \
+                                : "=Q" (*p) : "r" (v) : "memory"); \
+                break; \
+        } \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+        union { typeof(*p) __val; char __c[1]; } __u; \
+        switch (sizeof(*p)) { \
+        case 1: \
+                asm volatile ("ldarb %w0, %1" \
+                        : "=r" (*(u8 *)__u.__c) \
+                        : "Q" (*p) : "memory"); \
+                break; \
+        case 2: \
+                asm volatile ("ldarh %w0, %1" \
+                        : "=r" (*(u16 *)__u.__c) \
+                        : "Q" (*p) : "memory"); \
+                break; \
+        case 4: \
+                asm volatile ("ldar %w0, %1" \
+                        : "=r" (*(u32 *)__u.__c) \
+                        : "Q" (*p) : "memory"); \
+                break; \
+        case 8: \
+                asm volatile ("ldar %0, %1" \
+                        : "=r" (*(u64 *)__u.__c) \
+                        : "Q" (*p) : "memory"); \
+                break; \
+        } \
+        __u.__val; \
+})
+
 #endif /* _ASMARM64_BARRIER_H_ */
--
2.7.1