All of lore.kernel.org
 help / color / mirror / Atom feed
From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [PULL 01/14] rcu tests: fix compilation on 32-bit ppc
Date: Thu, 26 Mar 2015 17:02:21 +0100	[thread overview]
Message-ID: <1427385754-13012-2-git-send-email-pbonzini@redhat.com> (raw)
In-Reply-To: <1427385754-13012-1-git-send-email-pbonzini@redhat.com>

32-bit PPC cannot do atomic operations on long long.  Inside the loops,
we are already using local counters that are summed at the end of
the run---with some exceptions (rcu_stress_count for rcutorture,
n_nodes for test-rcu-list): fix them to use the same technique.
For test-rcu-list, remove the mostly unused member "val" from the
list.  Then, use a mutex to protect the global counts.

Performance does not matter there because every thread will only enter
the critical section once.

Remaining uses of atomic instructions are for ints or pointers.

Reported-by: Andreas Faerber <afaerber@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 tests/rcutorture.c    | 20 ++++++++++++++++----
 tests/test-rcu-list.c | 50 ++++++++++++++++++++++++++++----------------------
 2 files changed, 44 insertions(+), 26 deletions(-)

diff --git a/tests/rcutorture.c b/tests/rcutorture.c
index 60a2ccf..d6b304d 100644
--- a/tests/rcutorture.c
+++ b/tests/rcutorture.c
@@ -82,6 +82,7 @@ static volatile int goflag = GOFLAG_INIT;
 #define RCU_READ_RUN 1000
 
 #define NR_THREADS 100
+static QemuMutex counts_mutex;
 static QemuThread threads[NR_THREADS];
 static struct rcu_reader_data *data[NR_THREADS];
 static int n_threads;
@@ -130,7 +131,9 @@ static void *rcu_read_perf_test(void *arg)
         }
         n_reads_local += RCU_READ_RUN;
     }
-    atomic_add(&n_reads, n_reads_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_reads += n_reads_local;
+    qemu_mutex_unlock(&counts_mutex);
 
     rcu_unregister_thread();
     return NULL;
@@ -151,7 +154,9 @@ static void *rcu_update_perf_test(void *arg)
         synchronize_rcu();
         n_updates_local++;
     }
-    atomic_add(&n_updates, n_updates_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_updates += n_updates_local;
+    qemu_mutex_unlock(&counts_mutex);
 
     rcu_unregister_thread();
     return NULL;
@@ -241,6 +246,7 @@ static void *rcu_read_stress_test(void *arg)
     struct rcu_stress *p;
     int pc;
     long long n_reads_local = 0;
+    long long rcu_stress_local[RCU_STRESS_PIPE_LEN + 1] = { 0 };
     volatile int garbage = 0;
 
     rcu_register_thread();
@@ -265,13 +271,18 @@ static void *rcu_read_stress_test(void *arg)
         if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
             pc = RCU_STRESS_PIPE_LEN;
         }
-        atomic_inc(&rcu_stress_count[pc]);
+        rcu_stress_local[pc]++;
         n_reads_local++;
         if ((++itercnt % 0x1000) == 0) {
             synchronize_rcu();
         }
     }
-    atomic_add(&n_reads, n_reads_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_reads += n_reads_local;
+    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
+        rcu_stress_count[i] += rcu_stress_local[i];
+    }
+    qemu_mutex_unlock(&counts_mutex);
 
     rcu_unregister_thread();
     return NULL;
@@ -419,6 +430,7 @@ int main(int argc, char *argv[])
     int nreaders = 1;
     int duration = 1;
 
+    qemu_mutex_init(&counts_mutex);
     if (argc >= 2 && argv[1][0] == '-') {
         g_test_init(&argc, &argv, NULL);
         if (g_test_quick()) {
diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
index 46b5e26..4c5f62e 100644
--- a/tests/test-rcu-list.c
+++ b/tests/test-rcu-list.c
@@ -35,16 +35,15 @@
  * Test variables.
  */
 
-long long n_reads = 0LL;
-long long n_updates = 0LL;
-long long n_reclaims = 0LL;
-long long n_nodes_removed = 0LL;
-long long n_nodes = 0LL;
-int g_test_in_charge = 0;
+static QemuMutex counts_mutex;
+static long long n_reads = 0LL;
+static long long n_updates = 0LL;
+static long long n_reclaims = 0LL;
+static long long n_nodes_removed = 0LL;
+static long long n_nodes = 0LL;
+static int g_test_in_charge = 0;
 
-int nthreadsrunning;
-
-char argsbuf[64];
+static int nthreadsrunning;
 
 #define GOFLAG_INIT 0
 #define GOFLAG_RUN  1
@@ -92,21 +91,21 @@ static void wait_all_threads(void)
 struct list_element {
     QLIST_ENTRY(list_element) entry;
     struct rcu_head rcu;
-    long long val;
 };
 
 static void reclaim_list_el(struct rcu_head *prcu)
 {
     struct list_element *el = container_of(prcu, struct list_element, rcu);
     g_free(el);
-    atomic_add(&n_reclaims, 1);
+    /* Accessed only from call_rcu thread.  */
+    n_reclaims++;
 }
 
 static QLIST_HEAD(q_list_head, list_element) Q_list_head;
 
 static void *rcu_q_reader(void *arg)
 {
-    long long j, n_reads_local = 0;
+    long long n_reads_local = 0;
     struct list_element *el;
 
     *(struct rcu_reader_data **)arg = &rcu_reader;
@@ -118,8 +117,6 @@ static void *rcu_q_reader(void *arg)
     while (goflag == GOFLAG_RUN) {
         rcu_read_lock();
         QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
-            j = atomic_read(&el->val);
-            (void)j;
             n_reads_local++;
             if (goflag == GOFLAG_STOP) {
                 break;
@@ -129,7 +126,9 @@ static void *rcu_q_reader(void *arg)
 
         g_usleep(100);
     }
-    atomic_add(&n_reads, n_reads_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_reads += n_reads_local;
+    qemu_mutex_unlock(&counts_mutex);
     return NULL;
 }
 
@@ -137,6 +136,7 @@ static void *rcu_q_reader(void *arg)
 static void *rcu_q_updater(void *arg)
 {
     int j, target_el;
+    long long n_nodes_local = 0;
     long long n_updates_local = 0;
     long long n_removed_local = 0;
     struct list_element *el, *prev_el;
@@ -170,8 +170,7 @@ static void *rcu_q_updater(void *arg)
             j++;
             if (target_el == j) {
                 prev_el = g_new(struct list_element, 1);
-                atomic_add(&n_nodes, 1);
-                prev_el->val = atomic_read(&n_nodes);
+                n_nodes += n_nodes_local;
                 QLIST_INSERT_BEFORE_RCU(el, prev_el, entry);
                 break;
             }
@@ -181,8 +180,11 @@ static void *rcu_q_updater(void *arg)
         synchronize_rcu();
     }
     synchronize_rcu();
-    atomic_add(&n_updates, n_updates_local);
-    atomic_add(&n_nodes_removed, n_removed_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_nodes += n_nodes_local;
+    n_updates += n_updates_local;
+    n_nodes_removed += n_removed_local;
+    qemu_mutex_unlock(&counts_mutex);
     return NULL;
 }
 
@@ -194,10 +196,11 @@ static void rcu_qtest_init(void)
     srand(time(0));
     for (i = 0; i < RCU_Q_LEN; i++) {
         new_el = g_new(struct list_element, 1);
-        new_el->val = i;
         QLIST_INSERT_HEAD_RCU(&Q_list_head, new_el, entry);
     }
-    atomic_add(&n_nodes, RCU_Q_LEN);
+    qemu_mutex_lock(&counts_mutex);
+    n_nodes += RCU_Q_LEN;
+    qemu_mutex_unlock(&counts_mutex);
 }
 
 static void rcu_qtest_run(int duration, int nreaders)
@@ -233,7 +236,9 @@ static void rcu_qtest(const char *test, int duration, int nreaders)
         call_rcu1(&prev_el->rcu, reclaim_list_el);
         n_removed_local++;
     }
-    atomic_add(&n_nodes_removed, n_removed_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_nodes_removed += n_removed_local;
+    qemu_mutex_unlock(&counts_mutex);
     synchronize_rcu();
     while (n_nodes_removed > n_reclaims) {
         g_usleep(100);
@@ -277,6 +282,7 @@ int main(int argc, char *argv[])
 {
     int duration = 0, readers = 0;
 
+    qemu_mutex_init(&counts_mutex);
     if (argc >= 2) {
         if (argv[1][0] == '-') {
             g_test_init(&argc, &argv, NULL);
-- 
2.3.3

  reply	other threads:[~2015-03-26 16:02 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-03-26 16:02 [Qemu-devel] [PULL 00/14] Misc bugfixes for 2.3.0-rc2 Paolo Bonzini
2015-03-26 16:02 ` Paolo Bonzini [this message]
2015-03-26 16:02 ` [Qemu-devel] [PULL 02/14] fw_cfg: factor out initialization of FW_CFG_ID (rev. number) Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 03/14] i6300esb: Correct endiannness Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 04/14] i6300esb: Fix signed integer overflow Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 05/14] cpus: Don't kick un-realized cpus Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 06/14] megasas: use PCI DMA APIs Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 07/14] vmw_pvscsi: " Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 08/14] nbd: Fix up comment after commit e140177 Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 09/14] m68k: memory: Replace memory_region_init_ram with memory_region_allocate_system_memory Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 10/14] mips: " Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 11/14] sparc: " Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 12/14] exec: avoid possible overwriting of mmaped area in qemu_ram_remap Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 13/14] misc: fix typos in copyright declaration Paolo Bonzini
2015-03-26 16:02 ` [Qemu-devel] [PULL 14/14] virtio-scsi-dataplane: fix memory leak for VirtIOSCSIVring Paolo Bonzini
2015-03-26 18:34 ` [Qemu-devel] [PULL 00/14] Misc bugfixes for 2.3.0-rc2 Peter Maydell

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1427385754-13012-2-git-send-email-pbonzini@redhat.com \
    --to=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes;
see mirroring instructions on how to clone and mirror
all data and code used by this external index.