From: Chris Wilson <chris@chris-wilson.co.uk> To: linux-kernel@vger.kernel.org Cc: "Chris Wilson" <chris@chris-wilson.co.uk>, "Peter Zijlstra" <peterz@infradead.org>, "Ingo Molnar" <mingo@redhat.com>, "Maarten Lankhorst" <dev@mblankhorst.nl>, "Nicolai Hähnle" <nhaehnle@gmail.com> Subject: [PATCH v2 7/8] locking: Add kselftests for ww_mutex stress Date: Thu, 1 Dec 2016 11:47:10 +0000 Message-ID: <20161201114711.28697-8-chris@chris-wilson.co.uk> (raw) In-Reply-To: <20161201114711.28697-1-chris@chris-wilson.co.uk> v2: Use both inorder and reorder locking strategies Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Maarten Lankhorst <dev@mblankhorst.nl> Cc: Nicolai Hähnle <nhaehnle@gmail.com> --- kernel/locking/test-ww_mutex.c | 254 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 254 insertions(+) diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c index 84da738e57d1..da6c9a34f62f 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/test-ww_mutex.c @@ -19,8 +19,10 @@ #include <linux/kernel.h> #include <linux/completion.h> +#include <linux/delay.h> #include <linux/kthread.h> #include <linux/module.h> +#include <linux/random.h> #include <linux/slab.h> #include <linux/ww_mutex.h> @@ -348,6 +350,246 @@ static int test_cycle(unsigned int ncpus) return 0; } +struct stress { + struct work_struct work; + struct ww_mutex *locks; + int nlocks; + int nloops; +}; + +static int *get_random_order(int count) +{ + int *order; + int n, r, tmp; + + order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); + if (!order) + return order; + + for (n = 0; n < count; n++) + order[n] = n; + + for (n = count - 1; n > 1; n--) { + r = get_random_int() % (n + 1); + if (r != n) { + tmp = order[n]; + order[n] = order[r]; + order[r] = tmp; + } + } + + return order; +} + +static void dummy_load(struct stress *stress) +{ + usleep_range(1000, 2000); +} + +static void stress_inorder_work(struct work_struct *work) +{ + struct stress *stress = container_of(work, typeof(*stress), work); + const int nlocks = stress->nlocks; + struct ww_mutex *locks = stress->locks; + struct ww_acquire_ctx ctx; + int *order; + + order = get_random_order(nlocks); + if (!order) + return; + + ww_acquire_init(&ctx, &ww_class); + + do { + int contended = -1; + int n, err; + +retry: + err = 0; + for (n = 0; n < nlocks; n++) { + if (n == contended) + continue; + + err = ww_mutex_lock(&locks[order[n]], &ctx); + if (err < 0) + break; + } + if (!err) + dummy_load(stress); + + if (contended > n) + ww_mutex_unlock(&locks[order[contended]]); + contended = n; + while (n--) + ww_mutex_unlock(&locks[order[n]]); + + if (err == -EDEADLK) { + ww_mutex_lock_slow(&locks[order[contended]], &ctx); + goto retry; + } + + if (err) { + pr_err_once("stress (%s) failed with %d\n", + __func__, err); + break; + } + } while (--stress->nloops); + + ww_acquire_fini(&ctx); + + kfree(order); + kfree(stress); +} + +struct reorder_lock { + struct list_head link; + struct ww_mutex *lock; +}; + +static void stress_reorder_work(struct work_struct *work) +{ + struct stress *stress = container_of(work, typeof(*stress), work); + LIST_HEAD(locks); + struct ww_acquire_ctx ctx; + struct reorder_lock *ll, *ln; + int *order; + int n, err; + + order = get_random_order(stress->nlocks); + if (!order) + return; + + for (n = 0; n < stress->nlocks; n++) { + ll = kmalloc(sizeof(*ll), GFP_KERNEL); + if (!ll) + goto out; + + ll->lock = &stress->locks[order[n]]; + 
list_add(&ll->link, &locks); + } + kfree(order); + order = NULL; + + ww_acquire_init(&ctx, &ww_class); + + do { + list_for_each_entry(ll, &locks, link) { + err = ww_mutex_lock(ll->lock, &ctx); + if (!err) + continue; + + ln = ll; + list_for_each_entry_continue_reverse(ln, &locks, link) + ww_mutex_unlock(ln->lock); + + if (err != -EDEADLK) { + pr_err_once("stress (%s) failed with %d\n", + __func__, err); + break; + } + + ww_mutex_lock_slow(ll->lock, &ctx); + list_move(&ll->link, &locks); /* restarts iteration */ + } + + dummy_load(stress); + list_for_each_entry(ll, &locks, link) + ww_mutex_unlock(ll->lock); + } while (--stress->nloops); + + ww_acquire_fini(&ctx); + +out: + list_for_each_entry_safe(ll, ln, &locks, link) + kfree(ll); + kfree(order); + kfree(stress); +} + +static void stress_one_work(struct work_struct *work) +{ + struct stress *stress = container_of(work, typeof(*stress), work); + const int nlocks = stress->nlocks; + struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks); + int err; + + do { + err = ww_mutex_lock(lock, NULL); + if (!err) { + dummy_load(stress); + ww_mutex_unlock(lock); + } else { + pr_err_once("stress (%s) failed with %d\n", + __func__, err); + break; + } + } while (--stress->nloops); + + kfree(stress); +} + +#define STRESS_INORDER BIT(0) +#define STRESS_REORDER BIT(1) +#define STRESS_ONE BIT(2) +#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE) + +static int stress(int nlocks, int nthreads, int nloops, unsigned int flags) +{ + struct ww_mutex *locks; + int n; + + locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL); + if (!locks) + return -ENOMEM; + + for (n = 0; n < nlocks; n++) + ww_mutex_init(&locks[n], &ww_class); + + for (n = 0; nthreads; n++) { + struct stress *stress; + void (*fn)(struct work_struct *work); + + fn = NULL; + switch (n & 3) { + case 0: + if (flags & STRESS_INORDER) + fn = stress_inorder_work; + break; + case 1: + if (flags & STRESS_REORDER) + fn = stress_reorder_work; + break; + case 2: + if (flags & STRESS_ONE) + fn = stress_one_work; + break; + } + + if (!fn) + continue; + + stress = kmalloc(sizeof(*stress), GFP_KERNEL); + if (!stress) + break; + + INIT_WORK(&stress->work, fn); + stress->locks = locks; + stress->nlocks = nlocks; + stress->nloops = nloops; + + queue_work(wq, &stress->work); + nthreads--; + } + + flush_workqueue(wq); + + for (n = 0; n < nlocks; n++) + ww_mutex_destroy(&locks[n]); + kfree(locks); + + return 0; +} + static int __init test_ww_mutex_init(void) { int ncpus = num_online_cpus(); @@ -377,6 +619,18 @@ static int __init test_ww_mutex_init(void) if (ret) return ret; + ret = stress(16, 2*ncpus, 1<<10, STRESS_INORDER); + if (ret) + return ret; + + ret = stress(16, 2*ncpus, 1<<10, STRESS_REORDER); + if (ret) + return ret; + + ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL); + if (ret) + return ret; + return 0; } -- 2.10.2
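For context, the backoff dance that stress_inorder_work() performs is the usual ww_mutex acquire/retry protocol from <linux/ww_mutex.h>. Below is a minimal sketch of that protocol pulled out into helpers; the class name example_class and the helpers lock_all()/unlock_all() are illustrative only and are not part of the patch, which open-codes the equivalent loop.

	#include <linux/ww_mutex.h>

	static DEFINE_WW_CLASS(example_class);

	/*
	 * Acquire every lock in locks[], backing off and sleeping on the
	 * contended lock whenever -EDEADLK says an older context needs it.
	 */
	static int lock_all(struct ww_mutex *locks, int count,
			    struct ww_acquire_ctx *ctx)
	{
		int contended = -1;	/* index already taken via the slowpath */
		int n, err;

		ww_acquire_init(ctx, &example_class);
	retry:
		for (n = 0; n < count; n++) {
			if (n == contended)
				continue;	/* still held from ww_mutex_lock_slow() */

			err = ww_mutex_lock(&locks[n], ctx);
			if (!err)
				continue;

			/* Back off: drop everything we currently hold ... */
			if (contended > n)
				ww_mutex_unlock(&locks[contended]);
			contended = n;
			while (n--)
				ww_mutex_unlock(&locks[n]);

			if (err != -EDEADLK)	/* defensive; ww_mutex_lock() only fails with -EDEADLK */
				return err;

			/* ... then sleep until the winner releases the contended
			 * lock (ww_mutex_lock_slow() cannot fail) and start over. */
			ww_mutex_lock_slow(&locks[contended], ctx);
			goto retry;
		}

		ww_acquire_done(ctx);	/* all locks held, no further ww_mutex_lock() calls */
		return 0;
	}

	static void unlock_all(struct ww_mutex *locks, int count,
			       struct ww_acquire_ctx *ctx)
	{
		while (count--)
			ww_mutex_unlock(&locks[count]);
		ww_acquire_fini(ctx);
	}

A caller brackets its critical section with lock_all()/unlock_all(). The stress workers above do effectively the same, but over a randomly permuted lock order and with a dummy_load() in the middle, precisely so that concurrent workers hit the -EDEADLK backoff path.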
Thread overview: 21+ messages

2016-12-01 11:47 locking: Add kselftests for ww_mutex Chris Wilson
2016-12-01 11:47 ` [PATCH v2 1/8] locking: Fix compilation of __WW_MUTEX_INITIALIZER Chris Wilson
2016-12-16 14:50 ` Peter Zijlstra
2016-12-16 17:26 ` Chris Wilson
2017-01-14 12:52 ` [tip:locking/core] locking/ww_mutex: " tip-bot for Chris Wilson
2016-12-01 11:47 ` [PATCH v2 2/8] locking: Add ww_mutex to locktorture test Chris Wilson
2017-01-14 12:52 ` [tip:locking/core] locking/ww_mutex: " tip-bot for Chris Wilson
2016-12-01 11:47 ` [PATCH v2 3/8] locking: Begin kselftests for ww_mutex Chris Wilson
2017-01-14 12:53 ` [tip:locking/core] locking/ww_mutex: " tip-bot for Chris Wilson
2016-12-01 11:47 ` [PATCH v2 4/8] locking: Add kselftests for ww_mutex AA deadlock detection Chris Wilson
2017-01-14 12:54 ` [tip:locking/core] locking/ww_mutex: " tip-bot for Chris Wilson
2016-12-01 11:47 ` [PATCH v2 5/8] locking: Add kselftests for ww_mutex ABBA " Chris Wilson
2017-01-14 12:54 ` [tip:locking/core] locking/ww_mutex: " tip-bot for Chris Wilson
2016-12-01 11:47 ` [PATCH v2 6/8] locking: Add kselftests for resolving ww_mutex cyclic deadlocks Chris Wilson
2017-01-14 12:55 ` [tip:locking/core] locking/ww_mutex: " tip-bot for Chris Wilson
2016-12-01 11:47 ` Chris Wilson [this message]
2017-01-14 12:55 ` [tip:locking/core] locking/ww_mutex: Add kselftests for ww_mutex stress tip-bot for Chris Wilson
2017-01-19 20:13 ` Peter Zijlstra
2016-12-01 11:47 ` [PATCH v2 8/8] locking: Add ww_mutex to tools/testing/selftests Chris Wilson
2017-01-14 12:56 ` [tip:locking/core] locking/ww_mutex: " tip-bot for Chris Wilson
2017-02-22 14:29 ` locking: Add kselftests for ww_mutex Geert Uytterhoeven