From: Waiman Long <longman@redhat.com>
To: Alex Kogan <alex.kogan@oracle.com>,
linux@armlinux.org.uk, peterz@infradead.org, mingo@redhat.com,
will.deacon@arm.com, arnd@arndb.de, linux-arch@vger.kernel.org,
linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, tglx@linutronix.de, bp@alien8.de,
hpa@zytor.com, x86@kernel.org, guohanjun@huawei.com,
jglauber@marvell.com
Cc: steven.sistare@oracle.com, daniel.m.jordan@oracle.com,
dave.dice@oracle.com
Subject: Re: [PATCH v9 4/5] locking/qspinlock: Introduce starvation avoidance into CNA
Date: Thu, 23 Jan 2020 14:55:24 -0500
Message-ID: <f5e31716-d687-f64c-0fc5-f1c9b539c4ff@redhat.com>
In-Reply-To: <20200115035920.54451-5-alex.kogan@oracle.com>
On 1/14/20 10:59 PM, Alex Kogan wrote:
> Keep track of the number of intra-node lock handoffs, and force
> inter-node handoff once this number reaches a preset threshold.
> The default value for the threshold can be overridden with
> the new kernel boot command-line option "numa_spinlock_threshold".
>
> Signed-off-by: Alex Kogan <alex.kogan@oracle.com>
> Reviewed-by: Steve Sistare <steven.sistare@oracle.com>
> Reviewed-by: Waiman Long <longman@redhat.com>
> ---
> .../admin-guide/kernel-parameters.txt | 8 ++++
> kernel/locking/qspinlock.c | 3 ++
> kernel/locking/qspinlock_cna.h | 41 ++++++++++++++++++-
> 3 files changed, 51 insertions(+), 1 deletion(-)
>
> diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
> index b68cb80e477f..30d79819a3b0 100644
> --- a/Documentation/admin-guide/kernel-parameters.txt
> +++ b/Documentation/admin-guide/kernel-parameters.txt
> @@ -3200,6 +3200,14 @@
> Not specifying this option is equivalent to
> numa_spinlock=auto.
>
> + numa_spinlock_threshold= [NUMA, PV_OPS]
> + Set the threshold for the number of intra-node
> + lock hand-offs before the NUMA-aware spinlock
> + is forced to be passed to a thread on another NUMA node.
> + Valid values are in the [0..31] range. Smaller values
> + result in a more fair, but less performant spinlock, and
> + vice versa. The default value is 16.
> +
> cpu0_hotplug [X86] Turn on CPU0 hotplug feature when
> CONFIG_BOOTPARAM_HOTPLUG_CPU0 is off.
> Some features depend on CPU0. Known dependencies are:
> diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
> index 609980a53841..e382d8946ccc 100644
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -597,6 +597,9 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
> #if !defined(_GEN_CNA_LOCK_SLOWPATH) && defined(CONFIG_NUMA_AWARE_SPINLOCKS)
> #define _GEN_CNA_LOCK_SLOWPATH
>
> +#undef pv_init_node
> +#define pv_init_node cna_init_node
> +
> #undef pv_wait_head_or_lock
> #define pv_wait_head_or_lock cna_pre_scan
>
> diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h
> index 8000231f3d51..a2b65f87e6f8 100644
> --- a/kernel/locking/qspinlock_cna.h
> +++ b/kernel/locking/qspinlock_cna.h
> @@ -51,13 +51,25 @@ struct cna_node {
> int numa_node;
> u32 encoded_tail;
> u32 pre_scan_result; /* encoded tail or enum val */
> + u32 intra_count;
> };
>
> enum {
> LOCAL_WAITER_FOUND = 2, /* 0 and 1 are reserved for @locked */
> + FLUSH_SECONDARY_QUEUE = 3,
> MIN_ENCODED_TAIL
> };
>
> +/*
> + * Controls the threshold for the number of intra-node lock hand-offs before
> + * the NUMA-aware variant of spinlock is forced to be passed to a thread on
> + * another NUMA node. By default, the chosen value provides reasonable
> + * long-term fairness without sacrificing performance compared to a lock
> + * that does not have any fairness guarantees. The default setting can
> + * be changed with the "numa_spinlock_threshold" boot option.
> + */
> +unsigned int intra_node_handoff_threshold __ro_after_init = 1 << 16;
> +
> static void __init cna_init_nodes_per_cpu(unsigned int cpu)
> {
> struct mcs_spinlock *base = per_cpu_ptr(&qnodes[0].mcs, cpu);
> @@ -97,6 +109,11 @@ static int __init cna_init_nodes(void)
> }
> early_initcall(cna_init_nodes);
>
> +static __always_inline void cna_init_node(struct mcs_spinlock *node)
> +{
> + ((struct cna_node *)node)->intra_count = 0;
> +}
> +
> /* this function is called only when the primary queue is empty */
> static inline bool cna_try_change_tail(struct qspinlock *lock, u32 val,
> struct mcs_spinlock *node)
> @@ -232,7 +249,9 @@ __always_inline u32 cna_pre_scan(struct qspinlock *lock,
> {
> struct cna_node *cn = (struct cna_node *)node;
>
> - cn->pre_scan_result = cna_scan_main_queue(node, node);
> + cn->pre_scan_result =
> + cn->intra_count == intra_node_handoff_threshold ?
> + FLUSH_SECONDARY_QUEUE : cna_scan_main_queue(node, node);
>
> return 0;
> }
> @@ -262,6 +281,9 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
> * if we acquired the MCS lock when its queue was empty
> */
> val = node->locked ? node->locked : 1;
> + /* inc @intra_count if the secondary queue is not empty */
> + ((struct cna_node *)next_holder)->intra_count =
> + cn->intra_count + (node->locked > 1);
After playing with the lock event counts, I would like you to change the
meaning of the intra_count parameter that you are tracking. Instead of
counting the number of times the lock is consecutively passed to a waiter
on the same node, I would like you to count the number of times the head
waiter in the secondary queue has given up its chance to acquire the lock
because a later waiter jumped the queue and acquired the lock before it.
This value determines the worst-case latency that a secondary-queue
waiter can experience. So:
@@ -332,8 +334,12 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
 		 */
 		val = node->locked ? node->locked : 1;
-		/* inc @intra_count if the secondary queue is not empty */
-		next_cn->intra_count = cn->intra_count + (node->locked > 1);
+		/*
+		 * inc @intra_count and pass it down if the secondary queue
+		 * is not empty
+		 */
+		if (node->locked > 1)
+			next_cn->intra_count = cn->intra_count + 1;
 	} else if (node->locked > 1) {	/* if secondary queue is not empty */
 		/* next holder will be the first node in the secondary queue */
Maybe rename it to jump_count or some other more meaningful name. With
that change, we could probably reduce the default threshold from 64k to
maybe 256 or 512.
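To make the difference concrete, below is a rough userspace model of the
two counting policies. This is not the kernel code: the threshold, the
workload pattern and all the names are assumptions made purely for
illustration. Policy A bumps the count only when the secondary queue is
non-empty but keeps the value across handoffs where the queue has drained
(as in the v9 patch); policy B resets it whenever the queue is empty, so
it directly bounds how many times the waiter currently parked at the head
of the secondary queue can be jumped before a flush.

/*
 * Rough userspace model (NOT the kernel code) of the two counting
 * policies.  The threshold, the workload pattern and the names are
 * made up for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define THRESHOLD	256	/* assumed flush threshold */

int main(void)
{
	unsigned int intra_count = 0;	/* policy A: consecutive intra-node handoffs */
	unsigned int jump_count = 0;	/* policy B: times the current secondary head was jumped */
	unsigned int flushes_a = 0, flushes_b = 0;
	unsigned int handoff;

	srand(1);
	for (handoff = 0; handoff < 1000000; handoff++) {
		/* pretend a remote waiter sits in the secondary queue 90% of the time */
		bool secondary_nonempty = (rand() % 10) != 0;

		/* policy A: the count survives periods with an empty secondary queue */
		if (secondary_nonempty)
			intra_count++;
		if (intra_count >= THRESHOLD) {
			flushes_a++;
			intra_count = 0;
		}

		/*
		 * policy B: reset whenever the secondary queue drains, so the
		 * count bounds how many times the waiter at its head can be
		 * bypassed before the queue is flushed
		 */
		if (secondary_nonempty) {
			if (++jump_count >= THRESHOLD) {
				flushes_b++;
				jump_count = 0;
			}
		} else {
			jump_count = 0;
		}
	}

	printf("flushes, policy A (intra_count): %u\n", flushes_a);
	printf("flushes, policy B (jump_count):  %u\n", flushes_b);
	return 0;
}

In this toy run policy B triggers far fewer flushes because its count
restarts whenever the queue drains, yet a waiter that stays parked at the
head of a non-empty secondary queue is still guaranteed that the queue is
flushed back into the main queue within THRESHOLD handoffs, which is the
property that matters for worst-case latency.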
I changed the threshold to 256 and ran a 96-thread locking stress test
for 10s. The lock event counts were:
cna_flush_queue=15687
cna_intra_max=256
cna_mainscan_hit=13
cna_merge_queue=15691
cna_prescan_hit=4344037
cna_prescan_miss=21
cna_splice_new=15701
cna_splice_old=1289
lock_pending=4384
lock_slowpath=47998292
lock_use_node2=16778
Of the prescan hits, only about 0.4% resulted in a queue flush, which I
think is reasonable. I didn't see any noticeable performance degradation
in the stress test from reducing the threshold from 64k to 256.
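For reference, that ratio is cna_flush_queue / cna_prescan_hit =
15687 / 4344037 ≈ 0.36%, i.e. roughly one queue flush per ~277 prescan
hits.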
Cheers,
Longman