From: Waiman Long <longman@redhat.com>
To: Alex Kogan <alex.kogan@oracle.com>,
	linux@armlinux.org.uk, peterz@infradead.org, mingo@redhat.com,
	will.deacon@arm.com, arnd@arndb.de, linux-arch@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, tglx@linutronix.de, bp@alien8.de,
	hpa@zytor.com, x86@kernel.org, guohanjun@huawei.com,
	jglauber@marvell.com
Cc: steven.sistare@oracle.com, daniel.m.jordan@oracle.com,
	dave.dice@oracle.com, rahul.x.yadav@oracle.com
Subject: Re: [PATCH v7 5/5] locking/qspinlock: Introduce the shuffle reduction optimization into CNA
Date: Fri, 6 Dec 2019 17:00:24 -0500	[thread overview]
Message-ID: <1fce5ebf-7f80-fb9e-92b1-74062a6611a5@redhat.com> (raw)
In-Reply-To: <20191125210709.10293-6-alex.kogan@oracle.com>

On 11/25/19 4:07 PM, Alex Kogan wrote:
> @@ -234,12 +263,13 @@ __always_inline u32 cna_pre_scan(struct qspinlock *lock,
>  	struct cna_node *cn = (struct cna_node *)node;
>  
>  	/*
> -	 * setting @pre_scan_result to 1 indicates that no post-scan
> +	 * setting @pre_scan_result to 1 or 2 indicates that no post-scan
>  	 * should be made in cna_pass_lock()
>  	 */
>  	cn->pre_scan_result =
> -		cn->intra_count == intra_node_handoff_threshold ?
> -			1 : cna_scan_main_queue(node, node);
> +		(node->locked <= 1 && probably(SHUFFLE_REDUCTION_PROB_ARG)) ?
> +			1 : cn->intra_count == intra_node_handoff_threshold ?
> +			2 : cna_scan_main_queue(node, node);
>  
>  	return 0;
>  }
> @@ -253,12 +283,15 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
>  
>  	u32 scan = cn->pre_scan_result;
>  
> +	if (scan == 1)
> +		goto pass_lock;
> +
>  	/*
>  	 * check if a successor from the same numa node has not been found in
>  	 * pre-scan, and if so, try to find it in post-scan starting from the
>  	 * node where pre-scan stopped (stored in @pre_scan_result)
>  	 */
> -	if (scan > 1)
> +	if (scan > 2)
>  		scan = cna_scan_main_queue(node, decode_tail(scan));
>  
>  	if (!scan) { /* if found a successor from the same numa node */
> @@ -281,5 +314,6 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
>  		tail_2nd->next = next;
>  	}
>  
> +pass_lock:
>  	arch_mcs_pass_lock(&next_holder->locked, val);
>  }

I think you might have mishandled the proper accounting of intra_count.
How about something like:

diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h
index f1eef6bece7b..03f8fdec2b80 100644
--- a/kernel/locking/qspinlock_cna.h
+++ b/kernel/locking/qspinlock_cna.h
@@ -268,7 +268,7 @@ __always_inline u32 cna_pre_scan(struct qspinlock *lock,
         */
        cn->pre_scan_result =
                (node->locked <= 1 && probably(SHUFFLE_REDUCTION_PROB_ARG)) ?
-                       1 : cn->intra_count == intra_node_handoff_threshold ?
+                       1 : cn->intra_count >= intra_node_handoff_threshold ?
                        2 : cna_scan_main_queue(node, node);
 
        return 0;
@@ -283,9 +283,6 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
 
        u32 scan = cn->pre_scan_result;
 
-       if (scan == 1)
-               goto pass_lock;
-
        /*
         * check if a successor from the same numa node has not been found in
         * pre-scan, and if so, try to find it in post-scan starting from the
@@ -294,7 +291,13 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
        if (scan > 2)
                scan = cna_scan_main_queue(node, decode_tail(scan));
 
-       if (!scan) { /* if found a successor from the same numa node */
+       if (scan <= 1) { /* if found a successor from the same numa node */
+               /* inc @intra_count if the secondary queue is not empty */
+               ((struct cna_node *)next_holder)->intra_count =
+                       cn->intra_count + (node->locked > 1);
+               if (scan == 1)
+                       goto pass_lock;
+
                next_holder = node->next;
                /*
                 * we unlock successor by passing a non-zero value,
@@ -302,9 +305,6 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
                 * if we acquired the MCS lock when its queue was empty
                 */
                val = node->locked ? node->locked : 1;
-               /* inc @intra_count if the secondary queue is not empty */
-               ((struct cna_node *)next_holder)->intra_count =
-                       cn->intra_count + (node->locked > 1);
        } else if (node->locked > 1) {    /* if secondary queue is not empty */
                /* next holder will be the first node in the secondary queue */
                tail_2nd = decode_tail(node->locked);

The meaning of the scan values:

0 - pass the lock to the next cna node, which is on the same numa node.
Additional cna nodes may or may not be added to the secondary queue.

1 - pass the lock to the next cna node, which may not be on the same
numa node. No change to the secondary queue.

2 - the intra node handoff threshold has been reached; unconditionally
merge back the secondary queue cna nodes, if any.

>2 - no cna node from the same numa node was found; unconditionally
merge back the secondary queue cna nodes, if any.

The code will be easier to read if symbolic names are used instead of
just numbers.
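
For instance, something like the sketch below (the names and exact
values are only a suggestion, not code from the patch; anything above
CNA_INTRA_MAX would still be the encoded tail of the node where the
pre-scan stopped):

/* hypothetical symbolic names for @pre_scan_result */
#define CNA_LOCAL_SUCC	0	/* successor from the same numa node found */
#define CNA_PASS_AS_IS	1	/* shuffle reduction: skip scans, pass lock as is */
#define CNA_INTRA_MAX	2	/* intra node handoff threshold reached */

	cn->pre_scan_result =
		(node->locked <= 1 && probably(SHUFFLE_REDUCTION_PROB_ARG)) ?
			CNA_PASS_AS_IS :
			cn->intra_count >= intra_node_handoff_threshold ?
			CNA_INTRA_MAX : cna_scan_main_queue(node, node);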

Cheers,
Longman



Thread overview: 52+ messages in thread
2019-11-25 21:07 [PATCH v7 0/5] Add NUMA-awareness to qspinlock Alex Kogan
2019-11-25 21:07 ` [PATCH v7 1/5] locking/qspinlock: Rename mcs lock/unlock macros and make them more generic Alex Kogan
2020-01-22  9:15   ` Peter Zijlstra
     [not found]     ` <C608A39E-CAFC-4C79-9EB6-3DFD9621E3F6@oracle.com>
2020-01-25 11:23       ` Peter Zijlstra
2019-11-25 21:07 ` [PATCH v7 2/5] locking/qspinlock: Refactor the qspinlock slow path Alex Kogan
2019-11-25 21:07 ` [PATCH v7 3/5] locking/qspinlock: Introduce CNA into the slow path of qspinlock Alex Kogan
2019-12-06 17:21   ` Waiman Long
2019-12-06 19:50     ` Alex Kogan
2020-01-21 20:29   ` Peter Zijlstra
2020-01-22  8:58     ` Will Deacon
2020-01-22  9:22     ` Peter Zijlstra
2020-01-22  9:51     ` Peter Zijlstra
2020-01-22 17:04       ` Peter Zijlstra
2020-01-23  9:00         ` Peter Zijlstra
2020-01-30 22:01         ` Alex Kogan
2020-01-31 13:35           ` Peter Zijlstra
2020-01-31 18:33             ` Alex Kogan
2019-11-25 21:07 ` [PATCH v7 4/5] locking/qspinlock: Introduce starvation avoidance into CNA Alex Kogan
2019-12-06 18:09   ` Waiman Long
2019-11-25 21:07 ` [PATCH v7 5/5] locking/qspinlock: Introduce the shuffle reduction optimization " Alex Kogan
2019-12-06 22:00   ` Waiman Long [this message]
2019-12-10 18:56 Alex Kogan
2019-12-17 20:05 ` Waiman Long
