From: Waiman Long
Subject: [PATCH v12 04/11] qspinlock: Extract out code snippets for the next patch
Date: Thu, 16 Oct 2014 14:10:33 -0400
Message-ID: <1413483040-58399-5-git-send-email-Waiman.Long__46008.7571213537$1413483168$gmane$org@hp.com>
References: <1413483040-58399-1-git-send-email-Waiman.Long@hp.com>
In-Reply-To: <1413483040-58399-1-git-send-email-Waiman.Long@hp.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: Thomas Gleixner, Ingo Molnar, "H. Peter Anvin"
Cc: linux-arch@vger.kernel.org, Waiman Long, Raghavendra K T,
	kvm@vger.kernel.org, Peter Zijlstra, Scott J Norton, x86@kernel.org,
	Paolo Bonzini, linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org, David Vrabel,
	Oleg Nesterov, xen-devel@lists.xenproject.org, Boris Ostrovsky,
	"Paul E. McKenney", Linus Torvalds, Douglas Hatch

This is a preparatory patch that extracts out the following two code
snippets ahead of the next performance optimization patch:

 1) the logic for exchanging the new and previous tail code words,
    which moves into a new xchg_tail() function;

 2) the logic for clearing the pending bit and setting the locked bit,
    which moves into a new clear_pending_set_locked() function.

This patch also simplifies the trylock operation before queuing by
calling queue_spin_trylock() directly.

Signed-off-by: Waiman Long
Signed-off-by: Peter Zijlstra
---
 include/asm-generic/qspinlock_types.h |  2 +
 kernel/locking/qspinlock.c            | 91 +++++++++++++++++++++-----------
 2 files changed, 62 insertions(+), 31 deletions(-)

diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 4196694..88d647c 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -58,6 +58,8 @@ typedef struct qspinlock {
 #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
 
+#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
 #define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
 
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 226b11d..48bd2ad 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -95,6 +95,54 @@ static inline struct mcs_spinlock *decode_tail(u32 tail)
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
 
 /**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queue spinlock structure
+ * @val : Current value of the queue spinlock 32-bit word
+ *
+ * *,1,0 -> *,0,1
+ */
+static __always_inline void
+clear_pending_set_locked(struct qspinlock *lock, u32 val)
+{
+	u32 new, old;
+
+	for (;;) {
+		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
+
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+}
+
+/**
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queue spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+{
+	u32 old, new, val = atomic_read(&lock->val);
+
+	for (;;) {
+		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+	return old;
+}
+
+/**
  * queue_spin_lock_slowpath - acquire the queue spinlock
  * @lock: Pointer to queue spinlock structure
  * @val: Current value of the queue spinlock 32-bit word
@@ -176,15 +224,7 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 *
 	 * *,1,0 -> *,0,1
 	 */
-	for (;;) {
-		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
-
-		old = atomic_cmpxchg(&lock->val, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	clear_pending_set_locked(lock, val);
 	return;
 
 	/*
@@ -201,37 +241,26 @@ queue:
 	node->next = NULL;
 
 	/*
-	 * We have already touched the queueing cacheline; don't bother with
-	 * pending stuff.
-	 *
-	 * trylock || xchg(lock, node)
-	 *
-	 * 0,0,0 -> 0,0,1 ; no tail, not locked -> no tail, locked.
-	 * p,y,x -> n,y,x ; tail was p -> tail is n; preserving locked.
+	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
+	 * attempt the trylock once more in the hope someone let go while we
+	 * weren't watching.
 	 */
-	for (;;) {
-		new = _Q_LOCKED_VAL;
-		if (val)
-			new = tail | (val & _Q_LOCKED_PENDING_MASK);
-
-		old = atomic_cmpxchg(&lock->val, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	if (queue_spin_trylock(lock))
+		goto release;
 
 	/*
-	 * we won the trylock; forget about queueing.
+	 * We have already touched the queueing cacheline; don't bother with
+	 * pending stuff.
+	 *
+	 * p,*,* -> n,*,*
 	 */
-	if (new == _Q_LOCKED_VAL)
-		goto release;
+	old = xchg_tail(lock, tail);
 
 	/*
 	 * if there was a previous node; link it and wait until reaching the
 	 * head of the waitqueue.
 	 */
-	if (old & ~_Q_LOCKED_PENDING_MASK) {
+	if (old & _Q_TAIL_MASK) {
 		prev = decode_tail(old);
 		ACCESS_ONCE(prev->next) = node;
-- 
1.7.1
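
Both extracted helpers share one pattern: a cmpxchg retry loop that
recomputes the desired lock word from the last observed value and, when
the compare-and-exchange fails, retries with the value the cmpxchg
returned. The sketch below illustrates xchg_tail()'s loop in portable
user-space C11; it is only an illustration, not part of the patch:
demo_xchg_tail and the two mask constants are hypothetical stand-ins
for the kernel's atomic_cmpxchg()/atomic_read() helpers and the _Q_*
macros.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's _Q_* field masks. */
#define LOCKED_PENDING_MASK	0x0000ffffU	/* locked + pending bits */
#define TAIL_MASK		0xffff0000U	/* tail code word bits   */

/* Same shape as xchg_tail(): replace the tail, preserve the rest. */
static uint32_t demo_xchg_tail(_Atomic uint32_t *lock, uint32_t tail)
{
	uint32_t new, old, val = atomic_load(lock);

	for (;;) {
		/* Recompute the desired word from the last observed value. */
		new = (val & LOCKED_PENDING_MASK) | tail;
		old = val;
		/* On failure, 'old' is refreshed with the current word. */
		if (atomic_compare_exchange_weak(lock, &old, new))
			return old;	/* the previous lock word */
		val = old;
	}
}

int main(void)
{
	_Atomic uint32_t lock = 0x00010001U;	/* tail code 1, locked */
	uint32_t prev = demo_xchg_tail(&lock, 0x00020000U);

	printf("previous tail %#x, lock word now %#x\n",
	       (unsigned int)(prev & TAIL_MASK),
	       (unsigned int)atomic_load(&lock));
	return 0;
}

Hiding the loop behind xchg_tail() means a later patch can replace the
loop body (for example, with a cheaper direct exchange on only the tail
bits) without touching the slowpath callers; that is presumably the
"next performance optimization patch" the subject line refers to.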