From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S932481AbcFNSOm (ORCPT );
	Tue, 14 Jun 2016 14:14:42 -0400
Received: from g9t1613g.houston.hpe.com ([15.241.32.99]:1572 "EHLO
	g9t1613g.houston.hpe.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S932344AbcFNSN0 (ORCPT );
	Tue, 14 Jun 2016 14:13:26 -0400
From: Waiman Long 
To: Peter Zijlstra , Ingo Molnar 
Cc: linux-kernel@vger.kernel.org, x86@kernel.org,
	linux-alpha@vger.kernel.org, linux-ia64@vger.kernel.org,
	linux-s390@vger.kernel.org, linux-arch@vger.kernel.org,
	xfs@oss.sgi.com, Davidlohr Bueso , Jason Low ,
	Dave Chinner , Scott J Norton , Douglas Hatch ,
	Waiman Long 
Subject: [RFC PATCH-tip 3/6] locking/rwsem: move down rwsem_down_read_failed function
Date: Tue, 14 Jun 2016 14:12:36 -0400
Message-Id: <1465927959-39719-4-git-send-email-Waiman.Long@hpe.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1465927959-39719-1-git-send-email-Waiman.Long@hpe.com>
References: <1465927959-39719-1-git-send-email-Waiman.Long@hpe.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org

Move the rwsem_down_read_failed() function down to below the optimistic
spinning section before enabling optimistic spinning for the readers.
There is no change in code.

Signed-off-by: Waiman Long 
---
 kernel/locking/rwsem-xadd.c |  116 +++++++++++++++++++++---------------------
 1 files changed, 58 insertions(+), 58 deletions(-)

diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 9703f4a..400e594 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -226,64 +226,6 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
 }
 
 /*
- * Wait for the read lock to be granted
- */
-__visible
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-	long count, adjustment = 0;
-	struct rwsem_waiter waiter;
-	struct task_struct *tsk = current;
-	WAKE_Q(wake_q);
-
-	/*
-	 * Undo read bias from down_read operation, stop active locking.
-	 * Doing that after taking the wait_lock may block writer lock
-	 * stealing for too long.
-	 */
-	atomic_long_add(-RWSEM_ACTIVE_READ_BIAS, &sem->count);
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	waiter.type = RWSEM_WAITING_FOR_READ;
-
-	raw_spin_lock_irq(&sem->wait_lock);
-	if (list_empty(&sem->wait_list))
-		adjustment += RWSEM_WAITING_BIAS;
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we're now waiting on the lock */
-	if (adjustment)
-		count = atomic_long_add_return(adjustment, &sem->count);
-	else
-		count = atomic_long_read(&sem->count);
-
-	/* If there are no active locks, wake the front queued process(es).
-	 *
-	 * If there are no writers and we are first in the queue,
-	 * wake our own waiter to join the existing active readers !
-	 */
-	if (count == RWSEM_WAITING_BIAS ||
-	    (count > RWSEM_WAITING_BIAS && adjustment))
-		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
-
-	raw_spin_unlock_irq(&sem->wait_lock);
-	wake_up_q(&wake_q);
-
-	/* wait to be given the lock */
-	while (true) {
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		if (!waiter.task)
-			break;
-		schedule();
-	}
-
-	__set_task_state(tsk, TASK_RUNNING);
-	return sem;
-}
-EXPORT_SYMBOL(rwsem_down_read_failed);
-
-/*
  * This function must be called with the sem->wait_lock held to prevent
  * race conditions between checking the rwsem wait list and setting the
  * sem->count accordingly.
@@ -479,6 +421,64 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
 #endif
 
 /*
+ * Wait for the read lock to be granted
+ */
+__visible
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+	long count, adjustment = 0;
+	struct rwsem_waiter waiter;
+	struct task_struct *tsk = current;
+	WAKE_Q(wake_q);
+
+	/*
+	 * Undo read bias from down_read operation, stop active locking.
+	 * Doing that after taking the wait_lock may block writer lock
+	 * stealing for too long.
+	 */
+	atomic_long_add(-RWSEM_ACTIVE_READ_BIAS, &sem->count);
+
+	/* set up my own style of waitqueue */
+	waiter.task = tsk;
+	waiter.type = RWSEM_WAITING_FOR_READ;
+
+	raw_spin_lock_irq(&sem->wait_lock);
+	if (list_empty(&sem->wait_list))
+		adjustment += RWSEM_WAITING_BIAS;
+	list_add_tail(&waiter.list, &sem->wait_list);
+
+	/* we're now waiting on the lock */
+	if (adjustment)
+		count = atomic_long_add_return(adjustment, &sem->count);
+	else
+		count = atomic_long_read(&sem->count);
+
+	/* If there are no active locks, wake the front queued process(es).
+	 *
+	 * If there are no writers and we are first in the queue,
+	 * wake our own waiter to join the existing active readers !
+	 */
+	if (count == RWSEM_WAITING_BIAS ||
+	    (count > RWSEM_WAITING_BIAS && adjustment))
+		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+
+	raw_spin_unlock_irq(&sem->wait_lock);
+	wake_up_q(&wake_q);
+
+	/* wait to be given the lock */
+	while (true) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (!waiter.task)
+			break;
+		schedule();
+	}
+
+	__set_task_state(tsk, TASK_RUNNING);
+	return sem;
+}
+EXPORT_SYMBOL(rwsem_down_read_failed);
+
+/*
  * Wait until we successfully acquire the write lock
  */
 static inline struct rw_semaphore *
-- 
1.7.1
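
For context on why the ordering matters: the optimistic spinning helpers in
rwsem-xadd.c are static functions, so they have to be visible before any
caller in the same file. Placing rwsem_down_read_failed() below that section
lets a later patch in this series make the reader slow path spin without
adding forward declarations. Below is a minimal, self-contained sketch of
that dependency only; it is not part of the patch, and the function bodies
are simplified stubs rather than the real kernel code.

/*
 * Illustrative sketch only (not part of the patch).  It shows the
 * definition-order dependency: a static helper must be visible before
 * its caller, so the reader slow path is defined after the spinning
 * section.  Bodies are stand-in stubs, not kernel code.
 */
#include <stdbool.h>

struct rw_semaphore;	/* opaque here; the real struct lives in the kernel */

/* ---- optimistic spinning section (defined first in the file) ---- */
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	/* the real helper spins while the lock owner is running on a CPU */
	return false;		/* pretend spinning did not take the lock */
}

/* ---- slow paths, placed after the spinning section ---- */
struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	/* a later patch can now try spinning before queuing the reader */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/* otherwise fall back to queuing on sem->wait_list and sleeping */
	return sem;
}

Moving the whole function, rather than adding a forward declaration for the
spinning helper, also keeps both slow paths next to the spinning code they
are about to share.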