Date: Mon, 15 May 2017 15:59:16 +0200
From: Peter Zijlstra
To: Davidlohr Bueso
Cc: mingo@kernel.org, akpm@linux-foundation.org, jack@suse.cz,
	kirill.shutemov@linux.intel.com, ldufour@linux.vnet.ibm.com,
	mhocko@suse.com, mgorman@techsingularity.net,
	linux-kernel@vger.kernel.org, Davidlohr Bueso
Subject: Re: [PATCH 2/6] locking: Introduce range reader/writer lock
Message-ID: <20170515135916.2arr6oqzrag4wdfe@hirez.programming.kicks-ass.net>
References: <20170515090725.27055-1-dave@stgolabs.net>
	<20170515090725.27055-3-dave@stgolabs.net>
In-Reply-To: <20170515090725.27055-3-dave@stgolabs.net>

On Mon, May 15, 2017 at 02:07:21AM -0700, Davidlohr Bueso wrote:
> +static inline int wait_for_ranges(struct range_lock_tree *tree,
> +				  struct range_lock *lock, long state)
> +{
> +	int ret = 0;
> +
> +	while (true) {
> +		set_current_state(state);
> +
> +		/* do we need to go to sleep? */
> +		if (!lock->blocking_ranges)
> +			break;
> +
> +		if (unlikely(signal_pending_state(state, current))) {
> +			struct interval_tree_node *node;
> +			unsigned long flags;
> +			DEFINE_WAKE_Q(wake_q);
> +
> +			ret = -EINTR;
> +			/*
> +			 * We're not taking the lock after all, cleanup
> +			 * after ourselves.
> +			 */
> +			spin_lock_irqsave(&tree->lock, flags);
> +
> +			range_lock_clear_reader(lock);
> +			__range_tree_remove(tree, lock);
> +
> +			if (!__range_intersects_intree(tree, lock))
> +				goto unlock;
> +
> +			range_interval_tree_foreach(node, &tree->root,
> +						    lock->node.start,
> +						    lock->node.last) {
> +				struct range_lock *blked;
> +				blked = to_range_lock(node);
> +
> +				if (range_lock_is_reader(lock) &&
> +				    range_lock_is_reader(blked))
> +					continue;
> +
> +				/* unaccount for threads _we_ are blocking */
> +				if (lock->seqnum < blked->seqnum)
> +					range_lock_put(blked, &wake_q);
> +			}
> +
> +unlock:
> +			spin_unlock_irqrestore(&tree->lock, flags);
> +			wake_up_q(&wake_q);
> +			break;
> +		}
> +
> +		schedule();
> +	}
> +
> +	__set_current_state(TASK_RUNNING);
> +	return ret;
> +}
> +
> +void range_read_unlock(struct range_lock_tree *tree, struct range_lock *lock)
> +{
> +	struct interval_tree_node *node;
> +	unsigned long flags;
> +	DEFINE_WAKE_Q(wake_q);
> +
> +	spin_lock_irqsave(&tree->lock, flags);
> +
> +	range_lock_clear_reader(lock);
> +	__range_tree_remove(tree, lock);
> +
> +	range_lock_release(&tree->dep_map, 1, _RET_IP_);
> +
> +	if (!__range_intersects_intree(tree, lock)) {
> +		/* nobody to wakeup, we're done */
> +		spin_unlock_irqrestore(&tree->lock, flags);
> +		return;
> +	}
> +
> +	range_interval_tree_foreach(node, &tree->root,
> +				    lock->node.start, lock->node.last) {
> +		struct range_lock *blocked_lock;
> +		blocked_lock = to_range_lock(node);
> +
> +		if (!range_lock_is_reader(blocked_lock))
> +			range_lock_put(blocked_lock, &wake_q);
> +	}
> +
> +	spin_unlock_irqrestore(&tree->lock, flags);
> +	wake_up_q(&wake_q);
> +}
> +EXPORT_SYMBOL_GPL(range_read_unlock);
> +
> +void range_write_unlock(struct range_lock_tree *tree, struct range_lock *lock)
> +{
> +	struct interval_tree_node *node;
> +	unsigned long flags;
> +	DEFINE_WAKE_Q(wake_q);
> +
> +	spin_lock_irqsave(&tree->lock, flags);
> +
> +	range_lock_clear_reader(lock);
> +	__range_tree_remove(tree, lock);
> +
> +	range_lock_release(&tree->dep_map, 1, _RET_IP_);
> +
> +	if (!__range_intersects_intree(tree, lock)) {
> +		/* nobody to wakeup, we're done */
> +		spin_unlock_irqrestore(&tree->lock, flags);
> +		return;
> +	}
> +
> +	range_interval_tree_foreach(node, &tree->root,
> +				    lock->node.start, lock->node.last) {
> +		struct range_lock *blocked_lock;
> +		blocked_lock = to_range_lock(node);
> +
> +		range_lock_put(blocked_lock, &wake_q);
> +	}
> +
> +	spin_unlock_irqrestore(&tree->lock, flags);
> +	wake_up_q(&wake_q);
> +}
> +EXPORT_SYMBOL_GPL(range_write_unlock);

There is significant duplication here. Can't we have a
__range_unlock_common() and use that 3 times?
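
Something like the sketch below, perhaps (completely untested, and the
helper/filter names are made up for illustration). The three sites only
differ in which intersecting waiters they unaccount, so that becomes a
filter callback; the lockdep release stays with the callers, since the
-EINTR cleanup path doesn't pair with it the same way:

static bool wake_writers_only(struct range_lock *lock,
			      struct range_lock *blocked)
{
	/* a reader going away can only unblock waiting writers */
	return !range_lock_is_reader(blocked);
}

static bool wake_all(struct range_lock *lock, struct range_lock *blocked)
{
	/* a writer going away may unblock anybody intersecting */
	return true;
}

static bool wake_blocked_by_us(struct range_lock *lock,
			       struct range_lock *blocked)
{
	/* two readers never block each other */
	if (range_lock_is_reader(lock) && range_lock_is_reader(blocked))
		return false;

	/* unaccount only for threads _we_ are blocking */
	return lock->seqnum < blocked->seqnum;
}

static void __range_unlock_common(struct range_lock_tree *tree,
				  struct range_lock *lock,
				  bool (*filter)(struct range_lock *,
						 struct range_lock *))
{
	struct interval_tree_node *node;
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	spin_lock_irqsave(&tree->lock, flags);

	range_lock_clear_reader(lock);
	__range_tree_remove(tree, lock);

	if (!__range_intersects_intree(tree, lock))
		goto unlock;

	range_interval_tree_foreach(node, &tree->root,
				    lock->node.start, lock->node.last) {
		struct range_lock *blocked = to_range_lock(node);

		if (filter(lock, blocked))
			range_lock_put(blocked, &wake_q);
	}
unlock:
	spin_unlock_irqrestore(&tree->lock, flags);
	/* wake_up_q() on an empty queue is a no-op */
	wake_up_q(&wake_q);
}

void range_read_unlock(struct range_lock_tree *tree, struct range_lock *lock)
{
	range_lock_release(&tree->dep_map, 1, _RET_IP_);
	__range_unlock_common(tree, lock, wake_writers_only);
}

void range_write_unlock(struct range_lock_tree *tree, struct range_lock *lock)
{
	range_lock_release(&tree->dep_map, 1, _RET_IP_);
	__range_unlock_common(tree, lock, wake_all);
}

And the signal_pending_state() path in wait_for_ranges() then becomes
__range_unlock_common(tree, lock, wake_blocked_by_us) followed by the
break, with no separate goto-unlock dance needed there.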