From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path:
Received: from bombadil.infradead.org ([198.137.202.133]:43884 "EHLO
        bombadil.infradead.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
        with ESMTP id S1751188AbeDNON2 (ORCPT );
        Sat, 14 Apr 2018 10:13:28 -0400
From: Matthew Wilcox <willy@infradead.org>
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org
Cc: Matthew Wilcox <willy@infradead.org>, Jan Kara, Jeff Layton,
        Lukas Czerner, Ross Zwisler, Christoph Hellwig, Goldwyn Rodrigues,
        Nicholas Piggin, Ryusuke Konishi, linux-nilfs@vger.kernel.org,
        Jaegeuk Kim, Chao Yu, linux-f2fs-devel@lists.sourceforge.net,
        Oleg Drokin, Andreas Dilger, James Simmons, Mike Kravetz
Subject: [PATCH v11 42/63] memfd: Convert shmem_wait_for_pins to XArray
Date: Sat, 14 Apr 2018 07:12:55 -0700
Message-Id: <20180414141316.7167-43-willy@infradead.org>
In-Reply-To: <20180414141316.7167-1-willy@infradead.org>
References: <20180414141316.7167-1-willy@infradead.org>
Sender: linux-fsdevel-owner@vger.kernel.org
List-ID:

From: Matthew Wilcox <willy@infradead.org>

Simplify the locking by taking the spinlock while we walk the tree on
the assumption that many acquires and releases of the lock will be
worse than holding the lock while we process an entire batch of pages.

Signed-off-by: Matthew Wilcox
Reviewed-by: Mike Kravetz
---
 mm/shmem.c | 59 ++++++++++++++++++++++--------------------------------
 1 file changed, 24 insertions(+), 35 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index e1a0d1c7513e..017340fe933d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2627,9 +2627,7 @@ static void shmem_tag_pins(struct address_space *mapping)
  */
 static int shmem_wait_for_pins(struct address_space *mapping)
 {
-	struct radix_tree_iter iter;
-	void **slot;
-	pgoff_t start;
+	XA_STATE(xas, &mapping->i_pages, 0);
 	struct page *page;
 	int error, scan;
 
@@ -2637,7 +2635,9 @@ static int shmem_wait_for_pins(struct address_space *mapping)
 
 	error = 0;
 	for (scan = 0; scan <= LAST_SCAN; scan++) {
-		if (!radix_tree_tagged(&mapping->i_pages, SHMEM_TAG_PINNED))
+		unsigned int tagged = 0;
+
+		if (!xas_tagged(&xas, SHMEM_TAG_PINNED))
 			break;
 
 		if (!scan)
@@ -2645,45 +2645,34 @@ static int shmem_wait_for_pins(struct address_space *mapping)
 		else if (schedule_timeout_killable((HZ << scan) / 200))
 			scan = LAST_SCAN;
 
-		start = 0;
-		rcu_read_lock();
-		radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter,
-					   start, SHMEM_TAG_PINNED) {
-
-			page = radix_tree_deref_slot(slot);
-			if (radix_tree_exception(page)) {
-				if (radix_tree_deref_retry(page)) {
-					slot = radix_tree_iter_retry(&iter);
-					continue;
-				}
-
-				page = NULL;
-			}
-
-			if (page &&
-			    page_count(page) - page_mapcount(page) != 1) {
-				if (scan < LAST_SCAN)
-					goto continue_resched;
-
+		xas_set(&xas, 0);
+		xas_lock_irq(&xas);
+		xas_for_each_tag(&xas, page, ULONG_MAX, SHMEM_TAG_PINNED) {
+			bool clear = true;
+			if (xa_is_value(page))
+				continue;
+			if (page_count(page) - page_mapcount(page) != 1) {
 				/*
 				 * On the last scan, we clean up all those tags
 				 * we inserted; but make a note that we still
 				 * found pages pinned.
 				 */
-				error = -EBUSY;
+				if (scan == LAST_SCAN)
+					error = -EBUSY;
+				else
+					clear = false;
 			}
+			if (clear)
+				xas_clear_tag(&xas, SHMEM_TAG_PINNED);
+			if (++tagged % XA_CHECK_SCHED)
+				continue;
 
-			xa_lock_irq(&mapping->i_pages);
-			radix_tree_tag_clear(&mapping->i_pages,
-					     iter.index, SHMEM_TAG_PINNED);
-			xa_unlock_irq(&mapping->i_pages);
-continue_resched:
-			if (need_resched()) {
-				slot = radix_tree_iter_resume(slot, &iter);
-				cond_resched_rcu();
-			}
+			xas_pause(&xas);
+			xas_unlock_irq(&xas);
+			cond_resched();
+			xas_lock_irq(&xas);
 		}
-		rcu_read_unlock();
+		xas_unlock_irq(&xas);
 	}
 
 	return error;
-- 
2.17.0
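
For readers new to the XArray API, here is a minimal sketch of the locking
pattern the converted loop relies on: hold the xa_lock across a whole batch
of tagged entries, and every XA_CHECK_SCHED entries pause the iterator, drop
the lock, reschedule, and retake it.  The xas_* helpers, XA_CHECK_SCHED and
xa_tag_t are assumed from earlier patches in this series; the function name
and body below are illustrative only, not the shmem code itself.

/*
 * Illustrative sketch, not part of the patch: the batched tagged-walk
 * idiom used above.  walk_tagged_pages() is a hypothetical name.
 */
static void walk_tagged_pages(struct address_space *mapping, xa_tag_t tag)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	struct page *page;
	unsigned int batched = 0;

	xas_lock_irq(&xas);
	xas_for_each_tag(&xas, page, ULONG_MAX, tag) {
		if (xa_is_value(page))		/* skip shadow/swap entries */
			continue;

		/* ... act on the page while the lock is held ... */

		if (++batched % XA_CHECK_SCHED)
			continue;

		/* Every XA_CHECK_SCHED entries, give others a chance. */
		xas_pause(&xas);		/* iterator survives the unlock */
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
}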