From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path:
Received: from bombadil.infradead.org ([198.137.202.133]:43918 "EHLO
	bombadil.infradead.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1752910AbeC3Dm7 (ORCPT);
	Thu, 29 Mar 2018 23:42:59 -0400
From: Matthew Wilcox
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org
Cc: Matthew Wilcox, Jan Kara, Jeff Layton, Lukas Czerner,
	Ross Zwisler, Christoph Hellwig, Goldwyn Rodrigues,
	Nicholas Piggin, Ryusuke Konishi, linux-nilfs@vger.kernel.org,
	Jaegeuk Kim, Chao Yu, linux-f2fs-devel@lists.sourceforge.net,
	Oleg Drokin, Andreas Dilger, James Simmons, Mike Kravetz
Subject: [PATCH v10 57/62] dax: Convert dax_layout_busy_page to XArray
Date: Thu, 29 Mar 2018 20:42:40 -0700
Message-Id: <20180330034245.10462-58-willy@infradead.org>
In-Reply-To: <20180330034245.10462-1-willy@infradead.org>
References: <20180330034245.10462-1-willy@infradead.org>
Sender: linux-fsdevel-owner@vger.kernel.org
List-ID:

From: Matthew Wilcox

Instead of using a pagevec, just use the XArray iterators.  Add a
conditional rescheduling point which probably should have been there
in the original.

Signed-off-by: Matthew Wilcox
---
 fs/dax.c | 54 ++++++++++++++++++++----------------------------------
 1 file changed, 20 insertions(+), 34 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 54ec283f5031..825bf153f499 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -607,11 +607,10 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
  */
 struct page *dax_layout_busy_page(struct address_space *mapping)
 {
-	pgoff_t indices[PAGEVEC_SIZE];
+	XA_STATE(xas, &mapping->i_pages, 0);
+	void *entry;
+	unsigned int scanned = 0;
 	struct page *page = NULL;
-	struct pagevec pvec;
-	pgoff_t index, end;
-	unsigned i;
 
 	/*
 	 * In the 'limited' case get_user_pages() for dax is disabled.
@@ -622,9 +621,6 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
 		return NULL;
 
-	pagevec_init(&pvec);
-	index = 0;
-	end = -1;
 	/*
 	 * Flush dax_layout_lock() sections to ensure all possible page
 	 * references have been taken, or otherwise arrange for faults
@@ -634,36 +630,26 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	unmap_mapping_range(mapping, 0, 0, 1);
 	synchronize_rcu();
 
-	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE),
-				indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *pvec_ent = pvec.pages[i];
-			void *entry;
-
-			index = indices[i];
-			if (index >= end)
-				break;
-
-			if (!xa_is_value(pvec_ent))
-				continue;
-
-			xa_lock_irq(&mapping->i_pages);
-			entry = get_unlocked_mapping_entry(mapping, index, NULL);
-			if (entry)
-				page = dax_busy_page(entry);
-			put_unlocked_mapping_entry(mapping, index, entry);
-			xa_unlock_irq(&mapping->i_pages);
-			if (page)
-				break;
-		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		index++;
-
+	xas_lock_irq(&xas);
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		if (!xa_is_value(entry))
+			continue;
+		if (unlikely(dax_is_locked(entry)))
+			entry = get_unlocked_entry(&xas);
+		if (entry)
+			page = dax_busy_page(entry);
+		put_unlocked_entry(&xas, entry);
 		if (page)
 			break;
+		if (++scanned % XA_CHECK_SCHED)
+			continue;
+
+		xas_pause(&xas);
+		xas_unlock_irq(&xas);
+		cond_resched();
+		xas_lock_irq(&xas);
 	}
+	xas_unlock_irq(&xas);
 	return page;
 }
 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
-- 
2.16.2
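
As an aside for reviewers new to the XArray API: the loop above follows
the standard iterate/pause/reschedule idiom.  Below is a minimal
standalone sketch of that idiom.  scan_all_entries() and its
process_entry() callback are hypothetical names invented for
illustration and are not part of this patch; XA_STATE, xas_for_each(),
xas_pause(), xas_lock_irq()/xas_unlock_irq() and XA_CHECK_SCHED are the
real XArray interfaces used in the diff.

#include <linux/sched.h>
#include <linux/xarray.h>

/*
 * Illustrative only -- not part of this patch.  Walk every entry in an
 * XArray under its spinlock, but every XA_CHECK_SCHED entries drop the
 * lock, give the scheduler a chance to run, and resume the walk.
 * process_entry() is a hypothetical callback standing in for whatever
 * per-entry work the caller does.
 */
static void scan_all_entries(struct xarray *xa,
			     void (*process_entry)(void *entry))
{
	XA_STATE(xas, xa, 0);		/* iteration cursor, starting at index 0 */
	unsigned int scanned = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		process_entry(entry);

		if (++scanned % XA_CHECK_SCHED)
			continue;

		/*
		 * xas_pause() records the current index so the walk can
		 * restart safely; the node pointers cached in the
		 * xa_state must not be trusted once the lock is dropped.
		 */
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
}

The modulo test is cheap and runs on every iteration, but the
pause/unlock/reschedule path only runs once every XA_CHECK_SCHED
entries, which bounds lock hold times on very large mappings without
paying for a reschedule on each entry.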