From 25d9e6a9b37e573390af2e3f6c1db429d8ddb4ad Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 30 Oct 2022 15:14:43 -0700 Subject: [PATCH 3/4] mm: re-unify the simplified page_zap_*_rmap() function Now that we've simplified both the anonymous and file-backed page zap functions, they end up being identical except for which page statistic they update, and we can re-unify the implementation of that much simplified code. To make it very clear that this is only for the final pte zapping (since a lot of the simplifications depended on that), name the unified function 'page_zap_pte_rmap()'. Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 3 +-- mm/memory.c | 5 ++--- mm/rmap.c | 39 +++++++++------------------------------ 3 files changed, 12 insertions(+), 35 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 8d29b7c38368..f62af001707c 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -196,8 +196,7 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long address); void page_add_file_rmap(struct page *, struct vm_area_struct *, bool compound); -void page_zap_file_rmap(struct page *); -void page_zap_anon_rmap(struct page *); +void page_zap_pte_rmap(struct page *); void page_remove_rmap(struct page *, struct vm_area_struct *, bool compound); diff --git a/mm/memory.c b/mm/memory.c index ba1d08a908a4..c893f5ffc5a8 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1451,9 +1451,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, if (pte_young(ptent) && likely(!(vma->vm_flags & VM_SEQ_READ))) mark_page_accessed(page); - page_zap_file_rmap(page); - } else - page_zap_anon_rmap(page); + } + page_zap_pte_rmap(page); munlock_vma_page(page, vma, false); rss[mm_counter(page)]--; if (unlikely(page_mapcount(page) < 0)) diff --git a/mm/rmap.c b/mm/rmap.c index 69de6c833d5c..28b51a31ebb0 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1413,47 +1413,26 @@ static void page_remove_anon_compound_rmap(struct 
page *page) } /** - * page_zap_file_rmap - take down non-anon pte mapping from a page + * page_zap_pte_rmap - take down a pte mapping from a page * @page: page to remove mapping from * - * This is the simplified form of page_remove_rmap(), with: - * - we've already checked for '!PageAnon(page)' - * - 'compound' is always false - * - the caller does 'munlock_vma_page(page, vma, compound)' separately - * which allows for a much simpler calling convention. + * This is the simplified form of page_remove_rmap(), that only + * deals with last-level pages, so 'compound' is always false, + * and the caller does 'munlock_vma_page(page, vma, compound)' + * separately. * - * The caller holds the pte lock. - */ -void page_zap_file_rmap(struct page *page) -{ - if (!atomic_add_negative(-1, &page->_mapcount)) - return; - - lock_page_memcg(page); - __dec_lruvec_page_state(page, NR_FILE_MAPPED); - unlock_page_memcg(page); -} - -/** - * page_zap_anon_rmap(page) - take down non-anon pte mapping from a page - * @page: page to remove mapping from - * - * This is the simplified form of page_remove_rmap(), with: - * - we've already checked for 'PageAnon(page)' - * - 'compound' is always false - * - the caller does 'munlock_vma_page(page, vma, compound)' separately - * which allows for a much simpler calling convention. + * This allows for a much simpler calling convention and code. * * The caller holds the pte lock. */ -void page_zap_anon_rmap(struct page *page) +void page_zap_pte_rmap(struct page *page) { - /* page still mapped by someone else? */ if (!atomic_add_negative(-1, &page->_mapcount)) return; lock_page_memcg(page); - __dec_lruvec_page_state(page, NR_ANON_MAPPED); + __dec_lruvec_page_state(page, + PageAnon(page) ? NR_ANON_MAPPED : NR_FILE_MAPPED); unlock_page_memcg(page); } -- 2.37.1.289.g45aa1e5c72.dirty