From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> To: Andrea Arcangeli <aarcange@redhat.com>, Hugh Dickins <hughd@google.com>, Rik van Riel <riel@redhat.com>, Andrew Morton <akpm@linux-foundation.org> Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Subject: [PATCH 11/12] mm: drop page_check_address{,_transhuge} Date: Tue, 24 Jan 2017 19:28:23 +0300 [thread overview] Message-ID: <20170124162824.91275-12-kirill.shutemov@linux.intel.com> (raw) In-Reply-To: <20170124162824.91275-1-kirill.shutemov@linux.intel.com> All users are gone. Let's drop them. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> --- include/linux/rmap.h | 36 -------------- mm/rmap.c | 138 --------------------------------------------------- 2 files changed, 174 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 474279810742..74113df9418d 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -196,42 +196,6 @@ int page_referenced(struct page *, int is_locked, int try_to_unmap(struct page *, enum ttu_flags flags); -/* - * Used by uprobes to replace a userspace page safely - */ -pte_t *__page_check_address(struct page *, struct mm_struct *, - unsigned long, spinlock_t **, int); - -static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, - unsigned long address, - spinlock_t **ptlp, int sync) -{ - pte_t *ptep; - - __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, - ptlp, sync)); - return ptep; -} - -/* - * Used by idle page tracking to check if a page was referenced via page - * tables. 
- */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, - unsigned long address, pmd_t **pmdp, - pte_t **ptep, spinlock_t **ptlp); -#else -static inline bool page_check_address_transhuge(struct page *page, - struct mm_struct *mm, unsigned long address, - pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp) -{ - *ptep = page_check_address(page, mm, address, ptlp, 0); - *pmdp = NULL; - return !!*ptep; -} -#endif - /* Avoid racy checks */ #define PAGE_CHECK_WALK_SYNC (1 << 0) /* Look for migarion entries rather than present ptes */ diff --git a/mm/rmap.c b/mm/rmap.c index cb34fd68a23a..7106eb9b37a8 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -708,144 +708,6 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) return pmd; } -/* - * Check that @page is mapped at @address into @mm. - * - * If @sync is false, page_check_address may perform a racy check to avoid - * the page table lock when the pte is not present (helpful when reclaiming - * highly shared pages). - * - * On success returns with pte mapped and locked. 
- */ -pte_t *__page_check_address(struct page *page, struct mm_struct *mm, - unsigned long address, spinlock_t **ptlp, int sync) -{ - pmd_t *pmd; - pte_t *pte; - spinlock_t *ptl; - - if (unlikely(PageHuge(page))) { - /* when pud is not present, pte will be NULL */ - pte = huge_pte_offset(mm, address); - if (!pte) - return NULL; - - ptl = huge_pte_lockptr(page_hstate(page), mm, pte); - goto check; - } - - pmd = mm_find_pmd(mm, address); - if (!pmd) - return NULL; - - pte = pte_offset_map(pmd, address); - /* Make a quick check before getting the lock */ - if (!sync && !pte_present(*pte)) { - pte_unmap(pte); - return NULL; - } - - ptl = pte_lockptr(mm, pmd); -check: - spin_lock(ptl); - if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { - *ptlp = ptl; - return pte; - } - pte_unmap_unlock(pte, ptl); - return NULL; -} - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -/* - * Check that @page is mapped at @address into @mm. In contrast to - * page_check_address(), this function can handle transparent huge pages. - * - * On success returns true with pte mapped and locked. For PMD-mapped - * transparent huge pages *@ptep is set to NULL. 
- */ -bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, - unsigned long address, pmd_t **pmdp, - pte_t **ptep, spinlock_t **ptlp) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - spinlock_t *ptl; - - if (unlikely(PageHuge(page))) { - /* when pud is not present, pte will be NULL */ - pte = huge_pte_offset(mm, address); - if (!pte) - return false; - - ptl = huge_pte_lockptr(page_hstate(page), mm, pte); - pmd = NULL; - goto check_pte; - } - - pgd = pgd_offset(mm, address); - if (!pgd_present(*pgd)) - return false; - pud = pud_offset(pgd, address); - if (!pud_present(*pud)) - return false; - pmd = pmd_offset(pud, address); - - if (pmd_trans_huge(*pmd)) { - ptl = pmd_lock(mm, pmd); - if (!pmd_present(*pmd)) - goto unlock_pmd; - if (unlikely(!pmd_trans_huge(*pmd))) { - spin_unlock(ptl); - goto map_pte; - } - - if (pmd_page(*pmd) != page) - goto unlock_pmd; - - pte = NULL; - goto found; -unlock_pmd: - spin_unlock(ptl); - return false; - } else { - pmd_t pmde = *pmd; - - barrier(); - if (!pmd_present(pmde) || pmd_trans_huge(pmde)) - return false; - } -map_pte: - pte = pte_offset_map(pmd, address); - if (!pte_present(*pte)) { - pte_unmap(pte); - return false; - } - - ptl = pte_lockptr(mm, pmd); -check_pte: - spin_lock(ptl); - - if (!pte_present(*pte)) { - pte_unmap_unlock(pte, ptl); - return false; - } - - /* THP can be referenced by any subpage */ - if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) { - pte_unmap_unlock(pte, ptl); - return false; - } -found: - *ptep = pte; - *pmdp = pmd; - *ptlp = ptl; - return true; -} -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - struct page_referenced_arg { int mapcount; int referenced; -- 2.11.0
WARNING: multiple messages have this Message-ID (diff)
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> To: Andrea Arcangeli <aarcange@redhat.com>, Hugh Dickins <hughd@google.com>, Rik van Riel <riel@redhat.com>, Andrew Morton <akpm@linux-foundation.org> Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Subject: [PATCH 11/12] mm: drop page_check_address{,_transhuge} Date: Tue, 24 Jan 2017 19:28:23 +0300 [thread overview] Message-ID: <20170124162824.91275-12-kirill.shutemov@linux.intel.com> (raw) In-Reply-To: <20170124162824.91275-1-kirill.shutemov@linux.intel.com> All users are gone. Let's drop them. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> --- include/linux/rmap.h | 36 -------------- mm/rmap.c | 138 --------------------------------------------------- 2 files changed, 174 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 474279810742..74113df9418d 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -196,42 +196,6 @@ int page_referenced(struct page *, int is_locked, int try_to_unmap(struct page *, enum ttu_flags flags); -/* - * Used by uprobes to replace a userspace page safely - */ -pte_t *__page_check_address(struct page *, struct mm_struct *, - unsigned long, spinlock_t **, int); - -static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, - unsigned long address, - spinlock_t **ptlp, int sync) -{ - pte_t *ptep; - - __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, - ptlp, sync)); - return ptep; -} - -/* - * Used by idle page tracking to check if a page was referenced via page - * tables. 
- */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, - unsigned long address, pmd_t **pmdp, - pte_t **ptep, spinlock_t **ptlp); -#else -static inline bool page_check_address_transhuge(struct page *page, - struct mm_struct *mm, unsigned long address, - pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp) -{ - *ptep = page_check_address(page, mm, address, ptlp, 0); - *pmdp = NULL; - return !!*ptep; -} -#endif - /* Avoid racy checks */ #define PAGE_CHECK_WALK_SYNC (1 << 0) /* Look for migarion entries rather than present ptes */ diff --git a/mm/rmap.c b/mm/rmap.c index cb34fd68a23a..7106eb9b37a8 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -708,144 +708,6 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) return pmd; } -/* - * Check that @page is mapped at @address into @mm. - * - * If @sync is false, page_check_address may perform a racy check to avoid - * the page table lock when the pte is not present (helpful when reclaiming - * highly shared pages). - * - * On success returns with pte mapped and locked. 
- */ -pte_t *__page_check_address(struct page *page, struct mm_struct *mm, - unsigned long address, spinlock_t **ptlp, int sync) -{ - pmd_t *pmd; - pte_t *pte; - spinlock_t *ptl; - - if (unlikely(PageHuge(page))) { - /* when pud is not present, pte will be NULL */ - pte = huge_pte_offset(mm, address); - if (!pte) - return NULL; - - ptl = huge_pte_lockptr(page_hstate(page), mm, pte); - goto check; - } - - pmd = mm_find_pmd(mm, address); - if (!pmd) - return NULL; - - pte = pte_offset_map(pmd, address); - /* Make a quick check before getting the lock */ - if (!sync && !pte_present(*pte)) { - pte_unmap(pte); - return NULL; - } - - ptl = pte_lockptr(mm, pmd); -check: - spin_lock(ptl); - if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { - *ptlp = ptl; - return pte; - } - pte_unmap_unlock(pte, ptl); - return NULL; -} - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -/* - * Check that @page is mapped at @address into @mm. In contrast to - * page_check_address(), this function can handle transparent huge pages. - * - * On success returns true with pte mapped and locked. For PMD-mapped - * transparent huge pages *@ptep is set to NULL. 
- */ -bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, - unsigned long address, pmd_t **pmdp, - pte_t **ptep, spinlock_t **ptlp) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - spinlock_t *ptl; - - if (unlikely(PageHuge(page))) { - /* when pud is not present, pte will be NULL */ - pte = huge_pte_offset(mm, address); - if (!pte) - return false; - - ptl = huge_pte_lockptr(page_hstate(page), mm, pte); - pmd = NULL; - goto check_pte; - } - - pgd = pgd_offset(mm, address); - if (!pgd_present(*pgd)) - return false; - pud = pud_offset(pgd, address); - if (!pud_present(*pud)) - return false; - pmd = pmd_offset(pud, address); - - if (pmd_trans_huge(*pmd)) { - ptl = pmd_lock(mm, pmd); - if (!pmd_present(*pmd)) - goto unlock_pmd; - if (unlikely(!pmd_trans_huge(*pmd))) { - spin_unlock(ptl); - goto map_pte; - } - - if (pmd_page(*pmd) != page) - goto unlock_pmd; - - pte = NULL; - goto found; -unlock_pmd: - spin_unlock(ptl); - return false; - } else { - pmd_t pmde = *pmd; - - barrier(); - if (!pmd_present(pmde) || pmd_trans_huge(pmde)) - return false; - } -map_pte: - pte = pte_offset_map(pmd, address); - if (!pte_present(*pte)) { - pte_unmap(pte); - return false; - } - - ptl = pte_lockptr(mm, pmd); -check_pte: - spin_lock(ptl); - - if (!pte_present(*pte)) { - pte_unmap_unlock(pte, ptl); - return false; - } - - /* THP can be referenced by any subpage */ - if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) { - pte_unmap_unlock(pte, ptl); - return false; - } -found: - *ptep = pte; - *pmdp = pmd; - *ptlp = ptl; - return true; -} -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - struct page_referenced_arg { int mapcount; int referenced; -- 2.11.0 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2017-01-24 16:29 UTC|newest] Thread overview: 71+ messages / expand[flat|nested] mbox.gz Atom feed top 2017-01-24 16:28 [PATCH 00/12] Fix few rmap-related THP bugs Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-24 16:28 ` [PATCH 01/12] uprobes: split THPs before trying replace them Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-24 18:08 ` Rik van Riel 2017-01-24 18:08 ` Rik van Riel 2017-01-24 21:28 ` Andrew Morton 2017-01-24 21:28 ` Andrew Morton 2017-01-24 22:22 ` Kirill A. Shutemov 2017-01-24 22:22 ` Kirill A. Shutemov 2017-01-24 22:35 ` Andrew Morton 2017-01-24 22:35 ` Andrew Morton 2017-01-24 22:56 ` Kirill A. Shutemov 2017-01-24 22:56 ` Kirill A. Shutemov 2017-01-25 16:55 ` Srikar Dronamraju 2017-01-25 16:55 ` Srikar Dronamraju 2017-01-25 17:44 ` Rik van Riel 2017-01-25 17:44 ` Kirill A. Shutemov 2017-01-25 17:44 ` Kirill A. Shutemov 2017-01-25 18:35 ` Johannes Weiner 2017-01-25 18:35 ` Johannes Weiner 2017-01-25 18:38 ` Kirill A. Shutemov 2017-01-25 18:38 ` Kirill A. Shutemov 2017-01-26 2:54 ` Srikar Dronamraju 2017-01-26 2:54 ` Srikar Dronamraju 2017-01-25 18:22 ` Johannes Weiner 2017-01-25 18:22 ` Johannes Weiner 2017-01-24 16:28 ` [PATCH 02/12] mm: introduce page_check_walk() Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-24 21:41 ` Andrew Morton 2017-01-24 21:41 ` Andrew Morton 2017-01-24 22:50 ` Kirill A. Shutemov 2017-01-24 22:50 ` Kirill A. Shutemov 2017-01-24 22:55 ` Andrew Morton 2017-01-24 22:55 ` Andrew Morton 2017-01-25 17:53 ` Kirill A. Shutemov 2017-01-25 17:53 ` Kirill A. Shutemov 2017-01-25 1:19 ` kbuild test robot 2017-01-25 1:19 ` kbuild test robot 2017-01-25 1:59 ` kbuild test robot 2017-01-25 1:59 ` kbuild test robot 2017-01-24 16:28 ` [PATCH 03/12] mm: fix handling PTE-mapped THPs in page_referenced() Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. 
Shutemov 2017-01-24 16:28 ` [PATCH 04/12] mm: fix handling PTE-mapped THPs in page_idle_clear_pte_refs() Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-24 16:28 ` [PATCH 05/12] mm, rmap: check all VMAs that PTE-mapped THP can be part of Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-24 16:28 ` [PATCH 06/12] mm: convert page_mkclean_one() to page_check_walk() Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-25 1:44 ` kbuild test robot 2017-01-25 1:44 ` kbuild test robot 2017-01-25 2:00 ` kbuild test robot 2017-01-25 2:00 ` kbuild test robot 2017-01-24 16:28 ` [PATCH 07/12] mm: convert try_to_unmap_one() " Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-25 3:13 ` kbuild test robot 2017-01-25 3:13 ` kbuild test robot 2017-01-24 16:28 ` [PATCH 08/12] mm, ksm: convert write_protect_page() " Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-24 16:28 ` [PATCH 09/12] mm, uprobes: convert __replace_page() " Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-26 2:58 ` Srikar Dronamraju 2017-01-26 2:58 ` Srikar Dronamraju 2017-01-24 16:28 ` [PATCH 10/12] mm: convert page_mapped_in_vma() " Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov [this message] 2017-01-24 16:28 ` [PATCH 11/12] mm: drop page_check_address{,_transhuge} Kirill A. Shutemov 2017-01-24 16:28 ` [PATCH 12/12] mm: convert remove_migration_pte() to page_check_walk() Kirill A. Shutemov 2017-01-24 16:28 ` Kirill A. Shutemov 2017-01-25 1:54 ` kbuild test robot 2017-01-25 1:54 ` kbuild test robot
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20170124162824.91275-12-kirill.shutemov@linux.intel.com \ --to=kirill.shutemov@linux.intel.com \ --cc=aarcange@redhat.com \ --cc=akpm@linux-foundation.org \ --cc=hughd@google.com \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=riel@redhat.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes; see the mirroring instructions for how to clone and mirror all data and code used by this external index.