From: Huang Ying <ying.huang@intel.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Huang Ying <ying.huang@intel.com>,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Michal Hocko <mhocko@kernel.org>,
Johannes Weiner <hannes@cmpxchg.org>,
Shaohua Li <shli@kernel.org>, Hugh Dickins <hughd@google.com>,
Minchan Kim <minchan@kernel.org>, Rik van Riel <riel@redhat.com>,
Dave Hansen <dave.hansen@linux.intel.com>,
Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>,
Zi Yan <zi.yan@cs.rutgers.edu>,
Daniel Jordan <daniel.m.jordan@oracle.com>
Subject: [PATCH -V8 19/21] swap: Support PMD swap mapping in common path
Date: Fri, 7 Dec 2018 13:41:19 +0800 [thread overview]
Message-ID: <20181207054122.27822-20-ying.huang@intel.com> (raw)
In-Reply-To: <20181207054122.27822-1-ying.huang@intel.com>
The original code handles only PMD migration entries; it is revised here to
also support PMD swap mappings.
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
---
fs/proc/task_mmu.c | 12 +++++-------
mm/gup.c | 36 ++++++++++++++++++++++++------------
mm/huge_memory.c | 7 ++++---
mm/mempolicy.c | 2 +-
4 files changed, 34 insertions(+), 23 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 39e96a21366e..0e65233f2cc2 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -986,7 +986,7 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
pmd = pmd_clear_soft_dirty(pmd);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
- } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+ } else if (is_swap_pmd(pmd)) {
pmd = pmd_swp_clear_soft_dirty(pmd);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
@@ -1316,9 +1316,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
if (pm->show_pfn)
frame = pmd_pfn(pmd) +
((addr & ~PMD_MASK) >> PAGE_SHIFT);
- }
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
- else if (is_swap_pmd(pmd)) {
+ } else if (IS_ENABLED(CONFIG_HAVE_PMD_SWAP_ENTRY) &&
+ is_swap_pmd(pmd)) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
unsigned long offset;
@@ -1331,10 +1330,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
flags |= PM_SWAP;
if (pmd_swp_soft_dirty(pmd))
flags |= PM_SOFT_DIRTY;
- VM_BUG_ON(!is_pmd_migration_entry(pmd));
- page = migration_entry_to_page(entry);
+ if (is_pmd_migration_entry(pmd))
+ page = migration_entry_to_page(entry);
}
-#endif
if (page && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
diff --git a/mm/gup.c b/mm/gup.c
index 6dd33e16a806..460565825ef0 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -215,6 +215,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
+ swp_entry_t entry;
pmd = pmd_offset(pudp, address);
/*
@@ -242,18 +243,22 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
if (!pmd_present(pmdval)) {
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
- VM_BUG_ON(thp_migration_supported() &&
- !is_pmd_migration_entry(pmdval));
- if (is_pmd_migration_entry(pmdval))
+ entry = pmd_to_swp_entry(pmdval);
+ if (thp_migration_supported() && is_migration_entry(entry)) {
pmd_migration_entry_wait(mm, pmd);
- pmdval = READ_ONCE(*pmd);
- /*
- * MADV_DONTNEED may convert the pmd to null because
- * mmap_sem is held in read mode
- */
- if (pmd_none(pmdval))
+ pmdval = READ_ONCE(*pmd);
+ /*
+ * MADV_DONTNEED may convert the pmd to null because
+ * mmap_sem is held in read mode
+ */
+ if (pmd_none(pmdval))
+ return no_page_table(vma, flags);
+ goto retry;
+ }
+ if (IS_ENABLED(CONFIG_THP_SWAP) && !non_swap_entry(entry))
return no_page_table(vma, flags);
- goto retry;
+ WARN_ON(1);
+ return no_page_table(vma, flags);
}
if (pmd_devmap(pmdval)) {
ptl = pmd_lock(mm, pmd);
@@ -275,11 +280,18 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
return no_page_table(vma, flags);
}
if (unlikely(!pmd_present(*pmd))) {
+ entry = pmd_to_swp_entry(*pmd);
spin_unlock(ptl);
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
- pmd_migration_entry_wait(mm, pmd);
- goto retry_locked;
+ if (thp_migration_supported() && is_migration_entry(entry)) {
+ pmd_migration_entry_wait(mm, pmd);
+ goto retry_locked;
+ }
+ if (IS_ENABLED(CONFIG_THP_SWAP) && !non_swap_entry(entry))
+ return no_page_table(vma, flags);
+ WARN_ON(1);
+ return no_page_table(vma, flags);
}
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(ptl);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5b2eb7871cd7..b75af88c505a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2138,7 +2138,7 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
- if (unlikely(is_pmd_migration_entry(pmd)))
+ if (unlikely(is_swap_pmd(pmd)))
pmd = pmd_swp_mksoft_dirty(pmd);
else if (pmd_present(pmd))
pmd = pmd_mksoft_dirty(pmd);
@@ -2222,11 +2222,12 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
preserve_write = prot_numa && pmd_write(*pmd);
ret = 1;
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#if defined(CONFIG_ARCH_ENABLE_THP_MIGRATION) || defined(CONFIG_THP_SWAP)
if (is_swap_pmd(*pmd)) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
- VM_BUG_ON(!is_pmd_migration_entry(*pmd));
+ VM_BUG_ON(!IS_ENABLED(CONFIG_THP_SWAP) &&
+ !is_migration_entry(entry));
if (is_write_migration_entry(entry)) {
pmd_t newpmd;
/*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e4f8248822c1..39335bf99169 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -436,7 +436,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
struct queue_pages *qp = walk->private;
unsigned long flags;
- if (unlikely(is_pmd_migration_entry(*pmd))) {
+ if (unlikely(is_swap_pmd(*pmd))) {
ret = 1;
goto unlock;
}
--
2.18.1
next prev parent reply other threads:[~2018-12-07 5:42 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-12-07 5:41 [PATCH -V8 00/21] swap: Swapout/swapin THP in one piece Huang Ying
2018-12-07 5:41 ` [PATCH -V8 01/21] swap: Enable PMD swap operations for CONFIG_THP_SWAP Huang Ying
2018-12-07 5:41 ` [PATCH -V8 02/21] swap: Add __swap_duplicate_locked() Huang Ying
2018-12-07 5:41 ` [PATCH -V8 03/21] swap: Support PMD swap mapping in swap_duplicate() Huang Ying
2018-12-07 5:41 ` [PATCH -V8 04/21] swap: Support PMD swap mapping in put_swap_page() Huang Ying
2018-12-07 5:41 ` [PATCH -V8 05/21] swap: Support PMD swap mapping in free_swap_and_cache()/swap_free() Huang Ying
2018-12-07 5:41 ` [PATCH -V8 06/21] swap: Support PMD swap mapping when splitting huge PMD Huang Ying
2018-12-07 5:41 ` [PATCH -V8 07/21] swap: Support PMD swap mapping in split_swap_cluster() Huang Ying
2018-12-07 5:41 ` [PATCH -V8 08/21] swap: Support to read a huge swap cluster for swapin a THP Huang Ying
2018-12-07 5:41 ` [PATCH -V8 09/21] swap: Swapin a THP in one piece Huang Ying
2018-12-07 5:41 ` [PATCH -V8 10/21] swap: Support to count THP swapin and its fallback Huang Ying
2018-12-07 5:41 ` [PATCH -V8 11/21] swap: Add sysfs interface to configure THP swapin Huang Ying
2018-12-07 5:41 ` [PATCH -V8 12/21] swap: Support PMD swap mapping in swapoff Huang Ying
2018-12-07 5:41 ` [PATCH -V8 13/21] swap: Support PMD swap mapping in madvise_free() Huang Ying
2018-12-07 5:41 ` [PATCH -V8 14/21] swap: Support to move swap account for PMD swap mapping Huang Ying
2018-12-07 5:41 ` [PATCH -V8 15/21] swap: Support to copy PMD swap mapping when fork() Huang Ying
2018-12-07 5:41 ` [PATCH -V8 16/21] swap: Free PMD swap mapping when zap_huge_pmd() Huang Ying
2018-12-07 5:41 ` [PATCH -V8 17/21] swap: Support PMD swap mapping for MADV_WILLNEED Huang Ying
2018-12-07 5:41 ` [PATCH -V8 18/21] swap: Support PMD swap mapping in mincore() Huang Ying
2018-12-07 5:41 ` Huang Ying [this message]
2018-12-07 5:41 ` [PATCH -V8 20/21] swap: create PMD swap mapping when unmap the THP Huang Ying
2018-12-07 5:41 ` [PATCH -V8 21/21] swap: Update help of CONFIG_THP_SWAP Huang Ying
2018-12-07 6:20 ` [PATCH -V8 00/21] swap: Swapout/swapin THP in one piece Huang, Ying
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20181207054122.27822-20-ying.huang@intel.com \
--to=ying.huang@intel.com \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=daniel.m.jordan@oracle.com \
--cc=dave.hansen@linux.intel.com \
--cc=hannes@cmpxchg.org \
--cc=hughd@google.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mhocko@kernel.org \
--cc=minchan@kernel.org \
--cc=n-horiguchi@ah.jp.nec.com \
--cc=riel@redhat.com \
--cc=shli@kernel.org \
--cc=zi.yan@cs.rutgers.edu \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).