From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>,
	Hugh Dickins <hughd@google.com>, Mel Gorman <mgorman@suse.de>,
	Rik van Riel <riel@redhat.com>, Vlastimil Babka <vbabka@suse.cz>,
	Christoph Lameter <cl@gentwo.org>,
	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>,
	Steve Capper <steve.capper@linaro.org>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Michal Hocko <mhocko@suse.cz>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH 17/19] mlock, thp: HACK: split all pages in VM_LOCKED vma
Date: Wed,  5 Nov 2014 16:49:52 +0200
Message-ID: <1415198994-15252-18-git-send-email-kirill.shutemov@linux.intel.com>
In-Reply-To: <1415198994-15252-1-git-send-email-kirill.shutemov@linux.intel.com>

We don't yet handle mlocked pages properly with the new THP refcounting.
For now we split all pages in the VMA on mlock and disallow khugepaged
from collapsing pages in the VMA. If a split fails on mlock(), we fail
the syscall with -EBUSY.
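
For illustration only (not part of this patch): a minimal userspace
sketch of the behaviour described above. Whether the region really gets
THP-backed depends on alignment and system configuration.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 8UL << 20;		/* 8M: covers several 2M THPs */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	madvise(p, len, MADV_HUGEPAGE);	/* request THP backing */
	memset(p, 1, len);		/* fault the pages in */

	/*
	 * With this patch, mlock() splits any huge pages in the range
	 * first; if a split fails, the syscall returns -1/EBUSY.
	 */
	if (mlock(p, len) && errno == EBUSY)
		fprintf(stderr, "mlock: split failed (EBUSY)\n");

	munmap(p, len);
	return 0;
}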
---
 include/linux/huge_mm.h |  24 +++++++++
 mm/huge_memory.c        |  17 ++-----
 mm/mlock.c              | 130 +++++++++++++++++++++++++++++++++---------------
 3 files changed, 118 insertions(+), 53 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 94f331166974..abe146bd8ed7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -140,6 +140,18 @@ static inline int hpage_nr_pages(struct page *page)
 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				unsigned long addr, pmd_t pmd, pmd_t *pmdp);
 
+extern struct page *huge_zero_page __read_mostly;
+
+static inline bool is_huge_zero_page(struct page *page)
+{
+	return ACCESS_ONCE(huge_zero_page) == page;
+}
+
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+	return is_huge_zero_page(pmd_page(pmd));
+}
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -185,6 +197,18 @@ static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_str
 	return 0;
 }
 
+static inline bool is_huge_zero_page(struct page *page)
+{
+	BUG();
+	return 0;
+}
+
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+	BUG();
+	return 0;
+}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 95f2a83ad9d8..555a9134dfa0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -171,17 +171,7 @@ static int start_khugepaged(void)
 }
 
 static atomic_t huge_zero_refcount;
-static struct page *huge_zero_page __read_mostly;
-
-static inline bool is_huge_zero_page(struct page *page)
-{
-	return ACCESS_ONCE(huge_zero_page) == page;
-}
-
-static inline bool is_huge_zero_pmd(pmd_t pmd)
-{
-	return is_huge_zero_page(pmd_page(pmd));
-}
+struct page *huge_zero_page __read_mostly;
 
 static struct page *get_huge_zero_page(void)
 {
@@ -801,6 +791,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
 		return VM_FAULT_FALLBACK;
+	if (vma->vm_flags & VM_LOCKED)
+		return VM_FAULT_FALLBACK;
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma)))
@@ -2352,7 +2344,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
 	    (vma->vm_flags & VM_NOHUGEPAGE))
 		return false;
-
+	if (vma->vm_flags & VM_LOCKED)
+		return false;
 	if (!vma->anon_vma || vma->vm_ops)
 		return false;
 	if (is_vma_temporary_stack(vma))
diff --git a/mm/mlock.c b/mm/mlock.c
index ce84cb0b83ef..e3a367685503 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -500,38 +500,26 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 				&page_mask);
 
 		if (page && !IS_ERR(page)) {
-			if (PageTransHuge(page)) {
-				lock_page(page);
-				/*
-				 * Any THP page found by follow_page_mask() may
-				 * have gotten split before reaching
-				 * munlock_vma_page(), so we need to recompute
-				 * the page_mask here.
-				 */
-				page_mask = munlock_vma_page(page);
-				unlock_page(page);
-				put_page(page); /* follow_page_mask() */
-			} else {
-				/*
-				 * Non-huge pages are handled in batches via
-				 * pagevec. The pin from follow_page_mask()
-				 * prevents them from collapsing by THP.
-				 */
-				pagevec_add(&pvec, page);
-				zone = page_zone(page);
-				zoneid = page_zone_id(page);
+			VM_BUG_ON_PAGE(PageTransCompound(page), page);
+			/*
+			 * Non-huge pages are handled in batches via
+			 * pagevec. The pin from follow_page_mask()
+			 * prevents them from collapsing by THP.
+			 */
+			pagevec_add(&pvec, page);
+			zone = page_zone(page);
+			zoneid = page_zone_id(page);
 
-				/*
-				 * Try to fill the rest of pagevec using fast
-				 * pte walk. This will also update start to
-				 * the next page to process. Then munlock the
-				 * pagevec.
-				 */
-				start = __munlock_pagevec_fill(&pvec, vma,
-						zoneid, start, end);
-				__munlock_pagevec(&pvec, zone);
-				goto next;
-			}
+			/*
+			 * Try to fill the rest of pagevec using fast
+			 * pte walk. This will also update start to
+			 * the next page to process. Then munlock the
+			 * pagevec.
+			 */
+			start = __munlock_pagevec_fill(&pvec, vma,
+					zoneid, start, end);
+			__munlock_pagevec(&pvec, zone);
+			goto next;
 		}
 		/* It's a bug to munlock in the middle of a THP page */
 		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
@@ -542,6 +530,60 @@ next:
 	}
 }
 
+static int thp_split(pmd_t *pmd, unsigned long addr, unsigned long end,
+		struct mm_walk *walk)
+{
+	spinlock_t *ptl;
+	struct page *page = NULL;
+	pte_t *pte;
+	int err = 0;
+
+retry:
+	if (pmd_none(*pmd))
+		return 0;
+	if (pmd_trans_huge(*pmd)) {
+		if (is_huge_zero_pmd(*pmd)) {
+			split_huge_pmd(walk->vma, pmd, addr);
+			return 0;
+		}
+		ptl = pmd_lock(walk->mm, pmd);
+		if (!pmd_trans_huge(*pmd)) {
+			spin_unlock(ptl);
+			goto retry;
+		}
+		page = pmd_page(*pmd);
+		VM_BUG_ON_PAGE(!PageHead(page), page);
+		get_page(page);
+		spin_unlock(ptl);
+		err = split_huge_page(page);
+		put_page(page);
+		return err;
+	}
+	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	do {
+		if (!pte_present(*pte))
+			continue;
+		page = vm_normal_page(walk->vma, addr, *pte);
+		if (!page)
+			continue;
+		if (PageTransCompound(page)) {
+			page = compound_head(page);
+			get_page(page);
+			spin_unlock(ptl);
+			err = split_huge_page(page);
+			spin_lock(ptl);
+			put_page(page);
+			if (!err) {
+				VM_BUG_ON_PAGE(compound_mapcount(page), page);
+				VM_BUG_ON_PAGE(PageTransCompound(page), page);
+			} else
+				break;
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(pte - 1, ptl);
+	return err;
+}
+
 /*
  * mlock_fixup  - handle mlock[all]/munlock[all] requests.
  *
@@ -586,24 +628,30 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 
 success:
 	/*
-	 * Keep track of amount of locked VM.
-	 */
-	nr_pages = (end - start) >> PAGE_SHIFT;
-	if (!lock)
-		nr_pages = -nr_pages;
-	mm->locked_vm += nr_pages;
-
-	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
 	 * It's okay if try_to_unmap_one unmaps a page just after we
 	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
 	 */
 
-	if (lock)
+	if (lock) {
+		struct mm_walk thp_split_walk = {
+			.mm = mm,
+			.pmd_entry = thp_split,
+		};
+		ret = walk_page_vma(vma, &thp_split_walk);
+		if (ret)
+			goto out;
 		vma->vm_flags = newflags;
-	else
+	} else
 		munlock_vma_pages_range(vma, start, end);
 
+	/*
+	 * Keep track of amount of locked VM.
+	 */
+	nr_pages = (end - start) >> PAGE_SHIFT;
+	if (!lock)
+		nr_pages = -nr_pages;
+	mm->locked_vm += nr_pages;
 out:
 	*prev = vma;
 	return ret;
-- 
2.1.1
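
For readers unfamiliar with the page-walk API used by mlock_fixup()
above: a condensed sketch of the pattern, assuming the
walk_page_vma()/mm_walk interface from this series' baseline tree.
Illustrative only, not part of the patch; the example_* names are
placeholders.

#include <linux/mm.h>

/* Called once for each populated PMD in the walked range. */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	/*
	 * A non-zero return aborts the walk and is propagated back by
	 * walk_page_vma(); that is how a failed split_huge_page() in
	 * thp_split() becomes -EBUSY for the mlock() syscall.
	 */
	return 0;
}

static int example_walk(struct vm_area_struct *vma)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = example_pmd_entry,
	};

	return walk_page_vma(vma, &walk);
}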

