From: Mike Kravetz <mike.kravetz@oracle.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	linux-s390@vger.kernel.org
Cc: shu wang <malate_wangshu@hotmail.com>,
	Axel Rasmussen <axelrasmussen@google.com>,
	Peter Xu <peterx@redhat.com>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Heiko Carstens <hca@linux.ibm.com>,
	Alexey Dobriyan <adobriyan@gmail.com>,
	Matthew Wilcox <willy@infradead.org>,
	Michel Lespinasse <walken@google.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Mike Kravetz <mike.kravetz@oracle.com>
Subject: [RFC PATCH 5/5] mm proc/task_mmu.c: add hugetlb specific routine for clear_refs
Date: Wed, 10 Feb 2021 16:03:22 -0800
Message-ID: <20210211000322.159437-6-mike.kravetz@oracle.com>
In-Reply-To: <20210211000322.159437-1-mike.kravetz@oracle.com>

There is no hugetlb specific routine for clearing soft dirty and
other references.  The 'default' routines would only clear the
VM_SOFTDIRTY flag in the vma.

Add a new routine specifically for hugetlb vmas.
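
For reference, hugetlb soft dirty is exercised from userspace through the
existing /proc/<pid>/clear_refs and /proc/<pid>/pagemap interfaces (bit 55
of a pagemap entry is the soft dirty bit; patch 3/5 of this series makes
pagemap report it for hugetlb ptes).  The sequence below is only an
illustrative sketch, not part of this patch; it assumes a 2MB default
hugetlb page size and that free hugetlb pages have been reserved via
/proc/sys/vm/nr_hugepages:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Read the soft dirty bit (bit 55) of the pagemap entry backing addr. */
static int soft_dirty(int pagemap_fd, void *addr)
{
	uint64_t ent;
	off_t off = (uintptr_t)addr / sysconf(_SC_PAGESIZE) * sizeof(ent);

	if (pread(pagemap_fd, &ent, sizeof(ent), off) != sizeof(ent))
		return -1;
	return (ent >> 55) & 1;
}

int main(void)
{
	size_t len = 2UL << 20;		/* assumes 2MB default hugetlb size */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	int clear_refs = open("/proc/self/clear_refs", O_WRONLY);
	int pagemap = open("/proc/self/pagemap", O_RDONLY);

	if (p == MAP_FAILED || clear_refs < 0 || pagemap < 0)
		return 1;

	p[0] = 1;			/* fault in and dirty the huge page */
	write(clear_refs, "4", 1);	/* "4" clears soft dirty for all vmas */
	printf("after clear: %d\n", soft_dirty(pagemap, p));	/* expect 0 */
	p[0] = 2;			/* write fault re-dirties the page */
	printf("after write: %d\n", soft_dirty(pagemap, p));	/* expect 1 */
	return 0;
}

With only the 'default' handling, writing "4" above cleared VM_SOFTDIRTY
on the vma but left the hugetlb ptes untouched, so re-dirtying the mapping
could not be detected.  With this patch the huge page is write protected
and its soft dirty bit cleared, so the two reads are expected to print 0
and then 1.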

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 fs/proc/task_mmu.c | 110 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 829b35016aaa..f06cf9b131a8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1116,6 +1116,115 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 }
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE
+static inline bool huge_pte_is_pinned(struct vm_area_struct *vma,
+					unsigned long addr, pte_t pte)
+{
+	struct page *page;
+
+	if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
+		return false;
+	page = pte_page(pte);
+	if (!page)
+		return false;
+	return page_maybe_dma_pinned(page);
+}
+
+static int clear_refs_hugetlb_range(pte_t *ptep, unsigned long hmask,
+				unsigned long addr, unsigned long end,
+				struct mm_walk *walk)
+{
+	struct clear_refs_private *cp = walk->private;
+	struct vm_area_struct *vma = walk->vma;
+	struct hstate *h = hstate_vma(walk->vma);
+	unsigned long adj_start = addr, adj_end = end;
+	spinlock_t *ptl;
+	pte_t old_pte, pte;
+
+	/*
+	 * clear_refs should only operate on complete vmas.  Therefore,
+	 * values passed here should be huge page aligned and huge page
+	 * size in length.  Quick validation before taking any action in
+	 * case upstream code is changed.
+	 */
+	if ((addr & hmask) != addr || end - addr != huge_page_size(h)) {
+		WARN_ONCE(1, "%s passed unaligned address\n", __func__);
+		return 1;
+	}
+
+	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep);
+
+	/* Soft dirty and pmd sharing do not mix */
+
+	pte = huge_ptep_get(ptep);
+	if (!pte_present(pte))
+		goto out;
+	if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+		goto out;
+
+	if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
+		if (huge_pte_is_pinned(vma, addr, pte))
+			goto out;
+
+		/*
+	 * soft dirty and pmd sharing do not work together: per-process
+	 * soft dirty is tracked in ptes, and pmd sharing allows processes
+	 * to share ptes.  We unshare any pmds here.
+		 */
+		adjust_range_if_pmd_sharing_possible(vma, &adj_start, &adj_end);
+		flush_cache_range(vma, adj_start, adj_end);
+		/*
+		 * Only attempt unshare if sharing is possible.  If we unshare,
+		 * then ptes for a PUD sized area are effectively removed for
+		 * this process.  That clears soft dirty.
+		 */
+		if (adj_start != addr || adj_end != end) {
+			struct mmu_notifier_range range;
+			int unshared;
+
+			mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR,
+					0, vma, vma->vm_mm, adj_start, adj_end);
+			mmu_notifier_invalidate_range_start(&range);
+			i_mmap_lock_write(vma->vm_file->f_mapping);
+			unshared = huge_pmd_unshare(vma->vm_mm, vma,
+								&addr, ptep);
+			i_mmap_unlock_write(vma->vm_file->f_mapping);
+			mmu_notifier_invalidate_range_end(&range);
+			if (unshared)
+				goto unshare_done;
+		}
+
+		if (is_hugetlb_entry_migration(pte)) {
+			pte = huge_pte_swp_clear_soft_dirty(pte);
+			set_huge_pte_at(walk->mm, addr, ptep, pte);
+		} else {
+			old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
+			pte = pte_mkhuge(huge_pte_wrprotect(pte));
+			pte = arch_make_huge_pte(pte, vma, NULL, 0);
+			pte = huge_pte_clear_soft_dirty(pte);
+			pte = huge_pte_mkyoung(pte);
+			huge_ptep_modify_prot_commit(vma, addr, ptep,
+							old_pte, pte);
+		}
+
+unshare_done:
+		flush_hugetlb_tlb_range(vma, addr, end);
+	}
+
+	/* reference bits in hugetlb pages are not reset/used */
+out:
+	spin_unlock(ptl);
+	return 0;
+}
+#else /* CONFIG_HUGETLB_PAGE */
+static int clear_refs_hugetlb_range(pte_t *ptep, unsigned long hmask,
+				unsigned long addr, unsigned long end,
+				struct mm_walk *walk)
+{
+	return 1;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -1198,6 +1307,7 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
 }
 
 static const struct mm_walk_ops clear_refs_walk_ops = {
+	.hugetlb_entry		= clear_refs_hugetlb_range,
 	.pmd_entry		= clear_refs_pte_range,
 	.test_walk		= clear_refs_test_walk,
 };
-- 
2.29.2


