From: SeongJae Park <sjpark@amazon.com>
To: <akpm@linux-foundation.org>
Cc: SeongJae Park <sjpark@amazon.de>, <Jonathan.Cameron@Huawei.com>,
	<aarcange@redhat.com>, <acme@kernel.org>,
	<alexander.shishkin@linux.intel.com>, <amit@kernel.org>,
	<benh@kernel.crashing.org>, <brendan.d.gregg@gmail.com>,
	<brendanhiggins@google.com>, <cai@lca.pw>,
	<colin.king@canonical.com>, <corbet@lwn.net>, <david@redhat.com>,
	<dwmw@amazon.com>, <elver@google.com>, <fan.du@intel.com>,
	<foersleo@amazon.de>, <gthelen@google.com>, <irogers@google.com>,
	<jolsa@redhat.com>, <kirill@shutemov.name>,
	<mark.rutland@arm.com>, <mgorman@suse.de>, <minchan@kernel.org>,
	<mingo@redhat.com>, <namhyung@kernel.org>, <peterz@infradead.org>,
	<rdunlap@infradead.org>, <riel@surriel.com>,
	<rientjes@google.com>, <rostedt@goodmis.org>, <rppt@kernel.org>,
	<sblbir@amazon.com>, <shakeelb@google.com>, <shuah@kernel.org>,
	<sj38.park@gmail.com>, <snu@amazon.de>, <vbabka@suse.cz>,
	<vdavydov.dev@gmail.com>, <yang.shi@linux.alibaba.com>,
	<ying.huang@intel.com>, <zgf574564920@gmail.com>,
	<linux-damon@amazon.com>, <linux-mm@kvack.org>,
	<linux-doc@vger.kernel.org>, <linux-kernel@vger.kernel.org>
Subject: [RFC v10 12/13] mm/damon/paddr: Separate commonly usable functions
Date: Wed, 16 Dec 2020 10:42:20 +0100	[thread overview]
Message-ID: <20201216094221.11898-13-sjpark@amazon.com> (raw)
In-Reply-To: <20201216094221.11898-1-sjpark@amazon.com>

From: SeongJae Park <sjpark@amazon.de>

This commit moves the functions in the default physical address space
monitoring primitives that are commonly usable by other use cases, such
as page granularity idleness monitoring, to prmtv-common.
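
For illustration only (not part of this patch), below is a minimal
sketch of how a page granularity idleness monitoring primitive could
reuse the moved functions.  The 'damon_pg_idle_*' helpers and the pfn
range walk are hypothetical; only damon_pa_mkold() and damon_pa_young()
are what this patch exposes via prmtv-common.h:

    /*
     * Hypothetical sketch: page granularity idleness monitoring built
     * on top of the functions that this patch moves to prmtv-common.
     */
    #include "prmtv-common.h"

    /* Mark every page in [start_pfn, end_pfn) as not accessed. */
    static void damon_pg_idle_prepare(unsigned long start_pfn,
                                      unsigned long end_pfn)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < end_pfn; pfn++)
                    damon_pa_mkold(PFN_PHYS(pfn));
    }

    /* Count pages in [start_pfn, end_pfn) accessed since prepare. */
    static unsigned long damon_pg_idle_check(unsigned long start_pfn,
                                             unsigned long end_pfn)
    {
            unsigned long pfn, page_sz, nr_accessed = 0;

            for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                    if (damon_pa_young(PFN_PHYS(pfn), &page_sz))
                            nr_accessed++;
            }
            return nr_accessed;
    }

A real caller would likely also use the 'page_sz' reported by
damon_pa_young() to skip the remaining pfns of a huge page rather than
checking every base page.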

Signed-off-by: SeongJae Park <sjpark@amazon.de>
---
 mm/damon/paddr.c        | 122 ----------------------------------------
 mm/damon/prmtv-common.c | 122 ++++++++++++++++++++++++++++++++++++++++
 mm/damon/prmtv-common.h |   4 ++
 3 files changed, 126 insertions(+), 122 deletions(-)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index b120f672cc57..143ddc0e5917 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -19,69 +19,6 @@
  * of the primitives.
  */
 
-/*
- * Get a page by pfn if it is in the LRU list.  Otherwise, returns NULL.
- *
- * The body of this function is stolen from 'page_idle_get_page()'.  We
- * steal rather than reuse it because the code is quite simple.
- */
-static struct page *damon_pa_get_page(unsigned long pfn)
-{
-	struct page *page = pfn_to_online_page(pfn);
-	pg_data_t *pgdat;
-
-	if (!page || !PageLRU(page) ||
-	    !get_page_unless_zero(page))
-		return NULL;
-
-	pgdat = page_pgdat(page);
-	spin_lock_irq(&pgdat->lru_lock);
-	if (unlikely(!PageLRU(page))) {
-		put_page(page);
-		page = NULL;
-	}
-	spin_unlock_irq(&pgdat->lru_lock);
-	return page;
-}
-
-static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
-		unsigned long addr, void *arg)
-{
-	damon_va_mkold(vma->vm_mm, addr);
-	return true;
-}
-
-static void damon_pa_mkold(unsigned long paddr)
-{
-	struct page *page = damon_pa_get_page(PHYS_PFN(paddr));
-	struct rmap_walk_control rwc = {
-		.rmap_one = __damon_pa_mkold,
-		.anon_lock = page_lock_anon_vma_read,
-	};
-	bool need_lock;
-
-	if (!page)
-		return;
-
-	if (!page_mapped(page) || !page_rmapping(page)) {
-		set_page_idle(page);
-		put_page(page);
-		return;
-	}
-
-	need_lock = !PageAnon(page) || PageKsm(page);
-	if (need_lock && !trylock_page(page)) {
-		put_page(page);
-		return;
-	}
-
-	rmap_walk(page, &rwc);
-
-	if (need_lock)
-		unlock_page(page);
-	put_page(page);
-}
-
 static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
 					    struct damon_region *r)
 {
@@ -101,65 +38,6 @@ void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 	}
 }
 
-struct damon_pa_access_chk_result {
-	unsigned long page_sz;
-	bool accessed;
-};
-
-static bool damon_pa_accessed(struct page *page, struct vm_area_struct *vma,
-		unsigned long addr, void *arg)
-{
-	struct damon_pa_access_chk_result *result = arg;
-
-	result->accessed = damon_va_young(vma->vm_mm, addr, &result->page_sz);
-
-	/* If accessed, stop walking */
-	return !result->accessed;
-}
-
-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
-{
-	struct page *page = damon_pa_get_page(PHYS_PFN(paddr));
-	struct damon_pa_access_chk_result result = {
-		.page_sz = PAGE_SIZE,
-		.accessed = false,
-	};
-	struct rmap_walk_control rwc = {
-		.arg = &result,
-		.rmap_one = damon_pa_accessed,
-		.anon_lock = page_lock_anon_vma_read,
-	};
-	bool need_lock;
-
-	if (!page)
-		return false;
-
-	if (!page_mapped(page) || !page_rmapping(page)) {
-		if (page_is_idle(page))
-			result.accessed = false;
-		else
-			result.accessed = true;
-		put_page(page);
-		goto out;
-	}
-
-	need_lock = !PageAnon(page) || PageKsm(page);
-	if (need_lock && !trylock_page(page)) {
-		put_page(page);
-		return false;
-	}
-
-	rmap_walk(page, &rwc);
-
-	if (need_lock)
-		unlock_page(page);
-	put_page(page);
-
-out:
-	*page_sz = result.page_sz;
-	return result.accessed;
-}
-
 /*
  * Check whether the region was accessed after the last preparation
  *
diff --git a/mm/damon/prmtv-common.c b/mm/damon/prmtv-common.c
index 6cdb96cbc9ef..6c2e760e086c 100644
--- a/mm/damon/prmtv-common.c
+++ b/mm/damon/prmtv-common.c
@@ -102,3 +102,125 @@ bool damon_va_young(struct mm_struct *mm, unsigned long addr,
 
 	return young;
 }
+
+/*
+ * Get a page by pfn if it is in the LRU list.  Otherwise, returns NULL.
+ *
+ * The body of this function is stolen from 'page_idle_get_page()'.  We
+ * steal rather than reuse it because the code is quite simple.
+ */
+static struct page *damon_pa_get_page(unsigned long pfn)
+{
+	struct page *page = pfn_to_online_page(pfn);
+	pg_data_t *pgdat;
+
+	if (!page || !PageLRU(page) ||
+	    !get_page_unless_zero(page))
+		return NULL;
+
+	pgdat = page_pgdat(page);
+	spin_lock_irq(&pgdat->lru_lock);
+	if (unlikely(!PageLRU(page))) {
+		put_page(page);
+		page = NULL;
+	}
+	spin_unlock_irq(&pgdat->lru_lock);
+	return page;
+}
+
+static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
+		unsigned long addr, void *arg)
+{
+	damon_va_mkold(vma->vm_mm, addr);
+	return true;
+}
+
+void damon_pa_mkold(unsigned long paddr)
+{
+	struct page *page = damon_pa_get_page(PHYS_PFN(paddr));
+	struct rmap_walk_control rwc = {
+		.rmap_one = __damon_pa_mkold,
+		.anon_lock = page_lock_anon_vma_read,
+	};
+	bool need_lock;
+
+	if (!page)
+		return;
+
+	if (!page_mapped(page) || !page_rmapping(page)) {
+		set_page_idle(page);
+		put_page(page);
+		return;
+	}
+
+	need_lock = !PageAnon(page) || PageKsm(page);
+	if (need_lock && !trylock_page(page)) {
+		put_page(page);
+		return;
+	}
+
+	rmap_walk(page, &rwc);
+
+	if (need_lock)
+		unlock_page(page);
+	put_page(page);
+}
+
+struct damon_pa_access_chk_result {
+	unsigned long page_sz;
+	bool accessed;
+};
+
+static bool damon_pa_accessed(struct page *page, struct vm_area_struct *vma,
+		unsigned long addr, void *arg)
+{
+	struct damon_pa_access_chk_result *result = arg;
+
+	result->accessed = damon_va_young(vma->vm_mm, addr, &result->page_sz);
+
+	/* If accessed, stop walking */
+	return !result->accessed;
+}
+
+bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
+{
+	struct page *page = damon_pa_get_page(PHYS_PFN(paddr));
+	struct damon_pa_access_chk_result result = {
+		.page_sz = PAGE_SIZE,
+		.accessed = false,
+	};
+	struct rmap_walk_control rwc = {
+		.arg = &result,
+		.rmap_one = damon_pa_accessed,
+		.anon_lock = page_lock_anon_vma_read,
+	};
+	bool need_lock;
+
+	if (!page)
+		return false;
+
+	if (!page_mapped(page) || !page_rmapping(page)) {
+		if (page_is_idle(page))
+			result.accessed = false;
+		else
+			result.accessed = true;
+		put_page(page);
+		goto out;
+	}
+
+	need_lock = !PageAnon(page) || PageKsm(page);
+	if (need_lock && !trylock_page(page)) {
+		put_page(page);
+		return false;
+	}
+
+	rmap_walk(page, &rwc);
+
+	if (need_lock)
+		unlock_page(page);
+	put_page(page);
+
+out:
+	*page_sz = result.page_sz;
+	return result.accessed;
+}
diff --git a/mm/damon/prmtv-common.h b/mm/damon/prmtv-common.h
index a66a6139b4fc..fbe9452bd040 100644
--- a/mm/damon/prmtv-common.h
+++ b/mm/damon/prmtv-common.h
@@ -10,6 +10,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/random.h>
+#include <linux/rmap.h>
 #include <linux/sched/mm.h>
 #include <linux/slab.h>
 
@@ -19,3 +20,6 @@
 void damon_va_mkold(struct mm_struct *mm, unsigned long addr);
 bool damon_va_young(struct mm_struct *mm, unsigned long addr,
 			unsigned long *page_sz);
+
+void damon_pa_mkold(unsigned long paddr);
+bool damon_pa_young(unsigned long paddr, unsigned long *page_sz);
-- 
2.17.1


