From: Shiyang Ruan <ruansy.fnst@cn.fujitsu.com>
To: <linux-kernel@vger.kernel.org>, <linux-xfs@vger.kernel.org>,
<linux-nvdimm@lists.01.org>
Cc: <linux-mm@kvack.org>, <linux-fsdevel@vger.kernel.org>,
<darrick.wong@oracle.com>, <dan.j.williams@intel.com>,
<david@fromorbit.com>, <hch@lst.de>, <rgoldwyn@suse.de>,
<qi.fuli@fujitsu.com>, <y-goto@fujitsu.com>
Subject: [RFC PATCH 2/8] mm: add dax-rmap for memory-failure and rmap
Date: Mon, 27 Apr 2020 16:47:44 +0800 [thread overview]
Message-ID: <20200427084750.136031-3-ruansy.fnst@cn.fujitsu.com> (raw)
In-Reply-To: <20200427084750.136031-1-ruansy.fnst@cn.fujitsu.com>
Memory-failure collects and kills processes that are accessing a poisoned,
file-mmapped page. Add dax-rmap iteration to support the reflink case.
Also add it for rmap iteration.
Signed-off-by: Shiyang Ruan <ruansy.fnst@cn.fujitsu.com>
---
mm/memory-failure.c | 63 +++++++++++++++++++++++++++++++++++----------
mm/rmap.c | 54 +++++++++++++++++++++++++++-----------
2 files changed, 88 insertions(+), 29 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index a96364be8ab4..6d7da1fd55fd 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -463,36 +463,71 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
page_unlock_anon_vma_read(av);
}
+static void collect_each_procs_file(struct page *page,
+ struct task_struct *task,
+ struct list_head *to_kill)
+{
+ struct vm_area_struct *vma;
+ struct address_space *mapping = page->mapping;
+ struct rb_root_cached *root = (struct rb_root_cached *)page_private(page);
+ struct rb_node *node;
+ struct shared_file *shared;
+ pgoff_t pgoff;
+
+ if (dax_mapping(mapping) && root) {
+ struct shared_file save = {
+ .mapping = mapping,
+ .index = page->index,
+ };
+ for (node = rb_first_cached(root); node; node = rb_next(node)) {
+ shared = container_of(node, struct shared_file, node);
+ mapping = page->mapping = shared->mapping;
+ page->index = shared->index;
+ pgoff = page_to_pgoff(page);
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
+ pgoff) {
+ if (vma->vm_mm == task->mm) {
+ // each vma is unique, so is the vaddr.
+ add_to_kill(task, page, vma, to_kill);
+ }
+ }
+ }
+ // restore the mapping and index.
+ page->mapping = save.mapping;
+ page->index = save.index;
+ } else {
+ pgoff = page_to_pgoff(page);
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+ /*
+ * Send early kill signal to tasks where a vma covers
+ * the page but the corrupted page is not necessarily
+ * mapped it in its pte.
+ * Assume applications who requested early kill want
+ * to be informed of all such data corruptions.
+ */
+ if (vma->vm_mm == task->mm)
+ add_to_kill(task, page, vma, to_kill);
+ }
+ }
+}
+
/*
* Collect processes when the error hit a file mapped page.
*/
static void collect_procs_file(struct page *page, struct list_head *to_kill,
int force_early)
{
- struct vm_area_struct *vma;
struct task_struct *tsk;
struct address_space *mapping = page->mapping;
i_mmap_lock_read(mapping);
read_lock(&tasklist_lock);
for_each_process(tsk) {
- pgoff_t pgoff = page_to_pgoff(page);
struct task_struct *t = task_early_kill(tsk, force_early);
if (!t)
continue;
- vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
- pgoff) {
- /*
- * Send early kill signal to tasks where a vma covers
- * the page but the corrupted page is not necessarily
- * mapped it in its pte.
- * Assume applications who requested early kill want
- * to be informed of all such data corruptions.
- */
- if (vma->vm_mm == t->mm)
- add_to_kill(t, page, vma, to_kill);
- }
+ collect_each_procs_file(page, t, to_kill);
}
read_unlock(&tasklist_lock);
i_mmap_unlock_read(mapping);
diff --git a/mm/rmap.c b/mm/rmap.c
index f79a206b271a..69ea66f9e971 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1870,21 +1870,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
anon_vma_unlock_read(anon_vma);
}
-/*
- * rmap_walk_file - do something to file page using the object-based rmap method
- * @page: the page to be handled
- * @rwc: control variable according to each walk type
- *
- * Find all the mappings of a page using the mapping pointer and the vma chains
- * contained in the address_space struct it points to.
- *
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
- * where the page was found will be held for write. So, we won't recheck
- * vm_flags for that VMA. That should be OK, because that vma shouldn't be
- * LOCKED.
- */
-static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
- bool locked)
+static void rmap_walk_file_one(struct page *page, struct rmap_walk_control *rwc, bool locked)
{
struct address_space *mapping = page_mapping(page);
pgoff_t pgoff_start, pgoff_end;
@@ -1925,6 +1911,44 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
i_mmap_unlock_read(mapping);
}
+/*
+ * rmap_walk_file - do something to file page using the object-based rmap method
+ * @page: the page to be handled
+ * @rwc: control variable according to each walk type
+ *
+ * Find all the mappings of a page using the mapping pointer and the vma chains
+ * contained in the address_space struct it points to.
+ *
+ * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * where the page was found will be held for write. So, we won't recheck
+ * vm_flags for that VMA. That should be OK, because that vma shouldn't be
+ * LOCKED.
+ */
+static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
+ bool locked)
+{
+ struct rb_root_cached *root = (struct rb_root_cached *)page_private(page);
+ struct rb_node *node;
+ struct shared_file *shared;
+
+ if (dax_mapping(page->mapping) && root) {
+ struct shared_file save = {
+ .mapping = page->mapping,
+ .index = page->index,
+ };
+ for (node = rb_first_cached(root); node; node = rb_next(node)) {
+ shared = container_of(node, struct shared_file, node);
+ page->mapping = shared->mapping;
+ page->index = shared->index;
+ rmap_walk_file_one(page, rwc, locked);
+ }
+ // restore the mapping and index.
+ page->mapping = save.mapping;
+ page->index = save.index;
+ } else
+ rmap_walk_file_one(page, rwc, locked);
+}
+
void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
if (unlikely(PageKsm(page)))
--
2.26.2
next prev parent reply other threads:[~2020-04-27 8:48 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-04-27 8:47 [RFC PATCH 0/8] dax: Add a dax-rmap tree to support reflink Shiyang Ruan
2020-04-27 8:47 ` [RFC PATCH 1/8] fs/dax: Introduce dax-rmap btree for reflink Shiyang Ruan
2020-04-27 8:47 ` Shiyang Ruan [this message]
2020-04-27 8:47 ` [RFC PATCH 3/8] fs/dax: Introduce dax_copy_edges() for COW Shiyang Ruan
2020-04-27 8:47 ` [RFC PATCH 4/8] fs/dax: copy data before write Shiyang Ruan
2020-04-27 8:47 ` [RFC PATCH 5/8] fs/dax: replace mmap entry in case of CoW Shiyang Ruan
2020-04-27 8:47 ` [RFC PATCH 6/8] fs/dax: dedup file range to use a compare function Shiyang Ruan
2020-04-27 8:47 ` [RFC PATCH 7/8] fs/xfs: handle CoW for fsdax write() path Shiyang Ruan
2020-04-27 8:47 ` [RFC PATCH 8/8] fs/xfs: support dedupe for fsdax Shiyang Ruan
2020-04-27 12:28 ` [RFC PATCH 0/8] dax: Add a dax-rmap tree to support reflink Matthew Wilcox
2020-04-28 6:09 ` 回复: " Ruan, Shiyang
2020-04-28 6:43 ` Dave Chinner
2020-04-28 9:32 ` Ruan Shiyang
2020-04-28 11:16 ` Matthew Wilcox
2020-04-28 11:24 ` Dave Chinner
2020-04-28 15:37 ` Darrick J. Wong
2020-04-28 22:02 ` Dave Chinner
2020-06-04 7:37 ` Ruan Shiyang
2020-06-04 14:51 ` Darrick J. Wong
2020-06-05 1:30 ` Dave Chinner
2020-06-05 2:30 ` Ruan Shiyang
2020-06-05 2:11 ` Ruan Shiyang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200427084750.136031-3-ruansy.fnst@cn.fujitsu.com \
--to=ruansy.fnst@cn.fujitsu.com \
--cc=dan.j.williams@intel.com \
--cc=darrick.wong@oracle.com \
--cc=david@fromorbit.com \
--cc=hch@lst.de \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nvdimm@lists.01.org \
--cc=linux-xfs@vger.kernel.org \
--cc=qi.fuli@fujitsu.com \
--cc=rgoldwyn@suse.de \
--cc=y-goto@fujitsu.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).