From: Peter Xu <peterx@redhat.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Jason Gunthorpe <jgg@ziepe.ca>,
Andrew Morton <akpm@linux-foundation.org>,
Jan Kara <jack@suse.cz>, Michal Hocko <mhocko@suse.com>,
Kirill Tkhai <ktkhai@virtuozzo.com>,
Kirill Shutemov <kirill@shutemov.name>,
Hugh Dickins <hughd@google.com>, Peter Xu <peterx@redhat.com>,
Christoph Hellwig <hch@lst.de>,
Andrea Arcangeli <aarcange@redhat.com>,
John Hubbard <jhubbard@nvidia.com>,
Oleg Nesterov <oleg@redhat.com>,
Leon Romanovsky <leonro@nvidia.com>,
Linus Torvalds <torvalds@linux-foundation.org>,
Jann Horn <jannh@google.com>
Subject: [PATCH 2/5] mm/fork: Pass new vma pointer into copy_page_range()
Date: Mon, 21 Sep 2020 17:17:41 -0400 [thread overview]
Message-ID: <20200921211744.24758-3-peterx@redhat.com> (raw)
In-Reply-To: <20200921211744.24758-1-peterx@redhat.com>
This prepares for the future work to trigger early COW on pinned pages during
fork(). No functional change intended.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
include/linux/mm.h | 2 +-
kernel/fork.c | 2 +-
mm/memory.c | 14 +++++++++-----
3 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ca6e6a81576b..bf1ac54be55e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1644,7 +1644,7 @@ struct mmu_notifier_range;
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
- struct vm_area_struct *vma);
+ struct vm_area_struct *vma, struct vm_area_struct *new);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
struct mmu_notifier_range *range,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
diff --git a/kernel/fork.c b/kernel/fork.c
index 7237d418e7b5..843807ade6dd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -589,7 +589,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
mm->map_count++;
if (!(tmp->vm_flags & VM_WIPEONFORK))
- retval = copy_page_range(mm, oldmm, mpnt);
+ retval = copy_page_range(mm, oldmm, mpnt, tmp);
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
diff --git a/mm/memory.c b/mm/memory.c
index 469af373ae76..7525147908c4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -814,6 +814,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
+ struct vm_area_struct *new,
unsigned long addr, unsigned long end)
{
pte_t *orig_src_pte, *orig_dst_pte;
@@ -877,6 +878,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
+ struct vm_area_struct *new,
unsigned long addr, unsigned long end)
{
pmd_t *src_pmd, *dst_pmd;
@@ -903,7 +905,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
if (pmd_none_or_clear_bad(src_pmd))
continue;
if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
- vma, addr, next))
+ vma, new, addr, next))
return -ENOMEM;
} while (dst_pmd++, src_pmd++, addr = next, addr != end);
return 0;
@@ -911,6 +913,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
+ struct vm_area_struct *new,
unsigned long addr, unsigned long end)
{
pud_t *src_pud, *dst_pud;
@@ -937,7 +940,7 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
if (pud_none_or_clear_bad(src_pud))
continue;
if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
- vma, addr, next))
+ vma, new, addr, next))
return -ENOMEM;
} while (dst_pud++, src_pud++, addr = next, addr != end);
return 0;
@@ -945,6 +948,7 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
+ struct vm_area_struct *new,
unsigned long addr, unsigned long end)
{
p4d_t *src_p4d, *dst_p4d;
@@ -959,14 +963,14 @@ static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src
if (p4d_none_or_clear_bad(src_p4d))
continue;
if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
- vma, addr, next))
+ vma, new, addr, next))
return -ENOMEM;
} while (dst_p4d++, src_p4d++, addr = next, addr != end);
return 0;
}
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma, struct vm_area_struct *new)
{
pgd_t *src_pgd, *dst_pgd;
unsigned long next;
@@ -1021,7 +1025,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (pgd_none_or_clear_bad(src_pgd))
continue;
if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
- vma, addr, next))) {
+ vma, new, addr, next))) {
ret = -ENOMEM;
break;
}
--
2.26.2
next prev parent reply other threads:[~2020-09-21 21:18 UTC|newest]
Thread overview: 110+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-09-21 21:17 [PATCH 0/5] mm: Break COW for pinned pages during fork() Peter Xu
2020-09-21 21:17 ` [PATCH 1/5] mm: Introduce mm_struct.has_pinned Peter Xu
2020-09-21 21:43 ` Jann Horn
2020-09-21 22:30 ` Peter Xu
2020-09-21 22:47 ` Jann Horn
2020-09-22 11:54 ` Jason Gunthorpe
2020-09-22 14:28 ` Peter Xu
2020-09-22 15:56 ` Jason Gunthorpe
2020-09-22 16:25 ` Linus Torvalds
2020-09-21 23:53 ` John Hubbard
2020-09-22 0:01 ` John Hubbard
2020-09-22 15:17 ` Peter Xu
2020-09-22 16:10 ` Jason Gunthorpe
2020-09-22 17:54 ` Peter Xu
2020-09-22 19:11 ` Jason Gunthorpe
2020-09-23 0:27 ` Peter Xu
2020-09-23 13:10 ` Peter Xu
2020-09-23 14:20 ` Jan Kara
2020-09-23 17:12 ` Jason Gunthorpe
2020-09-24 7:44 ` Jan Kara
2020-09-24 14:02 ` Jason Gunthorpe
2020-09-24 14:45 ` Jan Kara
2020-09-23 17:07 ` Jason Gunthorpe
2020-09-24 14:35 ` Peter Xu
2020-09-24 16:51 ` Jason Gunthorpe
2020-09-24 17:55 ` Peter Xu
2020-09-24 18:15 ` Jason Gunthorpe
2020-09-24 18:34 ` Peter Xu
2020-09-24 18:39 ` Jason Gunthorpe
2020-09-24 21:30 ` Peter Xu
2020-09-25 19:56 ` Linus Torvalds
2020-09-25 21:06 ` Linus Torvalds
2020-09-26 0:41 ` Jason Gunthorpe
2020-09-26 1:15 ` Linus Torvalds
2020-09-26 22:28 ` Linus Torvalds
2020-09-27 6:23 ` Leon Romanovsky
2020-09-27 18:16 ` Linus Torvalds
2020-09-27 18:45 ` Linus Torvalds
2020-09-28 12:49 ` Jason Gunthorpe
2020-09-28 16:17 ` Linus Torvalds
2020-09-28 17:22 ` Peter Xu
2020-09-28 17:54 ` Linus Torvalds
2020-09-28 18:39 ` Jason Gunthorpe
2020-09-28 19:29 ` Linus Torvalds
2020-09-28 23:57 ` Jason Gunthorpe
2020-09-29 0:18 ` John Hubbard
2020-09-28 19:36 ` Linus Torvalds
2020-09-28 19:50 ` Linus Torvalds
2020-09-28 22:51 ` Jason Gunthorpe
2020-09-29 0:30 ` Peter Xu
2020-10-08 5:49 ` Leon Romanovsky
2020-09-28 17:13 ` Peter Xu
2020-09-25 21:13 ` Peter Xu
2020-09-25 22:08 ` Linus Torvalds
2020-09-22 18:02 ` John Hubbard
2020-09-22 18:15 ` Peter Xu
2020-09-22 19:11 ` John Hubbard
2020-09-27 0:41 ` [mm] 698ac7610f: will-it-scale.per_thread_ops 8.2% improvement kernel test robot
2020-09-21 21:17 ` Peter Xu [this message]
2020-09-21 21:17 ` [PATCH 3/5] mm: Rework return value for copy_one_pte() Peter Xu
2020-09-22 7:11 ` John Hubbard
2020-09-22 15:29 ` Peter Xu
2020-09-22 10:08 ` Oleg Nesterov
2020-09-22 10:18 ` Oleg Nesterov
2020-09-22 15:36 ` Peter Xu
2020-09-22 15:48 ` Oleg Nesterov
2020-09-22 16:03 ` Peter Xu
2020-09-22 16:53 ` Oleg Nesterov
2020-09-22 18:13 ` Peter Xu
2020-09-22 18:23 ` Oleg Nesterov
2020-09-22 18:49 ` Peter Xu
2020-09-23 6:52 ` Oleg Nesterov
2020-09-23 17:16 ` Linus Torvalds
2020-09-23 21:24 ` Linus Torvalds
2020-09-21 21:20 ` [PATCH 4/5] mm: Do early cow for pinned pages during fork() for ptes Peter Xu
2020-09-21 21:55 ` Jann Horn
2020-09-21 22:18 ` John Hubbard
2020-09-21 22:27 ` Jann Horn
2020-09-22 0:08 ` John Hubbard
2020-09-21 22:27 ` Peter Xu
2020-09-22 11:48 ` Oleg Nesterov
2020-09-22 12:40 ` Oleg Nesterov
2020-09-22 15:58 ` Peter Xu
2020-09-22 16:52 ` Oleg Nesterov
2020-09-22 18:34 ` Peter Xu
2020-09-22 18:44 ` Oleg Nesterov
2020-09-23 1:03 ` Peter Xu
2020-09-23 20:25 ` Linus Torvalds
2020-09-24 15:08 ` Peter Xu
2020-09-24 11:48 ` Kirill Tkhai
2020-09-24 15:16 ` Peter Xu
2020-09-21 21:20 ` [PATCH 5/5] mm/thp: Split huge pmds/puds if they're pinned when fork() Peter Xu
2020-09-22 6:41 ` John Hubbard
2020-09-22 10:33 ` Jan Kara
2020-09-22 20:01 ` John Hubbard
2020-09-23 9:22 ` Jan Kara
2020-09-23 13:50 ` Peter Xu
2020-09-23 14:01 ` Jan Kara
2020-09-23 15:44 ` Peter Xu
2020-09-23 20:19 ` John Hubbard
2020-09-24 18:49 ` Peter Xu
2020-09-23 16:06 ` Peter Xu
2020-09-22 12:05 ` Jason Gunthorpe
2020-09-23 15:24 ` Peter Xu
2020-09-23 16:07 ` Yang Shi
2020-09-24 15:47 ` Peter Xu
2020-09-24 17:29 ` Yang Shi
2020-09-23 17:17 ` Jason Gunthorpe
2020-09-23 10:21 ` [PATCH 0/5] mm: Break COW for pinned pages during fork() Leon Romanovsky
2020-09-23 15:37 ` Peter Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200921211744.24758-3-peterx@redhat.com \
--to=peterx@redhat.com \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=hch@lst.de \
--cc=hughd@google.com \
--cc=jack@suse.cz \
--cc=jannh@google.com \
--cc=jgg@ziepe.ca \
--cc=jhubbard@nvidia.com \
--cc=kirill@shutemov.name \
--cc=ktkhai@virtuozzo.com \
--cc=leonro@nvidia.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mhocko@suse.com \
--cc=oleg@redhat.com \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).