Provide simple functions to lazily migrate a process (or part thereof).
These will be used to implement memory migration for NUMA process
migration.

Signed-off-by: Peter Zijlstra
---
 include/linux/mempolicy.h |    3 +++
 mm/mempolicy.c            |   40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)

--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -250,6 +250,9 @@ extern int vma_migratable(struct vm_area
 extern int mpol_misplaced(struct page *, struct vm_area_struct *,
 			  unsigned long);
 
+extern void lazy_migrate_vma(struct vm_area_struct *vma, int node);
+extern void lazy_migrate_process(struct mm_struct *mm, int node);
+
 #else
 
 struct mempolicy {};
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1173,6 +1173,46 @@ static long do_mbind(unsigned long start
 	return err;
 }
 
+void lazy_migrate_vma(struct vm_area_struct *vma, int node)
+{
+	nodemask_t nmask = nodemask_of_node(node);
+	LIST_HEAD(pagelist);
+
+	struct mempol_walk_data data = {
+		.nodes = &nmask,
+		.flags = MPOL_MF_MOVE | MPOL_MF_INVERT, /* move all pages not in set */
+		.private = &pagelist,
+		.vma = vma,
+	};
+
+	struct mm_walk walk = {
+		.pte_entry = check_pte_entry,
+		.mm = vma->vm_mm,
+		.private = &data,
+	};
+
+	if (vma->vm_file)
+		return;
+
+	if (!vma_migratable(vma))
+		return;
+
+	if (!walk_page_range(vma->vm_start, vma->vm_end, &walk))
+		migrate_pages_unmap_only(&pagelist);
+
+	putback_lru_pages(&pagelist);
+}
+
+void lazy_migrate_process(struct mm_struct *mm, int node)
+{
+	struct vm_area_struct *vma;
+
+	down_read(&mm->mmap_sem);
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		lazy_migrate_vma(vma, node);
+	up_read(&mm->mmap_sem);
+}
+
 /*
  * User space interface with variable sized bitmaps for nodelists.
  */
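The sketch below illustrates how a caller might use the exported helper when
the NUMA placement code decides a task should run on a different node. It is
not part of this patch: the function name numa_move_task_memory() and the
task_struct handling around it are assumptions for illustration; only
lazy_migrate_process() and its signature come from the diff above, and the
get_task_mm()/mmput() pairing is ordinary kernel reference handling.

	/*
	 * Hypothetical caller sketch (not part of this patch).  Assumes a
	 * kernel of the same era as the diff above, where get_task_mm() is
	 * declared in <linux/sched.h>.
	 */
	#include <linux/mempolicy.h>
	#include <linux/mm_types.h>
	#include <linux/sched.h>

	static void numa_move_task_memory(struct task_struct *p, int new_node)
	{
		struct mm_struct *mm = get_task_mm(p);	/* take a reference on the mm */

		if (!mm)
			return;				/* kernel thread or exiting task */

		/*
		 * Walk every anonymous, migratable VMA and unmap pages that are
		 * not already on new_node; see lazy_migrate_process() above.
		 */
		lazy_migrate_process(mm, new_node);
		mmput(mm);
	}

Judging by the call to migrate_pages_unmap_only() and the vma->vm_file check
in lazy_migrate_vma(), the migration is "lazy" in the sense that this pass only
unmaps not-yet-local anonymous pages; the actual copy to the new node would
then happen from the minor faults the task takes when it touches those pages
again, so only pages that are actually reused pay the migration cost.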