diff --git a/mm/gup.c b/mm/gup.c
index 98f13ab37bac..5c9825745bb2 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -3,6 +3,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
+#include <linux/printk.h>
 
 #include <linux/mm.h>
 #include <linux/memremap.h>
@@ -626,8 +627,8 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
  * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
  * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
  */
-static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
-		unsigned long address, unsigned int *flags, int *nonblocking)
+static int faultin_page_x(struct task_struct *tsk, struct vm_area_struct *vma,
+		unsigned long address, unsigned int *flags, int *nonblocking, int ign)
 {
 	unsigned int fault_flags = 0;
 	vm_fault_t ret;
@@ -649,8 +650,14 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	}
 
 	ret = handle_mm_fault(vma, address, fault_flags);
+	if (!ign) {
+		printk(KERN_WARNING "faultin_page handle_mm_fault --> ret = %u\n", ret);
+	}
 	if (ret & VM_FAULT_ERROR) {
 		int err = vm_fault_to_errno(ret, *flags);
 
+		if (!ign) {
+			printk(KERN_WARNING "faultin_page handle_mm_fault --> err = %d\n", err);
+		}
 		if (err)
 			return err;
@@ -665,6 +672,9 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	}
 
 	if (ret & VM_FAULT_RETRY) {
+		if (!ign) {
+			printk(KERN_WARNING "faultin_page-->EBUSY VM_FAULT_RETRY non-blocking?%d FAULT_FLAG_RETRY_NOWAIT?%d\n", nonblocking ? 1 : 0, (fault_flags & FAULT_FLAG_RETRY_NOWAIT) ? 1 : 0);
+		}
 		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 			*nonblocking = 0;
 		return -EBUSY;
@@ -681,8 +691,16 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	 */
 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
 		*flags |= FOLL_COW;
+	if (!ign) {
+		printk(KERN_WARNING "faultin_page-->0\n");
+	}
 	return 0;
 }
+static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
+		unsigned long address, unsigned int *flags, int *nonblocking)
+{
+	return faultin_page_x(tsk, vma, address, flags, nonblocking, 1);
+}
 
 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 {
@@ -787,15 +805,18 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * instead of __get_user_pages. __get_user_pages should be used only if
  * you need some special @gup_flags.
  */
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages_x(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages,
-		struct vm_area_struct **vmas, int *nonblocking)
+		struct vm_area_struct **vmas, int *nonblocking, int ign)
 {
 	long ret = 0, i = 0;
 	struct vm_area_struct *vma = NULL;
 	struct follow_page_context ctx = { NULL };
 
+	if (!ign)
+		printk(KERN_WARNING "__get_user_pages start=%lx nr_pages=%lu gup_flags=%x ctx.page_mask=%u\n", start, nr_pages, gup_flags, ctx.page_mask);
+
 	if (!nr_pages)
 		return 0;
 
@@ -816,11 +837,25 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 		/* first iteration or cross vma bound */
 		if (!vma || start >= vma->vm_end) {
+			if (!ign) {
+				if (!vma)
+					printk(KERN_WARNING "__get_user_pages @1 vma==NULL\n");
+				else
+					printk(KERN_WARNING "__get_user_pages @1 start=%lx vma->vm_start=%lx vma->vm_end=%lx vma->vm_flags=%lx\n", start, vma->vm_start, vma->vm_end, vma->vm_flags);
+			}
 			vma = find_extend_vma(mm, start);
+			if (!ign) {
+				if (!vma)
+					printk(KERN_WARNING "__get_user_pages @2 vma==NULL\n");
+				else
+					printk(KERN_WARNING "__get_user_pages @2 start=%lx vma->vm_start=%lx vma->vm_end=%lx vma->vm_flags=%lx\n", start, vma->vm_start, vma->vm_end, vma->vm_flags);
+			}
 			if (!vma && in_gate_area(mm, start)) {
 				ret = get_gate_page(mm, start & PAGE_MASK,
 						gup_flags, &vma,
 						pages ? &pages[i] : NULL);
+				if (!ign)
+					printk(KERN_WARNING "__get_user_pages @3 get_gate_page --> %ld\n", ret);
 				if (ret)
 					goto out;
 				ctx.page_mask = 0;
@@ -828,6 +863,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		}
 
 		if (!vma || check_vma_flags(vma, gup_flags)) {
+			if (!ign)
+				printk(KERN_WARNING "__get_user_pages @4 EFAULT\n");
 			ret = -EFAULT;
 			goto out;
 		}
@@ -835,6 +872,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
 					&start, &nr_pages, i,
 					gup_flags, nonblocking);
+			if (!ign)
+				printk(KERN_WARNING "__get_user_pages @5 follow_hugetlb_page --> %ld\n", i);
 			continue;
 		}
 	}
@@ -844,15 +883,21 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		 * potentially allocating memory.
 		 */
 		if (fatal_signal_pending(current)) {
+			if (!ign)
+				printk(KERN_WARNING "__get_user_pages @6 ERESTARTSYS\n");
 			ret = -ERESTARTSYS;
 			goto out;
 		}
 		cond_resched();
 
 		page = follow_page_mask(vma, start, foll_flags, &ctx);
+		if (!ign)
+			printk(KERN_WARNING "__get_user_pages @7 follow_page_mask --> %d ctx.page_mask=%u\n", page ? 1 : 0, ctx.page_mask);
 		if (!page) {
-			ret = faultin_page(tsk, vma, start, &foll_flags,
-					nonblocking);
+			ret = faultin_page_x(tsk, vma, start, &foll_flags,
+					nonblocking, ign);
+			if (!ign)
+				printk(KERN_WARNING "__get_user_pages @8 faultin_page --> %ld\n", ret);
 			switch (ret) {
 			case 0:
 				goto retry;
@@ -868,6 +913,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			}
 			BUG();
 		} else if (PTR_ERR(page) == -EEXIST) {
+			if (!ign)
+				printk(KERN_WARNING "__get_user_pages @8 EEXIST\n");
 			/*
 			 * Proper page table entry exists, but no corresponding
 			 * struct page.
@@ -875,6 +922,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			goto next_page;
 		} else if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
+			if (!ign)
+				printk(KERN_WARNING "__get_user_pages @8 IS_ERR -> ret=%ld\n", ret);
 			goto out;
 		}
 		if (pages) {
@@ -889,17 +938,31 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			ctx.page_mask = 0;
 		}
 		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
+		if (!ign)
+			printk(KERN_WARNING "__get_user_pages @9 page_increm=%u ctx.page_mask=%u\n", page_increm, ctx.page_mask);
 		if (page_increm > nr_pages)
 			page_increm = nr_pages;
 		i += page_increm;
 		start += page_increm * PAGE_SIZE;
 		nr_pages -= page_increm;
+		if (!ign)
+			printk(KERN_WARNING "__get_user_pages @10 i=%ld start=%lx nr_pages=%ld\n", i, start, nr_pages);
 	} while (nr_pages);
 out:
 	if (ctx.pgmap)
 		put_dev_pagemap(ctx.pgmap);
+	if (!ign) {
+		printk(KERN_WARNING "__get_user_pages LEAVE i=%ld ret=%ld\n", i, ret);
+	}
 	return i ? i : ret;
 }
+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, unsigned long nr_pages,
+		unsigned int gup_flags, struct page **pages,
+		struct vm_area_struct **vmas, int *nonblocking)
+{
+	return __get_user_pages_x(tsk, mm, start, nr_pages, gup_flags, pages, vmas, nonblocking, 1);
+}
 
 static bool vma_permits_fault(struct vm_area_struct *vma,
 			      unsigned int fault_flags)
@@ -1192,8 +1255,9 @@ EXPORT_SYMBOL(get_user_pages_remote);
  * If @nonblocking is non-NULL, it must held for read only and may be
  * released.  If it's released, *@nonblocking will be set to 0.
  */
-long populate_vma_page_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end, int *nonblocking)
+long populate_vma_page_range_x(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end, int *nonblocking,
+		int ign)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
@@ -1227,8 +1291,13 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	 * We made sure addr is within a VMA, so the following will
 	 * not result in a stack expansion that recurses back here.
 	 */
-	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
-				NULL, NULL, nonblocking);
+	return __get_user_pages_x(current, mm, start, nr_pages, gup_flags,
+				NULL, NULL, nonblocking, ign);
+}
+long populate_vma_page_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end, int *nonblocking)
+{
+	return populate_vma_page_range_x(vma, start, end, nonblocking, 1);
 }
 
 /*
@@ -1241,14 +1310,23 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long end, nstart, nend;
+	unsigned long end, nstart, nend = 0L;
 	struct vm_area_struct *vma = NULL;
 	int locked = 0;
 	long ret = 0;
+	unsigned long nstart_prev = 0L - 1L, nend_prev = 0L - 1L;
+	int ign;
 
 	end = start + len;
 
+	printk(KERN_WARNING "_mm_populate %lx %lx %lx %d ENTER\n", start, len, end, ignore_errors);
+
 	for (nstart = start; nstart < end; nstart = nend) {
+		ign = nstart == nstart_prev && nend == nend_prev;
+		nstart_prev = nstart;
+		nend_prev = nend;
+		if (!ign)
+			printk(KERN_WARNING "_mm_populate %lx %lx %lx %d LOOP %lx %d %ld\n", start, len, end, ignore_errors, nstart, locked, ret);
 		/*
 		 * We want to fault in pages for [nstart; end) address range.
 		 * Find first corresponding VMA.
@@ -1259,6 +1337,8 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 			vma = find_vma(mm, nstart);
 		} else if (nstart >= vma->vm_end)
 			vma = vma->vm_next;
+		if (!ign && vma)
+			printk(KERN_WARNING "_mm_populate %lx %lx %lx %d vma->vm_start=%lx vma->vm_end=%lx vma->vm_flags=%lx\n", start, len, end, ignore_errors, vma->vm_start, vma->vm_end, vma->vm_flags);
 		if (!vma || vma->vm_start >= end)
 			break;
 		/*
@@ -1266,8 +1346,13 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 		 * range with the first VMA. Also, skip undesirable VMA types.
 		 */
 		nend = min(end, vma->vm_end);
-		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-			continue;
+		if (!ign)
+			printk(KERN_WARNING "_mm_populate %lx %lx %lx %d nend=%lx %lx %lx\n", start, len, end, ignore_errors, nend, end, vma->vm_end);
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+			if (!ign)
+				printk(KERN_WARNING "_mm_populate %lx %lx %lx %d LOOP-1 %lx\n", start, len, end, ignore_errors, vma->vm_flags);
+			continue;
+		}
 		if (nstart < vma->vm_start)
 			nstart = vma->vm_start;
 		/*
@@ -1275,8 +1360,10 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 		 * double checks the vma flags, so that it won't mlock pages
 		 * if the vma was already munlocked.
 		 */
-		ret = populate_vma_page_range(vma, nstart, nend, &locked);
+		ret = populate_vma_page_range_x(vma, nstart, nend, &locked, ign);
 		if (ret < 0) {
+			if (!ign)
+				printk(KERN_WARNING "_mm_populate %lx %lx %lx %d LOOP-2 %ld\n", start, len, end, ignore_errors, ret);
 			if (ignore_errors) {
 				ret = 0;
 				continue;	/* continue at next VMA */
@@ -1284,8 +1371,11 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 			break;
 		}
 		nend = nstart + ret * PAGE_SIZE;
+		if (!ign)
+			printk(KERN_WARNING "_mm_populate %lx %lx %lx %d LOOP-3 ret=%ld nend=%lx\n", start, len, end, ignore_errors, ret, nend);
 		ret = 0;
 	}
+	printk(KERN_WARNING "_mm_populate END %lu %lu %d\n", start, len, locked);
 	if (locked)
 		up_read(&mm->mmap_sem);
 	return ret;	/* 0 or negative error code */
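
A note on reproducing the trace, not part of the patch itself: __mm_populate() is reached from mlock(2)/mlockall(2) and MAP_POPULATE mappings (mlockall(MCL_CURRENT) ends up calling __mm_populate(0, TASK_SIZE, 1)), so a minimal userspace trigger for the instrumented path could look like the sketch below. The program and the 64 MiB size are my own illustrative assumptions; it needs CAP_IPC_LOCK or a large enough RLIMIT_MEMLOCK to succeed.

/* build: gcc -O2 -o populate-trigger populate-trigger.c */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64UL << 20;	/* 64 MiB of anonymous memory, arbitrary */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Faults in and locks every existing mapping, driving
	 * __mm_populate() -> populate_vma_page_range() -> __get_user_pages()
	 * over each VMA; watch dmesg for the "_mm_populate ... ENTER/LOOP/END"
	 * and "__get_user_pages ..." lines added by the patch. */
	if (mlockall(MCL_CURRENT)) {
		perror("mlockall");
		return EXIT_FAILURE;
	}

	memset(buf, 1, len);	/* pages are already resident and locked */
	puts("populated; check dmesg for the trace");

	munlockall();
	munmap(buf, len);
	return 0;
}

On the design of the instrumentation: the nstart_prev/nend_prev bookkeeping in __mm_populate() is what keeps the log usable. When the for loop keeps retrying the same [nstart, nend) range (the suspected livelock), ign becomes 1 and the per-iteration printks are suppressed, so only state changes get logged. The original faultin_page(), __get_user_pages(), and populate_vma_page_range() survive as thin wrappers that pass ign == 1, leaving every other caller silent.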