From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jan Kara <jack@suse.cz>
To: linux-mm@kvack.org
Cc: linux-media@vger.kernel.org, Hans Verkuil,
	dri-devel@lists.freedesktop.org, Pawel Osciak,
	Mauro Carvalho Chehab, mgorman@suse.de, Marek Szyprowski,
	linux-samsung-soc@vger.kernel.org, Jan Kara <jack@suse.cz>
Subject: [PATCH 2/9] mm: Provide new get_vaddr_frames() helper
Date: Wed, 6 May 2015 09:28:09 +0200
Message-Id: <1430897296-5469-3-git-send-email-jack@suse.cz>
In-Reply-To: <1430897296-5469-1-git-send-email-jack@suse.cz>
References: <1430897296-5469-1-git-send-email-jack@suse.cz>

Provide a new function, get_vaddr_frames(). This function maps virtual
addresses from a given start address and fills the given array with the
page frame numbers of the corresponding pages. If the start address
belongs to a normal vma, the function grabs a reference to each of the
pages to pin them in memory. If it belongs to a VM_IO | VM_PFNMAP vma,
we don't touch page structures; the caller must make sure the pfns
aren't reused for anything else while they are in use.

This function is created for various drivers to simplify handling of
their buffers.

Signed-off-by: Jan Kara <jack@suse.cz>
---
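For context, a minimal usage sketch of how a driver might consume the
new API. This is a hypothetical illustration only, not part of the
patch: example_pin_user_buffer() is a made-up name and error handling
is abbreviated.

#include <linux/mm.h>
#include <linux/err.h>

static int example_pin_user_buffer(unsigned long vaddr, size_t size)
{
	unsigned int nr_frames = (PAGE_ALIGN(vaddr + size) -
				  (vaddr & PAGE_MASK)) >> PAGE_SHIFT;
	struct frame_vector *vec;
	struct page **pages;
	int ret;

	vec = frame_vector_create(nr_frames);
	if (!vec)
		return -ENOMEM;

	/* Pin the buffer; 'write' because the device will store into it */
	ret = get_vaddr_frames(vaddr & PAGE_MASK, nr_frames,
			       true /* write */, false /* force */, vec);
	if (ret < 0)
		goto out_destroy;
	if (ret < nr_frames) {
		/* Buffer straddles vmas of different types */
		ret = -EFAULT;
		goto out_put;
	}

	pages = frame_vector_pages(vec);
	if (IS_ERR(pages)) {
		/* VM_IO / VM_PFNMAP area: fall back to raw pfns */
		unsigned long *pfns = frame_vector_pfns(vec);

		/* ... hand pfns[0..nr_frames-1] to the device ... */
	} else {
		/* Normal vma: we hold a reference on each page */
		/* ... map pages[0..nr_frames-1] for DMA ... */
	}
	ret = 0;
	/* ... later, once the device is done with the buffer: */
out_put:
	put_vaddr_frames(vec);
out_destroy:
	frame_vector_destroy(vec);
	return ret;
}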
 include/linux/mm.h |  44 +++++++++++
 mm/gup.c           | 214 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 258 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0755b9fd03a7..dcd1f02a78e9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -20,6 +20,7 @@
 #include <linux/shrinker.h>
 #include <linux/resource.h>
 #include <linux/page_ext.h>
+#include <linux/err.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -1197,6 +1198,49 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 		    int write, int force, struct page **pages);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
+
+/* Container for pinned pfns / pages */
+struct frame_vector {
+	unsigned int nr_allocated;	/* Number of frames we have space for */
+	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
+	bool got_ref;		/* Did we pin pages by getting page ref? */
+	bool is_pfns;		/* Does array contain pages or pfns? */
+	void *ptrs[0];		/* Array of pinned pfns / pages. Use
+				 * frame_vector_pages() or frame_vector_pfns()
+				 * for access */
+};
+
+struct frame_vector *frame_vector_create(unsigned int nr_frames);
+void frame_vector_destroy(struct frame_vector *vec);
+int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+		     bool write, bool force, struct frame_vector *vec);
+void put_vaddr_frames(struct frame_vector *vec);
+int frame_vector_to_pages(struct frame_vector *vec);
+void frame_vector_to_pfns(struct frame_vector *vec);
+
+static inline unsigned int frame_vector_count(struct frame_vector *vec)
+{
+	return vec->nr_frames;
+}
+
+static inline struct page **frame_vector_pages(struct frame_vector *vec)
+{
+	if (vec->is_pfns) {
+		int err = frame_vector_to_pages(vec);
+
+		if (err)
+			return ERR_PTR(err);
+	}
+	return (struct page **)(vec->ptrs);
+}
+
+static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
+{
+	if (!vec->is_pfns)
+		frame_vector_to_pfns(vec);
+	return (unsigned long *)(vec->ptrs);
+}
+
 struct kvec;
 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
 			struct page **pages);
diff --git a/mm/gup.c b/mm/gup.c
index 6297f6bccfb1..8db5c40e65c4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -8,6 +8,7 @@
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/vmalloc.h>
 
 #include <linux/sched.h>
 #include <linux/rwsem.h>
@@ -936,6 +937,219 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 	return ret;	/* 0 or negative error code */
 }
 
+/**
+ * get_vaddr_frames() - map virtual addresses to pfns
+ * @start:	starting user address
+ * @nr_frames:	number of pages / pfns from start to map
+ * @write:	whether pages will be written to by the caller
+ * @force:	whether to force write access even if user mapping is
+ *		readonly. This will result in the page being COWed even
+ *		in MAP_SHARED mappings. You do not want this.
+ * @vec:	structure which receives pages / pfns of the addresses mapped.
+ *		It should have space for at least nr_frames entries.
+ *
+ * This function maps virtual addresses from @start and fills the @vec
+ * structure with page frame numbers or page pointers to the corresponding
+ * pages (the choice depends on the type of the vma underlying the virtual
+ * address). If @start belongs to a normal vma, the function grabs a
+ * reference to each of the pages to pin them in memory. If @start belongs
+ * to a VM_IO | VM_PFNMAP vma, we don't touch page structures and the caller
+ * must make sure the pfns aren't reused for anything else while they are in
+ * use.
+ *
+ * The function returns the number of pages mapped, which may be less than
+ * @nr_frames. In particular, we stop mapping as soon as a vma of a
+ * different type underlies the specified range of virtual addresses.
+ *
+ * This function takes care of grabbing mmap_sem as necessary.
+ */
+int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+		     bool write, bool force, struct frame_vector *vec)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int ret = 0;
+	int err;
+	int locked = 1;
+
+	if (nr_frames == 0)
+		return 0;
+
+	if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
+		nr_frames = vec->nr_allocated;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma_intersection(mm, start, start + 1);
+	if (!vma) {
+		ret = -EFAULT;
+		goto out;
+	}
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
+		vec->got_ref = 1;
+		vec->is_pfns = 0;
+		ret = get_user_pages_locked(current, mm, start, nr_frames,
+			write, force, (struct page **)(vec->ptrs), &locked);
+		goto out;
+	}
+
+	vec->got_ref = 0;
+	vec->is_pfns = 1;
+	do {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
+			err = follow_pfn(vma, start, &nums[ret]);
+			if (err) {
+				if (ret == 0)
+					ret = err;
+				goto out;
+			}
+			start += PAGE_SIZE;
+			ret++;
+		}
+		/*
+		 * We stop when we have enough pages or when the VMA doesn't
+		 * completely cover the tail page.
+		 */
+		if (ret >= nr_frames || start < vma->vm_end)
+			break;
+		vma = find_vma_intersection(mm, start, start + 1);
+	} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
+out:
+	if (locked)
+		up_read(&mm->mmap_sem);
+	if (!ret)
+		ret = -EFAULT;
+	if (ret > 0)
+		vec->nr_frames = ret;
+	return ret;
+}
+EXPORT_SYMBOL(get_vaddr_frames);
+
+/**
+ * put_vaddr_frames() - drop references to pages if get_vaddr_frames()
+ *			acquired them
+ * @vec:	frame vector to put
+ *
+ * Drop references to pages if get_vaddr_frames() acquired them. We also
+ * invalidate the frame vector so that it is prepared for the next call into
+ * get_vaddr_frames().
+ */
+void put_vaddr_frames(struct frame_vector *vec)
+{
+	int i;
+	struct page **pages;
+
+	if (!vec->got_ref)
+		goto out;
+	pages = frame_vector_pages(vec);
+	/*
+	 * frame_vector_pages() might have needed to do a conversion when
+	 * get_vaddr_frames() got pages but vec was later converted to pfns.
+	 * But it shouldn't really fail to convert pfns back...
+	 */
+	BUG_ON(IS_ERR(pages));
+	for (i = 0; i < vec->nr_frames; i++)
+		put_page(pages[i]);
+	vec->got_ref = 0;
+out:
+	vec->nr_frames = 0;
+}
+EXPORT_SYMBOL(put_vaddr_frames);
+
+/**
+ * frame_vector_to_pages - convert frame vector to contain page pointers
+ * @vec:	frame vector to convert
+ *
+ * Convert @vec to contain an array of page pointers. If the conversion is
+ * successful, return 0. Otherwise return an error.
+ */
+int frame_vector_to_pages(struct frame_vector *vec)
+{
+	int i;
+	unsigned long *nums;
+	struct page **pages;
+
+	if (!vec->is_pfns)
+		return 0;
+	nums = frame_vector_pfns(vec);
+	for (i = 0; i < vec->nr_frames; i++)
+		if (!pfn_valid(nums[i]))
+			return -EINVAL;
+	pages = (struct page **)nums;
+	for (i = 0; i < vec->nr_frames; i++)
+		pages[i] = pfn_to_page(nums[i]);
+	vec->is_pfns = 0;
+	return 0;
+}
+EXPORT_SYMBOL(frame_vector_to_pages);
+
+/**
+ * frame_vector_to_pfns - convert frame vector to contain pfns
+ * @vec:	frame vector to convert
+ *
+ * Convert @vec to contain an array of pfns.
+ */
+void frame_vector_to_pfns(struct frame_vector *vec)
+{
+	int i;
+	unsigned long *nums;
+	struct page **pages;
+
+	if (vec->is_pfns)
+		return;
+	pages = (struct page **)(vec->ptrs);
+	nums = (unsigned long *)pages;
+	for (i = 0; i < vec->nr_frames; i++)
+		nums[i] = page_to_pfn(pages[i]);
+	vec->is_pfns = 1;
+}
+EXPORT_SYMBOL(frame_vector_to_pfns);
+
+/**
+ * frame_vector_create() - allocate & initialize structure for pinned pfns
+ * @nr_frames:	number of frame slots we should reserve
+ *
+ * Allocate and initialize a struct frame_vector able to hold @nr_frames
+ * frames.
+ */
+struct frame_vector *frame_vector_create(unsigned int nr_frames)
+{
+	struct frame_vector *vec;
+	int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames;
+
+	if (WARN_ON_ONCE(nr_frames == 0))
+		return NULL;
+	/*
+	 * Avoid higher order allocations, use vmalloc instead. It should
+	 * be rare anyway.
+	 */
+	if (size <= PAGE_SIZE)
+		vec = kmalloc(size, GFP_KERNEL);
+	else
+		vec = vmalloc(size);
+	if (!vec)
+		return NULL;
+	vec->nr_allocated = nr_frames;
+	vec->nr_frames = 0;
+	return vec;
+}
+EXPORT_SYMBOL(frame_vector_create);
+
+/**
+ * frame_vector_destroy() - free memory allocated to carry frame vector
+ * @vec:	Frame vector to free
+ *
+ * Free the structure allocated by frame_vector_create() to carry frames.
+ */
+void frame_vector_destroy(struct frame_vector *vec)
+{
+	if (!is_vmalloc_addr(vec))
+		kfree(vec);
+	else
+		vfree(vec);
+}
+EXPORT_SYMBOL(frame_vector_destroy);
+
 /**
  * get_dump_page() - pin user page in memory while writing it to core dump
  * @addr: user address
-- 
2.1.4
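Postscript for readers: a hypothetical sketch, not part of the patch,
of how a caller can switch between the two representations. Because
ptrs[] stores either struct page pointers or pfns in the same array,
frame_vector_to_pages() and frame_vector_to_pfns() convert in place
without reallocating; set_page_dirty_lock() here stands in for whatever
cleanup a real driver would do before unpinning.

static void example_switch_views(struct frame_vector *vec)
{
	unsigned long *pfns;
	struct page **pages;
	unsigned int i;

	/* Converts pages -> pfns in place if needed; cannot fail */
	pfns = frame_vector_pfns(vec);
	/* ... e.g. program an IOMMU with pfns[0..nr_frames-1] ... */

	/* Converts back; fails for pfns without struct page (VM_PFNMAP) */
	pages = frame_vector_pages(vec);
	if (IS_ERR(pages))
		return;
	for (i = 0; i < frame_vector_count(vec); i++)
		set_page_dirty_lock(pages[i]);
}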