* [PATCH/RFC] ion: add movability support for page pools
From: Vitaly Wool @ 2017-09-05 12:55 UTC
  To: Laura Abbott, Sumit Semwal; +Cc: linux-kernel, Oleksiy.Avramchenko

The ion page pool may become quite large, with its pages scattered
all around kernel memory. These pages are not actually in use, so
moving them around to reduce fragmentation is quite cheap: there is
no need to copy their contents.

This patch implements the callbacks that let the page migration core
move order-0 pool pages.

Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
---
 drivers/staging/android/ion/ion.h           |   2 +
 drivers/staging/android/ion/ion_page_pool.c | 164 +++++++++++++++++++++++++++-
 2 files changed, 162 insertions(+), 4 deletions(-)

diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index fa9ed81..a948cb2 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -320,6 +320,7 @@ size_t ion_heap_freelist_size(struct ion_heap *heap);
  * @order:		order of pages in the pool
  * @list:		plist node for list of pools
  * @cached:		it's cached pool or not
+ * @inode:		inode for ion_pool pseudo filesystem
  *
  * Allows you to keep a pool of pre allocated pages to use from your heap.
  * Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -336,6 +337,7 @@ struct ion_page_pool {
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct plist_node list;
+	struct inode *inode;
 };
 
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 817849d..3c6ba9c 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -19,12 +19,17 @@
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/list.h>
+#include <linux/migrate.h>
+#include <linux/mount.h>
+#include <linux/page-flags.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 
 #include "ion.h"
 
+#define ION_PAGE_CACHE	1
+
 static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
 	struct page *page = alloc_pages(pool->gfp_mask, pool->order);
@@ -37,12 +42,19 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 static void ion_page_pool_free_pages(struct ion_page_pool *pool,
 				     struct page *page)
 {
+	if (pool->inode && pool->order == 0) {
+		lock_page(page);
+		__ClearPageMovable(page);
+		unlock_page(page);
+	}
+
 	__free_pages(page, pool->order);
 }
 
 static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 {
 	mutex_lock(&pool->mutex);
+	set_bit(ION_PAGE_CACHE, &page->private);
 	if (PageHighMem(page)) {
 		list_add_tail(&page->lru, &pool->high_items);
 		pool->high_count++;
@@ -50,6 +62,9 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 		list_add_tail(&page->lru, &pool->low_items);
 		pool->low_count++;
 	}
+
+	if (pool->inode && pool->order == 0)
+		__SetPageMovable(page, pool->inode->i_mapping);
 	mutex_unlock(&pool->mutex);
 	return 0;
 }
@@ -68,7 +83,9 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
 		pool->low_count--;
 	}
 
-	list_del(&page->lru);
+	clear_bit(ION_PAGE_CACHE, &page->private);
+
+	list_del_init(&page->lru);
 	return page;
 }
 
@@ -85,8 +102,13 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
 		page = ion_page_pool_remove(pool, false);
 	mutex_unlock(&pool->mutex);
 
-	if (!page)
+	if (!page) {
 		page = ion_page_pool_alloc_pages(pool);
+	} else {
+		lock_page(page);
+		__ClearPageMovable(page);
+		unlock_page(page);
+	}
 
 	return page;
 }
@@ -146,6 +168,136 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 	return freed;
 }
 
+static bool ion_page_pool_isolate(struct page *page, isolate_mode_t mode)
+{
+	struct ion_page_pool *pool;
+	struct address_space *mapping = page_mapping(page);
+
+	VM_BUG_ON_PAGE(PageIsolated(page), page);
+
+	if (!mapping)
+		return false;
+	pool = mapping->private_data;
+
+	mutex_lock(&pool->mutex);
+	if (unlikely(!test_bit(ION_PAGE_CACHE, &page->private))) {
+		mutex_unlock(&pool->mutex);
+		return false;
+	}
+
+	list_del(&page->lru);
+	if (PageHighMem(page))
+		pool->high_count--;
+	else
+		pool->low_count--;
+	mutex_unlock(&pool->mutex);
+
+	return true;
+}
+
+static int ion_page_pool_migrate(struct address_space *mapping,
+				 struct page *newpage,
+				 struct page *page, enum migrate_mode mode)
+{
+	struct ion_page_pool *pool = mapping->private_data;
+
+	VM_BUG_ON_PAGE(!PageMovable(page), page);
+	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+	lock_page(page);
+	set_bit(ION_PAGE_CACHE, &newpage->private);
+	__SetPageMovable(newpage, page_mapping(page));
+	get_page(newpage);
+	__ClearPageMovable(page);
+	clear_bit(ION_PAGE_CACHE, &page->private);
+	unlock_page(page);
+	mutex_lock(&pool->mutex);
+	if (PageHighMem(newpage)) {
+		list_add_tail(&newpage->lru, &pool->high_items);
+		pool->high_count++;
+	} else {
+		list_add_tail(&newpage->lru, &pool->low_items);
+		pool->low_count++;
+	}
+	mutex_unlock(&pool->mutex);
+	put_page(page);
+	return 0;
+}
+
+static void ion_page_pool_putback(struct page *page)
+{
+	/*
+	 * The migrate callback either succeeds or returns -EAGAIN, in
+	 * which case it is retried until it succeeds, so this callback
+	 * is not needed.
+	 */
+}
+
+static struct dentry *ion_pool_do_mount(struct file_system_type *fs_type,
+				int flags, const char *dev_name, void *data)
+{
+	static const struct dentry_operations ops = {
+		.d_dname = simple_dname,
+	};
+
+	return mount_pseudo(fs_type, "ion_pool:", NULL, &ops, 0x77);
+}
+
+static struct file_system_type ion_pool_fs = {
+	.name		= "ion_pool",
+	.mount		= ion_pool_do_mount,
+	.kill_sb	= kill_anon_super,
+};
+
+static int ion_pool_cnt;
+static struct vfsmount *ion_pool_mnt;
+static int ion_pool_mount(void)
+{
+	int ret = 0;
+
+	ion_pool_mnt = kern_mount(&ion_pool_fs);
+	if (IS_ERR(ion_pool_mnt))
+		ret = PTR_ERR(ion_pool_mnt);
+
+	return ret;
+}
+
+static const struct address_space_operations ion_pool_aops = {
+	.isolate_page = ion_page_pool_isolate,
+	.migratepage = ion_page_pool_migrate,
+	.putback_page = ion_page_pool_putback,
+};
+
+static int ion_pool_register_migration(struct ion_page_pool *pool)
+{
+	int ret = simple_pin_fs(&ion_pool_fs, &ion_pool_mnt, &ion_pool_cnt);
+
+	if (ret < 0) {
+		pr_err("Cannot mount pseudo fs: %d\n", ret);
+		return ret;
+	}
+	pool->inode = alloc_anon_inode(ion_pool_mnt->mnt_sb);
+	if (IS_ERR(pool->inode)) {
+		ret = PTR_ERR(pool->inode);
+		pool->inode = NULL;
+		simple_release_fs(&ion_pool_mnt, &ion_pool_cnt);
+		return ret;
+	}
+
+	pool->inode->i_mapping->private_data = pool;
+	pool->inode->i_mapping->a_ops = &ion_pool_aops;
+	return 0;
+}
+
+static void ion_pool_unregister_migration(struct ion_page_pool *pool)
+{
+	if (pool->inode) {
+		iput(pool->inode);
+		pool->inode = NULL;
+		simple_release_fs(&ion_pool_mnt, &ion_pool_cnt);
+	}
+}
+
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
 					   bool cached)
 {
@@ -161,19 +313,23 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
 	pool->order = order;
 	mutex_init(&pool->mutex);
 	plist_node_init(&pool->list, order);
-	if (cached)
+	pool->inode = NULL;
+	if (cached) {
 		pool->cached = true;
+		ion_pool_register_migration(pool);
+	}
 
 	return pool;
 }
 
 void ion_page_pool_destroy(struct ion_page_pool *pool)
 {
+	ion_pool_unregister_migration(pool);
 	kfree(pool);
 }
 
 static int __init ion_page_pool_init(void)
 {
-	return 0;
+	return ion_pool_mount();
 }
 device_initcall(ion_page_pool_init);
-- 
2.7.4


* Re: [PATCH/RFC] ion: add movability support for page pools
From: Laura Abbott @ 2017-09-06  0:19 UTC
  To: Vitaly Wool, Sumit Semwal; +Cc: linux-kernel, Oleksiy.Avramchenko

On 09/05/2017 05:55 AM, Vitaly Wool wrote:
> The ion page pool may become quite large, with its pages scattered
> all around kernel memory. These pages are not actually in use, so
> moving them around to reduce fragmentation is quite cheap: there is
> no need to copy their contents.
> 

Can you give a few more details about what this helps with?
Some benchmark numbers?

It looks like something similar was implemented for zsmalloc.
Are we going to start seeing a proliferation of pseudo filesystems
for each subsystem that wants to migrate pages?


* Re: [PATCH/RFC] ion: add movability support for page pools
From: Vitaly Wool @ 2017-09-06 12:13 UTC
  To: Laura Abbott; +Cc: Sumit Semwal, LKML, Oleksiy.Avramchenko

2017-09-06 2:19 GMT+02:00 Laura Abbott <labbott@redhat.com>:
> On 09/05/2017 05:55 AM, Vitaly Wool wrote:
>> The ion page pool may become quite large, with its pages scattered
>> all around kernel memory. These pages are not actually in use, so
>> moving them around to reduce fragmentation is quite cheap: there is
>> no need to copy their contents.
>>
>
> Can you give a few more details about what this helps with?
> Some benchmark numbers?

We see memory fragmentation increase sharply over time when the
camera is used a lot. Neither background compaction nor user-triggered
compaction helps much:

# cat /proc/buddyinfo
Node 0, zone DMA 4366 2404 6 1 0 1 1 1 0 0 0
Node 0, zone Normal 2736 2094 91 21 8 0 1 1 0 0 0
# echo 1 > /proc/sys/vm/compact_memory
# cat /proc/buddyinfo
Node 0, zone DMA 2591 1955 391 113 13 2 1 1 0 0 0
Node 0, zone Normal 2159 1155 487 158 9 0 1 1 0 0 0


With the patch, triggering compaction gives far better results:

# cat /proc/buddyinfo
Node 0, zone DMA 3101 4845 605 0 0 0 0 0 0 0 0
Node 0, zone Normal 5271 4459 573 20 3 2 3 3 1 0 0
# echo 1 > /proc/sys/vm/compact_memory
# cat /proc/buddyinfo
Node 0, zone DMA 579 303 151 114 91 67 30 24 7 2 1
Node 0, zone Normal 762 508 400 271 195 100 45 10 5 0 0
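
(For reference: column k of /proc/buddyinfo is the number of free
blocks of 2^k contiguous pages, so the improvement shows up as the
counts shifting toward the higher orders. To condense each line into
a single number, a quick user-space sketch -- illustration only, not
part of the patch -- could look like this:)

#include <stdio.h>

/*
 * Print, per zone, the share of free memory that sits in blocks of
 * at least order 4 (64K with 4K pages). Column k of /proc/buddyinfo
 * is the number of free blocks of 2^k contiguous pages.
 */
int main(void)
{
	char line[512], zone[64];
	FILE *f = fopen("/proc/buddyinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		unsigned long nblocks, total = 0, high = 0;
		int node, k, n, off = 0;

		if (sscanf(line, "Node %d, zone %63s%n", &node, zone, &off) != 2)
			continue;
		for (k = 0; sscanf(line + off, " %lu%n", &nblocks, &n) == 1; k++) {
			off += n;
			total += nblocks << k;	/* pages held in 2^k-page blocks */
			if (k >= 4)
				high += nblocks << k;
		}
		printf("node %d zone %-8s: %.1f%% of free pages in order >= 4 blocks\n",
		       node, zone, total ? 100.0 * high / total : 0.0);
	}
	fclose(f);
	return 0;
}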

> It looks like something similar was implemented for zsmalloc.
> Are we going to start seeing a proliferation of pseudo filesystems
> for each subsystem that wants to migrate pages?

You're right. Still, IMV cached free buffers should be movable because
there's essentially no reason why they shouldn't be, and scattered
empty buffers standing in the way of compaction makes no sense to me.
As for the pseudo filesystem: that trick is currently required to
support moving ion cache buffers, so we just follow the established
pattern, which boils down to the sketch below.
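
(Condensed from the patch above, with error handling omitted and the
per-pool/per-page statements shown outside their functions -- a
sketch, not compilable code:)

	/* a pseudo filesystem provides an inode whose mapping carries
	 * the migration callbacks */
	static struct file_system_type ion_pool_fs = {
		.name		= "ion_pool",
		.mount		= ion_pool_do_mount,	/* mount_pseudo() wrapper */
		.kill_sb	= kill_anon_super,
	};

	static const struct address_space_operations ion_pool_aops = {
		.isolate_page	= ion_page_pool_isolate,
		.migratepage	= ion_page_pool_migrate,
		.putback_page	= ion_page_pool_putback,
	};

	/* once per pool: pin the fs and take an anonymous inode */
	pool->inode = alloc_anon_inode(ion_pool_mnt->mnt_sb);
	pool->inode->i_mapping->a_ops = &ion_pool_aops;
	pool->inode->i_mapping->private_data = pool;

	/* when a page enters the pool: */
	__SetPageMovable(page, pool->inode->i_mapping);

	/* when a page leaves the pool (page lock held): */
	__ClearPageMovable(page);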

~vitaly

