* [PATCH 1/5] drm/ttm: add page order in page pool
@ 2017-11-22  9:17 Roger He
       [not found] ` <1511342234-18570-1-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
                   ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Roger He @ 2017-11-22  9:17 UTC (permalink / raw)
  To: amd-gfx, dri-devel; +Cc: Roger He

to indicate page order for each element in the pool
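
For illustration only (not part of the patch, and assuming HPAGE_PMD_ORDER = 9,
i.e. 512 pages per huge-pool element), the bookkeeping below converts between
pool elements and pages roughly like this:

        /* huge pool: pool->order = 9, one element covers 1 << 9 = 512 pages */
        nr_free_pool = nr_free >> pool->order;                  /* pages    -> elements */
        freed += (nr_free_pool - shrink_pages) << pool->order;  /* elements -> pages    */
        count += pool->npages << pool->order;                   /* shrink_count reports pages */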

Change-Id: Ic609925ca5d2a5d4ad49d6becf505388ce3624cf
Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 38 +++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 7385785..a02bd65 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -84,6 +84,7 @@ struct ttm_page_pool {
 	char			*name;
 	unsigned long		nfrees;
 	unsigned long		nrefills;
+	unsigned int		order;
 };
 
 /**
@@ -415,6 +416,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
 	unsigned long freed = 0;
+	unsigned int nr_free_pool;
 
 	if (!mutex_trylock(&lock))
 		return SHRINK_STOP;
@@ -424,10 +426,15 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		unsigned nr_free = shrink_pages;
 		if (shrink_pages == 0)
 			break;
+
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 		/* OK to use static buffer since global mutex is held. */
-		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
-		freed += nr_free - shrink_pages;
+		nr_free_pool = (nr_free >> pool->order);
+		if (nr_free_pool == 0)
+			continue;
+
+		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
+		freed += ((nr_free_pool - shrink_pages) << pool->order);
 	}
 	mutex_unlock(&lock);
 	return freed;
@@ -439,9 +446,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	unsigned i;
 	unsigned long count = 0;
+	struct ttm_page_pool *pool;
 
-	for (i = 0; i < NUM_POOLS; ++i)
-		count += _manager->pools[i].npages;
+	for (i = 0; i < NUM_POOLS; ++i) {
+		pool = &_manager->pools[i];
+		count += (pool->npages << pool->order);
+	}
 
 	return count;
 }
@@ -935,7 +945,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 }
 
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
-		char *name)
+		char *name, unsigned int order)
 {
 	spin_lock_init(&pool->lock);
 	pool->fill_lock = false;
@@ -943,11 +953,17 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
 	pool->npages = pool->nfrees = 0;
 	pool->gfp_flags = flags;
 	pool->name = name;
+	pool->order = order;
 }
 
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
 	int ret;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	unsigned order = HPAGE_PMD_ORDER;
+#else
+	unsigned order = 0;
+#endif
 
 	WARN_ON(_manager);
 
@@ -955,23 +971,23 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 
-	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
 
-	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
 
 	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
-				  GFP_USER | GFP_DMA32, "wc dma");
+				  GFP_USER | GFP_DMA32, "wc dma", 0);
 
 	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
-				  GFP_USER | GFP_DMA32, "uc dma");
+				  GFP_USER | GFP_DMA32, "uc dma", 0);
 
 	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
 				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP),
-				  "wc huge");
+				  "wc huge", order);
 
 	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
 				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP)
-				  , "uc huge");
+				  , "uc huge", order);
 
 	_manager->options.max_size = max_pages;
 	_manager->options.small = SMALL_ALLOCATION;
-- 
2.7.4

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


* [PATCH 2/5] drm/ttm: add set_pages_wb for handling page order more than zero
       [not found] ` <1511342234-18570-1-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
@ 2017-11-22  9:17   ` Roger He
  2017-11-22  9:17   ` [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put Roger He
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 10+ messages in thread
From: Roger He @ 2017-11-22  9:17 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Roger He

Change-Id: Idf5ccb579d264b343199d8b8344bddeec2c0019f
Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index a02bd65..9b48b93 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -226,6 +226,17 @@ static struct kobj_type ttm_pool_kobj_type = {
 static struct ttm_pool_manager *_manager;
 
 #ifndef CONFIG_X86
+static int set_pages_wb(struct page *page, int numpages)
+{
+#if IS_ENABLED(CONFIG_AGP)
+	int i;
+
+	for (i = 0; i < numpages; i++)
+		unmap_page_from_agp(page++);
+#endif
+	return 0;
+}
+
 static int set_pages_array_wb(struct page **pages, int addrinarray)
 {
 #if IS_ENABLED(CONFIG_AGP)
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put
       [not found] ` <1511342234-18570-1-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
  2017-11-22  9:17   ` [PATCH 2/5] drm/ttm: add set_pages_wb for handling page order more than zero Roger He
@ 2017-11-22  9:17   ` Roger He
       [not found]     ` <1511342234-18570-3-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
  2017-11-22  9:17   ` [PATCH 4/5] drm/ttm: roundup the shrink request to prevent skipping the huge pool Roger He
  2017-11-22  9:36   ` [PATCH 1/5] drm/ttm: add page order in page pool Chunming Zhou
  3 siblings, 1 reply; 10+ messages in thread
From: Roger He @ 2017-11-22  9:17 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Roger He

Change-Id: Ia55b206d95812c5afcfd6cec29f580758d1f50f0
Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 9b48b93..2dc83c0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -299,13 +299,16 @@ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
 }
 
 /* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages)
+static void ttm_pages_put(struct page *pages[], unsigned npages,
+		unsigned int order)
 {
-	unsigned i;
-	if (set_pages_array_wb(pages, npages))
-		pr_err("Failed to set %d pages to wb!\n", npages);
-	for (i = 0; i < npages; ++i)
-		__free_page(pages[i]);
+	unsigned int i, pages_nr = (1 << order);
+
+	for (i = 0; i < npages; ++i) {
+		if (set_pages_wb(pages[i], pages_nr))
+			pr_err("Failed to set %d pages to wb!\n", pages_nr);
+		__free_pages(pages[i], order);
+	}
 }
 
 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
@@ -368,7 +371,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 			 */
 			spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-			ttm_pages_put(pages_to_free, freed_pages);
+			ttm_pages_put(pages_to_free, freed_pages, pool->order);
 			if (likely(nr_free != FREE_ALL_PAGES))
 				nr_free -= freed_pages;
 
@@ -403,7 +406,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
 	if (freed_pages)
-		ttm_pages_put(pages_to_free, freed_pages);
+		ttm_pages_put(pages_to_free, freed_pages, pool->order);
 out:
 	if (pages_to_free != static_buf)
 		kfree(pages_to_free);
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* [PATCH 4/5] drm/ttm: roundup the shrink request to prevent skipping the huge pool
       [not found] ` <1511342234-18570-1-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
  2017-11-22  9:17   ` [PATCH 2/5] drm/ttm: add set_pages_wb for handling page order more than zero Roger He
  2017-11-22  9:17   ` [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put Roger He
@ 2017-11-22  9:17   ` Roger He
  2017-11-22  9:36   ` [PATCH 1/5] drm/ttm: add page order in page pool Chunming Zhou
  3 siblings, 0 replies; 10+ messages in thread
From: Roger He @ 2017-11-22  9:17 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Roger He

e.g. if the shrink request is less than 512 pages, the existing logic skips the huge pool entirely
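
A worked example of the skip and of the fix (illustrative only; the 512 assumes
HPAGE_PMD_ORDER = 9 for the huge pools):

        nr_free = 100;                          /* shrink request below 512 pages       */
        nr_free >> pool->order;                 /* 100 >> 9 = 0, huge pool was skipped  */
        roundup(nr_free, 1 << pool->order)
                >> pool->order;                 /* roundup(100, 512) >> 9 = 1 element,  */
                                                /* so at least one huge entry is freed  */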

Change-Id: Id8bd4d1ecff9f3ab14355e2dbd1c59b9fe824e01
Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 2dc83c0..a710d9e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -438,17 +438,19 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
+		unsigned page_nr;
+
 		if (shrink_pages == 0)
 			break;
 
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+		page_nr = (1 << pool->order);
 		/* OK to use static buffer since global mutex is held. */
-		nr_free_pool = (nr_free >> pool->order);
-		if (nr_free_pool == 0)
-			continue;
-
+		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
 		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
-		freed += ((nr_free_pool - shrink_pages) << pool->order);
+		freed += (nr_free_pool - shrink_pages) << pool->order;
+		if (freed >= sc->nr_to_scan)
+			break;
 	}
 	mutex_unlock(&lock);
 	return freed;
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* [PATCH 5/5] drm/ttm: delete set_pages_array_wb since no one uses it anymore
  2017-11-22  9:17 [PATCH 1/5] drm/ttm: add page order in page pool Roger He
       [not found] ` <1511342234-18570-1-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
@ 2017-11-22  9:17 ` Roger He
  2017-11-22  9:31 ` [PATCH 1/5] drm/ttm: add page order in page pool Christian König
  2 siblings, 0 replies; 10+ messages in thread
From: Roger He @ 2017-11-22  9:17 UTC (permalink / raw)
  To: amd-gfx, dri-devel; +Cc: Roger He

Change-Id: I9fa45af447c3c78d0b7b2b8958081e584c560120
Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index a710d9e..388c5b1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -237,17 +237,6 @@ static int set_pages_wb(struct page *page, int numpages)
 	return 0;
 }
 
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		unmap_page_from_agp(pages[i]);
-#endif
-	return 0;
-}
-
 static int set_pages_array_wc(struct page **pages, int addrinarray)
 {
 #if IS_ENABLED(CONFIG_AGP)
-- 
2.7.4

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


* Re: [PATCH 1/5] drm/ttm: add page order in page pool
  2017-11-22  9:17 [PATCH 1/5] drm/ttm: add page order in page pool Roger He
       [not found] ` <1511342234-18570-1-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
  2017-11-22  9:17 ` [PATCH 5/5] drm/ttm: delete set_pages_array_wb since no one uses it anymore Roger He
@ 2017-11-22  9:31 ` Christian König
  2 siblings, 0 replies; 10+ messages in thread
From: Christian König @ 2017-11-22  9:31 UTC (permalink / raw)
  To: Roger He, amd-gfx, dri-devel

On 22.11.2017 at 10:17, Roger He wrote:
> to indicate page order for each element in the pool
>
> Change-Id: Ic609925ca5d2a5d4ad49d6becf505388ce3624cf
> Signed-off-by: Roger He <Hongbo.He@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com> for this one.

Feel free to commit it preliminarily, but I think we still need to work on
the rest.

Regards,
Christian.

> ---
>   drivers/gpu/drm/ttm/ttm_page_alloc.c | 38 +++++++++++++++++++++++++-----------
>   1 file changed, 27 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> index 7385785..a02bd65 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> @@ -84,6 +84,7 @@ struct ttm_page_pool {
>   	char			*name;
>   	unsigned long		nfrees;
>   	unsigned long		nrefills;
> +	unsigned int		order;
>   };
>   
>   /**
> @@ -415,6 +416,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>   	struct ttm_page_pool *pool;
>   	int shrink_pages = sc->nr_to_scan;
>   	unsigned long freed = 0;
> +	unsigned int nr_free_pool;
>   
>   	if (!mutex_trylock(&lock))
>   		return SHRINK_STOP;
> @@ -424,10 +426,15 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>   		unsigned nr_free = shrink_pages;
>   		if (shrink_pages == 0)
>   			break;
> +
>   		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
>   		/* OK to use static buffer since global mutex is held. */
> -		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
> -		freed += nr_free - shrink_pages;
> +		nr_free_pool = (nr_free >> pool->order);
> +		if (nr_free_pool == 0)
> +			continue;
> +
> +		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
> +		freed += ((nr_free_pool - shrink_pages) << pool->order);
>   	}
>   	mutex_unlock(&lock);
>   	return freed;
> @@ -439,9 +446,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
>   {
>   	unsigned i;
>   	unsigned long count = 0;
> +	struct ttm_page_pool *pool;
>   
> -	for (i = 0; i < NUM_POOLS; ++i)
> -		count += _manager->pools[i].npages;
> +	for (i = 0; i < NUM_POOLS; ++i) {
> +		pool = &_manager->pools[i];
> +		count += (pool->npages << pool->order);
> +	}
>   
>   	return count;
>   }
> @@ -935,7 +945,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
>   }
>   
>   static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
> -		char *name)
> +		char *name, unsigned int order)
>   {
>   	spin_lock_init(&pool->lock);
>   	pool->fill_lock = false;
> @@ -943,11 +953,17 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
>   	pool->npages = pool->nfrees = 0;
>   	pool->gfp_flags = flags;
>   	pool->name = name;
> +	pool->order = order;
>   }
>   
>   int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
>   {
>   	int ret;
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +	unsigned order = HPAGE_PMD_ORDER;
> +#else
> +	unsigned order = 0;
> +#endif
>   
>   	WARN_ON(_manager);
>   
> @@ -955,23 +971,23 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
>   
>   	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
>   
> -	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
> +	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
>   
> -	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
> +	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
>   
>   	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
> -				  GFP_USER | GFP_DMA32, "wc dma");
> +				  GFP_USER | GFP_DMA32, "wc dma", 0);
>   
>   	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
> -				  GFP_USER | GFP_DMA32, "uc dma");
> +				  GFP_USER | GFP_DMA32, "uc dma", 0);
>   
>   	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
>   				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP),
> -				  "wc huge");
> +				  "wc huge", order);
>   
>   	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
>   				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP)
> -				  , "uc huge");
> +				  , "uc huge", order);
>   
>   	_manager->options.max_size = max_pages;
>   	_manager->options.small = SMALL_ALLOCATION;


_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


* Re: [PATCH 1/5] drm/ttm: add page order in page pool
       [not found] ` <1511342234-18570-1-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
                     ` (2 preceding siblings ...)
  2017-11-22  9:17   ` [PATCH 4/5] drm/ttm: roundup the shrink request to prevent skipping the huge pool Roger He
@ 2017-11-22  9:36   ` Chunming Zhou
  3 siblings, 0 replies; 10+ messages in thread
From: Chunming Zhou @ 2017-11-22  9:36 UTC (permalink / raw)
  To: Roger He, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

The patch set looks OK to me.

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>


On 2017-11-22 17:17, Roger He wrote:
> to indicate page order for each element in the pool
>
> Change-Id: Ic609925ca5d2a5d4ad49d6becf505388ce3624cf
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_page_alloc.c | 38 +++++++++++++++++++++++++-----------
>   1 file changed, 27 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> index 7385785..a02bd65 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> @@ -84,6 +84,7 @@ struct ttm_page_pool {
>   	char			*name;
>   	unsigned long		nfrees;
>   	unsigned long		nrefills;
> +	unsigned int		order;
>   };
>   
>   /**
> @@ -415,6 +416,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>   	struct ttm_page_pool *pool;
>   	int shrink_pages = sc->nr_to_scan;
>   	unsigned long freed = 0;
> +	unsigned int nr_free_pool;
>   
>   	if (!mutex_trylock(&lock))
>   		return SHRINK_STOP;
> @@ -424,10 +426,15 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>   		unsigned nr_free = shrink_pages;
>   		if (shrink_pages == 0)
>   			break;
> +
>   		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
>   		/* OK to use static buffer since global mutex is held. */
> -		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
> -		freed += nr_free - shrink_pages;
> +		nr_free_pool = (nr_free >> pool->order);
> +		if (nr_free_pool == 0)
> +			continue;
> +
> +		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
> +		freed += ((nr_free_pool - shrink_pages) << pool->order);
>   	}
>   	mutex_unlock(&lock);
>   	return freed;
> @@ -439,9 +446,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
>   {
>   	unsigned i;
>   	unsigned long count = 0;
> +	struct ttm_page_pool *pool;
>   
> -	for (i = 0; i < NUM_POOLS; ++i)
> -		count += _manager->pools[i].npages;
> +	for (i = 0; i < NUM_POOLS; ++i) {
> +		pool = &_manager->pools[i];
> +		count += (pool->npages << pool->order);
> +	}
>   
>   	return count;
>   }
> @@ -935,7 +945,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
>   }
>   
>   static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
> -		char *name)
> +		char *name, unsigned int order)
>   {
>   	spin_lock_init(&pool->lock);
>   	pool->fill_lock = false;
> @@ -943,11 +953,17 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
>   	pool->npages = pool->nfrees = 0;
>   	pool->gfp_flags = flags;
>   	pool->name = name;
> +	pool->order = order;
>   }
>   
>   int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
>   {
>   	int ret;
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +	unsigned order = HPAGE_PMD_ORDER;
> +#else
> +	unsigned order = 0;
> +#endif
>   
>   	WARN_ON(_manager);
>   
> @@ -955,23 +971,23 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
>   
>   	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
>   
> -	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
> +	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
>   
> -	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
> +	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
>   
>   	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
> -				  GFP_USER | GFP_DMA32, "wc dma");
> +				  GFP_USER | GFP_DMA32, "wc dma", 0);
>   
>   	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
> -				  GFP_USER | GFP_DMA32, "uc dma");
> +				  GFP_USER | GFP_DMA32, "uc dma", 0);
>   
>   	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
>   				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP),
> -				  "wc huge");
> +				  "wc huge", order);
>   
>   	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
>   				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP)
> -				  , "uc huge");
> +				  , "uc huge", order);
>   
>   	_manager->options.max_size = max_pages;
>   	_manager->options.small = SMALL_ALLOCATION;

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* Re: [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put
       [not found]     ` <1511342234-18570-3-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
@ 2017-11-22 10:00       ` Christian König
  2017-11-22 10:54         ` He, Roger
  0 siblings, 1 reply; 10+ messages in thread
From: Christian König @ 2017-11-22 10:00 UTC (permalink / raw)
  To: Roger He, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

[-- Attachment #1: Type: text/plain, Size: 2654 bytes --]

That completely negates the advantage of setting write back on multiple 
pages at once.

In other words this way we wouldn't need the array any more at all and 
could remove the whole complicated handling.

I'm pretty close to saying just go ahead with that and even clean up the
whole thing with the static array, but I'm not sure that it wouldn't
result in some performance problems.

A possible alternative is the (untested) patch attached, this way we 
move the __free_page()/_free_pages() call out of ttm_pages_put and just 
need to add the correct number of pages to the array in the loop.

Regards,
Christian.

On 22.11.2017 at 10:17, Roger He wrote:
> Change-Id: Ia55b206d95812c5afcfd6cec29f580758d1f50f0
> Signed-off-by: Roger He <Hongbo.He-5C7GfCeVMHo@public.gmane.org>
> ---
>   drivers/gpu/drm/ttm/ttm_page_alloc.c | 19 +++++++++++--------
>   1 file changed, 11 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> index 9b48b93..2dc83c0 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> @@ -299,13 +299,16 @@ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
>   }
>   
>   /* set memory back to wb and free the pages. */
> -static void ttm_pages_put(struct page *pages[], unsigned npages)
> +static void ttm_pages_put(struct page *pages[], unsigned npages,
> +		unsigned int order)
>   {
> -	unsigned i;
> -	if (set_pages_array_wb(pages, npages))
> -		pr_err("Failed to set %d pages to wb!\n", npages);
> -	for (i = 0; i < npages; ++i)
> -		__free_page(pages[i]);
> +	unsigned int i, pages_nr = (1 << order);
> +
> +	for (i = 0; i < npages; ++i) {
> +		if (set_pages_wb(pages[i], pages_nr))
> +			pr_err("Failed to set %d pages to wb!\n", pages_nr);
> +		__free_pages(pages[i], order);
> +	}
>   }
>   
>   static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
> @@ -368,7 +371,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
>   			 */
>   			spin_unlock_irqrestore(&pool->lock, irq_flags);
>   
> -			ttm_pages_put(pages_to_free, freed_pages);
> +			ttm_pages_put(pages_to_free, freed_pages, pool->order);
>   			if (likely(nr_free != FREE_ALL_PAGES))
>   				nr_free -= freed_pages;
>   
> @@ -403,7 +406,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
>   	spin_unlock_irqrestore(&pool->lock, irq_flags);
>   
>   	if (freed_pages)
> -		ttm_pages_put(pages_to_free, freed_pages);
> +		ttm_pages_put(pages_to_free, freed_pages, pool->order);
>   out:
>   	if (pages_to_free != static_buf)
>   		kfree(pages_to_free);



[-- Attachment #2: 0001-drm-ttm-move-__free_page-out-of-ttm_pages_put.patch --]
[-- Type: text/x-patch; name="0001-drm-ttm-move-__free_page-out-of-ttm_pages_put.patch", Size: 2786 bytes --]

From 4fcc6c40e3596e12a9712b01a44d9c0265f04582 Mon Sep 17 00:00:00 2001
From: Christian König <christian.koenig-5C7GfCeVMHo@public.gmane.org>
Date: Wed, 22 Nov 2017 10:54:58 +0100
Subject: [PATCH] drm/ttm: move __free_page out of ttm_pages_put
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This way we can easier free the pages with the right order.

Signed-off-by: Christian König <christian.koenig-5C7GfCeVMHo@public.gmane.org>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 72ea037b7090..4deb9c8441b5 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -286,11 +286,8 @@ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
 /* set memory back to wb and free the pages. */
 static void ttm_pages_put(struct page *pages[], unsigned npages)
 {
-	unsigned i;
 	if (set_pages_array_wb(pages, npages))
 		pr_err("Failed to set %d pages to wb!\n", npages);
-	for (i = 0; i < npages; ++i)
-		__free_page(pages[i]);
 }
 
 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
@@ -315,7 +312,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 {
 	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
 	unsigned long irq_flags;
-	struct page *p;
+	struct list_head freed;
+	struct page *p, *tmp;
 	struct page **pages_to_free;
 	unsigned freed_pages = 0,
 		 npages_to_free = nr_free;
@@ -333,6 +331,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 		return 0;
 	}
 
+	INIT_LIST_HEAD(&freed);
+
 restart:
 	spin_lock_irqsave(&pool->lock, irq_flags);
 
@@ -345,6 +345,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
 			/* remove range of pages from the pool */
 			__list_del(p->lru.prev, &pool->list);
+			/* and add to freed list */
+			__list_add(&p->lru, &freed, &freed);
 
 			ttm_pool_update_free_locked(pool, freed_pages);
 			/**
@@ -380,6 +382,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 	/* remove range of pages from the pool */
 	if (freed_pages) {
 		__list_del(&p->lru, &pool->list);
+		/* and add to freed list */
+		__list_add(&p->lru, &freed, &freed);
 
 		ttm_pool_update_free_locked(pool, freed_pages);
 		nr_free -= freed_pages;
@@ -392,6 +396,10 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 out:
 	if (pages_to_free != static_buf)
 		kfree(pages_to_free);
+
+	list_for_each_entry_safe(p, tmp, &freed, lru)
+		__free_page(p);
+
 	return nr_free;
 }
 
-- 
2.11.0


[-- Attachment #3: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* RE: [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put
  2017-11-22 10:00       ` Christian König
@ 2017-11-22 10:54         ` He, Roger
  0 siblings, 0 replies; 10+ messages in thread
From: He, Roger @ 2017-11-22 10:54 UTC (permalink / raw)
  To: Koenig, Christian, amd-gfx, dri-devel

Hi Christian:

Maybe we can bring back the original logic for page order 0 in ttm_pages_put.

I mean: handle order 0 with set_pages_array_wb and handle order 9 with set_pages_wb.
If we do that, there is no performance concern.
How about that?
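
A minimal sketch of that idea (illustrative only, not code from this series)
could look like:

        /* hypothetical hybrid: batched write-back for order 0, per-page for huge entries */
        static void ttm_pages_put(struct page *pages[], unsigned npages,
                                  unsigned int order)
        {
                unsigned int i, pages_nr = 1 << order;

                if (order == 0 && set_pages_array_wb(pages, npages))
                        pr_err("Failed to set %d pages to wb!\n", npages);

                for (i = 0; i < npages; ++i) {
                        if (order > 0 && set_pages_wb(pages[i], pages_nr))
                                pr_err("Failed to set %d pages to wb!\n", pages_nr);
                        __free_pages(pages[i], order);
                }
        }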

Thanks
Roger(Hongbo.He)
-----Original Message-----
From: Christian König [mailto:ckoenig.leichtzumerken@gmail.com] 
Sent: Wednesday, November 22, 2017 6:01 PM
To: He, Roger <Hongbo.He@amd.com>; amd-gfx@lists.freedesktop.org; dri-devel@lists.freedesktop.org
Subject: Re: [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put

That completely negates the advantage of setting write back on multiple pages at once.

In other words this way we wouldn't need the array any more at all and could remove the whole complicated handling.

I'm pretty close to saying just go ahead with that and even clean up the whole thing with the static array, but I'm not sure that it wouldn't result in some performance problems.

A possible alternative is the (untested) patch attached, this way we move the __free_page()/_free_pages() call out of ttm_pages_put and just need to add the correct number of pages to the array in the loop.

Regards,
Christian.

On 22.11.2017 at 10:17, Roger He wrote:
> Change-Id: Ia55b206d95812c5afcfd6cec29f580758d1f50f0
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_page_alloc.c | 19 +++++++++++--------
>   1 file changed, 11 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c 
> b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> index 9b48b93..2dc83c0 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> @@ -299,13 +299,16 @@ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
>   }
>   
>   /* set memory back to wb and free the pages. */ -static void 
> ttm_pages_put(struct page *pages[], unsigned npages)
> +static void ttm_pages_put(struct page *pages[], unsigned npages,
> +		unsigned int order)
>   {
> -	unsigned i;
> -	if (set_pages_array_wb(pages, npages))
> -		pr_err("Failed to set %d pages to wb!\n", npages);
> -	for (i = 0; i < npages; ++i)
> -		__free_page(pages[i]);
> +	unsigned int i, pages_nr = (1 << order);
> +
> +	for (i = 0; i < npages; ++i) {
> +		if (set_pages_wb(pages[i], pages_nr))
> +			pr_err("Failed to set %d pages to wb!\n", pages_nr);
> +		__free_pages(pages[i], order);
> +	}
>   }
>   
>   static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, 
> @@ -368,7 +371,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
>   			 */
>   			spin_unlock_irqrestore(&pool->lock, irq_flags);
>   
> -			ttm_pages_put(pages_to_free, freed_pages);
> +			ttm_pages_put(pages_to_free, freed_pages, pool->order);
>   			if (likely(nr_free != FREE_ALL_PAGES))
>   				nr_free -= freed_pages;
>   
> @@ -403,7 +406,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
>   	spin_unlock_irqrestore(&pool->lock, irq_flags);
>   
>   	if (freed_pages)
> -		ttm_pages_put(pages_to_free, freed_pages);
> +		ttm_pages_put(pages_to_free, freed_pages, pool->order);
>   out:
>   	if (pages_to_free != static_buf)
>   		kfree(pages_to_free);


_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


* [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put
  2017-11-22  8:06 Roger He
@ 2017-11-22  8:06 ` Roger He
  0 siblings, 0 replies; 10+ messages in thread
From: Roger He @ 2017-11-22  8:06 UTC (permalink / raw)
  To: amd-gfx, dri-devel; +Cc: Roger He, Christian.Koenig

Change-Id: Ia55b206d95812c5afcfd6cec29f580758d1f50f0
Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index fabb082..343db0b 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -299,13 +299,16 @@ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
 }
 
 /* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages)
+static void ttm_pages_put(struct page *pages[], unsigned npages,
+		unsigned int order)
 {
-	unsigned i;
-	if (set_pages_array_wb(pages, npages))
-		pr_err("Failed to set %d pages to wb!\n", npages);
-	for (i = 0; i < npages; ++i)
-		__free_page(pages[i]);
+	unsigned int i, pages_nr = (1 << order);
+
+	for (i = 0; i < npages; ++i) {
+		if (set_pages_wb(pages[i], pages_nr))
+			pr_err("Failed to set %d pages to wb!\n", pages_nr);
+		__free_pages(pages[i], order);
+	}
 }
 
 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
@@ -368,7 +371,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 			 */
 			spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-			ttm_pages_put(pages_to_free, freed_pages);
+			ttm_pages_put(pages_to_free, freed_pages, pool->order);
 			if (likely(nr_free != FREE_ALL_PAGES))
 				nr_free -= freed_pages;
 
@@ -403,7 +406,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
 	if (freed_pages)
-		ttm_pages_put(pages_to_free, freed_pages);
+		ttm_pages_put(pages_to_free, freed_pages, pool->order);
 out:
 	if (pages_to_free != static_buf)
 		kfree(pages_to_free);
-- 
2.7.4

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel



Thread overview: 10+ messages
2017-11-22  9:17 [PATCH 1/5] drm/ttm: add page order in page pool Roger He
     [not found] ` <1511342234-18570-1-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
2017-11-22  9:17   ` [PATCH 2/5] drm/ttm: add set_pages_wb for handling page order more than zero Roger He
2017-11-22  9:17   ` [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put Roger He
     [not found]     ` <1511342234-18570-3-git-send-email-Hongbo.He-5C7GfCeVMHo@public.gmane.org>
2017-11-22 10:00       ` Christian König
2017-11-22 10:54         ` He, Roger
2017-11-22  9:17   ` [PATCH 4/5] drm/ttm: roundup the shrink request to prevent skipping the huge pool Roger He
2017-11-22  9:36   ` [PATCH 1/5] drm/ttm: add page order in page pool Chunming Zhou
2017-11-22  9:17 ` [PATCH 5/5] drm/ttm: delete set_pages_array_wb since no one uses it anymore Roger He
2017-11-22  9:31 ` [PATCH 1/5] drm/ttm: add page order in page pool Christian König
  -- strict thread matches above, loose matches on Subject: below --
2017-11-22  8:06 Roger He
2017-11-22  8:06 ` [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put Roger He
