Subject: [PATCH 1/5] drm/ttm: add page order in page pool
From: Roger He @ 2017-11-22  9:17 UTC
  To: amd-gfx, dri-devel; +Cc: Roger He

Add an order field to struct ttm_page_pool to indicate the page order
of each element in the pool.
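
[Editor's note] For readers outside the TTM pool code: an order-n
element is a block of 2^n contiguous pages, so the huge pools below
(HPAGE_PMD_ORDER, i.e. 9 on x86-64 with 4K pages) cache 512-page blocks
while the ordinary pools stay at order 0. A minimal userspace sketch of
that element/page accounting follows; the names are illustrative, not
TTM's:

/* Illustrative only -- not TTM code. */
#include <stdio.h>

struct pool {                /* hypothetical stand-in for ttm_page_pool */
	unsigned int npages; /* elements currently cached in the pool */
	unsigned int order;  /* each element spans 1 << order pages */
};

static unsigned long pages_in_pool(const struct pool *p)
{
	/* the same shift ttm_pool_shrink_count() uses below */
	return (unsigned long)p->npages << p->order;
}

int main(void)
{
	struct pool huge   = { .npages = 4, .order = 9 }; /* 4 x 2MB blocks */
	struct pool normal = { .npages = 4, .order = 0 }; /* 4 x 4KB pages  */

	printf("%lu\n", pages_in_pool(&huge));   /* 2048 pages */
	printf("%lu\n", pages_in_pool(&normal)); /* 4 pages    */
	return 0;
}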

Change-Id: Ic609925ca5d2a5d4ad49d6becf505388ce3624cf
Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 38 +++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 7385785..a02bd65 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -84,6 +84,7 @@ struct ttm_page_pool {
 	char			*name;
 	unsigned long		nfrees;
 	unsigned long		nrefills;
+	unsigned int		order;
 };
 
 /**
@@ -415,6 +416,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
 	unsigned long freed = 0;
+	unsigned int nr_free_pool;
 
 	if (!mutex_trylock(&lock))
 		return SHRINK_STOP;
@@ -424,10 +426,15 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		unsigned nr_free = shrink_pages;
 		if (shrink_pages == 0)
 			break;
+
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 		/* OK to use static buffer since global mutex is held. */
-		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
-		freed += nr_free - shrink_pages;
+		nr_free_pool = (nr_free >> pool->order);
+		if (nr_free_pool == 0)
+			continue;
+
+		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
+		freed += ((nr_free_pool - shrink_pages) << pool->order);
 	}
 	mutex_unlock(&lock);
 	return freed;
@@ -439,9 +446,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	unsigned i;
 	unsigned long count = 0;
+	struct ttm_page_pool *pool;
 
-	for (i = 0; i < NUM_POOLS; ++i)
-		count += _manager->pools[i].npages;
+	for (i = 0; i < NUM_POOLS; ++i) {
+		pool = &_manager->pools[i];
+		count += (pool->npages << pool->order);
+	}
 
 	return count;
 }
@@ -935,7 +945,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 }
 
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
-		char *name)
+		char *name, unsigned int order)
 {
 	spin_lock_init(&pool->lock);
 	pool->fill_lock = false;
@@ -943,11 +953,17 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
 	pool->npages = pool->nfrees = 0;
 	pool->gfp_flags = flags;
 	pool->name = name;
+	pool->order = order;
 }
 
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
 	int ret;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	unsigned order = HPAGE_PMD_ORDER;
+#else
+	unsigned order = 0;
+#endif
 
 	WARN_ON(_manager);
 
@@ -955,23 +971,23 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 
-	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
 
-	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
 
 	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
-				  GFP_USER | GFP_DMA32, "wc dma");
+				  GFP_USER | GFP_DMA32, "wc dma", 0);
 
 	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
-				  GFP_USER | GFP_DMA32, "uc dma");
+				  GFP_USER | GFP_DMA32, "uc dma", 0);
 
 	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
 				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP),
-				  "wc huge");
+				  "wc huge", order);
 
 	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
 				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP)
-				  , "uc huge");
+				  , "uc huge", order);
 
 	_manager->options.max_size = max_pages;
 	_manager->options.small = SMALL_ALLOCATION;
-- 
2.7.4
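
[Editor's note] On the scan path in this patch: the shrinker's request
(sc->nr_to_scan) is counted in pages while a pool frees whole elements,
hence the nr_free >> pool->order on the way in and the << pool->order
when summing what was freed. When the request is smaller than a single
huge element, the shift truncates nr_free_pool to zero and that pool is
skipped entirely; PATCH 4/5 ("drm/ttm: roundup the shrink request to
prevent skip huge pool") in this series addresses exactly that. A
standalone sketch of the conversion, again with made-up names:

/* Illustrative only -- not TTM code. */
#include <stdio.h>

/* Convert a page-based shrink request into whole elements, "free"
 * them, and report the result back in pages, as the scan loop does. */
static unsigned long scan_one_pool(unsigned long request_pages,
				   unsigned int order)
{
	unsigned long request_elems = request_pages >> order;

	if (request_elems == 0)
		return 0;	/* request < one element: pool skipped */

	/* assume the pool can satisfy the whole request */
	return request_elems << order;
}

int main(void)
{
	/* a 32-page request cannot touch an order-9 (2MB-block) pool... */
	printf("order 9: %lu pages\n", scan_one_pool(32, 9)); /* 0  */
	/* ...but drains up to 32 pages from an order-0 pool */
	printf("order 0: %lu pages\n", scan_one_pool(32, 0)); /* 32 */
	return 0;
}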


Thread overview: 9+ messages
2017-11-22  9:17 [PATCH 1/5] drm/ttm: add page order in page pool Roger He
2017-11-22  9:17   ` [PATCH 2/5] drm/ttm: add set_pages_wb for handling page order more than zero Roger He
2017-11-22  9:17   ` [PATCH 3/5] drm/ttm: add page order support in ttm_pages_put Roger He
2017-11-22 10:00       ` Christian König
2017-11-22 10:54         ` He, Roger
2017-11-22  9:17   ` [PATCH 4/5] drm/ttm: roundup the shrink request to prevent skip huge pool Roger He
2017-11-22  9:36   ` [PATCH 1/5] drm/ttm: add page order in page pool Chunming Zhou
2017-11-22  9:17 ` [PATCH 5/5] drm/ttm: delete set_pages_array_wb since no one use it anymore Roger He
2017-11-22  9:31 ` [PATCH 1/5] drm/ttm: add page order in page pool Christian König
