linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/2] mm/vmscan: add sync_shrinkers function
@ 2021-04-15 11:56 Christian König
  2021-04-15 11:56 ` [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2 Christian König
  2021-04-15 13:23 ` [PATCH 1/2] mm/vmscan: add sync_shrinkers function Daniel Vetter
  0 siblings, 2 replies; 10+ messages in thread
From: Christian König @ 2021-04-15 11:56 UTC (permalink / raw)
  To: linux-mm, linux-kernel, dri-devel; +Cc: vbabka, daniel, ray.huang, akpm

To be able to switch to a spinlock and reduce lock contention in the TTM
shrinker, we don't want to hold a mutex while unmapping and freeing pages
from the pool.

But then we somehow need to prevent a race between (for example) the shrinker
trying to free pages and hotplug trying to remove the device to which those
pages belong.

Taking and releasing the shrinker semaphore on the write side after
unmapping and freeing all pages should make sure that no shrinker is running in
parallel any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 include/linux/shrinker.h |  1 +
 mm/vmscan.c              | 10 ++++++++++
 2 files changed, 11 insertions(+)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 0f80123650e2..6b75dc372fce 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -92,4 +92,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker);
 extern int register_shrinker(struct shrinker *shrinker);
 extern void unregister_shrinker(struct shrinker *shrinker);
 extern void free_prealloced_shrinker(struct shrinker *shrinker);
+extern void sync_shrinkers(void);
 #endif
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 562e87cbd7a1..46cd9c215d73 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -408,6 +408,16 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+/**
+ * sync_shrinkers - Wait for all running shrinkers to complete.
+ */
+void sync_shrinkers(void)
+{
+	down_write(&shrinker_rwsem);
+	up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL(sync_shrinkers);
+
 #define SHRINK_BATCH 128
 
 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
-- 
2.25.1



* [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2
  2021-04-15 11:56 [PATCH 1/2] mm/vmscan: add sync_shrinkers function Christian König
@ 2021-04-15 11:56 ` Christian König
  2021-04-15 14:33   ` Huang Rui
  2021-04-15 20:33   ` Andrew Morton
  2021-04-15 13:23 ` [PATCH 1/2] mm/vmscan: add sync_shrinkers function Daniel Vetter
  1 sibling, 2 replies; 10+ messages in thread
From: Christian König @ 2021-04-15 11:56 UTC (permalink / raw)
  To: linux-mm, linux-kernel, dri-devel; +Cc: vbabka, daniel, ray.huang, akpm

Switch back to using a spinlock by moving the IOMMU unmap outside of the
locked region.

v2: Add a comment explaining why we need sync_shrinkers().

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_pool.c | 44 +++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index cb38b1a17b09..955836d569cc 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
 
-static struct mutex shrinker_lock;
+static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
 static struct shrinker mm_shrinker;
 
@@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
 	spin_lock_init(&pt->lock);
 	INIT_LIST_HEAD(&pt->pages);
 
-	mutex_lock(&shrinker_lock);
+	spin_lock(&shrinker_lock);
 	list_add_tail(&pt->shrinker_list, &shrinker_list);
-	mutex_unlock(&shrinker_lock);
+	spin_unlock(&shrinker_lock);
 }
 
 /* Remove a pool_type from the global shrinker list and free all pages */
@@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
 	struct page *p;
 
-	mutex_lock(&shrinker_lock);
+	spin_lock(&shrinker_lock);
 	list_del(&pt->shrinker_list);
-	mutex_unlock(&shrinker_lock);
+	spin_unlock(&shrinker_lock);
 
 	while ((p = ttm_pool_type_take(pt)))
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -313,24 +313,19 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
 static unsigned int ttm_pool_shrink(void)
 {
 	struct ttm_pool_type *pt;
-	unsigned int num_freed;
 	struct page *p;
 
-	mutex_lock(&shrinker_lock);
+	spin_lock(&shrinker_lock);
 	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+	list_move_tail(&pt->shrinker_list, &shrinker_list);
+	spin_unlock(&shrinker_lock);
 
 	p = ttm_pool_type_take(pt);
-	if (p) {
-		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
-		num_freed = 1 << pt->order;
-	} else {
-		num_freed = 0;
-	}
-
-	list_move_tail(&pt->shrinker_list, &shrinker_list);
-	mutex_unlock(&shrinker_lock);
+	if (!p)
+		return 0;
 
-	return num_freed;
+	ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+	return 1 << pt->order;
 }
 
 /* Return the allocation order based for a page */
@@ -530,6 +525,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
 			for (j = 0; j < MAX_ORDER; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
 	}
+
+	/* We removed the pool types from the LRU, but we need to also make sure
+	 * that no shrinker is concurrently freeing pages from the pool.
+	 */
+	sync_shrinkers();
 }
 
 /* As long as pages are available make sure to release at least one */
@@ -604,7 +604,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
 {
 	ttm_pool_debugfs_header(m);
 
-	mutex_lock(&shrinker_lock);
+	spin_lock(&shrinker_lock);
 	seq_puts(m, "wc\t:");
 	ttm_pool_debugfs_orders(global_write_combined, m);
 	seq_puts(m, "uc\t:");
@@ -613,7 +613,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
 	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
 	seq_puts(m, "uc 32\t:");
 	ttm_pool_debugfs_orders(global_dma32_uncached, m);
-	mutex_unlock(&shrinker_lock);
+	spin_unlock(&shrinker_lock);
 
 	ttm_pool_debugfs_footer(m);
 
@@ -640,7 +640,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 
 	ttm_pool_debugfs_header(m);
 
-	mutex_lock(&shrinker_lock);
+	spin_lock(&shrinker_lock);
 	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
 		seq_puts(m, "DMA ");
 		switch (i) {
@@ -656,7 +656,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 		}
 		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
 	}
-	mutex_unlock(&shrinker_lock);
+	spin_unlock(&shrinker_lock);
 
 	ttm_pool_debugfs_footer(m);
 	return 0;
@@ -693,7 +693,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 	if (!page_pool_size)
 		page_pool_size = num_pages;
 
-	mutex_init(&shrinker_lock);
+	spin_lock_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
 
 	for (i = 0; i < MAX_ORDER; ++i) {
-- 
2.25.1



* Re: [PATCH 1/2] mm/vmscan: add sync_shrinkers function
  2021-04-15 11:56 [PATCH 1/2] mm/vmscan: add sync_shrinkers function Christian König
  2021-04-15 11:56 ` [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2 Christian König
@ 2021-04-15 13:23 ` Daniel Vetter
  1 sibling, 0 replies; 10+ messages in thread
From: Daniel Vetter @ 2021-04-15 13:23 UTC (permalink / raw)
  To: Christian König
  Cc: linux-mm, linux-kernel, dri-devel, vbabka, daniel, ray.huang, akpm

On Thu, Apr 15, 2021 at 01:56:23PM +0200, Christian König wrote:
> To be able to switch to a spinlock and reduce lock contention in the TTM
> shrinker we don't want to hold a mutex while unmapping and freeing pages
> from the pool.
> 
> But then we somehow need to prevent a race between (for example) the shrinker
> trying to free pages and hotplug trying to remove the device which those pages
> belong to.
> 
> Taking and releasing the shrinker semaphore on the write side after
> unmapping and freeing all pages should make sure that no shrinker is running in
> paralell any more.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  include/linux/shrinker.h |  1 +
>  mm/vmscan.c              | 10 ++++++++++
>  2 files changed, 11 insertions(+)
> 
> diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
> index 0f80123650e2..6b75dc372fce 100644
> --- a/include/linux/shrinker.h
> +++ b/include/linux/shrinker.h
> @@ -92,4 +92,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker);
>  extern int register_shrinker(struct shrinker *shrinker);
>  extern void unregister_shrinker(struct shrinker *shrinker);
>  extern void free_prealloced_shrinker(struct shrinker *shrinker);
> +extern void sync_shrinkers(void);
>  #endif
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 562e87cbd7a1..46cd9c215d73 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -408,6 +408,16 @@ void unregister_shrinker(struct shrinker *shrinker)
>  }
>  EXPORT_SYMBOL(unregister_shrinker);
>  
> +/**
> + * sync_shrinker - Wait for all running shrinkers to complete.

Maybe make it clear this is a barrier-type thing; it won't stop shrinkers
at all, just synchronize with them.
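
Something like this maybe (just a quick sketch, exact wording up to you):

/**
 * sync_shrinkers - Wait for all currently running shrinkers to complete.
 *
 * This acts as a barrier: it does not block new shrinker invocations from
 * starting afterwards, it only waits until every shrinker that was already
 * running when it was called has finished.
 */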

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>

But needs an ack from Andrew for merging through drm-misc-next before we
push it there.
-Daniel

> + */
> +void sync_shrinkers(void)
> +{
> +	down_write(&shrinker_rwsem);
> +	up_write(&shrinker_rwsem);
> +}
> +EXPORT_SYMBOL(sync_shrinkers);
> +
>  #define SHRINK_BATCH 128
>  
>  static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


* Re: [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2
  2021-04-15 11:56 ` [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2 Christian König
@ 2021-04-15 14:33   ` Huang Rui
  2021-04-15 20:33   ` Andrew Morton
  1 sibling, 0 replies; 10+ messages in thread
From: Huang Rui @ 2021-04-15 14:33 UTC (permalink / raw)
  To: Christian König
  Cc: linux-mm, linux-kernel, dri-devel, vbabka, daniel, akpm

On Thu, Apr 15, 2021 at 07:56:24PM +0800, Christian König wrote:
> Switch back to using a spinlock again by moving the IOMMU unmap outside
> of the locked region.
> 
> v2: Add a comment explaining why we need sync_shrinkers().
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Series looks good to me as well.

Acked-by: Huang Rui <ray.huang@amd.com>

> ---
>  drivers/gpu/drm/ttm/ttm_pool.c | 44 +++++++++++++++++-----------------
>  1 file changed, 22 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
> index cb38b1a17b09..955836d569cc 100644
> --- a/drivers/gpu/drm/ttm/ttm_pool.c
> +++ b/drivers/gpu/drm/ttm/ttm_pool.c
> @@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
>  static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
>  static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
>  
> -static struct mutex shrinker_lock;
> +static spinlock_t shrinker_lock;
>  static struct list_head shrinker_list;
>  static struct shrinker mm_shrinker;
>  
> @@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
>  	spin_lock_init(&pt->lock);
>  	INIT_LIST_HEAD(&pt->pages);
>  
> -	mutex_lock(&shrinker_lock);
> +	spin_lock(&shrinker_lock);
>  	list_add_tail(&pt->shrinker_list, &shrinker_list);
> -	mutex_unlock(&shrinker_lock);
> +	spin_unlock(&shrinker_lock);
>  }
>  
>  /* Remove a pool_type from the global shrinker list and free all pages */
> @@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
>  {
>  	struct page *p;
>  
> -	mutex_lock(&shrinker_lock);
> +	spin_lock(&shrinker_lock);
>  	list_del(&pt->shrinker_list);
> -	mutex_unlock(&shrinker_lock);
> +	spin_unlock(&shrinker_lock);
>  
>  	while ((p = ttm_pool_type_take(pt)))
>  		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
> @@ -313,24 +313,19 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
>  static unsigned int ttm_pool_shrink(void)
>  {
>  	struct ttm_pool_type *pt;
> -	unsigned int num_freed;
>  	struct page *p;
>  
> -	mutex_lock(&shrinker_lock);
> +	spin_lock(&shrinker_lock);
>  	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
> +	list_move_tail(&pt->shrinker_list, &shrinker_list);
> +	spin_unlock(&shrinker_lock);
>  
>  	p = ttm_pool_type_take(pt);
> -	if (p) {
> -		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
> -		num_freed = 1 << pt->order;
> -	} else {
> -		num_freed = 0;
> -	}
> -
> -	list_move_tail(&pt->shrinker_list, &shrinker_list);
> -	mutex_unlock(&shrinker_lock);
> +	if (!p)
> +		return 0;
>  
> -	return num_freed;
> +	ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
> +	return 1 << pt->order;
>  }
>  
>  /* Return the allocation order based for a page */
> @@ -530,6 +525,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
>  			for (j = 0; j < MAX_ORDER; ++j)
>  				ttm_pool_type_fini(&pool->caching[i].orders[j]);
>  	}
> +
> +	/* We removed the pool types from the LRU, but we need to also make sure
> +	 * that no shrinker is concurrently freeing pages from the pool.
> +	 */
> +	sync_shrinkers();
>  }
>  
>  /* As long as pages are available make sure to release at least one */
> @@ -604,7 +604,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
>  {
>  	ttm_pool_debugfs_header(m);
>  
> -	mutex_lock(&shrinker_lock);
> +	spin_lock(&shrinker_lock);
>  	seq_puts(m, "wc\t:");
>  	ttm_pool_debugfs_orders(global_write_combined, m);
>  	seq_puts(m, "uc\t:");
> @@ -613,7 +613,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
>  	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
>  	seq_puts(m, "uc 32\t:");
>  	ttm_pool_debugfs_orders(global_dma32_uncached, m);
> -	mutex_unlock(&shrinker_lock);
> +	spin_unlock(&shrinker_lock);
>  
>  	ttm_pool_debugfs_footer(m);
>  
> @@ -640,7 +640,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
>  
>  	ttm_pool_debugfs_header(m);
>  
> -	mutex_lock(&shrinker_lock);
> +	spin_lock(&shrinker_lock);
>  	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
>  		seq_puts(m, "DMA ");
>  		switch (i) {
> @@ -656,7 +656,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
>  		}
>  		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
>  	}
> -	mutex_unlock(&shrinker_lock);
> +	spin_unlock(&shrinker_lock);
>  
>  	ttm_pool_debugfs_footer(m);
>  	return 0;
> @@ -693,7 +693,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
>  	if (!page_pool_size)
>  		page_pool_size = num_pages;
>  
> -	mutex_init(&shrinker_lock);
> +	spin_lock_init(&shrinker_lock);
>  	INIT_LIST_HEAD(&shrinker_list);
>  
>  	for (i = 0; i < MAX_ORDER; ++i) {
> -- 
> 2.25.1
> 


* Re: [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2
  2021-04-15 11:56 ` [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2 Christian König
  2021-04-15 14:33   ` Huang Rui
@ 2021-04-15 20:33   ` Andrew Morton
  2021-04-16  7:08     ` Christian König
  1 sibling, 1 reply; 10+ messages in thread
From: Andrew Morton @ 2021-04-15 20:33 UTC (permalink / raw)
  To:  Christian König 
  Cc: linux-mm, linux-kernel, dri-devel, vbabka, daniel, ray.huang

On Thu, 15 Apr 2021 13:56:24 +0200 "Christian König" <ckoenig.leichtzumerken@gmail.com> wrote:

> @@ -530,6 +525,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
>  			for (j = 0; j < MAX_ORDER; ++j)
>  				ttm_pool_type_fini(&pool->caching[i].orders[j]);
>  	}
> +
> +	/* We removed the pool types from the LRU, but we need to also make sure
> +	 * that no shrinker is concurrently freeing pages from the pool.
> +	 */
> +	sync_shrinkers();

It isn't immediately clear to me how this works.  ttm_pool_fini() has
already freed all the pages, hasn't it?  So why would it care if some
shrinkers are still playing with the pages?

Or is it the case that ttm_pool_fini() is assuming that there will be
some further action against these pages, which requires that shrinkers
no longer be accessing the pages and which further assumes that future
shrinker invocations will not be able to look up these pages?

IOW, a bit more explanation about the dynamics here would help!


* Re: [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2
  2021-04-15 20:33   ` Andrew Morton
@ 2021-04-16  7:08     ` Christian König
  2021-04-26 11:15       ` Christian König
  0 siblings, 1 reply; 10+ messages in thread
From: Christian König @ 2021-04-16  7:08 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, dri-devel, vbabka, daniel, ray.huang

On 15.04.21 at 22:33, Andrew Morton wrote:
> On Thu, 15 Apr 2021 13:56:24 +0200 "Christian König" <ckoenig.leichtzumerken@gmail.com> wrote:
>
>> @@ -530,6 +525,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
>>   			for (j = 0; j < MAX_ORDER; ++j)
>>   				ttm_pool_type_fini(&pool->caching[i].orders[j]);
>>   	}
>> +
>> +	/* We removed the pool types from the LRU, but we need to also make sure
>> +	 * that no shrinker is concurrently freeing pages from the pool.
>> +	 */
>> +	sync_shrinkers();
> It isn't immediately clear to me how this works.  ttm_pool_fini() has
> already freed all the pages hasn't it?  So why would it care if some
> shrinkers are still playing with the pages?

Yes, ttm_pool_fini() has freed up all pages which had been in the pool
when the function was called.

But the problem is that it is possible a parallel-running shrinker has
taken a page from the pool and is in the process of freeing it up.

When I return here, the pool structure and especially the device
structure are freed while the parallel-running shrinker is still using them.

I could go for a design where we have one shrinker per device instead,
but that would put a bit too much pressure on the pool in my opinion.
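
To make the window concrete, here is the shrink path from patch 2 once
more, with the problematic spot annotated (annotation only for this mail,
not part of the patch):

static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	struct page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	/* From here on we use pt without holding shrinker_lock.
	 * ttm_pool_type_fini() can now unlink pt and free the pages still in
	 * the pool, but the pool and device structures behind pt->pool must
	 * stay valid until this invocation returns - that is exactly what the
	 * sync_shrinkers() call at the end of ttm_pool_fini() waits for.
	 */
	p = ttm_pool_type_take(pt);
	if (!p)
		return 0;

	ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
	return 1 << pt->order;
}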

> Or is it the case that ttm_pool_fini() is assuming that there will be
> some further action against these pages, which requires that shrinkers
> no longer be accessing the pages and which further assumes that future
> shrinker invocations will not be able to look up these pages?
>
> IOW, a bit more explanation about the dynamics here would help!

Sorry, I'm not a native speaker of English and sometimes still have a 
hard time explaining things.

Regards,
Christian.


* Re: [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2
  2021-04-16  7:08     ` Christian König
@ 2021-04-26 11:15       ` Christian König
  0 siblings, 0 replies; 10+ messages in thread
From: Christian König @ 2021-04-26 11:15 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, dri-devel, vbabka, daniel, ray.huang

Just a gentle ping?

Are you ok with this explanation, Andrew, or should I look for a different
approach?

Thanks,
Christian.

On 16.04.21 at 09:08, Christian König wrote:
> On 15.04.21 at 22:33, Andrew Morton wrote:
>> On Thu, 15 Apr 2021 13:56:24 +0200 "Christian König" 
>> <ckoenig.leichtzumerken@gmail.com> wrote:
>>
>>> @@ -530,6 +525,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
>>>               for (j = 0; j < MAX_ORDER; ++j)
>>> ttm_pool_type_fini(&pool->caching[i].orders[j]);
>>>       }
>>> +
>>> +    /* We removed the pool types from the LRU, but we need to also 
>>> make sure
>>> +     * that no shrinker is concurrently freeing pages from the pool.
>>> +     */
>>> +    sync_shrinkers();
>> It isn't immediately clear to me how this works. ttm_pool_fini() has
>> already freed all the pages hasn't it?  So why would it care if some
>> shrinkers are still playing with the pages?
>
> Yes ttm_pool_fini() has freed up all pages which had been in the pool 
> when the function was called.
>
> But the problem is it is possible that a parallel running shrinker has 
> taken a page from the pool and is in the process of freeing it up.
>
> When I return here the pool structure and especially the device 
> structure are freed while the parallel running shrinker is still using 
> them.
>
> I could go for a design where we have one shrinker per device instead, 
> but that would put a bit to much pressure on the pool in my opinion.
>
>> Or is it the case that ttm_pool_fini() is assuming that there will be
>> some further action against these pages, which requires that shrinkers
>> no longer be accessing the pages and which further assumes that future
>> shrinker invocations will not be able to look up these pages?
>>
>> IOW, a bit more explanation about the dynamics here would help!
>
> Sorry, I'm not a native speaker of English and sometimes still have a 
> hard time explaining things.
>
> Regards,
> Christian.



* Re: [PATCH 1/2] mm/vmscan: add sync_shrinkers function
  2021-04-09 11:00 ` Vlastimil Babka
@ 2021-04-09 11:04   ` Christian König
  0 siblings, 0 replies; 10+ messages in thread
From: Christian König @ 2021-04-09 11:04 UTC (permalink / raw)
  To: Vlastimil Babka, dri-devel, linux-kernel, linux-mm
  Cc: ray.huang, daniel, akpm

On 09.04.21 at 13:00, Vlastimil Babka wrote:
> On 4/9/21 9:17 AM, Christian König wrote:
>> To be able to switch to a spinlock and reduce lock contention in the TTM
>> shrinker we don't want to hold a mutex while unmapping and freeing pages
>> from the pool.
> Does using spinlock instead of mutex really reduce lock contention?

Well, using the spinlock instead of the mutex is only the cherry on the cake.

The real improvement for the contention is the fact that we just grab
the next pool and drop the lock again instead of doing the whole IOMMU
unmap and CPU TLB flush dance while holding the lock.
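
For comparison, this is roughly what the old critical section looked like
(paraphrased from the current ttm_pool_shrink(), comments added just for
this mail):

	mutex_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);

	p = ttm_pool_type_take(pt);
	if (p) {
		/* The IOMMU unmap and CPU TLB flushing happen in here, while
		 * every other shrinker invocation and every pool init/fini
		 * is blocked on the mutex.
		 */
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_freed = 1 << pt->order;
	} else {
		num_freed = 0;
	}

	list_move_tail(&pt->shrinker_list, &shrinker_list);
	mutex_unlock(&shrinker_lock);

With the patch, the lock is dropped right after the list_move_tail(), so
only picking and rotating the pool is serialized.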

>> But then we somehow need to prevent a race between (for example) the shrinker
>> trying to free pages and hotplug trying to remove the device which those pages
>> belong to.
>>
>> Taking and releasing the shrinker semaphore on the write side after
>> unmapping and freeing all pages should make sure that no shrinker is running in
>> paralell any more.
> So you explain this in this commit log for adding the function, but then the
> next patch just adds a sync_shrinkers() call without any comment. I would expect
> there a comment explaining why it's done there - what it protects against, as
> it's not an obvious pattern IMHO.

Good point, going to add a comment.

Thanks,
Christian.

>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   include/linux/shrinker.h |  1 +
>>   mm/vmscan.c              | 10 ++++++++++
>>   2 files changed, 11 insertions(+)
>>
>> diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
>> index 0f80123650e2..6b75dc372fce 100644
>> --- a/include/linux/shrinker.h
>> +++ b/include/linux/shrinker.h
>> @@ -92,4 +92,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker);
>>   extern int register_shrinker(struct shrinker *shrinker);
>>   extern void unregister_shrinker(struct shrinker *shrinker);
>>   extern void free_prealloced_shrinker(struct shrinker *shrinker);
>> +extern void sync_shrinkers(void);
>>   #endif
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index 562e87cbd7a1..46cd9c215d73 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -408,6 +408,16 @@ void unregister_shrinker(struct shrinker *shrinker)
>>   }
>>   EXPORT_SYMBOL(unregister_shrinker);
>>   
>> +/**
>> + * sync_shrinker - Wait for all running shrinkers to complete.
>> + */
>> +void sync_shrinkers(void)
>> +{
>> +	down_write(&shrinker_rwsem);
>> +	up_write(&shrinker_rwsem);
>> +}
>> +EXPORT_SYMBOL(sync_shrinkers);
>> +
>>   #define SHRINK_BATCH 128
>>   
>>   static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>>



* Re: [PATCH 1/2] mm/vmscan: add sync_shrinkers function
  2021-04-09  7:17 Christian König
@ 2021-04-09 11:00 ` Vlastimil Babka
  2021-04-09 11:04   ` Christian König
  0 siblings, 1 reply; 10+ messages in thread
From: Vlastimil Babka @ 2021-04-09 11:00 UTC (permalink / raw)
  To: Christian König, dri-devel, linux-kernel, linux-mm
  Cc: ray.huang, daniel, akpm

On 4/9/21 9:17 AM, Christian König wrote:
> To be able to switch to a spinlock and reduce lock contention in the TTM
> shrinker we don't want to hold a mutex while unmapping and freeing pages
> from the pool.

Does using a spinlock instead of a mutex really reduce lock contention?

> But then we somehow need to prevent a race between (for example) the shrinker
> trying to free pages and hotplug trying to remove the device which those pages
> belong to.
> 
> Taking and releasing the shrinker semaphore on the write side after
> unmapping and freeing all pages should make sure that no shrinker is running in
> paralell any more.

So you explain this in the commit log for adding the function, but then the
next patch just adds a sync_shrinkers() call without any comment. I would
expect a comment there explaining why it's done - what it protects against,
as it's not an obvious pattern IMHO.

> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  include/linux/shrinker.h |  1 +
>  mm/vmscan.c              | 10 ++++++++++
>  2 files changed, 11 insertions(+)
> 
> diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
> index 0f80123650e2..6b75dc372fce 100644
> --- a/include/linux/shrinker.h
> +++ b/include/linux/shrinker.h
> @@ -92,4 +92,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker);
>  extern int register_shrinker(struct shrinker *shrinker);
>  extern void unregister_shrinker(struct shrinker *shrinker);
>  extern void free_prealloced_shrinker(struct shrinker *shrinker);
> +extern void sync_shrinkers(void);
>  #endif
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 562e87cbd7a1..46cd9c215d73 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -408,6 +408,16 @@ void unregister_shrinker(struct shrinker *shrinker)
>  }
>  EXPORT_SYMBOL(unregister_shrinker);
>  
> +/**
> + * sync_shrinker - Wait for all running shrinkers to complete.
> + */
> +void sync_shrinkers(void)
> +{
> +	down_write(&shrinker_rwsem);
> +	up_write(&shrinker_rwsem);
> +}
> +EXPORT_SYMBOL(sync_shrinkers);
> +
>  #define SHRINK_BATCH 128
>  
>  static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> 



* [PATCH 1/2] mm/vmscan: add sync_shrinkers function
@ 2021-04-09  7:17 Christian König
  2021-04-09 11:00 ` Vlastimil Babka
  0 siblings, 1 reply; 10+ messages in thread
From: Christian König @ 2021-04-09  7:17 UTC (permalink / raw)
  To: dri-devel, linux-kernel, linux-mm; +Cc: ray.huang, daniel, akpm

To be able to switch to a spinlock and reduce lock contention in the TTM
shrinker, we don't want to hold a mutex while unmapping and freeing pages
from the pool.

But then we somehow need to prevent a race between (for example) the shrinker
trying to free pages and hotplug trying to remove the device to which those
pages belong.

Taking and releasing the shrinker semaphore on the write side after
unmapping and freeing all pages should make sure that no shrinker is running in
parallel any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 include/linux/shrinker.h |  1 +
 mm/vmscan.c              | 10 ++++++++++
 2 files changed, 11 insertions(+)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 0f80123650e2..6b75dc372fce 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -92,4 +92,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker);
 extern int register_shrinker(struct shrinker *shrinker);
 extern void unregister_shrinker(struct shrinker *shrinker);
 extern void free_prealloced_shrinker(struct shrinker *shrinker);
+extern void sync_shrinkers(void);
 #endif
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 562e87cbd7a1..46cd9c215d73 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -408,6 +408,16 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+/**
+ * sync_shrinkers - Wait for all running shrinkers to complete.
+ */
+void sync_shrinkers(void)
+{
+	down_write(&shrinker_rwsem);
+	up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL(sync_shrinkers);
+
 #define SHRINK_BATCH 128
 
 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
-- 
2.25.1



Thread overview: 10+ messages
2021-04-15 11:56 [PATCH 1/2] mm/vmscan: add sync_shrinkers function Christian König
2021-04-15 11:56 ` [PATCH 2/2] drm/ttm: optimize the pool shrinker a bit v2 Christian König
2021-04-15 14:33   ` Huang Rui
2021-04-15 20:33   ` Andrew Morton
2021-04-16  7:08     ` Christian König
2021-04-26 11:15       ` Christian König
2021-04-15 13:23 ` [PATCH 1/2] mm/vmscan: add sync_shrinkers function Daniel Vetter
  -- strict thread matches above, loose matches on Subject: below --
2021-04-09  7:17 Christian König
2021-04-09 11:00 ` Vlastimil Babka
2021-04-09 11:04   ` Christian König
