* drm/ttm: moving the LRU into the resource
@ 2021-11-24 12:44 Christian König
  2021-11-24 12:44 ` [PATCH 01/12] drm/ttm: add ttm_resource_fini Christian König
                   ` (12 more replies)
  0 siblings, 13 replies; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

Hi guys,

I've already sent this patch set out a couple of times.

This fixes the fundamental problem in TTM that during a move a buffer
has resources allocated from two different domains at the same time.

In addition to that, it's a prerequisite for removing ghost objects and
for allowing multiple resources to be allocated per BO.
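
To make the core idea concrete, here is a purely conceptual sketch (not
the real headers, and the struct name is made up): the LRU link moves
from the buffer object into the resource, so each allocation is tracked
on its manager's LRU independently of the BO that currently owns it, and
both resources of an in-flight move stay individually visible.

struct example_resource {
        struct ttm_buffer_object *bo;   /* weak backlink, added in patch 03 */
        struct list_head lru;           /* per-resource LRU link, added in patch 05 */
        /* start, num_pages, mem_type, placement, bus, ... as before */
};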

As far as I can see I've addressed all previous review comments, but it's
probably better to take another look.

Regards,
Christian.



* [PATCH 01/12] drm/ttm: add ttm_resource_fini
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-26  6:48   ` Huang Rui
  2021-11-24 12:44 ` [PATCH 02/12] drm/ttm: add back a reference to the bdev to the res manager Christian König
                   ` (11 subsequent siblings)
  12 siblings, 1 reply; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

Make sure we call the common cleanup function in all
implementations of the resource manager.
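
As a hedged illustration of what the expected pairing looks like for a
driver after this patch (function names are made up, the shape follows
the ttm_sys_manager changes below): ttm_resource_init() at allocation
time is now always matched by ttm_resource_fini() before the backing
structure is freed, including on error paths.

static int example_man_alloc(struct ttm_resource_manager *man,
                             struct ttm_buffer_object *bo,
                             const struct ttm_place *place,
                             struct ttm_resource **res)
{
        *res = kzalloc(sizeof(**res), GFP_KERNEL);
        if (!*res)
                return -ENOMEM;

        ttm_resource_init(bo, place, *res);
        return 0;
}

static void example_man_free(struct ttm_resource_manager *man,
                             struct ttm_resource *res)
{
        ttm_resource_fini(man, res);    /* common cleanup first */
        kfree(res);                     /* then the driver allocation */
}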

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c     | 2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c    | 2 ++
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c   | 1 +
 drivers/gpu/drm/nouveau/nouveau_mem.c           | 3 ++-
 drivers/gpu/drm/nouveau/nouveau_mem.h           | 3 ++-
 drivers/gpu/drm/nouveau/nouveau_ttm.c           | 9 +++++----
 drivers/gpu/drm/ttm/ttm_range_manager.c         | 2 ++
 drivers/gpu/drm/ttm/ttm_resource.c              | 6 ++++++
 drivers/gpu/drm/ttm/ttm_sys_manager.c           | 1 +
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c   | 2 ++
 drivers/gpu/drm/vmwgfx/vmwgfx_thp.c             | 3 ++-
 include/drm/ttm/ttm_resource.h                  | 3 +++
 13 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 675a72ef305d..ea5470c8c921 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -169,6 +169,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	return 0;
 
 err_free:
+	ttm_resource_fini(man, &node->base.base);
 	kfree(node);
 
 err_out:
@@ -200,6 +201,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
 		atomic64_sub(res->num_pages, &mgr->used);
 
+	ttm_resource_fini(man, res);
 	kfree(node);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index d02c8637f909..ffddec08e931 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -95,6 +95,7 @@ static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
 	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
 
 	atomic64_sub(res->num_pages, &mgr->used);
+	ttm_resource_fini(man, res);
 	kfree(res);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 7b2b0980ec41..55d68408951d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -476,6 +476,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	while (i--)
 		drm_mm_remove_node(&node->mm_nodes[i]);
 	spin_unlock(&mgr->lock);
+	ttm_resource_fini(man, &node->base);
 	kvfree(node);
 
 error_sub:
@@ -515,6 +516,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 	atomic64_sub(usage, &mgr->usage);
 	atomic64_sub(vis_usage, &mgr->vis_usage);
 
+	ttm_resource_fini(man, res);
 	kvfree(node);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index d59fbb019032..ca3ca1f7f850 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -123,6 +123,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
 	i915_buddy_free_list(&bman->mm, &bman_res->blocks);
 	mutex_unlock(&bman->lock);
 
+	ttm_resource_fini(man, res);
 	kfree(bman_res);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2ca3207c13fc..2e517cdc24c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -162,11 +162,12 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
 }
 
 void
-nouveau_mem_del(struct ttm_resource *reg)
+nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
 
 	nouveau_mem_fini(mem);
+	ttm_resource_fini(man, reg);
 	kfree(mem);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 2c01166a90f2..325551eba5cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -23,7 +23,8 @@ nouveau_mem(struct ttm_resource *reg)
 
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
 		    struct ttm_resource **);
-void nouveau_mem_del(struct ttm_resource *);
+void nouveau_mem_del(struct ttm_resource_manager *man,
+		     struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
 int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
 void nouveau_mem_fini(struct nouveau_mem *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 2ca9d9a9e5d5..91ef33f8f22c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -36,9 +36,10 @@
 #include <core/tegra.h>
 
 static void
-nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
+nouveau_manager_del(struct ttm_resource_manager *man,
+		    struct ttm_resource *reg)
 {
-	nouveau_mem_del(reg);
+	nouveau_mem_del(man, reg);
 }
 
 static int
@@ -62,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
 
 	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
 	if (ret) {
-		nouveau_mem_del(*res);
+		nouveau_mem_del(man, *res);
 		return ret;
 	}
 
@@ -118,7 +119,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
 			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		nouveau_mem_del(*res);
+		nouveau_mem_del(man, *res);
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 67d68a4a8640..25fcf0d63c2d 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -89,6 +89,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
+		ttm_resource_fini(man, *res);
 		kfree(node);
 		return ret;
 	}
@@ -108,6 +109,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
 	drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&rman->lock);
 
+	ttm_resource_fini(man, res);
 	kfree(node);
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 035d71332d18..89bcfe22a0ca 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -44,6 +44,12 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_resource_init);
 
+void ttm_resource_fini(struct ttm_resource_manager *man,
+		       struct ttm_resource *res)
+{
+}
+EXPORT_SYMBOL(ttm_resource_fini);
+
 int ttm_resource_alloc(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
 		       struct ttm_resource **res_ptr)
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
index 63aca52f75e1..135394dcca95 100644
--- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -23,6 +23,7 @@ static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
 static void ttm_sys_man_free(struct ttm_resource_manager *man,
 			     struct ttm_resource *res)
 {
+	ttm_resource_fini(man, res);
 	kfree(res);
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b2c4af331c9d..bfd686bb8d19 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -116,6 +116,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	gman->used_gmr_pages -= (*res)->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
+	ttm_resource_fini(man, *res);
 	kfree(*res);
 	return -ENOSPC;
 }
@@ -129,6 +130,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
 	spin_lock(&gman->lock);
 	gman->used_gmr_pages -= res->num_pages;
 	spin_unlock(&gman->lock);
+	ttm_resource_fini(man, res);
 	kfree(res);
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 2a3d3468e4e0..4fcbd94ccc11 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -104,6 +104,7 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
+		ttm_resource_fini(man, &node->base);
 		kfree(node);
 	} else {
 		node->base.start = node->mm_nodes[0].start;
@@ -122,7 +123,7 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
 	spin_lock(&rman->lock);
 	drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&rman->lock);
-
+	ttm_resource_fini(man, res);
 	kfree(node);
 }
 
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 5952051091cd..df1f06b7b504 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -261,6 +261,9 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
 void ttm_resource_init(struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_resource *res);
+void ttm_resource_fini(struct ttm_resource_manager *man,
+		       struct ttm_resource *res);
+
 int ttm_resource_alloc(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
 		       struct ttm_resource **res);
-- 
2.25.1


* [PATCH 02/12] drm/ttm: add back a reference to the bdev to the res manager
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
  2021-11-24 12:44 ` [PATCH 01/12] drm/ttm: add ttm_resource_fini Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-26  6:52   ` Huang Rui
  2021-11-24 12:44 ` [PATCH 03/12] drm/ttm: add a weak BO reference to the resource v3 Christian König
                   ` (10 subsequent siblings)
  12 siblings, 1 reply; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

It is simply a lot cleaner to have this around instead of passing
the device through the whole call chain.
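
A minimal sketch of what this means for a driver (the driver structure
and function names here are hypothetical, the call pattern mirrors the
hunks below): the device is handed over once at init time and can later
be retrieved through man->bdev wherever only the manager is available.

static int example_vram_mgr_init(struct example_device *edev,
                                 uint64_t vram_pages)
{
        struct ttm_resource_manager *man = &edev->vram_man;

        man->func = &example_vram_mgr_func;
        ttm_resource_manager_init(man, &edev->bdev, vram_pages);
        ttm_set_driver_manager(&edev->bdev, TTM_PL_VRAM, man);
        ttm_resource_manager_set_used(man, true);
        return 0;
}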

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c     |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c    |  3 ++-
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c   |  2 +-
 drivers/gpu/drm/nouveau/nouveau_ttm.c           |  4 ++--
 drivers/gpu/drm/ttm/ttm_range_manager.c         |  2 +-
 drivers/gpu/drm/ttm/ttm_resource.c              |  3 +++
 drivers/gpu/drm/ttm/ttm_sys_manager.c           |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c   |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_thp.c             |  2 +-
 include/drm/ttm/ttm_resource.h                  | 16 +++++++++-------
 11 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index ea5470c8c921..9e7685a4878c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -293,7 +293,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
 	man->use_tt = true;
 	man->func = &amdgpu_gtt_mgr_func;
 
-	ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);
+	ttm_resource_manager_init(man, &adev->mman.bdev,
+				  gtt_size >> PAGE_SHIFT);
 
 	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
 	size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index ffddec08e931..6f7189d32f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -153,7 +153,7 @@ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
 	man->use_tt = true;
 	man->func = &amdgpu_preempt_mgr_func;
 
-	ttm_resource_manager_init(man, (1 << 30));
+	ttm_resource_manager_init(man, &adev->mman.bdev, (1 << 30));
 
 	atomic64_set(&mgr->used, 0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 55d68408951d..ddd0b6d74070 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -699,7 +699,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
 	struct ttm_resource_manager *man = &mgr->manager;
 
-	ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);
+	ttm_resource_manager_init(man, &adev->mman.bdev,
+				  adev->gmc.real_vram_size >> PAGE_SHIFT);
 
 	man->func = &amdgpu_vram_mgr_func;
 
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index ca3ca1f7f850..ef535e04a88a 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -203,7 +203,7 @@ int i915_ttm_buddy_man_init(struct ttm_device *bdev,
 	man = &bman->manager;
 	man->use_tt = use_tt;
 	man->func = &i915_ttm_buddy_manager_func;
-	ttm_resource_manager_init(man, bman->mm.size >> PAGE_SHIFT);
+	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);
 
 	ttm_resource_manager_set_used(man, true);
 	ttm_set_driver_manager(bdev, type, man);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 91ef33f8f22c..85f1f5a0fe5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -164,7 +164,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm)
 
 		man->func = &nouveau_vram_manager;
 
-		ttm_resource_manager_init(man,
+		ttm_resource_manager_init(man, &drm->ttm.bdev,
 					  drm->gem.vram_available >> PAGE_SHIFT);
 		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
 		ttm_resource_manager_set_used(man, true);
@@ -211,7 +211,7 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
 
 	man->func = func;
 	man->use_tt = true;
-	ttm_resource_manager_init(man, size_pages);
+	ttm_resource_manager_init(man, &drm->ttm.bdev, size_pages);
 	ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
 	ttm_resource_manager_set_used(man, true);
 	return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 25fcf0d63c2d..062dabe6a10e 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -156,7 +156,7 @@ int ttm_range_man_init_nocheck(struct ttm_device *bdev,
 
 	man->func = &ttm_range_manager_func;
 
-	ttm_resource_manager_init(man, p_size);
+	ttm_resource_manager_init(man, bdev, p_size);
 
 	drm_mm_init(&rman->mm, 0, p_size);
 	spin_lock_init(&rman->lock);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 89bcfe22a0ca..41e7bf195168 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -126,16 +126,19 @@ EXPORT_SYMBOL(ttm_resource_compat);
  * ttm_resource_manager_init
  *
  * @man: memory manager object to init
+ * @bdev: ttm device this manager belongs to
  * @p_size: size managed area in pages.
  *
  * Initialise core parts of a manager object.
  */
 void ttm_resource_manager_init(struct ttm_resource_manager *man,
+			       struct ttm_device *bdev,
 			       unsigned long p_size)
 {
 	unsigned i;
 
 	spin_lock_init(&man->move_lock);
+	man->bdev = bdev;
 	man->size = p_size;
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
index 135394dcca95..2ced169513cb 100644
--- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -43,7 +43,7 @@ void ttm_sys_man_init(struct ttm_device *bdev)
 	man->use_tt = true;
 	man->func = &ttm_sys_manager_func;
 
-	ttm_resource_manager_init(man, 0);
+	ttm_resource_manager_init(man, bdev, 0);
 	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
 	ttm_resource_manager_set_used(man, true);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index bfd686bb8d19..4fe4eeb95bf3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -150,7 +150,7 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
 	man->func = &vmw_gmrid_manager_func;
 	/* TODO: This is most likely not correct */
 	man->use_tt = true;
-	ttm_resource_manager_init(man, 0);
+	ttm_resource_manager_init(man, &dev_priv->bdev, 0);
 	spin_lock_init(&gman->lock);
 	gman->used_gmr_pages = 0;
 	ida_init(&gman->gmr_ida);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 4fcbd94ccc11..b8cd89cd624c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -135,7 +135,7 @@ int vmw_thp_init(struct vmw_private *dev_priv)
 	if (!rman)
 		return -ENOMEM;
 
-	ttm_resource_manager_init(&rman->manager,
+	ttm_resource_manager_init(&rman->manager, &dev_priv->bdev,
 				  dev_priv->vram_size >> PAGE_SHIFT);
 
 	rman->manager.func = &vmw_thp_func;
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index df1f06b7b504..6bf37383002b 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -105,11 +105,11 @@ struct ttm_resource_manager_func {
  * @use_type: The memory type is enabled.
  * @use_tt: If a TT object should be used for the backing store.
  * @size: Size of the managed region.
+ * @bdev: ttm device this manager belongs to
  * @func: structure pointer implementing the range manager. See above
  * @move_lock: lock for move fence
- * static information. bdev::driver::io_mem_free is never used.
- * @lru: The lru list for this memory type.
  * @move: The fence of the last pipelined move operation.
+ * @lru: The lru list for this memory type.
  *
  * This structure is used to identify and manage memory types for a device.
  */
@@ -119,20 +119,21 @@ struct ttm_resource_manager {
 	 */
 	bool use_type;
 	bool use_tt;
+	struct ttm_device *bdev;
 	uint64_t size;
 	const struct ttm_resource_manager_func *func;
 	spinlock_t move_lock;
 
 	/*
-	 * Protected by the global->lru_lock.
+	 * Protected by @move_lock.
 	 */
-
-	struct list_head lru[TTM_MAX_BO_PRIORITY];
+	struct dma_fence *move;
 
 	/*
-	 * Protected by @move_lock.
+	 * Protected by the global->lru_lock.
 	 */
-	struct dma_fence *move;
+
+	struct list_head lru[TTM_MAX_BO_PRIORITY];
 };
 
 /**
@@ -272,6 +273,7 @@ bool ttm_resource_compat(struct ttm_resource *res,
 			 struct ttm_placement *placement);
 
 void ttm_resource_manager_init(struct ttm_resource_manager *man,
+			       struct ttm_device *bdev,
 			       unsigned long p_size);
 
 int ttm_resource_manager_evict_all(struct ttm_device *bdev,
-- 
2.25.1


* [PATCH 03/12] drm/ttm: add a weak BO reference to the resource v3
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
  2021-11-24 12:44 ` [PATCH 01/12] drm/ttm: add ttm_resource_fini Christian König
  2021-11-24 12:44 ` [PATCH 02/12] drm/ttm: add back a reference to the bdev to the res manager Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-26  7:10   ` Huang Rui
  2021-11-26  7:43   ` Thomas Hellström
  2021-11-24 12:44 ` [PATCH 04/12] drm/ttm: add common accounting to the resource mgr v2 Christian König
                   ` (9 subsequent siblings)
  12 siblings, 2 replies; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

Keep track of which BO a resource was allocated for.
This is necessary for moving the LRU handling into the resources.

A bit problematic is i915, since it tries to use the resource
interface without a BO, which is illegal from a conceptual
point of view.

v2: Document that this is a weak reference and add a workaround for i915
v3: further document that this is protected by ttm_device::lru_lock and
    clarify the i915 workaround
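
A rough sketch of the access pattern the weak reference requires (the
helper name is made up, the locking rule is the point): res->bo may only
be dereferenced while ttm_device::lru_lock is held, and a real reference
has to be taken before that lock is dropped.

static struct ttm_buffer_object *
example_res_get_bo(struct ttm_device *bdev, struct ttm_resource *res)
{
        struct ttm_buffer_object *bo;

        spin_lock(&bdev->lru_lock);
        bo = res->bo;           /* can be NULL, e.g. for the i915 workaround */
        if (bo && !ttm_bo_get_unless_zero(bo))
                bo = NULL;      /* the BO is already being destroyed */
        spin_unlock(&bdev->lru_lock);

        return bo;              /* caller releases it again with ttm_bo_put() */
}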

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo_util.c  | 7 +++++--
 drivers/gpu/drm/ttm/ttm_resource.c | 9 +++++++++
 include/drm/ttm/ttm_resource.h     | 4 ++++
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 95de2691ee7c..a2e3a9626198 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -237,6 +237,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	if (bo->type != ttm_bo_type_sg)
 		fbo->base.base.resv = &fbo->base.base._resv;
 
+	if (fbo->base.resource) {
+		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+		bo->resource = NULL;
+	}
+
 	dma_resv_init(&fbo->base.base._resv);
 	fbo->base.base.dev = NULL;
 	ret = dma_resv_trylock(&fbo->base.base._resv);
@@ -512,7 +517,6 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
 		ghost_obj->ttm = NULL;
 	else
 		bo->ttm = NULL;
-	bo->resource = NULL;
 
 	dma_resv_unlock(&ghost_obj->base._resv);
 	ttm_bo_put(ghost_obj);
@@ -637,7 +641,6 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 	dma_resv_unlock(&ghost->base._resv);
 	ttm_bo_put(ghost);
 	bo->ttm = ttm;
-	bo->resource = NULL;
 	ttm_bo_assign_mem(bo, sys_res);
 	return 0;
 
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 41e7bf195168..7fdd58b53c61 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -41,6 +41,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 	res->bus.offset = 0;
 	res->bus.is_iomem = false;
 	res->bus.caching = ttm_cached;
+	res->bo = bo;
 }
 EXPORT_SYMBOL(ttm_resource_init);
 
@@ -122,6 +123,14 @@ bool ttm_resource_compat(struct ttm_resource *res,
 }
 EXPORT_SYMBOL(ttm_resource_compat);
 
+void ttm_resource_set_bo(struct ttm_resource *res,
+			 struct ttm_buffer_object *bo)
+{
+	spin_lock(&bo->bdev->lru_lock);
+	res->bo = bo;
+	spin_unlock(&bo->bdev->lru_lock);
+}
+
 /**
  * ttm_resource_manager_init
  *
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 6bf37383002b..69eea9d6399b 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -161,6 +161,7 @@ struct ttm_bus_placement {
  * @mem_type: Resource type of the allocation.
  * @placement: Placement flags.
  * @bus: Placement on io bus accessible to the CPU
+ * @bo: weak reference to the BO, protected by ttm_device::lru_lock
  *
  * Structure indicating the placement and space resources used by a
  * buffer object.
@@ -171,6 +172,7 @@ struct ttm_resource {
 	uint32_t mem_type;
 	uint32_t placement;
 	struct ttm_bus_placement bus;
+	struct ttm_buffer_object *bo;
 };
 
 /**
@@ -271,6 +273,8 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
 void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
 bool ttm_resource_compat(struct ttm_resource *res,
 			 struct ttm_placement *placement);
+void ttm_resource_set_bo(struct ttm_resource *res,
+			 struct ttm_buffer_object *bo);
 
 void ttm_resource_manager_init(struct ttm_resource_manager *man,
 			       struct ttm_device *bdev,
-- 
2.25.1


* [PATCH 04/12] drm/ttm: add common accounting to the resource mgr v2
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (2 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 03/12] drm/ttm: add a weak BO reference to the resource v3 Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-26  7:35   ` Huang Rui
  2021-11-24 12:44 ` [PATCH 05/12] drm/ttm: move the LRU into resource handling v2 Christian König
                   ` (8 subsequent siblings)
  12 siblings, 1 reply; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

It makes sense to have this in the common manager for debugging and
for accounting how much of each resource domain is used.

v2: cleanup kerneldoc a bit
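
With that, a driver no longer needs its own atomic counters to answer
"how much of this domain is in use"; something like the following
hypothetical debugfs-style helper is enough. Note that at this point in
the series man->size is still in pages while the usage counter is in
bytes.

static void example_print_vram_usage(struct ttm_device *bdev,
                                     struct drm_printer *p)
{
        struct ttm_resource_manager *man =
                ttm_manager_type(bdev, TTM_PL_VRAM);

        drm_printf(p, "VRAM: %llu of %llu bytes used\n",
                   ttm_resource_manager_usage(man),
                   man->size << PAGE_SHIFT);
}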

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_resource.c |  8 ++++++++
 include/drm/ttm/ttm_resource.h     | 20 +++++++++++++++++++-
 2 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7fdd58b53c61..b8362492980d 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -33,6 +33,8 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_resource *res)
 {
+	struct ttm_resource_manager *man;
+
 	res->start = 0;
 	res->num_pages = PFN_UP(bo->base.size);
 	res->mem_type = place->mem_type;
@@ -42,12 +44,16 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 	res->bus.is_iomem = false;
 	res->bus.caching = ttm_cached;
 	res->bo = bo;
+
+	man = ttm_manager_type(bo->bdev, place->mem_type);
+	atomic64_add(bo->base.size, &man->usage);
 }
 EXPORT_SYMBOL(ttm_resource_init);
 
 void ttm_resource_fini(struct ttm_resource_manager *man,
 		       struct ttm_resource *res)
 {
+	atomic64_sub(res->bo->base.size, &man->usage);
 }
 EXPORT_SYMBOL(ttm_resource_fini);
 
@@ -149,6 +155,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
 	spin_lock_init(&man->move_lock);
 	man->bdev = bdev;
 	man->size = p_size;
+	atomic64_set(&man->usage, 0);
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
 		INIT_LIST_HEAD(&man->lru[i]);
@@ -221,6 +228,7 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
 	drm_printf(p, "  use_type: %d\n", man->use_type);
 	drm_printf(p, "  use_tt: %d\n", man->use_tt);
 	drm_printf(p, "  size: %llu\n", man->size);
+	drm_printf(p, "  usage: %llu\n", atomic64_read(&man->usage));
 	if (man->func->debug)
 		man->func->debug(man, p);
 }
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 69eea9d6399b..3d391279b33f 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -27,6 +27,7 @@
 
 #include <linux/types.h>
 #include <linux/mutex.h>
+#include <linux/atomic.h>
 #include <linux/dma-buf-map.h>
 #include <linux/dma-fence.h>
 #include <drm/drm_print.h>
@@ -132,8 +133,12 @@ struct ttm_resource_manager {
 	/*
 	 * Protected by the global->lru_lock.
 	 */
-
 	struct list_head lru[TTM_MAX_BO_PRIORITY];
+
+	/**
+	 * @usage: How much of the region is used, has its own protection.
+	 */
+	atomic64_t usage;
 };
 
 /**
@@ -261,6 +266,19 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
 	man->move = NULL;
 }
 
+/**
+ * ttm_resource_manager_usage
+ *
+ * @man: A memory manager object.
+ *
+ * Return how many resources are currently used.
+ */
+static inline uint64_t
+ttm_resource_manager_usage(struct ttm_resource_manager *man)
+{
+	return atomic64_read(&man->usage);
+}
+
 void ttm_resource_init(struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_resource *res);
-- 
2.25.1


* [PATCH 05/12] drm/ttm: move the LRU into resource handling v2
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (3 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 04/12] drm/ttm: add common accounting to the resource mgr v2 Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-24 12:44 ` [PATCH 06/12] drm/ttm: add resource iterator Christian König
                   ` (7 subsequent siblings)
  12 siblings, 0 replies; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

This way we finally fix the problem that new resources are
not immediately evictable after allocation.

That has caused numerous problems, including OOM on GDS handling
and not being able to use TTM as a general resource manager.

v2: stop assuming in ttm_resource_fini that res->bo is still valid.
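
For drivers that use bulk moves the API changes boil down to the sketch
below (a hypothetical helper; it assumes the caller holds the reservation
objects of all BOs involved, as amdgpu_vm does in the hunk below):

static void example_bulk_move_to_tail(struct ttm_device *bdev,
                                      struct ttm_buffer_object **bos,
                                      unsigned int num_bos)
{
        struct ttm_lru_bulk_move bulk;
        unsigned int i;

        ttm_lru_bulk_move_init(&bulk);  /* replaces the open-coded memset() */

        spin_lock(&bdev->lru_lock);
        for (i = 0; i < num_bos; ++i)
                ttm_bo_move_to_lru_tail(bos[i], &bulk); /* resource argument is gone */
        ttm_lru_bulk_move_tail(&bulk);  /* replaces ttm_bo_bulk_move_lru_tail() */
        spin_unlock(&bdev->lru_lock);
}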

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  |   8 +-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c |   2 +-
 drivers/gpu/drm/ttm/ttm_bo.c            | 115 ++--------------------
 drivers/gpu/drm/ttm/ttm_bo_util.c       |   1 -
 drivers/gpu/drm/ttm/ttm_device.c        |  64 ++++++-------
 drivers/gpu/drm/ttm/ttm_resource.c      | 122 +++++++++++++++++++++++-
 include/drm/ttm/ttm_bo_api.h            |  16 ----
 include/drm/ttm/ttm_bo_driver.h         |  29 +-----
 include/drm/ttm/ttm_resource.h          |  35 +++++++
 9 files changed, 201 insertions(+), 191 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6e631254dc7f..d172ccb440e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -683,12 +683,12 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 
 	if (vm->bulk_moveable) {
 		spin_lock(&adev->mman.bdev.lru_lock);
-		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
+		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 		spin_unlock(&adev->mman.bdev.lru_lock);
 		return;
 	}
 
-	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
+	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 
 	spin_lock(&adev->mman.bdev.lru_lock);
 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
@@ -698,11 +698,9 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 		if (!bo->parent)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
-					&vm->lru_bulk_move);
+		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
 		if (shadow)
 			ttm_bo_move_to_lru_tail(&shadow->tbo,
-						shadow->tbo.resource,
 						&vm->lru_bulk_move);
 	}
 	spin_unlock(&adev->mman.bdev.lru_lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 74a1ffd0d7dd..5abe22aecc54 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -794,7 +794,7 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
 			bo->priority = I915_TTM_PRIO_NO_PAGES;
 	}
 
-	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+	ttm_bo_move_to_lru_tail(bo, NULL);
 	spin_unlock(&bo->bdev->lru_lock);
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5db5c9ba166c..5bf5a9442856 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -69,108 +69,16 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 	}
 }
 
-static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
-{
-	struct ttm_device *bdev = bo->bdev;
-
-	list_move_tail(&bo->lru, &bdev->pinned);
-
-	if (bdev->funcs->del_from_lru_notify)
-		bdev->funcs->del_from_lru_notify(bo);
-}
-
-static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
-{
-	struct ttm_device *bdev = bo->bdev;
-
-	list_del_init(&bo->lru);
-
-	if (bdev->funcs->del_from_lru_notify)
-		bdev->funcs->del_from_lru_notify(bo);
-}
-
-static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
-				     struct ttm_buffer_object *bo)
-{
-	if (!pos->first)
-		pos->first = bo;
-	pos->last = bo;
-}
-
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
-			     struct ttm_resource *mem,
 			     struct ttm_lru_bulk_move *bulk)
 {
-	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man;
-
-	if (!bo->deleted)
-		dma_resv_assert_held(bo->base.resv);
-
-	if (bo->pin_count) {
-		ttm_bo_move_to_pinned(bo);
-		return;
-	}
-
-	if (!mem)
-		return;
-
-	man = ttm_manager_type(bdev, mem->mem_type);
-	list_move_tail(&bo->lru, &man->lru[bo->priority]);
-
-	if (bdev->funcs->del_from_lru_notify)
-		bdev->funcs->del_from_lru_notify(bo);
-
-	if (bulk && !bo->pin_count) {
-		switch (bo->resource->mem_type) {
-		case TTM_PL_TT:
-			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
-			break;
+	dma_resv_assert_held(bo->base.resv);
 
-		case TTM_PL_VRAM:
-			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
-			break;
-		}
-	}
+	if (bo->resource)
+		ttm_resource_move_to_lru_tail(bo->resource, bulk);
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
-{
-	unsigned i;
-
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
-		struct ttm_resource_manager *man;
-
-		if (!pos->first)
-			continue;
-
-		dma_resv_assert_held(pos->first->base.resv);
-		dma_resv_assert_held(pos->last->base.resv);
-
-		man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
-		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
-				    &pos->last->lru);
-	}
-
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
-		struct ttm_resource_manager *man;
-
-		if (!pos->first)
-			continue;
-
-		dma_resv_assert_held(pos->first->base.resv);
-		dma_resv_assert_held(pos->last->base.resv);
-
-		man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
-		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
-				    &pos->last->lru);
-	}
-}
-EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
-
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_resource *mem, bool evict,
 				  struct ttm_operation_ctx *ctx,
@@ -345,7 +253,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 		return ret;
 	}
 
-	ttm_bo_move_to_pinned(bo);
 	list_del_init(&bo->ddestroy);
 	spin_unlock(&bo->bdev->lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
@@ -447,7 +354,7 @@ static void ttm_bo_release(struct kref *kref)
 		 */
 		if (bo->pin_count) {
 			bo->pin_count = 0;
-			ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+			ttm_resource_move_to_lru_tail(bo->resource, NULL);
 		}
 
 		kref_init(&bo->kref);
@@ -460,7 +367,6 @@ static void ttm_bo_release(struct kref *kref)
 	}
 
 	spin_lock(&bo->bdev->lru_lock);
-	ttm_bo_del_from_lru(bo);
 	list_del(&bo->ddestroy);
 	spin_unlock(&bo->bdev->lru_lock);
 
@@ -674,15 +580,17 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
 			struct ww_acquire_ctx *ticket)
 {
 	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
+	struct ttm_resource *res;
 	bool locked = false;
 	unsigned i;
 	int ret;
 
 	spin_lock(&bdev->lru_lock);
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		list_for_each_entry(bo, &man->lru[i], lru) {
+		list_for_each_entry(res, &man->lru[i], lru) {
 			bool busy;
 
+			bo = res->bo;
 			if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
 							    &locked, &busy)) {
 				if (busy && !busy_bo && ticket !=
@@ -700,7 +608,7 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
 		}
 
 		/* If the inner loop terminated early, we have our candidate */
-		if (&bo->lru != &man->lru[i])
+		if (&res->lru != &man->lru[i])
 			break;
 
 		bo = NULL;
@@ -872,9 +780,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 error:
-	if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
-		ttm_bo_move_to_lru_tail_unlocked(bo);
-
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
@@ -968,7 +873,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
 	kref_init(&bo->kref);
-	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	bo->bdev = bdev;
 	bo->type = type;
@@ -1017,8 +921,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 		return ret;
 	}
 
-	ttm_bo_move_to_lru_tail_unlocked(bo);
-
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_init_reserved);
@@ -1120,7 +1022,6 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 		return ret == -EBUSY ? -ENOSPC : ret;
 	}
 
-	ttm_bo_move_to_pinned(bo);
 	/* TODO: Cleanup the locking */
 	spin_unlock(&bo->bdev->lru_lock);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index a2e3a9626198..6c3d052f96ae 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -228,7 +228,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 
 	atomic_inc(&ttm_glob.bo_count);
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
-	INIT_LIST_HEAD(&fbo->base.lru);
 	drm_vma_node_reset(&fbo->base.base.vma_node);
 
 	kref_init(&fbo->base.kref);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index be24bb6cefd0..ba35887147ba 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -144,6 +144,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 {
 	struct ttm_resource_manager *man;
 	struct ttm_buffer_object *bo;
+	struct ttm_resource *res;
 	unsigned i, j;
 	int ret;
 
@@ -154,8 +155,11 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 			continue;
 
 		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
-			list_for_each_entry(bo, &man->lru[j], lru) {
-				uint32_t num_pages = PFN_UP(bo->base.size);
+			list_for_each_entry(res, &man->lru[j], lru) {
+				uint32_t num_pages;
+
+				bo = res->bo;
+				num_pages = PFN_UP(bo->base.size);
 
 				ret = ttm_bo_swapout(bo, ctx, gfp_flags);
 				/* ttm_bo_swapout has dropped the lru_lock */
@@ -259,49 +263,45 @@ void ttm_device_fini(struct ttm_device *bdev)
 }
 EXPORT_SYMBOL(ttm_device_fini);
 
-void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
+					      struct list_head *list)
 {
-	struct ttm_resource_manager *man;
-	struct ttm_buffer_object *bo;
-	unsigned int i, j;
+	struct ttm_resource *res;
 
 	spin_lock(&bdev->lru_lock);
-	while (!list_empty(&bdev->pinned)) {
-		bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
+	while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
+		struct ttm_buffer_object *bo = res->bo;
+
 		/* Take ref against racing releases once lru_lock is unlocked */
-		if (ttm_bo_get_unless_zero(bo)) {
-			list_del_init(&bo->lru);
-			spin_unlock(&bdev->lru_lock);
+		if (!ttm_bo_get_unless_zero(bo))
+			continue;
 
-			if (bo->ttm)
-				ttm_tt_unpopulate(bo->bdev, bo->ttm);
+		list_del_init(&res->lru);
+		spin_unlock(&bdev->lru_lock);
 
-			ttm_bo_put(bo);
-			spin_lock(&bdev->lru_lock);
-		}
+		if (bo->ttm)
+			ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+		ttm_bo_put(bo);
+		spin_lock(&bdev->lru_lock);
 	}
+	spin_unlock(&bdev->lru_lock);
+}
+
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+{
+	struct ttm_resource_manager *man;
+	unsigned int i, j;
+
+	ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);
 
 	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
 		man = ttm_manager_type(bdev, i);
 		if (!man || !man->use_tt)
 			continue;
 
-		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
-			while (!list_empty(&man->lru[j])) {
-				bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
-				if (ttm_bo_get_unless_zero(bo)) {
-					list_del_init(&bo->lru);
-					spin_unlock(&bdev->lru_lock);
-
-					if (bo->ttm)
-						ttm_tt_unpopulate(bo->bdev, bo->ttm);
-
-					ttm_bo_put(bo);
-					spin_lock(&bdev->lru_lock);
-				}
-			}
-		}
+		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
+			ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
 	}
-	spin_unlock(&bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index b8362492980d..450e665c357b 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -29,6 +29,106 @@
 #include <drm/ttm/ttm_resource.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
+/**
+ * ttm_lru_bulk_move_init - initialize a bulk move structure
+ * @bulk: the structure to init
+ *
+ * For now just memset the structure to zero.
+ */
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
+{
+	memset(bulk, 0, sizeof(*bulk));
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_init);
+
+/**
+ * ttm_lru_bulk_move_tail
+ *
+ * @bulk: bulk move structure
+ *
+ * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
+ * resource order never changes. Should be called with ttm_device::lru_lock held.
+ */
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
+{
+	unsigned i;
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
+		struct ttm_resource_manager *man;
+
+		if (!pos->first)
+			continue;
+
+		dma_resv_assert_held(pos->first->bo->base.resv);
+		dma_resv_assert_held(pos->last->bo->base.resv);
+
+		man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_TT);
+		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
+				    &pos->last->lru);
+	}
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
+		struct ttm_resource_manager *man;
+
+		if (!pos->first)
+			continue;
+
+		dma_resv_assert_held(pos->first->bo->base.resv);
+		dma_resv_assert_held(pos->last->bo->base.resv);
+
+		man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_VRAM);
+		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
+				    &pos->last->lru);
+	}
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
+
+/* Record a resource position in a bulk move structure */
+static void ttm_lru_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
+				      struct ttm_resource *res)
+{
+	if (!pos->first)
+		pos->first = res;
+	pos->last = res;
+}
+
+/* Move a resource to the LRU tail and track the bulk position */
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res,
+				   struct ttm_lru_bulk_move *bulk)
+{
+	struct ttm_buffer_object *bo = res->bo;
+	struct ttm_device *bdev = bo->bdev;
+	struct ttm_resource_manager *man;
+
+	if (bo->pin_count) {
+		list_move_tail(&res->lru, &bdev->pinned);
+		if (bdev->funcs->del_from_lru_notify)
+			bdev->funcs->del_from_lru_notify(res->bo);
+		return;
+	}
+
+	man = ttm_manager_type(bdev, res->mem_type);
+	list_move_tail(&res->lru, &man->lru[bo->priority]);
+
+	if (bdev->funcs->del_from_lru_notify)
+		bdev->funcs->del_from_lru_notify(bo);
+
+	if (!bulk)
+		return;
+
+	switch (res->mem_type) {
+	case TTM_PL_TT:
+		ttm_lru_bulk_move_set_pos(&bulk->tt[bo->priority], res);
+		break;
+
+	case TTM_PL_VRAM:
+		ttm_lru_bulk_move_set_pos(&bulk->vram[bo->priority], res);
+		break;
+	}
+}
+
 void ttm_resource_init(struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_resource *res)
@@ -44,16 +144,36 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 	res->bus.is_iomem = false;
 	res->bus.caching = ttm_cached;
 	res->bo = bo;
+	INIT_LIST_HEAD(&res->lru);
 
 	man = ttm_manager_type(bo->bdev, place->mem_type);
 	atomic64_add(bo->base.size, &man->usage);
+
+	spin_lock(&bo->bdev->lru_lock);
+	ttm_resource_move_to_lru_tail(res, NULL);
+	spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_init);
 
+/**
+ * ttm_resource_fini
+ *
+ * @res: the resource to clean up
+ *
+ * Make sure the resource is removed from the LRU before destruction.
+ */
 void ttm_resource_fini(struct ttm_resource_manager *man,
 		       struct ttm_resource *res)
 {
-	atomic64_sub(res->bo->base.size, &man->usage);
+	struct ttm_device *bdev = man->bdev;
+
+	spin_lock(&bdev->lru_lock);
+	list_del_init(&res->lru);
+	if (res->bo && bdev->funcs->del_from_lru_notify)
+		bdev->funcs->del_from_lru_notify(res->bo);
+	spin_unlock(&bdev->lru_lock);
+
+	atomic64_sub(res->num_pages << PAGE_SHIFT, &man->usage);
 }
 EXPORT_SYMBOL(ttm_resource_fini);
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 9798eb097c13..d239b97e8f71 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -56,8 +56,6 @@ struct ttm_placement;
 
 struct ttm_place;
 
-struct ttm_lru_bulk_move;
-
 /**
  * enum ttm_bo_type
  *
@@ -95,7 +93,6 @@ struct ttm_tt;
  * @ttm: TTM structure holding system pages.
  * @evicted: Whether the object was evicted without user-space knowing.
  * @deleted: True if the object is only a zombie and already deleted.
- * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
  * @offset: The current GPU offset, which can have different meanings
@@ -143,7 +140,6 @@ struct ttm_buffer_object {
 	 * Members protected by the bdev::lru_lock.
 	 */
 
-	struct list_head lru;
 	struct list_head ddestroy;
 
 	/**
@@ -294,7 +290,6 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
  * ttm_bo_move_to_lru_tail
  *
  * @bo: The buffer object.
- * @mem: Resource object.
  * @bulk: optional bulk move structure to remember BO positions
  *
  * Move this BO to the tail of all lru lists used to lookup and reserve an
@@ -302,19 +297,8 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
  * held, and is used to make a BO less likely to be considered for eviction.
  */
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
-			     struct ttm_resource *mem,
 			     struct ttm_lru_bulk_move *bulk);
 
-/**
- * ttm_bo_bulk_move_lru_tail
- *
- * @bulk: bulk move structure
- *
- * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
- * BO order never changes. Should be called with ttm_global::lru_lock held.
- */
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
-
 /**
  * ttm_bo_lock_delayed_workqueue
  *
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 5f087575194b..6c7352e13708 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -45,33 +45,6 @@
 #include "ttm_tt.h"
 #include "ttm_pool.h"
 
-/**
- * struct ttm_lru_bulk_move_pos
- *
- * @first: first BO in the bulk move range
- * @last: last BO in the bulk move range
- *
- * Positions for a lru bulk move.
- */
-struct ttm_lru_bulk_move_pos {
-	struct ttm_buffer_object *first;
-	struct ttm_buffer_object *last;
-};
-
-/**
- * struct ttm_lru_bulk_move
- *
- * @tt: first/last lru entry for BOs in the TT domain
- * @vram: first/last lru entry for BOs in the VRAM domain
- * @swap: first/last lru entry for BOs on the swap list
- *
- * Helper structure for bulk moves on the LRU list.
- */
-struct ttm_lru_bulk_move {
-	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
-	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
-};
-
 /*
  * ttm_bo.c
  */
@@ -182,7 +155,7 @@ static inline void
 ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
 {
 	spin_lock(&bo->bdev->lru_lock);
-	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+	ttm_bo_move_to_lru_tail(bo, NULL);
 	spin_unlock(&bo->bdev->lru_lock);
 }
 
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 3d391279b33f..a54d52517a30 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -26,10 +26,12 @@
 #define _TTM_RESOURCE_H_
 
 #include <linux/types.h>
+#include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/atomic.h>
 #include <linux/dma-buf-map.h>
 #include <linux/dma-fence.h>
+
 #include <drm/drm_print.h>
 #include <drm/ttm/ttm_caching.h>
 #include <drm/ttm/ttm_kmap_iter.h>
@@ -178,6 +180,33 @@ struct ttm_resource {
 	uint32_t placement;
 	struct ttm_bus_placement bus;
 	struct ttm_buffer_object *bo;
+	struct list_head lru;
+};
+
+/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first res in the bulk move range
+ * @last: last res in the bulk move range
+ *
+ * Positions for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+	struct ttm_resource *first;
+	struct ttm_resource *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ *
+ * @tt: first/last lru entry for resources in the TT domain
+ * @vram: first/last lru entry for resources in the VRAM domain
+ *
+ * Helper structure for bulk moves on the LRU list.
+ */
+struct ttm_lru_bulk_move {
+	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
+	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
 };
 
 /**
@@ -279,6 +308,12 @@ ttm_resource_manager_usage(struct ttm_resource_manager *man)
 	return atomic64_read(&man->usage);
 }
 
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res,
+				   struct ttm_lru_bulk_move *bulk);
+
 void ttm_resource_init(struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_resource *res);
-- 
2.25.1


* [PATCH 06/12] drm/ttm: add resource iterator
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (4 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 05/12] drm/ttm: move the LRU into resource handling v2 Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-26  7:43   ` Huang Rui
  2021-11-24 12:44 ` [PATCH 07/12] drm/radeon: use ttm_resource_manager_debug Christian König
                   ` (6 subsequent siblings)
  12 siblings, 1 reply; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

Instead of duplicating the same loop in different places, add an iterator
over all the resources in a resource manager.
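
Usage is deliberately simple: with ttm_device::lru_lock held, the
iterator walks every resource of a manager across all priorities in LRU
order. A small hypothetical example:

/* Count the resources of a manager; caller holds bdev->lru_lock. */
static unsigned int example_count_resources(struct ttm_resource_manager *man)
{
        struct ttm_resource_cursor cursor;
        struct ttm_resource *res;
        unsigned int count = 0;

        ttm_resource_manager_for_each_res(man, &cursor, res)
                ++count;

        return count;
}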

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c       | 41 +++++++++++----------------
 drivers/gpu/drm/ttm/ttm_device.c   | 26 ++++++++---------
 drivers/gpu/drm/ttm/ttm_resource.c | 45 ++++++++++++++++++++++++++++++
 include/drm/ttm/ttm_resource.h     | 23 +++++++++++++++
 4 files changed, 95 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5bf5a9442856..dd8b9a99c180 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -580,38 +580,29 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
 			struct ww_acquire_ctx *ticket)
 {
 	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
+	struct ttm_resource_cursor cursor;
 	struct ttm_resource *res;
 	bool locked = false;
-	unsigned i;
 	int ret;
 
 	spin_lock(&bdev->lru_lock);
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		list_for_each_entry(res, &man->lru[i], lru) {
-			bool busy;
-
-			bo = res->bo;
-			if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
-							    &locked, &busy)) {
-				if (busy && !busy_bo && ticket !=
-				    dma_resv_locking_ctx(bo->base.resv))
-					busy_bo = bo;
-				continue;
-			}
-
-			if (!ttm_bo_get_unless_zero(bo)) {
-				if (locked)
-					dma_resv_unlock(bo->base.resv);
-				continue;
-			}
-			break;
+	ttm_resource_manager_for_each_res(man, &cursor, res) {
+		bool busy;
+
+		if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
+						    &locked, &busy)) {
+			if (busy && !busy_bo && ticket !=
+			    dma_resv_locking_ctx(bo->base.resv))
+				busy_bo = res->bo;
+			continue;
 		}
 
-		/* If the inner loop terminated early, we have our candidate */
-		if (&res->lru != &man->lru[i])
-			break;
-
-		bo = NULL;
+		if (!ttm_bo_get_unless_zero(res->bo)) {
+			if (locked)
+				dma_resv_unlock(res->bo->base.resv);
+			continue;
+		}
+		bo = res->bo;
 	}
 
 	if (!bo) {
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index ba35887147ba..a0562ab386f5 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -142,10 +142,10 @@ EXPORT_SYMBOL(ttm_global_swapout);
 int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 		       gfp_t gfp_flags)
 {
+	struct ttm_resource_cursor cursor;
 	struct ttm_resource_manager *man;
-	struct ttm_buffer_object *bo;
 	struct ttm_resource *res;
-	unsigned i, j;
+	unsigned i;
 	int ret;
 
 	spin_lock(&bdev->lru_lock);
@@ -154,20 +154,16 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 		if (!man || !man->use_tt)
 			continue;
 
-		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
-			list_for_each_entry(res, &man->lru[j], lru) {
-				uint32_t num_pages;
-
-				bo = res->bo;
-				num_pages = PFN_UP(bo->base.size);
+		ttm_resource_manager_for_each_res(man, &cursor, res) {
+			struct ttm_buffer_object *bo = res->bo;
+			uint32_t num_pages = PFN_UP(bo->base.size);
 
-				ret = ttm_bo_swapout(bo, ctx, gfp_flags);
-				/* ttm_bo_swapout has dropped the lru_lock */
-				if (!ret)
-					return num_pages;
-				if (ret != -EBUSY)
-					return ret;
-			}
+			ret = ttm_bo_swapout(bo, ctx, gfp_flags);
+			/* ttm_bo_swapout has dropped the lru_lock */
+			if (!ret)
+				return num_pages;
+			if (ret != -EBUSY)
+				return ret;
 		}
 	}
 	spin_unlock(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 450e665c357b..9e68d36a1546 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -354,6 +354,51 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
 }
 EXPORT_SYMBOL(ttm_resource_manager_debug);
 
+/**
+ * ttm_resource_manager_first
+ *
+ * @man: resource manager to iterate over
+ * @cursor: cursor to record the position
+ *
+ * Returns the first resource from the resource manager.
+ */
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+			   struct ttm_resource_cursor *cursor)
+{
+	struct ttm_resource *res;
+
+	for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
+	     ++cursor->priority)
+		list_for_each_entry(res, &man->lru[cursor->priority], lru)
+			return res;
+
+	return NULL;
+}
+
+/**
+ * ttm_resource_manager_next
+ *
+ * @man: resource manager to iterate over
+ * @cursor: cursor to record the position
+ *
+ * Returns the next resource from the resource manager.
+ */
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+			  struct ttm_resource_cursor *cursor,
+			  struct ttm_resource *res)
+{
+	list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
+		return res;
+
+	for (; cursor->priority < TTM_MAX_BO_PRIORITY; ++cursor->priority)
+		list_for_each_entry(res, &man->lru[cursor->priority], lru)
+			return res;
+
+	return NULL;
+}
+
 static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
 					  struct dma_buf_map *dmap,
 					  pgoff_t i)
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index a54d52517a30..13da5e337350 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -183,6 +183,17 @@ struct ttm_resource {
 	struct list_head lru;
 };
 
+/**
+ * struct ttm_resource_cursor
+ *
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+	unsigned int priority;
+};
+
 /**
  * struct ttm_lru_bulk_move_pos
  *
@@ -339,6 +350,18 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
 void ttm_resource_manager_debug(struct ttm_resource_manager *man,
 				struct drm_printer *p);
 
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+			   struct ttm_resource_cursor *cursor);
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+			  struct ttm_resource_cursor *cursor,
+			  struct ttm_resource *res);
+
+#define ttm_resource_manager_for_each_res(man, cursor, res)		\
+	for (res = ttm_resource_manager_first(man, cursor); res;	\
+	     res = ttm_resource_manager_next(man, cursor, res))
+
 struct ttm_kmap_iter *
 ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
 			 struct io_mapping *iomap,
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 24+ messages in thread
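
For reference, driver-side use of the iterator added above could look roughly
like the sketch below. This is only an illustration, not part of the series:
the helper name count_lru_pages is hypothetical, and the caller is assumed to
hold bdev->lru_lock for the whole walk, just as ttm_device_swapout() does in
the hunk above.

	/* Hypothetical helper, only to illustrate the new iterator: sum up
	 * the pages of all resources currently on a manager's LRU lists.
	 * The caller must hold bdev->lru_lock so the lists and the weak
	 * res->bo references stay stable while walking them.
	 */
	static unsigned long count_lru_pages(struct ttm_resource_manager *man)
	{
		struct ttm_resource_cursor cursor;
		struct ttm_resource *res;
		unsigned long num_pages = 0;

		ttm_resource_manager_for_each_res(man, &cursor, res)
			num_pages += res->num_pages;

		return num_pages;
	}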

* [PATCH 07/12] drm/radeon: use ttm_resource_manager_debug
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (5 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 06/12] drm/ttm: add resource iterator Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-24 12:44 ` [PATCH 08/12] drm/radeon: remove resource accounting Christian König
                   ` (5 subsequent siblings)
  12 siblings, 0 replies; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

Instead of calling the debug operation directly.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon_ttm.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 11b21d605584..0d1283cdc8fb 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -802,7 +802,7 @@ static int radeon_mm_vram_dump_table_show(struct seq_file *m, void *unused)
 							    TTM_PL_VRAM);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	man->func->debug(man, &p);
+	ttm_resource_manager_debug(man, &p);
 	return 0;
 }
 
@@ -820,7 +820,7 @@ static int radeon_mm_gtt_dump_table_show(struct seq_file *m, void *unused)
 							    TTM_PL_TT);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	man->func->debug(man, &p);
+	ttm_resource_manager_debug(man, &p);
 	return 0;
 }
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH 08/12] drm/radeon: remove resource accounting
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (6 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 07/12] drm/radeon: use ttm_resource_manager_debug Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-24 12:44 ` [PATCH 09/12] drm/amdgpu: use ttm_resource_manager_debug Christian König
                   ` (4 subsequent siblings)
  12 siblings, 0 replies; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

Use the one provided by TTM instead.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon.h        |  2 --
 drivers/gpu/drm/radeon/radeon_kms.c    |  7 ++++--
 drivers/gpu/drm/radeon/radeon_object.c | 30 +++-----------------------
 drivers/gpu/drm/radeon/radeon_object.h |  1 -
 drivers/gpu/drm/radeon/radeon_ttm.c    | 18 ++--------------
 5 files changed, 10 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 895776c421d4..08f83bf2c330 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2462,8 +2462,6 @@ struct radeon_device {
 	struct radeon_vm_manager	vm_manager;
 	struct mutex			gpu_clock_mutex;
 	/* memory stats */
-	atomic64_t			vram_usage;
-	atomic64_t			gtt_usage;
 	atomic64_t			num_bytes_moved;
 	atomic_t			gpu_reset_counter;
 	/* ACPI interface */
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 482fb0ae6cb5..c18dceb796c7 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -241,6 +241,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	struct drm_radeon_info *info = data;
 	struct radeon_mode_info *minfo = &rdev->mode_info;
 	uint32_t *value, value_tmp, *value_ptr, value_size;
+	struct ttm_resource_manager *man;
 	uint64_t value64;
 	struct drm_crtc *crtc;
 	int i, found;
@@ -550,12 +551,14 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	case RADEON_INFO_VRAM_USAGE:
 		value = (uint32_t*)&value64;
 		value_size = sizeof(uint64_t);
-		value64 = atomic64_read(&rdev->vram_usage);
+		man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
+		value64 = ttm_resource_manager_usage(man);
 		break;
 	case RADEON_INFO_GTT_USAGE:
 		value = (uint32_t*)&value64;
 		value_size = sizeof(uint64_t);
-		value64 = atomic64_read(&rdev->gtt_usage);
+		man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT);
+		value64 = ttm_resource_manager_usage(man);
 		break;
 	case RADEON_INFO_ACTIVE_CU_COUNT:
 		if (rdev->family >= CHIP_BONAIRE)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 915194345e20..8b28d753c0d4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -49,27 +49,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
  * function are calling it.
  */
 
-static void radeon_update_memory_usage(struct ttm_buffer_object *bo,
-				       unsigned int mem_type, int sign)
-{
-	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
-
-	switch (mem_type) {
-	case TTM_PL_TT:
-		if (sign > 0)
-			atomic64_add(bo->base.size, &rdev->gtt_usage);
-		else
-			atomic64_sub(bo->base.size, &rdev->gtt_usage);
-		break;
-	case TTM_PL_VRAM:
-		if (sign > 0)
-			atomic64_add(bo->base.size, &rdev->vram_usage);
-		else
-			atomic64_sub(bo->base.size, &rdev->vram_usage);
-		break;
-	}
-}
-
 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct radeon_bo *bo;
@@ -434,7 +413,9 @@ void radeon_bo_fini(struct radeon_device *rdev)
 static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
 {
 	u64 real_vram_size = rdev->mc.real_vram_size;
-	u64 vram_usage = atomic64_read(&rdev->vram_usage);
+	struct ttm_resource_manager *man =
+		ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
+	u64 vram_usage = ttm_resource_manager_usage(man);
 
 	/* This function is based on the current VRAM usage.
 	 *
@@ -725,15 +706,10 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 }
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   unsigned int old_type,
 			   struct ttm_resource *new_mem)
 {
 	struct radeon_bo *rbo;
 
-	radeon_update_memory_usage(bo, old_type, -1);
-	if (new_mem)
-		radeon_update_memory_usage(bo, new_mem->mem_type, 1);
-
 	if (!radeon_ttm_bo_is_radeon_bo(bo))
 		return;
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 1afc7992ef91..0b64e202577b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -161,7 +161,6 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-				  unsigned int old_type,
 				  struct ttm_resource *new_mem);
 extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0d1283cdc8fb..ae09a91a486a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -199,7 +199,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 	struct ttm_resource *old_mem = bo->resource;
 	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
-	int r, old_type;
+	int r;
 
 	if (new_mem->mem_type == TTM_PL_TT) {
 		r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
@@ -216,9 +216,6 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 	if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
 		return -EINVAL;
 
-	/* Save old type for statistics update */
-	old_type = old_mem->mem_type;
-
 	rdev = radeon_get_rdev(bo->bdev);
 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		ttm_bo_move_null(bo, new_mem);
@@ -264,7 +261,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 out:
 	/* update statistics */
 	atomic64_add(bo->base.size, &rdev->num_bytes_moved);
-	radeon_bo_move_notify(bo, old_type, new_mem);
+	radeon_bo_move_notify(bo, new_mem);
 	return 0;
 }
 
@@ -679,16 +676,6 @@ bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
 	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
 }
 
-static void
-radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
-{
-	unsigned int old_type = TTM_PL_SYSTEM;
-
-	if (bo->resource)
-		old_type = bo->resource->mem_type;
-	radeon_bo_move_notify(bo, old_type, NULL);
-}
-
 static struct ttm_device_funcs radeon_bo_driver = {
 	.ttm_tt_create = &radeon_ttm_tt_create,
 	.ttm_tt_populate = &radeon_ttm_tt_populate,
@@ -697,7 +684,6 @@ static struct ttm_device_funcs radeon_bo_driver = {
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &radeon_evict_flags,
 	.move = &radeon_bo_move,
-	.delete_mem_notify = &radeon_bo_delete_mem_notify,
 	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
 };
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH 09/12] drm/amdgpu: use ttm_resource_manager_debug
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (7 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 08/12] drm/radeon: remove resource accounting Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-24 12:44 ` [PATCH 10/12] drm/amdgpu: remove GTT accounting Christian König
                   ` (3 subsequent siblings)
  12 siblings, 0 replies; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

Instead of calling the debug operation directly.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9be56ecaf39a..62c328f23ff6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2082,7 +2082,7 @@ static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
 							    TTM_PL_VRAM);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	man->func->debug(man, &p);
+	ttm_resource_manager_debug(man, &p);
 	return 0;
 }
 
@@ -2100,7 +2100,7 @@ static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
 							    TTM_PL_TT);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	man->func->debug(man, &p);
+	ttm_resource_manager_debug(man, &p);
 	return 0;
 }
 
@@ -2111,7 +2111,7 @@ static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
 							    AMDGPU_PL_GDS);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	man->func->debug(man, &p);
+	ttm_resource_manager_debug(man, &p);
 	return 0;
 }
 
@@ -2122,7 +2122,7 @@ static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
 							    AMDGPU_PL_GWS);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	man->func->debug(man, &p);
+	ttm_resource_manager_debug(man, &p);
 	return 0;
 }
 
@@ -2133,7 +2133,7 @@ static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
 							    AMDGPU_PL_OA);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	man->func->debug(man, &p);
+	ttm_resource_manager_debug(man, &p);
 	return 0;
 }
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH 10/12] drm/amdgpu: remove GTT accounting
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (8 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 09/12] drm/amdgpu: use ttm_resource_manager_debug Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-24 12:44 ` [PATCH 11/12] drm/amdgpu: remove VRAM accounting Christian König
                   ` (2 subsequent siblings)
  12 siblings, 0 replies; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

This is provided by TTM now.

Also switch man->size to bytes instead of pages and fix the double
printing of size and usage in debugfs.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 50 +++++----------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c     |  5 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     |  2 -
 3 files changed, 12 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 9e7685a4878c..ce5eeb3c1097 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -60,7 +60,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
 	struct ttm_resource_manager *man;
 
 	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
-	return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE);
+	return sysfs_emit(buf, "%llu\n", man->size);
 }
 
 /**
@@ -80,7 +80,7 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
 	struct ttm_resource_manager *man;
 
 	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
-	return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
+	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
 }
 
 static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
@@ -132,20 +132,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	struct amdgpu_gtt_node *node;
 	int r;
 
-	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
-	    atomic64_add_return(num_pages, &mgr->used) >  man->size) {
-		atomic64_sub(num_pages, &mgr->used);
-		return -ENOSPC;
-	}
-
 	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
-	if (!node) {
-		r = -ENOMEM;
-		goto err_out;
-	}
+	if (!node)
+		return -ENOMEM;
 
 	node->tbo = tbo;
 	ttm_resource_init(tbo, place, &node->base.base);
+	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
+	    ttm_resource_manager_usage(man) > man->size) {
+		r = -ENOSPC;
+		goto err_free;
+	}
 
 	if (place->lpfn) {
 		spin_lock(&mgr->lock);
@@ -171,11 +168,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 err_free:
 	ttm_resource_fini(man, &node->base.base);
 	kfree(node);
-
-err_out:
-	if (!(place->flags & TTM_PL_FLAG_TEMPORARY))
-		atomic64_sub(num_pages, &mgr->used);
-
 	return r;
 }
 
@@ -198,27 +190,10 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
 
-	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
-		atomic64_sub(res->num_pages, &mgr->used);
-
 	ttm_resource_fini(man, res);
 	kfree(node);
 }
 
-/**
- * amdgpu_gtt_mgr_usage - return usage of GTT domain
- *
- * @man: TTM memory type manager
- *
- * Return how many bytes are used in the GTT domain
- */
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
-{
-	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-
-	return atomic64_read(&mgr->used) * PAGE_SIZE;
-}
-
 /**
  * amdgpu_gtt_mgr_recover - re-init gart
  *
@@ -265,9 +240,6 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
 	spin_lock(&mgr->lock);
 	drm_mm_print(&mgr->mm, printer);
 	spin_unlock(&mgr->lock);
-
-	drm_printf(printer, "man size:%llu pages,  gtt used:%llu pages\n",
-		   man->size, atomic64_read(&mgr->used));
 }
 
 static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
@@ -293,14 +265,12 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
 	man->use_tt = true;
 	man->func = &amdgpu_gtt_mgr_func;
 
-	ttm_resource_manager_init(man, &adev->mman.bdev,
-				  gtt_size >> PAGE_SHIFT);
+	ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);
 
 	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
 	size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
 	drm_mm_init(&mgr->mm, start, size);
 	spin_lock_init(&mgr->lock);
-	atomic64_set(&mgr->used, 0);
 
 	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
 	ttm_resource_manager_set_used(man, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 651c7abfde03..8d5d56d9d26d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -678,7 +678,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_GTT_USAGE:
-		ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
+		ui64 = ttm_resource_manager_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_GDS_CONFIG: {
 		struct drm_amdgpu_info_gds gds_info;
@@ -737,8 +737,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		mem.gtt.total_heap_size *= PAGE_SIZE;
 		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
 			atomic64_read(&adev->gart_pin_size);
-		mem.gtt.heap_usage =
-			amdgpu_gtt_mgr_usage(gtt_man);
+		mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
 		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
 
 		return copy_to_user(out, &mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 7346ecff4438..2ac332cd4f59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -52,7 +52,6 @@ struct amdgpu_gtt_mgr {
 	struct ttm_resource_manager manager;
 	struct drm_mm mm;
 	spinlock_t lock;
-	atomic64_t used;
 };
 
 struct amdgpu_preempt_mgr {
@@ -114,7 +113,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
 void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
 
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
 
 uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 24+ messages in thread
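
A note on the ordering in amdgpu_gtt_mgr_new() above: with the driver's own
atomic64 counter gone, the -ENOSPC check relies on the usage counter that the
common manager maintains (filled in by ttm_resource_init()/ttm_resource_fini()
as added earlier in the series), so the resource has to be initialized before
the limit is checked and torn down again if it does not fit. A condensed
sketch of that pattern, with the error path inlined and names taken from the
hunk above:

	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* ttm_resource_init() accounts the new resource in the manager ... */
	ttm_resource_init(tbo, place, &node->base.base);

	/* ... so the check already sees this allocation included. */
	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
	    ttm_resource_manager_usage(man) > man->size) {
		ttm_resource_fini(man, &node->base.base);
		kfree(node);
		return -ENOSPC;
	}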

* [PATCH 11/12] drm/amdgpu: remove VRAM accounting
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (9 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 10/12] drm/amdgpu: remove GTT accounting Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-24 12:44 ` [PATCH 12/12] drm/amdgpu: drop amdgpu_gtt_node Christian König
  2021-11-26  7:47 ` drm/ttm: moving the LRU into the resource Thomas Hellström
  12 siblings, 0 replies; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

This is provided by TTM now.

Also switch man->size to bytes instead of pages and fix the double
printing of size and usage in debugfs.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c   |  5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c      |  5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c      |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h      |  2 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c     |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 56 +++++++-------------
 7 files changed, 26 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 7077f21f0021..4760fd82e6c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -531,9 +531,10 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+	struct ttm_resource_manager *vram_man =
+		ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-	return amdgpu_vram_mgr_usage(vram_man);
+	return ttm_resource_manager_usage(vram_man);
 }
 
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 413606d10080..797739ba5466 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -315,7 +315,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 	}
 
 	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
-	used_vram = amdgpu_vram_mgr_usage(vram_man);
+	used_vram = ttm_resource_manager_usage(vram_man);
 	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 
 	spin_lock(&adev->mm_stats.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8d5d56d9d26d..6d21ee430f03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -672,7 +672,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VRAM_USAGE:
-		ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
+		ui64 = ttm_resource_manager_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VIS_VRAM_USAGE:
 		ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
@@ -718,8 +718,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
 			atomic64_read(&adev->vram_pin_size) -
 			AMDGPU_VM_RESERVED_VRAM;
-		mem.vram.heap_usage =
-			amdgpu_vram_mgr_usage(vram_man);
+		mem.vram.heap_usage = ttm_resource_manager_usage(vram_man);
 		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
 
 		mem.cpu_accessible_vram.total_heap_size =
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 62c328f23ff6..b5a4f1311504 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1879,7 +1879,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 		size = adev->gmc.real_vram_size;
 	else
 		size = adev->gmc.visible_vram_size;
-	man->size = size >> PAGE_SHIFT;
+	man->size = size;
 	adev->mman.buffer_funcs_enabled = enable;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 2ac332cd4f59..f3eb4029bdcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -44,7 +44,6 @@ struct amdgpu_vram_mgr {
 	spinlock_t lock;
 	struct list_head reservations_pending;
 	struct list_head reserved_pages;
-	atomic64_t usage;
 	atomic64_t vis_usage;
 };
 
@@ -127,7 +126,6 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 void amdgpu_vram_mgr_free_sgt(struct device *dev,
 			      enum dma_data_direction dir,
 			      struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man);
 int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
 				  uint64_t start, uint64_t size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 04cf9b207e62..0b258a79b57c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -571,7 +571,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
 	vf2pf_info->driver_cert = 0;
 	vf2pf_info->os_info.all = 0;
 
-	vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
+	vf2pf_info->fb_usage = ttm_resource_manager_usage(vram_man) >> 20;
 	vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
 	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
 	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index ddd0b6d74070..862c0f3d2329 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -99,7 +99,7 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
 	struct ttm_resource_manager *man;
 
 	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-	return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
+	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
 }
 
 /**
@@ -255,7 +255,7 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
 
 		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
 		atomic64_add(vis_usage, &mgr->vis_usage);
-		atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
+		atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &man->usage);
 		list_move(&rsv->node, &mgr->reserved_pages);
 	}
 }
@@ -382,19 +382,13 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 
 	lpfn = place->lpfn;
 	if (!lpfn)
-		lpfn = man->size;
+		lpfn = man->size >> PAGE_SHIFT;
 
 	max_bytes = adev->gmc.mc_vram_size;
 	if (tbo->type != ttm_bo_type_kernel)
 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
 
-	/* bail out quickly if there's likely not enough VRAM for this BO */
 	mem_bytes = tbo->base.size;
-	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
-		r = -ENOSPC;
-		goto error_sub;
-	}
-
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
 		pages_per_node = ~0ul;
 		num_nodes = 1;
@@ -412,13 +406,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 
 	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
 			GFP_KERNEL | __GFP_ZERO);
-	if (!node) {
-		r = -ENOMEM;
-		goto error_sub;
-	}
+	if (!node)
+		return -ENOMEM;
 
 	ttm_resource_init(tbo, place, &node->base);
 
+	/* bail out quickly if there's likely not enough VRAM for this BO */
+	if (ttm_resource_manager_usage(man) > max_bytes) {
+		r = -ENOSPC;
+		goto error_fini;
+	}
+
 	mode = DRM_MM_INSERT_BEST;
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;
@@ -476,11 +474,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	while (i--)
 		drm_mm_remove_node(&node->mm_nodes[i]);
 	spin_unlock(&mgr->lock);
+error_fini:
 	ttm_resource_fini(man, &node->base);
 	kvfree(node);
 
-error_sub:
-	atomic64_sub(mem_bytes, &mgr->usage);
 	return r;
 }
 
@@ -498,7 +495,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	uint64_t usage = 0, vis_usage = 0;
+	uint64_t vis_usage = 0;
 	unsigned i, pages;
 
 	spin_lock(&mgr->lock);
@@ -507,13 +504,11 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 		struct drm_mm_node *mm = &node->mm_nodes[i];
 
 		drm_mm_remove_node(mm);
-		usage += mm->size << PAGE_SHIFT;
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
 	}
 	amdgpu_vram_mgr_do_reserve(man);
 	spin_unlock(&mgr->lock);
 
-	atomic64_sub(usage, &mgr->usage);
 	atomic64_sub(vis_usage, &mgr->vis_usage);
 
 	ttm_resource_fini(man, res);
@@ -631,20 +626,6 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev,
 	kfree(sgt);
 }
 
-/**
- * amdgpu_vram_mgr_usage - how many bytes are used in this domain
- *
- * @man: TTM memory type manager
- *
- * Returns how many bytes are used in this domain.
- */
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
-{
-	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-
-	return atomic64_read(&mgr->usage);
-}
-
 /**
  * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
  *
@@ -672,13 +653,12 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
 {
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 
+	drm_printf(printer, "  vis usage:%llu\n",
+		   amdgpu_vram_mgr_vis_usage(man));
+
 	spin_lock(&mgr->lock);
 	drm_mm_print(&mgr->mm, printer);
 	spin_unlock(&mgr->lock);
-
-	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
-		   man->size, amdgpu_vram_mgr_usage(man) >> 20,
-		   amdgpu_vram_mgr_vis_usage(man) >> 20);
 }
 
 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
@@ -700,11 +680,11 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
 	struct ttm_resource_manager *man = &mgr->manager;
 
 	ttm_resource_manager_init(man, &adev->mman.bdev,
-				  adev->gmc.real_vram_size >> PAGE_SHIFT);
+				  adev->gmc.real_vram_size);
 
 	man->func = &amdgpu_vram_mgr_func;
 
-	drm_mm_init(&mgr->mm, 0, man->size);
+	drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
 	spin_lock_init(&mgr->lock);
 	INIT_LIST_HEAD(&mgr->reservations_pending);
 	INIT_LIST_HEAD(&mgr->reserved_pages);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 24+ messages in thread
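
Worth keeping in mind with this and the previous patch: man->size is now in
bytes, while drm_mm and the TTM placement limits keep counting pages, so the
conversions have to be explicit at the boundaries. The lines below are only a
reminder condensed from the hunks above, showing where the shifts now live:

	/* the manager limit is now in bytes ... */
	ttm_resource_manager_init(man, &adev->mman.bdev,
				  adev->gmc.real_vram_size);

	/* ... while drm_mm and place->lpfn still count pages */
	drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size >> PAGE_SHIFT;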

* [PATCH 12/12] drm/amdgpu: drop amdgpu_gtt_node
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (10 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 11/12] drm/amdgpu: remove VRAM accounting Christian König
@ 2021-11-24 12:44 ` Christian König
  2021-11-26  7:53   ` Huang Rui
  2021-11-26  7:47 ` drm/ttm: moving the LRU into the resource Thomas Hellström
  12 siblings, 1 reply; 24+ messages in thread
From: Christian König @ 2021-11-24 12:44 UTC (permalink / raw)
  To: daniel, thomas.hellstrom, ray.huang, dri-devel

We have the BO pointer in the base structure now as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 49 ++++++++-------------
 1 file changed, 18 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index ce5eeb3c1097..a55bbe1a154c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -26,23 +26,12 @@
 
 #include "amdgpu.h"
 
-struct amdgpu_gtt_node {
-	struct ttm_buffer_object *tbo;
-	struct ttm_range_mgr_node base;
-};
-
 static inline struct amdgpu_gtt_mgr *
 to_gtt_mgr(struct ttm_resource_manager *man)
 {
 	return container_of(man, struct amdgpu_gtt_mgr, manager);
 }
 
-static inline struct amdgpu_gtt_node *
-to_amdgpu_gtt_node(struct ttm_resource *res)
-{
-	return container_of(res, struct amdgpu_gtt_node, base.base);
-}
-
 /**
  * DOC: mem_info_gtt_total
  *
@@ -107,9 +96,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 
-	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
+	return drm_mm_node_allocated(&node->mm_nodes[0]);
 }
 
 /**
@@ -129,15 +118,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
 	uint32_t num_pages = PFN_UP(tbo->base.size);
-	struct amdgpu_gtt_node *node;
+	struct ttm_range_mgr_node *node;
 	int r;
 
-	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
-	node->tbo = tbo;
-	ttm_resource_init(tbo, place, &node->base.base);
+	ttm_resource_init(tbo, place, &node->base);
 	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
 	    ttm_resource_manager_usage(man) > man->size) {
 		r = -ENOSPC;
@@ -146,8 +134,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 
 	if (place->lpfn) {
 		spin_lock(&mgr->lock);
-		r = drm_mm_insert_node_in_range(&mgr->mm,
-						&node->base.mm_nodes[0],
+		r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
 						num_pages, tbo->page_alignment,
 						0, place->fpfn, place->lpfn,
 						DRM_MM_INSERT_BEST);
@@ -155,18 +142,18 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		if (unlikely(r))
 			goto err_free;
 
-		node->base.base.start = node->base.mm_nodes[0].start;
+		node->base.start = node->mm_nodes[0].start;
 	} else {
-		node->base.mm_nodes[0].start = 0;
-		node->base.mm_nodes[0].size = node->base.base.num_pages;
-		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+		node->mm_nodes[0].start = 0;
+		node->mm_nodes[0].size = node->base.num_pages;
+		node->base.start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	*res = &node->base.base;
+	*res = &node->base;
 	return 0;
 
 err_free:
-	ttm_resource_fini(man, &node->base.base);
+	ttm_resource_fini(man, &node->base);
 	kfree(node);
 	return r;
 }
@@ -182,12 +169,12 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 			       struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
 
 	spin_lock(&mgr->lock);
-	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
-		drm_mm_remove_node(&node->base.mm_nodes[0]);
+	if (drm_mm_node_allocated(&node->mm_nodes[0]))
+		drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&mgr->lock);
 
 	ttm_resource_fini(man, res);
@@ -204,16 +191,16 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	struct ttm_range_mgr_node *node;
 	struct amdgpu_device *adev;
-	struct amdgpu_gtt_node *node;
 	struct drm_mm_node *mm_node;
 	int r = 0;
 
 	adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
 	spin_lock(&mgr->lock);
 	drm_mm_for_each_node(mm_node, &mgr->mm) {
-		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-		r = amdgpu_ttm_recover_gart(node->tbo);
+		node = container_of(mm_node, typeof(*node), mm_nodes[0]);
+		r = amdgpu_ttm_recover_gart(node->base.bo);
 		if (r)
 			break;
 	}
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* Re: [PATCH 01/12] drm/ttm: add ttm_resource_fini
  2021-11-24 12:44 ` [PATCH 01/12] drm/ttm: add ttm_resource_fini Christian König
@ 2021-11-26  6:48   ` Huang Rui
  2021-11-26  7:39     ` Christian König
  0 siblings, 1 reply; 24+ messages in thread
From: Huang Rui @ 2021-11-26  6:48 UTC (permalink / raw)
  To: Christian König; +Cc: thomas.hellstrom, dri-devel

On Wed, Nov 24, 2021 at 08:44:19PM +0800, Christian König wrote:
> Make sure we call the common cleanup function in all
> implementations of the resource manager.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c     | 2 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c | 1 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c    | 2 ++
>  drivers/gpu/drm/i915/i915_ttm_buddy_manager.c   | 1 +
>  drivers/gpu/drm/nouveau/nouveau_mem.c           | 3 ++-
>  drivers/gpu/drm/nouveau/nouveau_mem.h           | 3 ++-
>  drivers/gpu/drm/nouveau/nouveau_ttm.c           | 9 +++++----
>  drivers/gpu/drm/ttm/ttm_range_manager.c         | 2 ++
>  drivers/gpu/drm/ttm/ttm_resource.c              | 6 ++++++
>  drivers/gpu/drm/ttm/ttm_sys_manager.c           | 1 +
>  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c   | 2 ++
>  drivers/gpu/drm/vmwgfx/vmwgfx_thp.c             | 3 ++-
>  include/drm/ttm/ttm_resource.h                  | 3 +++
>  13 files changed, 31 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> index 675a72ef305d..ea5470c8c921 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> @@ -169,6 +169,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>  	return 0;
>  
>  err_free:
> +	ttm_resource_fini(man, &node->base.base);
>  	kfree(node);
>  
>  err_out:
> @@ -200,6 +201,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
>  	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
>  		atomic64_sub(res->num_pages, &mgr->used);
>  
> +	ttm_resource_fini(man, res);
>  	kfree(node);
>  }
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
> index d02c8637f909..ffddec08e931 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
> @@ -95,6 +95,7 @@ static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
>  	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
>  
>  	atomic64_sub(res->num_pages, &mgr->used);
> +	ttm_resource_fini(man, res);
>  	kfree(res);
>  }
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> index 7b2b0980ec41..55d68408951d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> @@ -476,6 +476,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
>  	while (i--)
>  		drm_mm_remove_node(&node->mm_nodes[i]);
>  	spin_unlock(&mgr->lock);
> +	ttm_resource_fini(man, &node->base);
>  	kvfree(node);
>  
>  error_sub:
> @@ -515,6 +516,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
>  	atomic64_sub(usage, &mgr->usage);
>  	atomic64_sub(vis_usage, &mgr->vis_usage);
>  
> +	ttm_resource_fini(man, res);
>  	kvfree(node);
>  }
>  
> diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
> index d59fbb019032..ca3ca1f7f850 100644
> --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
> +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
> @@ -123,6 +123,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
>  	i915_buddy_free_list(&bman->mm, &bman_res->blocks);
>  	mutex_unlock(&bman->lock);
>  
> +	ttm_resource_fini(man, res);
>  	kfree(bman_res);
>  }
>  
> diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
> index 2ca3207c13fc..2e517cdc24c9 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_mem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
> @@ -162,11 +162,12 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
>  }
>  
>  void
> -nouveau_mem_del(struct ttm_resource *reg)
> +nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
>  {
>  	struct nouveau_mem *mem = nouveau_mem(reg);
>  
>  	nouveau_mem_fini(mem);
> +	ttm_resource_fini(man, reg);
>  	kfree(mem);
>  }
>  
> diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
> index 2c01166a90f2..325551eba5cd 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_mem.h
> +++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
> @@ -23,7 +23,8 @@ nouveau_mem(struct ttm_resource *reg)
>  
>  int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
>  		    struct ttm_resource **);
> -void nouveau_mem_del(struct ttm_resource *);
> +void nouveau_mem_del(struct ttm_resource_manager *man,
> +		     struct ttm_resource *);
>  int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
>  int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
>  void nouveau_mem_fini(struct nouveau_mem *);
> diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> index 2ca9d9a9e5d5..91ef33f8f22c 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> @@ -36,9 +36,10 @@
>  #include <core/tegra.h>
>  
>  static void
> -nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
> +nouveau_manager_del(struct ttm_resource_manager *man,
> +		    struct ttm_resource *reg)
>  {
> -	nouveau_mem_del(reg);
> +	nouveau_mem_del(man, reg);
>  }
>  
>  static int
> @@ -62,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
>  
>  	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
>  	if (ret) {
> -		nouveau_mem_del(*res);
> +		nouveau_mem_del(man, *res);
>  		return ret;
>  	}
>  
> @@ -118,7 +119,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
>  	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
>  			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
>  	if (ret) {
> -		nouveau_mem_del(*res);
> +		nouveau_mem_del(man, *res);
>  		return ret;
>  	}
>  
> diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
> index 67d68a4a8640..25fcf0d63c2d 100644
> --- a/drivers/gpu/drm/ttm/ttm_range_manager.c
> +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
> @@ -89,6 +89,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
>  	spin_unlock(&rman->lock);
>  
>  	if (unlikely(ret)) {
> +		ttm_resource_fini(man, *res);
>  		kfree(node);
>  		return ret;
>  	}
> @@ -108,6 +109,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
>  	drm_mm_remove_node(&node->mm_nodes[0]);
>  	spin_unlock(&rman->lock);
>  
> +	ttm_resource_fini(man, res);
>  	kfree(node);
>  }
>  
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 035d71332d18..89bcfe22a0ca 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -44,6 +44,12 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>  }
>  EXPORT_SYMBOL(ttm_resource_init);
>  
> +void ttm_resource_fini(struct ttm_resource_manager *man,
> +		       struct ttm_resource *res)
> +{

Maybe we should clean up the res data here, e.g. memset(res, 0, sizeof(*res)).
The previous data should be invalid once we call fini.

Thanks,
Ray

> +}
> +EXPORT_SYMBOL(ttm_resource_fini);
> +
>  int ttm_resource_alloc(struct ttm_buffer_object *bo,
>  		       const struct ttm_place *place,
>  		       struct ttm_resource **res_ptr)
> diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
> index 63aca52f75e1..135394dcca95 100644
> --- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
> +++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
> @@ -23,6 +23,7 @@ static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
>  static void ttm_sys_man_free(struct ttm_resource_manager *man,
>  			     struct ttm_resource *res)
>  {
> +	ttm_resource_fini(man, res);
>  	kfree(res);
>  }
>  
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> index b2c4af331c9d..bfd686bb8d19 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> @@ -116,6 +116,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
>  	gman->used_gmr_pages -= (*res)->num_pages;
>  	spin_unlock(&gman->lock);
>  	ida_free(&gman->gmr_ida, id);
> +	ttm_resource_fini(man, *res);
>  	kfree(*res);
>  	return -ENOSPC;
>  }
> @@ -129,6 +130,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
>  	spin_lock(&gman->lock);
>  	gman->used_gmr_pages -= res->num_pages;
>  	spin_unlock(&gman->lock);
> +	ttm_resource_fini(man, res);
>  	kfree(res);
>  }
>  
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> index 2a3d3468e4e0..4fcbd94ccc11 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> @@ -104,6 +104,7 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
>  	spin_unlock(&rman->lock);
>  
>  	if (unlikely(ret)) {
> +		ttm_resource_fini(man, &node->base);
>  		kfree(node);
>  	} else {
>  		node->base.start = node->mm_nodes[0].start;
> @@ -122,7 +123,7 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
>  	spin_lock(&rman->lock);
>  	drm_mm_remove_node(&node->mm_nodes[0]);
>  	spin_unlock(&rman->lock);
> -
> +	ttm_resource_fini(man, res);
>  	kfree(node);
>  }
>  
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 5952051091cd..df1f06b7b504 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -261,6 +261,9 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
>  void ttm_resource_init(struct ttm_buffer_object *bo,
>                         const struct ttm_place *place,
>                         struct ttm_resource *res);
> +void ttm_resource_fini(struct ttm_resource_manager *man,
> +		       struct ttm_resource *res);
> +
>  int ttm_resource_alloc(struct ttm_buffer_object *bo,
>  		       const struct ttm_place *place,
>  		       struct ttm_resource **res);
> -- 
> 2.25.1
> 

^ permalink raw reply	[flat|nested] 24+ messages in thread
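
In its simplest form, the cleanup Ray asks about would amount to something
like the sketch below. Whether clearing the whole structure is safe depends on
which fields callers still look at after fini, so this is only an illustration
of the review comment, not the patch as posted:

	void ttm_resource_fini(struct ttm_resource_manager *man,
			       struct ttm_resource *res)
	{
		/* Sketch of the review suggestion: poison the resource so
		 * stale fields cannot be reused after fini.  Not what the
		 * posted patch does. */
		memset(res, 0, sizeof(*res));
	}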

* Re: [PATCH 02/12] drm/ttm: add back a reference to the bdev to the res manager
  2021-11-24 12:44 ` [PATCH 02/12] drm/ttm: add back a reference to the bdev to the res manager Christian König
@ 2021-11-26  6:52   ` Huang Rui
  0 siblings, 0 replies; 24+ messages in thread
From: Huang Rui @ 2021-11-26  6:52 UTC (permalink / raw)
  To: Christian König; +Cc: thomas.hellstrom, dri-devel

On Wed, Nov 24, 2021 at 08:44:20PM +0800, Christian König wrote:
> It is simply a lot cleaner to have this around instead of adding
> the device throughout the call chain.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Huang Rui <ray.huang@amd.com>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c     |  3 ++-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c    |  3 ++-
>  drivers/gpu/drm/i915/i915_ttm_buddy_manager.c   |  2 +-
>  drivers/gpu/drm/nouveau/nouveau_ttm.c           |  4 ++--
>  drivers/gpu/drm/ttm/ttm_range_manager.c         |  2 +-
>  drivers/gpu/drm/ttm/ttm_resource.c              |  3 +++
>  drivers/gpu/drm/ttm/ttm_sys_manager.c           |  2 +-
>  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c   |  2 +-
>  drivers/gpu/drm/vmwgfx/vmwgfx_thp.c             |  2 +-
>  include/drm/ttm/ttm_resource.h                  | 16 +++++++++-------
>  11 files changed, 24 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> index ea5470c8c921..9e7685a4878c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> @@ -293,7 +293,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
>  	man->use_tt = true;
>  	man->func = &amdgpu_gtt_mgr_func;
>  
> -	ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);
> +	ttm_resource_manager_init(man, &adev->mman.bdev,
> +				  gtt_size >> PAGE_SHIFT);
>  
>  	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
>  	size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
> index ffddec08e931..6f7189d32f0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
> @@ -153,7 +153,7 @@ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
>  	man->use_tt = true;
>  	man->func = &amdgpu_preempt_mgr_func;
>  
> -	ttm_resource_manager_init(man, (1 << 30));
> +	ttm_resource_manager_init(man, &adev->mman.bdev, (1 << 30));
>  
>  	atomic64_set(&mgr->used, 0);
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> index 55d68408951d..ddd0b6d74070 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> @@ -699,7 +699,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
>  	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
>  	struct ttm_resource_manager *man = &mgr->manager;
>  
> -	ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);
> +	ttm_resource_manager_init(man, &adev->mman.bdev,
> +				  adev->gmc.real_vram_size >> PAGE_SHIFT);
>  
>  	man->func = &amdgpu_vram_mgr_func;
>  
> diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
> index ca3ca1f7f850..ef535e04a88a 100644
> --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
> +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
> @@ -203,7 +203,7 @@ int i915_ttm_buddy_man_init(struct ttm_device *bdev,
>  	man = &bman->manager;
>  	man->use_tt = use_tt;
>  	man->func = &i915_ttm_buddy_manager_func;
> -	ttm_resource_manager_init(man, bman->mm.size >> PAGE_SHIFT);
> +	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);
>  
>  	ttm_resource_manager_set_used(man, true);
>  	ttm_set_driver_manager(bdev, type, man);
> diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> index 91ef33f8f22c..85f1f5a0fe5d 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> @@ -164,7 +164,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm)
>  
>  		man->func = &nouveau_vram_manager;
>  
> -		ttm_resource_manager_init(man,
> +		ttm_resource_manager_init(man, &drm->ttm.bdev,
>  					  drm->gem.vram_available >> PAGE_SHIFT);
>  		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
>  		ttm_resource_manager_set_used(man, true);
> @@ -211,7 +211,7 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
>  
>  	man->func = func;
>  	man->use_tt = true;
> -	ttm_resource_manager_init(man, size_pages);
> +	ttm_resource_manager_init(man, &drm->ttm.bdev, size_pages);
>  	ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
>  	ttm_resource_manager_set_used(man, true);
>  	return 0;
> diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
> index 25fcf0d63c2d..062dabe6a10e 100644
> --- a/drivers/gpu/drm/ttm/ttm_range_manager.c
> +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
> @@ -156,7 +156,7 @@ int ttm_range_man_init_nocheck(struct ttm_device *bdev,
>  
>  	man->func = &ttm_range_manager_func;
>  
> -	ttm_resource_manager_init(man, p_size);
> +	ttm_resource_manager_init(man, bdev, p_size);
>  
>  	drm_mm_init(&rman->mm, 0, p_size);
>  	spin_lock_init(&rman->lock);
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 89bcfe22a0ca..41e7bf195168 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -126,16 +126,19 @@ EXPORT_SYMBOL(ttm_resource_compat);
>   * ttm_resource_manager_init
>   *
>   * @man: memory manager object to init
> + * @bdev: ttm device this manager belongs to
>   * @p_size: size managed area in pages.
>   *
>   * Initialise core parts of a manager object.
>   */
>  void ttm_resource_manager_init(struct ttm_resource_manager *man,
> +			       struct ttm_device *bdev,
>  			       unsigned long p_size)
>  {
>  	unsigned i;
>  
>  	spin_lock_init(&man->move_lock);
> +	man->bdev = bdev;
>  	man->size = p_size;
>  
>  	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
> diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
> index 135394dcca95..2ced169513cb 100644
> --- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
> +++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
> @@ -43,7 +43,7 @@ void ttm_sys_man_init(struct ttm_device *bdev)
>  	man->use_tt = true;
>  	man->func = &ttm_sys_manager_func;
>  
> -	ttm_resource_manager_init(man, 0);
> +	ttm_resource_manager_init(man, bdev, 0);
>  	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
>  	ttm_resource_manager_set_used(man, true);
>  }
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> index bfd686bb8d19..4fe4eeb95bf3 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> @@ -150,7 +150,7 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
>  	man->func = &vmw_gmrid_manager_func;
>  	/* TODO: This is most likely not correct */
>  	man->use_tt = true;
> -	ttm_resource_manager_init(man, 0);
> +	ttm_resource_manager_init(man, &dev_priv->bdev, 0);
>  	spin_lock_init(&gman->lock);
>  	gman->used_gmr_pages = 0;
>  	ida_init(&gman->gmr_ida);
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> index 4fcbd94ccc11..b8cd89cd624c 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> @@ -135,7 +135,7 @@ int vmw_thp_init(struct vmw_private *dev_priv)
>  	if (!rman)
>  		return -ENOMEM;
>  
> -	ttm_resource_manager_init(&rman->manager,
> +	ttm_resource_manager_init(&rman->manager, &dev_priv->bdev,
>  				  dev_priv->vram_size >> PAGE_SHIFT);
>  
>  	rman->manager.func = &vmw_thp_func;
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index df1f06b7b504..6bf37383002b 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -105,11 +105,11 @@ struct ttm_resource_manager_func {
>   * @use_type: The memory type is enabled.
>   * @use_tt: If a TT object should be used for the backing store.
>   * @size: Size of the managed region.
> + * @bdev: ttm device this manager belongs to
>   * @func: structure pointer implementing the range manager. See above
>   * @move_lock: lock for move fence
> - * static information. bdev::driver::io_mem_free is never used.
> - * @lru: The lru list for this memory type.
>   * @move: The fence of the last pipelined move operation.
> + * @lru: The lru list for this memory type.
>   *
>   * This structure is used to identify and manage memory types for a device.
>   */
> @@ -119,20 +119,21 @@ struct ttm_resource_manager {
>  	 */
>  	bool use_type;
>  	bool use_tt;
> +	struct ttm_device *bdev;
>  	uint64_t size;
>  	const struct ttm_resource_manager_func *func;
>  	spinlock_t move_lock;
>  
>  	/*
> -	 * Protected by the global->lru_lock.
> +	 * Protected by @move_lock.
>  	 */
> -
> -	struct list_head lru[TTM_MAX_BO_PRIORITY];
> +	struct dma_fence *move;
>  
>  	/*
> -	 * Protected by @move_lock.
> +	 * Protected by the global->lru_lock.
>  	 */
> -	struct dma_fence *move;
> +
> +	struct list_head lru[TTM_MAX_BO_PRIORITY];
>  };
>  
>  /**
> @@ -272,6 +273,7 @@ bool ttm_resource_compat(struct ttm_resource *res,
>  			 struct ttm_placement *placement);
>  
>  void ttm_resource_manager_init(struct ttm_resource_manager *man,
> +			       struct ttm_device *bdev,
>  			       unsigned long p_size);
>  
>  int ttm_resource_manager_evict_all(struct ttm_device *bdev,
> -- 
> 2.25.1
> 

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 03/12] drm/ttm: add a weak BO reference to the resource v3
  2021-11-24 12:44 ` [PATCH 03/12] drm/ttm: add a weak BO reference to the resource v3 Christian König
@ 2021-11-26  7:10   ` Huang Rui
  2021-11-26  7:43   ` Thomas Hellström
  1 sibling, 0 replies; 24+ messages in thread
From: Huang Rui @ 2021-11-26  7:10 UTC (permalink / raw)
  To: Christian König; +Cc: thomas.hellstrom, dri-devel

On Wed, Nov 24, 2021 at 08:44:21PM +0800, Christian König wrote:
> Keep track of which BO a resource was allocated for.
> This is necessary to move the LRU handling into the resources.
> 
> A bit problematic is i915, since it tries to use the resource
> interface without a BO, which is illegal from a conceptual
> point of view.
> 
> v2: Document that this is a weak reference and add a workaround for i915
> v3: further document that this is protected by ttm_device::lru_lock and
>     clarify the i915 workaround
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/ttm/ttm_bo_util.c  | 7 +++++--
>  drivers/gpu/drm/ttm/ttm_resource.c | 9 +++++++++
>  include/drm/ttm/ttm_resource.h     | 4 ++++
>  3 files changed, 18 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index 95de2691ee7c..a2e3a9626198 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -237,6 +237,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
>  	if (bo->type != ttm_bo_type_sg)
>  		fbo->base.base.resv = &fbo->base.base._resv;
>  
> +	if (fbo->base.resource) {
> +		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
> +		bo->resource = NULL;
> +	}
> +
>  	dma_resv_init(&fbo->base.base._resv);
>  	fbo->base.base.dev = NULL;
>  	ret = dma_resv_trylock(&fbo->base.base._resv);
> @@ -512,7 +517,6 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
>  		ghost_obj->ttm = NULL;
>  	else
>  		bo->ttm = NULL;
> -	bo->resource = NULL;
>  
>  	dma_resv_unlock(&ghost_obj->base._resv);
>  	ttm_bo_put(ghost_obj);
> @@ -637,7 +641,6 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
>  	dma_resv_unlock(&ghost->base._resv);
>  	ttm_bo_put(ghost);
>  	bo->ttm = ttm;
> -	bo->resource = NULL;
>  	ttm_bo_assign_mem(bo, sys_res);
>  	return 0;
>  
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 41e7bf195168..7fdd58b53c61 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -41,6 +41,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>  	res->bus.offset = 0;
>  	res->bus.is_iomem = false;
>  	res->bus.caching = ttm_cached;
> +	res->bo = bo;
>  }
>  EXPORT_SYMBOL(ttm_resource_init);
>  
> @@ -122,6 +123,14 @@ bool ttm_resource_compat(struct ttm_resource *res,
>  }
>  EXPORT_SYMBOL(ttm_resource_compat);
>  
> +void ttm_resource_set_bo(struct ttm_resource *res,

How about renaming it to ttm_resource_set_bo_with_lru_locked?

It's just a minor suggestion.

Acked-by: Huang Rui <ray.huang@amd.com>

> +			 struct ttm_buffer_object *bo)
> +{
> +	spin_lock(&bo->bdev->lru_lock);
> +	res->bo = bo;
> +	spin_unlock(&bo->bdev->lru_lock);
> +}
> +
>  /**
>   * ttm_resource_manager_init
>   *
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 6bf37383002b..69eea9d6399b 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -161,6 +161,7 @@ struct ttm_bus_placement {
>   * @mem_type: Resource type of the allocation.
>   * @placement: Placement flags.
>   * @bus: Placement on io bus accessible to the CPU
> + * @bo: weak reference to the BO, protected by ttm_device::lru_lock
>   *
>   * Structure indicating the placement and space resources used by a
>   * buffer object.
> @@ -171,6 +172,7 @@ struct ttm_resource {
>  	uint32_t mem_type;
>  	uint32_t placement;
>  	struct ttm_bus_placement bus;
> +	struct ttm_buffer_object *bo;
>  };
>  
>  /**
> @@ -271,6 +273,8 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
>  void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
>  bool ttm_resource_compat(struct ttm_resource *res,
>  			 struct ttm_placement *placement);
> +void ttm_resource_set_bo(struct ttm_resource *res,
> +			 struct ttm_buffer_object *bo);
>  
>  void ttm_resource_manager_init(struct ttm_resource_manager *man,
>  			       struct ttm_device *bdev,
> -- 
> 2.25.1
> 

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 04/12] drm/ttm: add common accounting to the resource mgr v2
  2021-11-24 12:44 ` [PATCH 04/12] drm/ttm: add common accounting to the resource mgr v2 Christian König
@ 2021-11-26  7:35   ` Huang Rui
  0 siblings, 0 replies; 24+ messages in thread
From: Huang Rui @ 2021-11-26  7:35 UTC (permalink / raw)
  To: Christian König; +Cc: thomas.hellstrom, dri-devel

On Wed, Nov 24, 2021 at 08:44:22PM +0800, Christian König wrote:
> It makes sense to have this in the common manager for debugging and
> accounting of how much resources are used.
> 
> v2: cleanup kerneldoc a bit
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Huang Rui <ray.huang@amd.com>

> ---
>  drivers/gpu/drm/ttm/ttm_resource.c |  8 ++++++++
>  include/drm/ttm/ttm_resource.h     | 20 +++++++++++++++++++-
>  2 files changed, 27 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 7fdd58b53c61..b8362492980d 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -33,6 +33,8 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>                         const struct ttm_place *place,
>                         struct ttm_resource *res)
>  {
> +	struct ttm_resource_manager *man;
> +
>  	res->start = 0;
>  	res->num_pages = PFN_UP(bo->base.size);
>  	res->mem_type = place->mem_type;
> @@ -42,12 +44,16 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>  	res->bus.is_iomem = false;
>  	res->bus.caching = ttm_cached;
>  	res->bo = bo;
> +
> +	man = ttm_manager_type(bo->bdev, place->mem_type);
> +	atomic64_add(bo->base.size, &man->usage);
>  }
>  EXPORT_SYMBOL(ttm_resource_init);
>  
>  void ttm_resource_fini(struct ttm_resource_manager *man,
>  		       struct ttm_resource *res)
>  {
> +	atomic64_sub(res->bo->base.size, &man->usage);
>  }
>  EXPORT_SYMBOL(ttm_resource_fini);
>  
> @@ -149,6 +155,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
>  	spin_lock_init(&man->move_lock);
>  	man->bdev = bdev;
>  	man->size = p_size;
> +	atomic64_set(&man->usage, 0);
>  
>  	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
>  		INIT_LIST_HEAD(&man->lru[i]);
> @@ -221,6 +228,7 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
>  	drm_printf(p, "  use_type: %d\n", man->use_type);
>  	drm_printf(p, "  use_tt: %d\n", man->use_tt);
>  	drm_printf(p, "  size: %llu\n", man->size);
> +	drm_printf(p, "  usage: %llu\n", atomic64_read(&man->usage));
>  	if (man->func->debug)
>  		man->func->debug(man, p);
>  }
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 69eea9d6399b..3d391279b33f 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -27,6 +27,7 @@
>  
>  #include <linux/types.h>
>  #include <linux/mutex.h>
> +#include <linux/atomic.h>
>  #include <linux/dma-buf-map.h>
>  #include <linux/dma-fence.h>
>  #include <drm/drm_print.h>
> @@ -132,8 +133,12 @@ struct ttm_resource_manager {
>  	/*
>  	 * Protected by the global->lru_lock.
>  	 */
> -
>  	struct list_head lru[TTM_MAX_BO_PRIORITY];
> +
> +	/**
> +	 * @usage: How much of the region is used, has its own protection.
> +	 */
> +	atomic64_t usage;
>  };
>  
>  /**
> @@ -261,6 +266,19 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
>  	man->move = NULL;
>  }
>  
> +/**
> + * ttm_resource_manager_usage
> + *
> + * @man: A memory manager object.
> + *
> + * Return how many resources are currently used.
> + */
> +static inline uint64_t
> +ttm_resource_manager_usage(struct ttm_resource_manager *man)
> +{
> +	return atomic64_read(&man->usage);
> +}
> +
>  void ttm_resource_init(struct ttm_buffer_object *bo,
>                         const struct ttm_place *place,
>                         struct ttm_resource *res);
> -- 
> 2.25.1
> 

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 01/12] drm/ttm: add ttm_resource_fini
  2021-11-26  6:48   ` Huang Rui
@ 2021-11-26  7:39     ` Christian König
  2021-11-26  7:47       ` Huang Rui
  0 siblings, 1 reply; 24+ messages in thread
From: Christian König @ 2021-11-26  7:39 UTC (permalink / raw)
  To: Huang Rui; +Cc: thomas.hellstrom, dri-devel

Am 26.11.21 um 07:48 schrieb Huang Rui:
> On Wed, Nov 24, 2021 at 08:44:19PM +0800, Christian König wrote:
>> Make sure we call the common cleanup function in all
>> implementations of the resource manager.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c     | 2 ++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c | 1 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c    | 2 ++
>>   drivers/gpu/drm/i915/i915_ttm_buddy_manager.c   | 1 +
>>   drivers/gpu/drm/nouveau/nouveau_mem.c           | 3 ++-
>>   drivers/gpu/drm/nouveau/nouveau_mem.h           | 3 ++-
>>   drivers/gpu/drm/nouveau/nouveau_ttm.c           | 9 +++++----
>>   drivers/gpu/drm/ttm/ttm_range_manager.c         | 2 ++
>>   drivers/gpu/drm/ttm/ttm_resource.c              | 6 ++++++
>>   drivers/gpu/drm/ttm/ttm_sys_manager.c           | 1 +
>>   drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c   | 2 ++
>>   drivers/gpu/drm/vmwgfx/vmwgfx_thp.c             | 3 ++-
>>   include/drm/ttm/ttm_resource.h                  | 3 +++
>>   13 files changed, 31 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> index 675a72ef305d..ea5470c8c921 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> @@ -169,6 +169,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>>   	return 0;
>>   
>>   err_free:
>> +	ttm_resource_fini(man, &node->base.base);
>>   	kfree(node);
>>   
>>   err_out:
>> @@ -200,6 +201,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
>>   	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
>>   		atomic64_sub(res->num_pages, &mgr->used);
>>   
>> +	ttm_resource_fini(man, res);
>>   	kfree(node);
>>   }
>>   
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
>> index d02c8637f909..ffddec08e931 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
>> @@ -95,6 +95,7 @@ static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
>>   	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
>>   
>>   	atomic64_sub(res->num_pages, &mgr->used);
>> +	ttm_resource_fini(man, res);
>>   	kfree(res);
>>   }
>>   
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
>> index 7b2b0980ec41..55d68408951d 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
>> @@ -476,6 +476,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
>>   	while (i--)
>>   		drm_mm_remove_node(&node->mm_nodes[i]);
>>   	spin_unlock(&mgr->lock);
>> +	ttm_resource_fini(man, &node->base);
>>   	kvfree(node);
>>   
>>   error_sub:
>> @@ -515,6 +516,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
>>   	atomic64_sub(usage, &mgr->usage);
>>   	atomic64_sub(vis_usage, &mgr->vis_usage);
>>   
>> +	ttm_resource_fini(man, res);
>>   	kvfree(node);
>>   }
>>   
>> diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
>> index d59fbb019032..ca3ca1f7f850 100644
>> --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
>> +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
>> @@ -123,6 +123,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
>>   	i915_buddy_free_list(&bman->mm, &bman_res->blocks);
>>   	mutex_unlock(&bman->lock);
>>   
>> +	ttm_resource_fini(man, res);
>>   	kfree(bman_res);
>>   }
>>   
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
>> index 2ca3207c13fc..2e517cdc24c9 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_mem.c
>> +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
>> @@ -162,11 +162,12 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
>>   }
>>   
>>   void
>> -nouveau_mem_del(struct ttm_resource *reg)
>> +nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
>>   {
>>   	struct nouveau_mem *mem = nouveau_mem(reg);
>>   
>>   	nouveau_mem_fini(mem);
>> +	ttm_resource_fini(man, reg);
>>   	kfree(mem);
>>   }
>>   
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
>> index 2c01166a90f2..325551eba5cd 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_mem.h
>> +++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
>> @@ -23,7 +23,8 @@ nouveau_mem(struct ttm_resource *reg)
>>   
>>   int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
>>   		    struct ttm_resource **);
>> -void nouveau_mem_del(struct ttm_resource *);
>> +void nouveau_mem_del(struct ttm_resource_manager *man,
>> +		     struct ttm_resource *);
>>   int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
>>   int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
>>   void nouveau_mem_fini(struct nouveau_mem *);
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
>> index 2ca9d9a9e5d5..91ef33f8f22c 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
>> +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
>> @@ -36,9 +36,10 @@
>>   #include <core/tegra.h>
>>   
>>   static void
>> -nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
>> +nouveau_manager_del(struct ttm_resource_manager *man,
>> +		    struct ttm_resource *reg)
>>   {
>> -	nouveau_mem_del(reg);
>> +	nouveau_mem_del(man, reg);
>>   }
>>   
>>   static int
>> @@ -62,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
>>   
>>   	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
>>   	if (ret) {
>> -		nouveau_mem_del(*res);
>> +		nouveau_mem_del(man, *res);
>>   		return ret;
>>   	}
>>   
>> @@ -118,7 +119,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
>>   	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
>>   			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
>>   	if (ret) {
>> -		nouveau_mem_del(*res);
>> +		nouveau_mem_del(man, *res);
>>   		return ret;
>>   	}
>>   
>> diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
>> index 67d68a4a8640..25fcf0d63c2d 100644
>> --- a/drivers/gpu/drm/ttm/ttm_range_manager.c
>> +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
>> @@ -89,6 +89,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
>>   	spin_unlock(&rman->lock);
>>   
>>   	if (unlikely(ret)) {
>> +		ttm_resource_fini(man, *res);
>>   		kfree(node);
>>   		return ret;
>>   	}
>> @@ -108,6 +109,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
>>   	drm_mm_remove_node(&node->mm_nodes[0]);
>>   	spin_unlock(&rman->lock);
>>   
>> +	ttm_resource_fini(man, res);
>>   	kfree(node);
>>   }
>>   
>> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
>> index 035d71332d18..89bcfe22a0ca 100644
>> --- a/drivers/gpu/drm/ttm/ttm_resource.c
>> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
>> @@ -44,6 +44,12 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>>   }
>>   EXPORT_SYMBOL(ttm_resource_init);
>>   
>> +void ttm_resource_fini(struct ttm_resource_manager *man,
>> +		       struct ttm_resource *res)
>> +{
> Maybe we should clean up the res data here, e.g. memset(res, 0, sizeof(*res)).
> The previous data should be invalidated when we call fini.

Nah, calling _fini means the resource structure is about to be freed.

memset in this situation doesn't make much sense.

Christian.
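
For reference, condensed from the diffs in patches 01 and 04 of this series
(simplified, not the exact upstream code), the init/fini pair ends up as a
charge/uncharge of the manager's usage counter, with the driver still doing
the actual kfree():

	void ttm_resource_init(struct ttm_buffer_object *bo,
			       const struct ttm_place *place,
			       struct ttm_resource *res)
	{
		struct ttm_resource_manager *man;

		res->start = 0;
		res->num_pages = PFN_UP(bo->base.size);
		res->mem_type = place->mem_type;
		res->bo = bo;

		/* charge the manager for the new resource */
		man = ttm_manager_type(bo->bdev, place->mem_type);
		atomic64_add(bo->base.size, &man->usage);
	}

	void ttm_resource_fini(struct ttm_resource_manager *man,
			       struct ttm_resource *res)
	{
		/* release the accounting again, the caller frees the memory */
		atomic64_sub(res->bo->base.size, &man->usage);
	}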

>
> Thanks,
> Ray
>
>> +}
>> +EXPORT_SYMBOL(ttm_resource_fini);
>> +
>>   int ttm_resource_alloc(struct ttm_buffer_object *bo,
>>   		       const struct ttm_place *place,
>>   		       struct ttm_resource **res_ptr)
>> diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
>> index 63aca52f75e1..135394dcca95 100644
>> --- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
>> +++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
>> @@ -23,6 +23,7 @@ static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
>>   static void ttm_sys_man_free(struct ttm_resource_manager *man,
>>   			     struct ttm_resource *res)
>>   {
>> +	ttm_resource_fini(man, res);
>>   	kfree(res);
>>   }
>>   
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
>> index b2c4af331c9d..bfd686bb8d19 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
>> @@ -116,6 +116,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
>>   	gman->used_gmr_pages -= (*res)->num_pages;
>>   	spin_unlock(&gman->lock);
>>   	ida_free(&gman->gmr_ida, id);
>> +	ttm_resource_fini(man, *res);
>>   	kfree(*res);
>>   	return -ENOSPC;
>>   }
>> @@ -129,6 +130,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
>>   	spin_lock(&gman->lock);
>>   	gman->used_gmr_pages -= res->num_pages;
>>   	spin_unlock(&gman->lock);
>> +	ttm_resource_fini(man, res);
>>   	kfree(res);
>>   }
>>   
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
>> index 2a3d3468e4e0..4fcbd94ccc11 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
>> @@ -104,6 +104,7 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
>>   	spin_unlock(&rman->lock);
>>   
>>   	if (unlikely(ret)) {
>> +		ttm_resource_fini(man, &node->base);
>>   		kfree(node);
>>   	} else {
>>   		node->base.start = node->mm_nodes[0].start;
>> @@ -122,7 +123,7 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
>>   	spin_lock(&rman->lock);
>>   	drm_mm_remove_node(&node->mm_nodes[0]);
>>   	spin_unlock(&rman->lock);
>> -
>> +	ttm_resource_fini(man, res);
>>   	kfree(node);
>>   }
>>   
>> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
>> index 5952051091cd..df1f06b7b504 100644
>> --- a/include/drm/ttm/ttm_resource.h
>> +++ b/include/drm/ttm/ttm_resource.h
>> @@ -261,6 +261,9 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
>>   void ttm_resource_init(struct ttm_buffer_object *bo,
>>                          const struct ttm_place *place,
>>                          struct ttm_resource *res);
>> +void ttm_resource_fini(struct ttm_resource_manager *man,
>> +		       struct ttm_resource *res);
>> +
>>   int ttm_resource_alloc(struct ttm_buffer_object *bo,
>>   		       const struct ttm_place *place,
>>   		       struct ttm_resource **res);
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 06/12] drm/ttm: add resource iterator
  2021-11-24 12:44 ` [PATCH 06/12] drm/ttm: add resource iterator Christian König
@ 2021-11-26  7:43   ` Huang Rui
  0 siblings, 0 replies; 24+ messages in thread
From: Huang Rui @ 2021-11-26  7:43 UTC (permalink / raw)
  To: Christian König; +Cc: thomas.hellstrom, dri-devel

On Wed, Nov 24, 2021 at 08:44:24PM +0800, Christian König wrote:
> Instead of duplicating that at different places add an iterator over all
> the resources in a resource manager.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Huang Rui <ray.huang@amd.com>

> ---
>  drivers/gpu/drm/ttm/ttm_bo.c       | 41 +++++++++++----------------
>  drivers/gpu/drm/ttm/ttm_device.c   | 26 ++++++++---------
>  drivers/gpu/drm/ttm/ttm_resource.c | 45 ++++++++++++++++++++++++++++++
>  include/drm/ttm/ttm_resource.h     | 23 +++++++++++++++
>  4 files changed, 95 insertions(+), 40 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 5bf5a9442856..dd8b9a99c180 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -580,38 +580,29 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
>  			struct ww_acquire_ctx *ticket)
>  {
>  	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
> +	struct ttm_resource_cursor cursor;
>  	struct ttm_resource *res;
>  	bool locked = false;
> -	unsigned i;
>  	int ret;
>  
>  	spin_lock(&bdev->lru_lock);
> -	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
> -		list_for_each_entry(res, &man->lru[i], lru) {
> -			bool busy;
> -
> -			bo = res->bo;
> -			if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
> -							    &locked, &busy)) {
> -				if (busy && !busy_bo && ticket !=
> -				    dma_resv_locking_ctx(bo->base.resv))
> -					busy_bo = bo;
> -				continue;
> -			}
> -
> -			if (!ttm_bo_get_unless_zero(bo)) {
> -				if (locked)
> -					dma_resv_unlock(bo->base.resv);
> -				continue;
> -			}
> -			break;
> +	ttm_resource_manager_for_each_res(man, &cursor, res) {
> +		bool busy;
> +
> +		if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
> +						    &locked, &busy)) {
> +			if (busy && !busy_bo && ticket !=
> +			    dma_resv_locking_ctx(bo->base.resv))
> +				busy_bo = res->bo;
> +			continue;
>  		}
>  
> -		/* If the inner loop terminated early, we have our candidate */
> -		if (&res->lru != &man->lru[i])
> -			break;
> -
> -		bo = NULL;
> +		if (!ttm_bo_get_unless_zero(res->bo)) {
> +			if (locked)
> +				dma_resv_unlock(res->bo->base.resv);
> +			continue;
> +		}
> +		bo = res->bo;
>  	}
>  
>  	if (!bo) {
> diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
> index ba35887147ba..a0562ab386f5 100644
> --- a/drivers/gpu/drm/ttm/ttm_device.c
> +++ b/drivers/gpu/drm/ttm/ttm_device.c
> @@ -142,10 +142,10 @@ EXPORT_SYMBOL(ttm_global_swapout);
>  int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
>  		       gfp_t gfp_flags)
>  {
> +	struct ttm_resource_cursor cursor;
>  	struct ttm_resource_manager *man;
> -	struct ttm_buffer_object *bo;
>  	struct ttm_resource *res;
> -	unsigned i, j;
> +	unsigned i;
>  	int ret;
>  
>  	spin_lock(&bdev->lru_lock);
> @@ -154,20 +154,16 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
>  		if (!man || !man->use_tt)
>  			continue;
>  
> -		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
> -			list_for_each_entry(res, &man->lru[j], lru) {
> -				uint32_t num_pages;
> -
> -				bo = res->bo;
> -				num_pages = PFN_UP(bo->base.size);
> +		ttm_resource_manager_for_each_res(man, &cursor, res) {
> +			struct ttm_buffer_object *bo = res->bo;
> +			uint32_t num_pages = PFN_UP(bo->base.size);
>  
> -				ret = ttm_bo_swapout(bo, ctx, gfp_flags);
> -				/* ttm_bo_swapout has dropped the lru_lock */
> -				if (!ret)
> -					return num_pages;
> -				if (ret != -EBUSY)
> -					return ret;
> -			}
> +			ret = ttm_bo_swapout(bo, ctx, gfp_flags);
> +			/* ttm_bo_swapout has dropped the lru_lock */
> +			if (!ret)
> +				return num_pages;
> +			if (ret != -EBUSY)
> +				return ret;
>  		}
>  	}
>  	spin_unlock(&bdev->lru_lock);
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 450e665c357b..9e68d36a1546 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -354,6 +354,51 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
>  }
>  EXPORT_SYMBOL(ttm_resource_manager_debug);
>  
> +/**
> + * ttm_resource_manager_first
> + *
> + * @man: resource manager to iterate over
> + * @cursor: cursor to record the position
> + *
> + * Returns the first resource from the resource manager.
> + */
> +struct ttm_resource *
> +ttm_resource_manager_first(struct ttm_resource_manager *man,
> +			   struct ttm_resource_cursor *cursor)
> +{
> +	struct ttm_resource *res;
> +
> +	for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
> +	     ++cursor->priority)
> +		list_for_each_entry(res, &man->lru[cursor->priority], lru)
> +			return res;
> +
> +	return NULL;
> +}
> +
> +/**
> + * ttm_resource_manager_next
> + *
> + * @man: resource manager to iterate over
> + * @cursor: cursor to record the position
> + *
> + * Returns the next resource from the resource manager.
> + */
> +struct ttm_resource *
> +ttm_resource_manager_next(struct ttm_resource_manager *man,
> +			  struct ttm_resource_cursor *cursor,
> +			  struct ttm_resource *res)
> +{
> +	list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
> +		return res;
> +
> +	for (; cursor->priority < TTM_MAX_BO_PRIORITY; ++cursor->priority)
> +		list_for_each_entry(res, &man->lru[cursor->priority], lru)
> +			return res;
> +
> +	return NULL;
> +}
> +
>  static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
>  					  struct dma_buf_map *dmap,
>  					  pgoff_t i)
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index a54d52517a30..13da5e337350 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -183,6 +183,17 @@ struct ttm_resource {
>  	struct list_head lru;
>  };
>  
> +/**
> + * struct ttm_resource_cursor
> + *
> + * @priority: the current priority
> + *
> + * Cursor to iterate over the resources in a manager.
> + */
> +struct ttm_resource_cursor {
> +	unsigned int priority;
> +};
> +
>  /**
>   * struct ttm_lru_bulk_move_pos
>   *
> @@ -339,6 +350,18 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
>  void ttm_resource_manager_debug(struct ttm_resource_manager *man,
>  				struct drm_printer *p);
>  
> +struct ttm_resource *
> +ttm_resource_manager_first(struct ttm_resource_manager *man,
> +			   struct ttm_resource_cursor *cursor);
> +struct ttm_resource *
> +ttm_resource_manager_next(struct ttm_resource_manager *man,
> +			  struct ttm_resource_cursor *cursor,
> +			  struct ttm_resource *res);
> +
> +#define ttm_resource_manager_for_each_res(man, cursor, res)		\
> +	for (res = ttm_resource_manager_first(man, cursor); res;	\
> +	     res = ttm_resource_manager_next(man, cursor, res))
> +
>  struct ttm_kmap_iter *
>  ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
>  			 struct io_mapping *iomap,
> -- 
> 2.25.1
> 
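
For context, a minimal usage sketch of the new iterator (condensed from the
ttm_bo.c and ttm_device.c hunks above, with the eviction logic elided, and
taking bdev->lru_lock around the walk as those hunks do):

	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;

	spin_lock(&bdev->lru_lock);
	ttm_resource_manager_for_each_res(man, &cursor, res) {
		struct ttm_buffer_object *bo = res->bo;

		/* inspect or evict bo here; res is on the manager's LRU */
	}
	spin_unlock(&bdev->lru_lock);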

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 03/12] drm/ttm: add a weak BO reference to the resource v3
  2021-11-24 12:44 ` [PATCH 03/12] drm/ttm: add a weak BO reference to the resource v3 Christian König
  2021-11-26  7:10   ` Huang Rui
@ 2021-11-26  7:43   ` Thomas Hellström
  1 sibling, 0 replies; 24+ messages in thread
From: Thomas Hellström @ 2021-11-26  7:43 UTC (permalink / raw)
  To: Christian König, daniel, ray.huang, dri-devel


On 11/24/21 13:44, Christian König wrote:
> Keep track for which BO a resource was allocated.
> This is necessary to move the LRU handling into the resources.
>
> A bit problematic is i915 since it tries to use the resource
> interface without a BO which is illegal from the conceptional
> point of view.

s/conceptional/conceptual/ ?

I can't find the i915 workaround below. Was this the mock BO that was used
in the selftest code? Perhaps that was replaced by allowing resource->bo to
be NULL; in that case the commit message should be updated.

/Thomas.


>
> v2: Document that this is a weak reference and add a workaround for i915
> v3: further document that this is protected by ttm_device::lru_lock and
>      clarify the i915 workaround
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo_util.c  | 7 +++++--
>   drivers/gpu/drm/ttm/ttm_resource.c | 9 +++++++++
>   include/drm/ttm/ttm_resource.h     | 4 ++++
>   3 files changed, 18 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index 95de2691ee7c..a2e3a9626198 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -237,6 +237,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
>   	if (bo->type != ttm_bo_type_sg)
>   		fbo->base.base.resv = &fbo->base.base._resv;
>   
> +	if (fbo->base.resource) {
> +		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
> +		bo->resource = NULL;
> +	}
> +
>   	dma_resv_init(&fbo->base.base._resv);
>   	fbo->base.base.dev = NULL;
>   	ret = dma_resv_trylock(&fbo->base.base._resv);
> @@ -512,7 +517,6 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
>   		ghost_obj->ttm = NULL;
>   	else
>   		bo->ttm = NULL;
> -	bo->resource = NULL;
>   
>   	dma_resv_unlock(&ghost_obj->base._resv);
>   	ttm_bo_put(ghost_obj);
> @@ -637,7 +641,6 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
>   	dma_resv_unlock(&ghost->base._resv);
>   	ttm_bo_put(ghost);
>   	bo->ttm = ttm;
> -	bo->resource = NULL;
>   	ttm_bo_assign_mem(bo, sys_res);
>   	return 0;
>   
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 41e7bf195168..7fdd58b53c61 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -41,6 +41,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>   	res->bus.offset = 0;
>   	res->bus.is_iomem = false;
>   	res->bus.caching = ttm_cached;
> +	res->bo = bo;
>   }
>   EXPORT_SYMBOL(ttm_resource_init);
>   
> @@ -122,6 +123,14 @@ bool ttm_resource_compat(struct ttm_resource *res,
>   }
>   EXPORT_SYMBOL(ttm_resource_compat);
>   
> +void ttm_resource_set_bo(struct ttm_resource *res,
> +			 struct ttm_buffer_object *bo)
> +{
> +	spin_lock(&bo->bdev->lru_lock);
> +	res->bo = bo;
> +	spin_unlock(&bo->bdev->lru_lock);
> +}
> +
>   /**
>    * ttm_resource_manager_init
>    *
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 6bf37383002b..69eea9d6399b 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -161,6 +161,7 @@ struct ttm_bus_placement {
>    * @mem_type: Resource type of the allocation.
>    * @placement: Placement flags.
>    * @bus: Placement on io bus accessible to the CPU
> + * @bo: weak reference to the BO, protected by ttm_device::lru_lock
>    *
>    * Structure indicating the placement and space resources used by a
>    * buffer object.
> @@ -171,6 +172,7 @@ struct ttm_resource {
>   	uint32_t mem_type;
>   	uint32_t placement;
>   	struct ttm_bus_placement bus;
> +	struct ttm_buffer_object *bo;
>   };
>   
>   /**
> @@ -271,6 +273,8 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
>   void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
>   bool ttm_resource_compat(struct ttm_resource *res,
>   			 struct ttm_placement *placement);
> +void ttm_resource_set_bo(struct ttm_resource *res,
> +			 struct ttm_buffer_object *bo);
>   
>   void ttm_resource_manager_init(struct ttm_resource_manager *man,
>   			       struct ttm_device *bdev,

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 01/12] drm/ttm: add ttm_resource_fini
  2021-11-26  7:39     ` Christian König
@ 2021-11-26  7:47       ` Huang Rui
  0 siblings, 0 replies; 24+ messages in thread
From: Huang Rui @ 2021-11-26  7:47 UTC (permalink / raw)
  To: Christian König; +Cc: thomas.hellstrom, dri-devel

On Fri, Nov 26, 2021 at 03:39:30PM +0800, Christian König wrote:
> Am 26.11.21 um 07:48 schrieb Huang Rui:
> > On Wed, Nov 24, 2021 at 08:44:19PM +0800, Christian König wrote:
> >> Make sure we call the common cleanup function in all
> >> implementations of the resource manager.
> >>
> >> Signed-off-by: Christian König <christian.koenig@amd.com>
> >> ---
> >>   drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c     | 2 ++
> >>   drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c | 1 +
> >>   drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c    | 2 ++
> >>   drivers/gpu/drm/i915/i915_ttm_buddy_manager.c   | 1 +
> >>   drivers/gpu/drm/nouveau/nouveau_mem.c           | 3 ++-
> >>   drivers/gpu/drm/nouveau/nouveau_mem.h           | 3 ++-
> >>   drivers/gpu/drm/nouveau/nouveau_ttm.c           | 9 +++++----
> >>   drivers/gpu/drm/ttm/ttm_range_manager.c         | 2 ++
> >>   drivers/gpu/drm/ttm/ttm_resource.c              | 6 ++++++
> >>   drivers/gpu/drm/ttm/ttm_sys_manager.c           | 1 +
> >>   drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c   | 2 ++
> >>   drivers/gpu/drm/vmwgfx/vmwgfx_thp.c             | 3 ++-
> >>   include/drm/ttm/ttm_resource.h                  | 3 +++
> >>   13 files changed, 31 insertions(+), 7 deletions(-)
> >>
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> >> index 675a72ef305d..ea5470c8c921 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> >> @@ -169,6 +169,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
> >>   	return 0;
> >>   
> >>   err_free:
> >> +	ttm_resource_fini(man, &node->base.base);
> >>   	kfree(node);
> >>   
> >>   err_out:
> >> @@ -200,6 +201,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
> >>   	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
> >>   		atomic64_sub(res->num_pages, &mgr->used);
> >>   
> >> +	ttm_resource_fini(man, res);
> >>   	kfree(node);
> >>   }
> >>   
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
> >> index d02c8637f909..ffddec08e931 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
> >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
> >> @@ -95,6 +95,7 @@ static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
> >>   	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
> >>   
> >>   	atomic64_sub(res->num_pages, &mgr->used);
> >> +	ttm_resource_fini(man, res);
> >>   	kfree(res);
> >>   }
> >>   
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> >> index 7b2b0980ec41..55d68408951d 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> >> @@ -476,6 +476,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
> >>   	while (i--)
> >>   		drm_mm_remove_node(&node->mm_nodes[i]);
> >>   	spin_unlock(&mgr->lock);
> >> +	ttm_resource_fini(man, &node->base);
> >>   	kvfree(node);
> >>   
> >>   error_sub:
> >> @@ -515,6 +516,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
> >>   	atomic64_sub(usage, &mgr->usage);
> >>   	atomic64_sub(vis_usage, &mgr->vis_usage);
> >>   
> >> +	ttm_resource_fini(man, res);
> >>   	kvfree(node);
> >>   }
> >>   
> >> diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
> >> index d59fbb019032..ca3ca1f7f850 100644
> >> --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
> >> +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
> >> @@ -123,6 +123,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
> >>   	i915_buddy_free_list(&bman->mm, &bman_res->blocks);
> >>   	mutex_unlock(&bman->lock);
> >>   
> >> +	ttm_resource_fini(man, res);
> >>   	kfree(bman_res);
> >>   }
> >>   
> >> diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
> >> index 2ca3207c13fc..2e517cdc24c9 100644
> >> --- a/drivers/gpu/drm/nouveau/nouveau_mem.c
> >> +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
> >> @@ -162,11 +162,12 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
> >>   }
> >>   
> >>   void
> >> -nouveau_mem_del(struct ttm_resource *reg)
> >> +nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
> >>   {
> >>   	struct nouveau_mem *mem = nouveau_mem(reg);
> >>   
> >>   	nouveau_mem_fini(mem);
> >> +	ttm_resource_fini(man, reg);
> >>   	kfree(mem);
> >>   }
> >>   
> >> diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
> >> index 2c01166a90f2..325551eba5cd 100644
> >> --- a/drivers/gpu/drm/nouveau/nouveau_mem.h
> >> +++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
> >> @@ -23,7 +23,8 @@ nouveau_mem(struct ttm_resource *reg)
> >>   
> >>   int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
> >>   		    struct ttm_resource **);
> >> -void nouveau_mem_del(struct ttm_resource *);
> >> +void nouveau_mem_del(struct ttm_resource_manager *man,
> >> +		     struct ttm_resource *);
> >>   int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
> >>   int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
> >>   void nouveau_mem_fini(struct nouveau_mem *);
> >> diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> >> index 2ca9d9a9e5d5..91ef33f8f22c 100644
> >> --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
> >> +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> >> @@ -36,9 +36,10 @@
> >>   #include <core/tegra.h>
> >>   
> >>   static void
> >> -nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
> >> +nouveau_manager_del(struct ttm_resource_manager *man,
> >> +		    struct ttm_resource *reg)
> >>   {
> >> -	nouveau_mem_del(reg);
> >> +	nouveau_mem_del(man, reg);
> >>   }
> >>   
> >>   static int
> >> @@ -62,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
> >>   
> >>   	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
> >>   	if (ret) {
> >> -		nouveau_mem_del(*res);
> >> +		nouveau_mem_del(man, *res);
> >>   		return ret;
> >>   	}
> >>   
> >> @@ -118,7 +119,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
> >>   	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
> >>   			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
> >>   	if (ret) {
> >> -		nouveau_mem_del(*res);
> >> +		nouveau_mem_del(man, *res);
> >>   		return ret;
> >>   	}
> >>   
> >> diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
> >> index 67d68a4a8640..25fcf0d63c2d 100644
> >> --- a/drivers/gpu/drm/ttm/ttm_range_manager.c
> >> +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
> >> @@ -89,6 +89,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
> >>   	spin_unlock(&rman->lock);
> >>   
> >>   	if (unlikely(ret)) {
> >> +		ttm_resource_fini(man, *res);
> >>   		kfree(node);
> >>   		return ret;
> >>   	}
> >> @@ -108,6 +109,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
> >>   	drm_mm_remove_node(&node->mm_nodes[0]);
> >>   	spin_unlock(&rman->lock);
> >>   
> >> +	ttm_resource_fini(man, res);
> >>   	kfree(node);
> >>   }
> >>   
> >> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> >> index 035d71332d18..89bcfe22a0ca 100644
> >> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> >> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> >> @@ -44,6 +44,12 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
> >>   }
> >>   EXPORT_SYMBOL(ttm_resource_init);
> >>   
> >> +void ttm_resource_fini(struct ttm_resource_manager *man,
> >> +		       struct ttm_resource *res)
> >> +{
> > Maybe we should clean up the res data here, e.g. memset(res, 0, sizeof(*res)).
> > The previous data should be invalidated when we call fini.
> 
> Nah, calling _fini means the resource structure is about to be freed.
> 
> memset in this situation doesn't make much sense.
> 

But we already have ttm_resource_free() to free the resource structure. If
fini is paired with ttm_resource_init, it should clean up the data of the
resource structure.

Thanks,
Ray
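
A sketch of that suggestion (hypothetical, not what the series applies;
Christian keeps ttm_resource_fini() as pure accounting cleanup):

	void ttm_resource_fini(struct ttm_resource_manager *man,
			       struct ttm_resource *res)
	{
		atomic64_sub(res->bo->base.size, &man->usage);

		/* suggested: invalidate the data before the driver frees it */
		memset(res, 0, sizeof(*res));
	}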

> Christian.
> 
> >
> > Thanks,
> > Ray
> >
> >> +}
> >> +EXPORT_SYMBOL(ttm_resource_fini);
> >> +
> >>   int ttm_resource_alloc(struct ttm_buffer_object *bo,
> >>   		       const struct ttm_place *place,
> >>   		       struct ttm_resource **res_ptr)
> >> diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
> >> index 63aca52f75e1..135394dcca95 100644
> >> --- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
> >> +++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
> >> @@ -23,6 +23,7 @@ static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
> >>   static void ttm_sys_man_free(struct ttm_resource_manager *man,
> >>   			     struct ttm_resource *res)
> >>   {
> >> +	ttm_resource_fini(man, res);
> >>   	kfree(res);
> >>   }
> >>   
> >> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> >> index b2c4af331c9d..bfd686bb8d19 100644
> >> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> >> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> >> @@ -116,6 +116,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
> >>   	gman->used_gmr_pages -= (*res)->num_pages;
> >>   	spin_unlock(&gman->lock);
> >>   	ida_free(&gman->gmr_ida, id);
> >> +	ttm_resource_fini(man, *res);
> >>   	kfree(*res);
> >>   	return -ENOSPC;
> >>   }
> >> @@ -129,6 +130,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
> >>   	spin_lock(&gman->lock);
> >>   	gman->used_gmr_pages -= res->num_pages;
> >>   	spin_unlock(&gman->lock);
> >> +	ttm_resource_fini(man, res);
> >>   	kfree(res);
> >>   }
> >>   
> >> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> >> index 2a3d3468e4e0..4fcbd94ccc11 100644
> >> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> >> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> >> @@ -104,6 +104,7 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
> >>   	spin_unlock(&rman->lock);
> >>   
> >>   	if (unlikely(ret)) {
> >> +		ttm_resource_fini(man, &node->base);
> >>   		kfree(node);
> >>   	} else {
> >>   		node->base.start = node->mm_nodes[0].start;
> >> @@ -122,7 +123,7 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
> >>   	spin_lock(&rman->lock);
> >>   	drm_mm_remove_node(&node->mm_nodes[0]);
> >>   	spin_unlock(&rman->lock);
> >> -
> >> +	ttm_resource_fini(man, res);
> >>   	kfree(node);
> >>   }
> >>   
> >> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> >> index 5952051091cd..df1f06b7b504 100644
> >> --- a/include/drm/ttm/ttm_resource.h
> >> +++ b/include/drm/ttm/ttm_resource.h
> >> @@ -261,6 +261,9 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
> >>   void ttm_resource_init(struct ttm_buffer_object *bo,
> >>                          const struct ttm_place *place,
> >>                          struct ttm_resource *res);
> >> +void ttm_resource_fini(struct ttm_resource_manager *man,
> >> +		       struct ttm_resource *res);
> >> +
> >>   int ttm_resource_alloc(struct ttm_buffer_object *bo,
> >>   		       const struct ttm_place *place,
> >>   		       struct ttm_resource **res);
> >> -- 
> >> 2.25.1
> >>
> 

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: drm/ttm: moving the LRU into the resource
  2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
                   ` (11 preceding siblings ...)
  2021-11-24 12:44 ` [PATCH 12/12] drm/amdgpu: drop amdgpu_gtt_node Christian König
@ 2021-11-26  7:47 ` Thomas Hellström
  12 siblings, 0 replies; 24+ messages in thread
From: Thomas Hellström @ 2021-11-26  7:47 UTC (permalink / raw)
  To: Christian König, daniel, ray.huang, dri-devel

Hi, Christian,

On 11/24/21 13:44, Christian König wrote:
> Hi guys,
>
> I've already send out this patch set a couple of times.
>
> This fixes the fundamental problem in TTM that during a move a buffer
> has resources allocated from two different domains at the same time.
>
> Additional to that it's a prerequisite to remove ghost objects and
> allow to allocate multiple resources per BO.
>
> I should have fixed all previous review comments as far as I can see,
> but probably better to take another look.
>
> Regards,
> Christian.

When sending the next revision of this, could you CC intel-gfx (if it
applies on drm-tip, that is), so that we can catch and help fix any
dg1 regressions early?

Thanks,

Thomas



^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 12/12] drm/amdgpu: drop amdgpu_gtt_node
  2021-11-24 12:44 ` [PATCH 12/12] drm/amdgpu: drop amdgpu_gtt_node Christian König
@ 2021-11-26  7:53   ` Huang Rui
  0 siblings, 0 replies; 24+ messages in thread
From: Huang Rui @ 2021-11-26  7:53 UTC (permalink / raw)
  To: Christian König; +Cc: thomas.hellstrom, dri-devel

On Wed, Nov 24, 2021 at 08:44:30PM +0800, Christian König wrote:
> We have the BO pointer in the base structure now as well.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Patches 7 -> 12 are Reviewed-by: Huang Rui <ray.huang@amd.com>

I need more time to read patch 5.

Thanks,
Ray

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 49 ++++++++-------------
>  1 file changed, 18 insertions(+), 31 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> index ce5eeb3c1097..a55bbe1a154c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> @@ -26,23 +26,12 @@
>  
>  #include "amdgpu.h"
>  
> -struct amdgpu_gtt_node {
> -	struct ttm_buffer_object *tbo;
> -	struct ttm_range_mgr_node base;
> -};
> -
>  static inline struct amdgpu_gtt_mgr *
>  to_gtt_mgr(struct ttm_resource_manager *man)
>  {
>  	return container_of(man, struct amdgpu_gtt_mgr, manager);
>  }
>  
> -static inline struct amdgpu_gtt_node *
> -to_amdgpu_gtt_node(struct ttm_resource *res)
> -{
> -	return container_of(res, struct amdgpu_gtt_node, base.base);
> -}
> -
>  /**
>   * DOC: mem_info_gtt_total
>   *
> @@ -107,9 +96,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
>   */
>  bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
>  {
> -	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
> +	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
>  
> -	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
> +	return drm_mm_node_allocated(&node->mm_nodes[0]);
>  }
>  
>  /**
> @@ -129,15 +118,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>  {
>  	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
>  	uint32_t num_pages = PFN_UP(tbo->base.size);
> -	struct amdgpu_gtt_node *node;
> +	struct ttm_range_mgr_node *node;
>  	int r;
>  
> -	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
> +	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
>  	if (!node)
>  		return -ENOMEM;
>  
> -	node->tbo = tbo;
> -	ttm_resource_init(tbo, place, &node->base.base);
> +	ttm_resource_init(tbo, place, &node->base);
>  	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
>  	    ttm_resource_manager_usage(man) > man->size) {
>  		r = -ENOSPC;
> @@ -146,8 +134,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>  
>  	if (place->lpfn) {
>  		spin_lock(&mgr->lock);
> -		r = drm_mm_insert_node_in_range(&mgr->mm,
> -						&node->base.mm_nodes[0],
> +		r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
>  						num_pages, tbo->page_alignment,
>  						0, place->fpfn, place->lpfn,
>  						DRM_MM_INSERT_BEST);
> @@ -155,18 +142,18 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>  		if (unlikely(r))
>  			goto err_free;
>  
> -		node->base.base.start = node->base.mm_nodes[0].start;
> +		node->base.start = node->mm_nodes[0].start;
>  	} else {
> -		node->base.mm_nodes[0].start = 0;
> -		node->base.mm_nodes[0].size = node->base.base.num_pages;
> -		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
> +		node->mm_nodes[0].start = 0;
> +		node->mm_nodes[0].size = node->base.num_pages;
> +		node->base.start = AMDGPU_BO_INVALID_OFFSET;
>  	}
>  
> -	*res = &node->base.base;
> +	*res = &node->base;
>  	return 0;
>  
>  err_free:
> -	ttm_resource_fini(man, &node->base.base);
> +	ttm_resource_fini(man, &node->base);
>  	kfree(node);
>  	return r;
>  }
> @@ -182,12 +169,12 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
>  static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
>  			       struct ttm_resource *res)
>  {
> -	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
> +	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
>  	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
>  
>  	spin_lock(&mgr->lock);
> -	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
> -		drm_mm_remove_node(&node->base.mm_nodes[0]);
> +	if (drm_mm_node_allocated(&node->mm_nodes[0]))
> +		drm_mm_remove_node(&node->mm_nodes[0]);
>  	spin_unlock(&mgr->lock);
>  
>  	ttm_resource_fini(man, res);
> @@ -204,16 +191,16 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
>  int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
>  {
>  	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
> +	struct ttm_range_mgr_node *node;
>  	struct amdgpu_device *adev;
> -	struct amdgpu_gtt_node *node;
>  	struct drm_mm_node *mm_node;
>  	int r = 0;
>  
>  	adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
>  	spin_lock(&mgr->lock);
>  	drm_mm_for_each_node(mm_node, &mgr->mm) {
> -		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
> -		r = amdgpu_ttm_recover_gart(node->tbo);
> +		node = container_of(mm_node, typeof(*node), mm_nodes[0]);
> +		r = amdgpu_ttm_recover_gart(node->base.bo);
>  		if (r)
>  			break;
>  	}
> -- 
> 2.25.1
> 

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH 12/12] drm/amdgpu: drop amdgpu_gtt_node
  2021-08-30  8:56 [PATCH 01/12] drm/ttm: add ttm_resource_fini Christian König
@ 2021-08-30  8:57 ` Christian König
  0 siblings, 0 replies; 24+ messages in thread
From: Christian König @ 2021-08-30  8:57 UTC (permalink / raw)
  To: thomas.hellstrom, dri-devel, andrey.grodzovsky

We have the BO pointer in the base structure now as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 49 ++++++++-------------
 1 file changed, 18 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 7085494c6a9f..22506e429a62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -26,23 +26,12 @@
 
 #include "amdgpu.h"
 
-struct amdgpu_gtt_node {
-	struct ttm_buffer_object *tbo;
-	struct ttm_range_mgr_node base;
-};
-
 static inline struct amdgpu_gtt_mgr *
 to_gtt_mgr(struct ttm_resource_manager *man)
 {
 	return container_of(man, struct amdgpu_gtt_mgr, manager);
 }
 
-static inline struct amdgpu_gtt_node *
-to_amdgpu_gtt_node(struct ttm_resource *res)
-{
-	return container_of(res, struct amdgpu_gtt_node, base.base);
-}
-
 /**
  * DOC: mem_info_gtt_total
  *
@@ -107,9 +96,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 
-	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
+	return drm_mm_node_allocated(&node->mm_nodes[0]);
 }
 
 /**
@@ -129,15 +118,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
 	uint32_t num_pages = PFN_UP(tbo->base.size);
-	struct amdgpu_gtt_node *node;
+	struct ttm_range_mgr_node *node;
 	int r;
 
-	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
-	node->tbo = tbo;
-	ttm_resource_init(tbo, place, &node->base.base);
+	ttm_resource_init(tbo, place, &node->base);
 	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
 	    ttm_resource_manager_usage(man) > man->size) {
 		r = -ENOSPC;
@@ -146,8 +134,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 
 	if (place->lpfn) {
 		spin_lock(&mgr->lock);
-		r = drm_mm_insert_node_in_range(&mgr->mm,
-						&node->base.mm_nodes[0],
+		r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
 						num_pages, tbo->page_alignment,
 						0, place->fpfn, place->lpfn,
 						DRM_MM_INSERT_BEST);
@@ -155,18 +142,18 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		if (unlikely(r))
 			goto err_free;
 
-		node->base.base.start = node->base.mm_nodes[0].start;
+		node->base.start = node->mm_nodes[0].start;
 	} else {
-		node->base.mm_nodes[0].start = 0;
-		node->base.mm_nodes[0].size = node->base.base.num_pages;
-		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+		node->mm_nodes[0].start = 0;
+		node->mm_nodes[0].size = node->base.num_pages;
+		node->base.start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	*res = &node->base.base;
+	*res = &node->base;
 	return 0;
 
 err_free:
-	ttm_resource_fini(man, &node->base.base);
+	ttm_resource_fini(man, &node->base);
 	kfree(node);
 	return r;
 }
@@ -182,12 +169,12 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 			       struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
 
 	spin_lock(&mgr->lock);
-	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
-		drm_mm_remove_node(&node->base.mm_nodes[0]);
+	if (drm_mm_node_allocated(&node->mm_nodes[0]))
+		drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&mgr->lock);
 
 	ttm_resource_fini(man, res);
@@ -204,16 +191,16 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	struct ttm_range_mgr_node *node;
 	struct amdgpu_device *adev;
-	struct amdgpu_gtt_node *node;
 	struct drm_mm_node *mm_node;
 	int r = 0;
 
 	adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
 	spin_lock(&mgr->lock);
 	drm_mm_for_each_node(mm_node, &mgr->mm) {
-		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-		r = amdgpu_ttm_recover_gart(node->tbo);
+		node = container_of(mm_node, typeof(*node), mm_nodes[0]);
+		r = amdgpu_ttm_recover_gart(node->base.bo);
 		if (r)
 			break;
 	}
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2021-11-26  7:54 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-11-24 12:44 drm/ttm: moving the LRU into the resource Christian König
2021-11-24 12:44 ` [PATCH 01/12] drm/ttm: add ttm_resource_fini Christian König
2021-11-26  6:48   ` Huang Rui
2021-11-26  7:39     ` Christian König
2021-11-26  7:47       ` Huang Rui
2021-11-24 12:44 ` [PATCH 02/12] drm/ttm: add back a reference to the bdev to the res manager Christian König
2021-11-26  6:52   ` Huang Rui
2021-11-24 12:44 ` [PATCH 03/12] drm/ttm: add a weak BO reference to the resource v3 Christian König
2021-11-26  7:10   ` Huang Rui
2021-11-26  7:43   ` Thomas Hellström
2021-11-24 12:44 ` [PATCH 04/12] drm/ttm: add common accounting to the resource mgr v2 Christian König
2021-11-26  7:35   ` Huang Rui
2021-11-24 12:44 ` [PATCH 05/12] drm/ttm: move the LRU into resource handling v2 Christian König
2021-11-24 12:44 ` [PATCH 06/12] drm/ttm: add resource iterator Christian König
2021-11-26  7:43   ` Huang Rui
2021-11-24 12:44 ` [PATCH 07/12] drm/radeon: use ttm_resource_manager_debug Christian König
2021-11-24 12:44 ` [PATCH 08/12] drm/radeon: remove resource accounting Christian König
2021-11-24 12:44 ` [PATCH 09/12] drm/amdgpu: use ttm_resource_manager_debug Christian König
2021-11-24 12:44 ` [PATCH 10/12] drm/amdgpu: remove GTT accounting Christian König
2021-11-24 12:44 ` [PATCH 11/12] drm/amdgpu: remove VRAM accounting Christian König
2021-11-24 12:44 ` [PATCH 12/12] drm/amdgpu: drop amdgpu_gtt_node Christian König
2021-11-26  7:53   ` Huang Rui
2021-11-26  7:47 ` drm/ttm: moving the LRU into the resource Thomas Hellström
  -- strict thread matches above, loose matches on Subject: below --
2021-08-30  8:56 [PATCH 01/12] drm/ttm: add ttm_resource_fini Christian König
2021-08-30  8:57 ` [PATCH 12/12] drm/amdgpu: drop amdgpu_gtt_node Christian König
