* [PATCH 1/2] drm/ttm: use pin_count more extensively @ 2020-11-27 14:16 Christian König 2020-11-27 14:16 ` [PATCH 2/2] drm/ttm: cleanup LRU handling further Christian König 0 siblings, 1 reply; 3+ messages in thread From: Christian König @ 2020-11-27 14:16 UTC (permalink / raw) To: dri-devel Check the pin_count instead of whether the lru list is empty here. Signed-off-by: Christian König <christian.koenig@amd.com> --- drivers/gpu/drm/ttm/ttm_bo.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 9a03c7834b1e..a0bddcc64504 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -937,9 +937,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, } error: - if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { + if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count) ttm_bo_move_to_lru_tail_unlocked(bo); - } return ret; } -- 2.25.1 _______________________________________________ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel ^ permalink raw reply related [flat|nested] 3+ messages in thread
* [PATCH 2/2] drm/ttm: cleanup LRU handling further 2020-11-27 14:16 [PATCH 1/2] drm/ttm: use pin_count more extensively Christian König @ 2020-11-27 14:16 ` Christian König 2020-12-14 19:29 ` Andrey Grodzovsky 0 siblings, 1 reply; 3+ messages in thread From: Christian König @ 2020-11-27 14:16 UTC (permalink / raw) To: dri-devel We only completely delete the BO from the LRU on destruction. Signed-off-by: Christian König <christian.koenig@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +- drivers/gpu/drm/qxl/qxl_release.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 60 +++++++++++--------------- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 4 +- include/drm/ttm/ttm_bo_api.h | 2 + include/drm/ttm/ttm_bo_driver.h | 5 ++- 6 files changed, 36 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0768c8686983..ad91c0c3c423 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -653,9 +653,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, if (!bo->parent) continue; - ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); + ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem, + &vm->lru_bulk_move); if (bo->shadow) ttm_bo_move_to_lru_tail(&bo->shadow->tbo, + &bo->shadow->tbo.mem, &vm->lru_bulk_move); } spin_unlock(&ttm_bo_glob.lru_lock); diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index e75e364655b8..0fcfc952d5e9 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -456,7 +456,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) bo = entry->bo; dma_resv_add_shared_fence(bo->base.resv, &release->base); - ttm_bo_move_to_lru_tail(bo, NULL); + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); dma_resv_unlock(bo->base.resv); } spin_unlock(&ttm_bo_glob.lru_lock); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 
a0bddcc64504..a4435caba94c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -109,40 +109,14 @@ static struct kobj_type ttm_bo_glob_kobj_type = { .default_attrs = ttm_bo_global_attrs }; -static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, - struct ttm_resource *mem) -{ - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_resource_manager *man; - - if (!list_empty(&bo->lru) || bo->pin_count) - return; - - man = ttm_manager_type(bdev, mem->mem_type); - list_add_tail(&bo->lru, &man->lru[bo->priority]); - - if (man->use_tt && bo->ttm && - !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | - TTM_PAGE_FLAG_SWAPPED))) { - list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); - } -} - static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - bool notify = false; - if (!list_empty(&bo->swap)) { - list_del_init(&bo->swap); - notify = true; - } - if (!list_empty(&bo->lru)) { - list_del_init(&bo->lru); - notify = true; - } + list_del_init(&bo->swap); + list_del_init(&bo->lru); - if (notify && bdev->driver->del_from_lru_notify) + if (bdev->driver->del_from_lru_notify) bdev->driver->del_from_lru_notify(bo); } @@ -155,12 +129,30 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, } void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, + struct ttm_resource *mem, struct ttm_lru_bulk_move *bulk) { + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_resource_manager *man; + dma_resv_assert_held(bo->base.resv); - ttm_bo_del_from_lru(bo); - ttm_bo_add_mem_to_lru(bo, &bo->mem); + if (bo->pin_count) + return; + + man = ttm_manager_type(bdev, mem->mem_type); + list_move_tail(&bo->lru, &man->lru[bo->priority]); + if (man->use_tt && bo->ttm && + !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | + TTM_PAGE_FLAG_SWAPPED))) { + struct list_head *swap; + + swap = &ttm_bo_glob.swap_lru[bo->priority]; + list_move_tail(&bo->swap, swap); + } + + if (bdev->driver->del_from_lru_notify) 
+ bdev->driver->del_from_lru_notify(bo); if (bulk && !bo->pin_count) { switch (bo->mem.mem_type) { @@ -516,8 +508,7 @@ static void ttm_bo_release(struct kref *kref) */ if (bo->pin_count) { bo->pin_count = 0; - ttm_bo_del_from_lru(bo); - ttm_bo_add_mem_to_lru(bo, &bo->mem); + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); } kref_init(&bo->kref); @@ -859,8 +850,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, mem->placement = place->flags; spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_del_from_lru(bo); - ttm_bo_add_mem_to_lru(bo, mem); + ttm_bo_move_to_lru_tail(bo, mem, NULL); spin_unlock(&ttm_bo_glob.lru_lock); return 0; diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 8a8f1a6a83a6..9fa36ed59429 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -55,7 +55,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - ttm_bo_move_to_lru_tail(bo, NULL); + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); dma_resv_unlock(bo->base.resv); } spin_unlock(&ttm_bo_glob.lru_lock); @@ -162,7 +162,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, dma_resv_add_shared_fence(bo->base.resv, fence); else dma_resv_add_excl_fence(bo->base.resv, fence); - ttm_bo_move_to_lru_tail(bo, NULL); + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); dma_resv_unlock(bo->base.resv); } spin_unlock(&ttm_bo_glob.lru_lock); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 2564e66e67d7..2d7004fc47b3 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -310,6 +310,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo); * ttm_bo_move_to_lru_tail * * @bo: The buffer object. + * @mem: Resource object. 
* @bulk: optional bulk move structure to remember BO positions * * Move this BO to the tail of all lru lists used to lookup and reserve an @@ -317,6 +318,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo); * held, and is used to make a BO less likely to be considered for eviction. */ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, + struct ttm_resource *mem, struct ttm_lru_bulk_move *bulk); /** diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index f02f7cf9ae90..e362e272184c 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -492,10 +492,11 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, return 0; } -static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) +static inline void +ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) { spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_move_to_lru_tail(bo, NULL); + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); spin_unlock(&ttm_bo_glob.lru_lock); } -- 2.25.1 _______________________________________________ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel ^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH 2/2] drm/ttm: cleanup LRU handling further 2020-11-27 14:16 ` [PATCH 2/2] drm/ttm: cleanup LRU handling further Christian König @ 2020-12-14 19:29 ` Andrey Grodzovsky 0 siblings, 0 replies; 3+ messages in thread From: Andrey Grodzovsky @ 2020-12-14 19:29 UTC (permalink / raw) To: Christian König, dri-devel Series is Acked-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com> Andrey On 11/27/20 9:16 AM, Christian König wrote: > We only completely delete the BO from the LRU on destruction. > > Signed-off-by: Christian König <christian.koenig@amd.com> > --- > drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +- > drivers/gpu/drm/qxl/qxl_release.c | 2 +- > drivers/gpu/drm/ttm/ttm_bo.c | 60 +++++++++++--------------- > drivers/gpu/drm/ttm/ttm_execbuf_util.c | 4 +- > include/drm/ttm/ttm_bo_api.h | 2 + > include/drm/ttm/ttm_bo_driver.h | 5 ++- > 6 files changed, 36 insertions(+), 41 deletions(-) > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c > index 0768c8686983..ad91c0c3c423 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c > @@ -653,9 +653,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, > if (!bo->parent) > continue; > > - ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); > + ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem, > + &vm->lru_bulk_move); > if (bo->shadow) > ttm_bo_move_to_lru_tail(&bo->shadow->tbo, > + &bo->shadow->tbo.mem, > &vm->lru_bulk_move); > } > spin_unlock(&ttm_bo_glob.lru_lock); > diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c > index e75e364655b8..0fcfc952d5e9 100644 > --- a/drivers/gpu/drm/qxl/qxl_release.c > +++ b/drivers/gpu/drm/qxl/qxl_release.c > @@ -456,7 +456,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) > bo = entry->bo; > > dma_resv_add_shared_fence(bo->base.resv, &release->base); > - ttm_bo_move_to_lru_tail(bo, NULL); > + ttm_bo_move_to_lru_tail(bo, 
&bo->mem, NULL); > dma_resv_unlock(bo->base.resv); > } > spin_unlock(&ttm_bo_glob.lru_lock); > diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c > index a0bddcc64504..a4435caba94c 100644 > --- a/drivers/gpu/drm/ttm/ttm_bo.c > +++ b/drivers/gpu/drm/ttm/ttm_bo.c > @@ -109,40 +109,14 @@ static struct kobj_type ttm_bo_glob_kobj_type = { > .default_attrs = ttm_bo_global_attrs > }; > > -static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, > - struct ttm_resource *mem) > -{ > - struct ttm_bo_device *bdev = bo->bdev; > - struct ttm_resource_manager *man; > - > - if (!list_empty(&bo->lru) || bo->pin_count) > - return; > - > - man = ttm_manager_type(bdev, mem->mem_type); > - list_add_tail(&bo->lru, &man->lru[bo->priority]); > - > - if (man->use_tt && bo->ttm && > - !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | > - TTM_PAGE_FLAG_SWAPPED))) { > - list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); > - } > -} > - > static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) > { > struct ttm_bo_device *bdev = bo->bdev; > - bool notify = false; > > - if (!list_empty(&bo->swap)) { > - list_del_init(&bo->swap); > - notify = true; > - } > - if (!list_empty(&bo->lru)) { > - list_del_init(&bo->lru); > - notify = true; > - } > + list_del_init(&bo->swap); > + list_del_init(&bo->lru); > > - if (notify && bdev->driver->del_from_lru_notify) > + if (bdev->driver->del_from_lru_notify) > bdev->driver->del_from_lru_notify(bo); > } > > @@ -155,12 +129,30 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, > } > > void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, > + struct ttm_resource *mem, > struct ttm_lru_bulk_move *bulk) > { > + struct ttm_bo_device *bdev = bo->bdev; > + struct ttm_resource_manager *man; > + > dma_resv_assert_held(bo->base.resv); > > - ttm_bo_del_from_lru(bo); > - ttm_bo_add_mem_to_lru(bo, &bo->mem); > + if (bo->pin_count) > + return; > + > + man = ttm_manager_type(bdev, mem->mem_type); > + 
list_move_tail(&bo->lru, &man->lru[bo->priority]); > + if (man->use_tt && bo->ttm && > + !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | > + TTM_PAGE_FLAG_SWAPPED))) { > + struct list_head *swap; > + > + swap = &ttm_bo_glob.swap_lru[bo->priority]; > + list_move_tail(&bo->swap, swap); > + } > + > + if (bdev->driver->del_from_lru_notify) > + bdev->driver->del_from_lru_notify(bo); > > if (bulk && !bo->pin_count) { > switch (bo->mem.mem_type) { > @@ -516,8 +508,7 @@ static void ttm_bo_release(struct kref *kref) > */ > if (bo->pin_count) { > bo->pin_count = 0; > - ttm_bo_del_from_lru(bo); > - ttm_bo_add_mem_to_lru(bo, &bo->mem); > + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); > } > > kref_init(&bo->kref); > @@ -859,8 +850,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, > mem->placement = place->flags; > > spin_lock(&ttm_bo_glob.lru_lock); > - ttm_bo_del_from_lru(bo); > - ttm_bo_add_mem_to_lru(bo, mem); > + ttm_bo_move_to_lru_tail(bo, mem, NULL); > spin_unlock(&ttm_bo_glob.lru_lock); > > return 0; > diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c > index 8a8f1a6a83a6..9fa36ed59429 100644 > --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c > +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c > @@ -55,7 +55,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, > list_for_each_entry(entry, list, head) { > struct ttm_buffer_object *bo = entry->bo; > > - ttm_bo_move_to_lru_tail(bo, NULL); > + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); > dma_resv_unlock(bo->base.resv); > } > spin_unlock(&ttm_bo_glob.lru_lock); > @@ -162,7 +162,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, > dma_resv_add_shared_fence(bo->base.resv, fence); > else > dma_resv_add_excl_fence(bo->base.resv, fence); > - ttm_bo_move_to_lru_tail(bo, NULL); > + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); > dma_resv_unlock(bo->base.resv); > } > spin_unlock(&ttm_bo_glob.lru_lock); > diff --git a/include/drm/ttm/ttm_bo_api.h 
b/include/drm/ttm/ttm_bo_api.h > index 2564e66e67d7..2d7004fc47b3 100644 > --- a/include/drm/ttm/ttm_bo_api.h > +++ b/include/drm/ttm/ttm_bo_api.h > @@ -310,6 +310,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo); > * ttm_bo_move_to_lru_tail > * > * @bo: The buffer object. > + * @mem: Resource object. > * @bulk: optional bulk move structure to remember BO positions > * > * Move this BO to the tail of all lru lists used to lookup and reserve an > @@ -317,6 +318,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo); > * held, and is used to make a BO less likely to be considered for eviction. > */ > void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, > + struct ttm_resource *mem, > struct ttm_lru_bulk_move *bulk); > > /** > diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h > index f02f7cf9ae90..e362e272184c 100644 > --- a/include/drm/ttm/ttm_bo_driver.h > +++ b/include/drm/ttm/ttm_bo_driver.h > @@ -492,10 +492,11 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, > return 0; > } > > -static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) > +static inline void > +ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) > { > spin_lock(&ttm_bo_glob.lru_lock); > - ttm_bo_move_to_lru_tail(bo, NULL); > + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); > spin_unlock(&ttm_bo_glob.lru_lock); > } > _______________________________________________ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel ^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2020-12-14 19:29 UTC | newest] Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2020-11-27 14:16 [PATCH 1/2] drm/ttm: use pin_count more extensively Christian König 2020-11-27 14:16 ` [PATCH 2/2] drm/ttm: cleanup LRU handling further Christian König 2020-12-14 19:29 ` Andrey Grodzovsky
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).