All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Christian König" <christian.koenig@amd.com>
To: Nirmoy Das <nirmoy.das@amd.com>, amd-gfx@lists.freedesktop.org
Cc: andrey.grodzovsky@amd.com, lijo.lazar@amd.com
Subject: Re: [PATCH v2 2/3] drm/amdgpu: do not pass ttm_resource_manager to vram_mgr
Date: Fri, 22 Oct 2021 11:46:10 +0200	[thread overview]
Message-ID: <7f0b1097-68d2-65ba-fa5c-568cbccd8790@amd.com> (raw)
In-Reply-To: <20211022093231.7787-2-nirmoy.das@amd.com>

Am 22.10.21 um 11:32 schrieb Nirmoy Das:
> Do not allow exported amdgpu_vram_mgr_*() to accept
> any ttm_resource_manager pointer. Also there is no need
> to force other modules to call a ttm function just to
> eventually call vram_mgr functions.
>
> v2: pass adev's vram_mgr instead of adev
>
> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c   |  3 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c       |  5 +--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c      | 10 ++---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c      |  6 +--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h      |  8 ++--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c     |  5 +--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 40 ++++++++------------
>   7 files changed, 31 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> index 7077f21f0021..df818e145d9a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> @@ -531,9 +531,8 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
>   uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
>   {
>   	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
> -	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
>
> -	return amdgpu_vram_mgr_usage(vram_man);
> +	return amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
>   }
>
>   uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 76fe5b71e35d..7e745164a624 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -298,7 +298,6 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
>   {
>   	s64 time_us, increment_us;
>   	u64 free_vram, total_vram, used_vram;
> -	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
>   	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
>   	 * throttling.
>   	 *
> @@ -315,7 +314,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
>   	}
>
>   	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
> -	used_vram = amdgpu_vram_mgr_usage(vram_man);
> +	used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
>   	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
>
>   	spin_lock(&adev->mm_stats.lock);
> @@ -362,7 +361,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
>   	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
>   		u64 total_vis_vram = adev->gmc.visible_vram_size;
>   		u64 used_vis_vram =
> -		  amdgpu_vram_mgr_vis_usage(vram_man);
> +		  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
>
>   		if (used_vis_vram < total_vis_vram) {
>   			u64 free_vis_vram = total_vis_vram - used_vis_vram;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 603ce32db5c5..b426e03ad630 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -672,10 +672,10 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>   		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
>   		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
>   	case AMDGPU_INFO_VRAM_USAGE:
> -		ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
> +		ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
>   		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
>   	case AMDGPU_INFO_VIS_VRAM_USAGE:
> -		ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
> +		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
>   		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
>   	case AMDGPU_INFO_GTT_USAGE:
>   		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
> @@ -709,8 +709,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>   	}
>   	case AMDGPU_INFO_MEMORY: {
>   		struct drm_amdgpu_memory_info mem;
> -		struct ttm_resource_manager *vram_man =
> -			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
>   		struct ttm_resource_manager *gtt_man =
>   			ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
>   		memset(&mem, 0, sizeof(mem));
> @@ -719,7 +717,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>   			atomic64_read(&adev->vram_pin_size) -
>   			AMDGPU_VM_RESERVED_VRAM;
>   		mem.vram.heap_usage =
> -			amdgpu_vram_mgr_usage(vram_man);
> +			amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
>   		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
>
>   		mem.cpu_accessible_vram.total_heap_size =
> @@ -729,7 +727,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>   			    atomic64_read(&adev->visible_pin_size),
>   			    mem.vram.usable_heap_size);
>   		mem.cpu_accessible_vram.heap_usage =
> -			amdgpu_vram_mgr_vis_usage(vram_man);
> +			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
>   		mem.cpu_accessible_vram.max_allocation =
>   			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> index 08133de21fdd..4114e0d35e82 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> @@ -1804,8 +1804,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
>   			.size = AMDGPU_GPU_PAGE_SIZE,
>   			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
>   		};
> -		status = amdgpu_vram_mgr_query_page_status(
> -				ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
> +		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
>   				data->bps[i].retired_page);
>   		if (status == -EBUSY)
>   			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
> @@ -1906,8 +1905,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
>   			goto out;
>   		}
>
> -		amdgpu_vram_mgr_reserve_range(
> -			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
> +		amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
>   			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
>   			AMDGPU_GPU_PAGE_SIZE);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index af1c4e414979..ee8ab7846905 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -129,11 +129,11 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
>   void amdgpu_vram_mgr_free_sgt(struct device *dev,
>   			      enum dma_data_direction dir,
>   			      struct sg_table *sgt);
> -uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
> -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man);
> -int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
> +uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr);
> +uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
> +int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
>   				  uint64_t start, uint64_t size);
> -int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
> +int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
>   				      uint64_t start);
>
>   int amdgpu_ttm_init(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 99c149397aae..a3c7a19047e8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -548,7 +548,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
>   static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
>   {
>   	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
> -	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
>
>   	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
>
> @@ -571,8 +570,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
>   	vf2pf_info->driver_cert = 0;
>   	vf2pf_info->os_info.all = 0;
>
> -	vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
> -	vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
> +	vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20;
> +	vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
>   	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
>   	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> index 7b2b0980ec41..7a2b487db57c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> @@ -96,10 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
>   {
>   	struct drm_device *ddev = dev_get_drvdata(dev);
>   	struct amdgpu_device *adev = drm_to_adev(ddev);
> -	struct ttm_resource_manager *man;
>
> -	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
> -	return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
> +	return sysfs_emit(buf, "%llu\n",
> +			  amdgpu_vram_mgr_usage(&adev->mman.vram_mgr));
>   }
>
>   /**
> @@ -116,10 +115,9 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
>   {
>   	struct drm_device *ddev = dev_get_drvdata(dev);
>   	struct amdgpu_device *adev = drm_to_adev(ddev);
> -	struct ttm_resource_manager *man;
>
> -	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
> -	return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
> +	return sysfs_emit(buf, "%llu\n",
> +			  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
>   }
>
>   /**
> @@ -263,16 +261,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
>   /**
>    * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
>    *
> - * @man: TTM memory type manager
> + * @mgr: amdgpu_vram_mgr pointer
>    * @start: start address of the range in VRAM
>    * @size: size of the range
>    *
> - * Reserve memory from start addess with the specified size in VRAM
> + * Reserve memory from start address with the specified size in VRAM
>    */
> -int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
> +int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
>   				  uint64_t start, uint64_t size)
>   {
> -	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
>   	struct amdgpu_vram_reservation *rsv;
>
>   	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
> @@ -285,7 +282,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
>
>   	spin_lock(&mgr->lock);
>   	list_add_tail(&mgr->reservations_pending, &rsv->node);
> -	amdgpu_vram_mgr_do_reserve(man);
> +	amdgpu_vram_mgr_do_reserve(&mgr->manager);
>   	spin_unlock(&mgr->lock);
>
>   	return 0;
> @@ -294,7 +291,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
>   /**
>    * amdgpu_vram_mgr_query_page_status - query the reservation status
>    *
> - * @man: TTM memory type manager
> + * @mgr: amdgpu_vram_mgr pointer
>    * @start: start address of a page in VRAM
>    *
>    * Returns:
> @@ -302,10 +299,9 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
>    *	0: the page has been reserved
>    *	-ENOENT: the input page is not a reservation
>    */
> -int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
> +int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
>   				      uint64_t start)
>   {
> -	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
>   	struct amdgpu_vram_reservation *rsv;
>   	int ret;
>
> @@ -632,28 +628,24 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev,
>   /**
>    * amdgpu_vram_mgr_usage - how many bytes are used in this domain
>    *
> - * @man: TTM memory type manager
> + * @mgr: amdgpu_vram_mgr pointer
>    *
>    * Returns how many bytes are used in this domain.
>    */
> -uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
> +uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr)
>   {
> -	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
> -
>   	return atomic64_read(&mgr->usage);
>   }
>
>   /**
>    * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
>    *
> - * @man: TTM memory type manager
> + * @mgr: amdgpu_vram_mgr pointer
>    *
>    * Returns how many bytes are used in the visible part of VRAM
>    */
> -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
> +uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
>   {
> -	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
> -
>   	return atomic64_read(&mgr->vis_usage);
>   }
>
> @@ -675,8 +667,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
>   	spin_unlock(&mgr->lock);
>
>   	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
> -		   man->size, amdgpu_vram_mgr_usage(man) >> 20,
> -		   amdgpu_vram_mgr_vis_usage(man) >> 20);
> +		   man->size, amdgpu_vram_mgr_usage(mgr) >> 20,
> +		   amdgpu_vram_mgr_vis_usage(mgr) >> 20);
>   }
>
>   static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
> --
> 2.32.0
>


  reply	other threads:[~2021-10-22  9:46 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-10-22  9:32 [PATCH v3 1/3] drm/amdgpu: do not pass ttm_resource_manager to gtt_mgr Nirmoy Das
2021-10-22  9:32 ` [PATCH v2 2/3] drm/amdgpu: do not pass ttm_resource_manager to vram_mgr Nirmoy Das
2021-10-22  9:46   ` Christian König [this message]
2021-10-22  9:32 ` [PATCH v3 3/3] drm/amdgpu: recover gart table at resume Nirmoy Das
2021-10-22  9:50   ` Christian König
2021-10-22  9:44 ` [PATCH v3 1/3] drm/amdgpu: do not pass ttm_resource_manager to gtt_mgr Christian König
2021-10-22  9:47   ` Nirmoy
  -- strict thread matches above, loose matches on Subject: below --
2021-10-21 14:31 [PATCH v2 " Nirmoy Das
2021-10-21 14:31 ` [PATCH v2 2/3] drm/amdgpu: do not pass ttm_resource_manager to vram_mgr Nirmoy Das

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=7f0b1097-68d2-65ba-fa5c-568cbccd8790@amd.com \
    --to=christian.koenig@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=andrey.grodzovsky@amd.com \
    --cc=lijo.lazar@amd.com \
    --cc=nirmoy.das@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.