* [PATCH libdrm] amdgpu: add a faster BO list API
@ 2019-01-07 19:31 Marek Olšák
       [not found] ` <20190107193104.4361-1-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Marek Olšák @ 2019-01-07 19:31 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

From: Marek Olšák <marek.olsak@amd.com>

---
 amdgpu/amdgpu-symbol-check |  3 ++
 amdgpu/amdgpu.h            | 56 +++++++++++++++++++++++++++++++++++++-
 amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
 amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
 4 files changed, 119 insertions(+), 1 deletion(-)

diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
index 6f5e0f95..96a44b40 100755
--- a/amdgpu/amdgpu-symbol-check
+++ b/amdgpu/amdgpu-symbol-check
@@ -12,20 +12,22 @@ _edata
 _end
 _fini
 _init
 amdgpu_bo_alloc
 amdgpu_bo_cpu_map
 amdgpu_bo_cpu_unmap
 amdgpu_bo_export
 amdgpu_bo_free
 amdgpu_bo_import
 amdgpu_bo_inc_ref
+amdgpu_bo_list_create_raw
+amdgpu_bo_list_destroy_raw
 amdgpu_bo_list_create
 amdgpu_bo_list_destroy
 amdgpu_bo_list_update
 amdgpu_bo_query_info
 amdgpu_bo_set_metadata
 amdgpu_bo_va_op
 amdgpu_bo_va_op_raw
 amdgpu_bo_wait_for_idle
 amdgpu_create_bo_from_user_mem
 amdgpu_cs_chunk_fence_info_to_data
@@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
 amdgpu_cs_destroy_syncobj
 amdgpu_cs_export_syncobj
 amdgpu_cs_fence_to_handle
 amdgpu_cs_import_syncobj
 amdgpu_cs_query_fence_status
 amdgpu_cs_query_reset_state
 amdgpu_query_sw_info
 amdgpu_cs_signal_semaphore
 amdgpu_cs_submit
 amdgpu_cs_submit_raw
+amdgpu_cs_submit_raw2
 amdgpu_cs_syncobj_export_sync_file
 amdgpu_cs_syncobj_import_sync_file
 amdgpu_cs_syncobj_reset
 amdgpu_cs_syncobj_signal
 amdgpu_cs_syncobj_wait
 amdgpu_cs_wait_fences
 amdgpu_cs_wait_semaphore
 amdgpu_device_deinitialize
 amdgpu_device_initialize
 amdgpu_find_bo_by_cpu_mapping
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index dc51659a..5b800033 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -35,20 +35,21 @@
 #define _AMDGPU_H_
 
 #include <stdint.h>
 #include <stdbool.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 struct drm_amdgpu_info_hw_ip;
+struct drm_amdgpu_bo_list_entry;
 
 /*--------------------------------------------------------------------------*/
 /* --------------------------- Defines ------------------------------------ */
 /*--------------------------------------------------------------------------*/
 
 /**
  * Define max. number of Command Buffers (IB) which could be sent to the single
  * hardware IP to accommodate CE/DE requirements
  *
  * \sa amdgpu_cs_ib_info
@@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
  *                            and no GPU access is scheduled.
  *                          1 GPU access is in fly or scheduled
  *
  * \return   0 - on success
  *          <0 - Negative POSIX Error code
  */
 int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
 			    uint64_t timeout_ns,
 			    bool *buffer_busy);
 
+/**
+ * Creates a BO list handle for command submission.
+ *
+ * \param   dev			- \c [in] Device handle.
+ *				   See #amdgpu_device_initialize()
+ * \param   number_of_buffers	- \c [in] Number of BOs in the list
+ * \param   buffers		- \c [in] List of BO handles
+ * \param   result		- \c [out] Created BO list handle
+ *
+ * \return   0 on success\n
+ *          <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_destroy_raw()
+*/
+int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
+			      uint32_t number_of_buffers,
+			      struct drm_amdgpu_bo_list_entry *buffers,
+			      uint32_t *result);
+
+/**
+ * Destroys a BO list handle.
+ *
+ * \param   bo_list	- \c [in] BO list handle.
+ *
+ * \return   0 on success\n
+ *          <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
+*/
+int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);
+
 /**
  * Creates a BO list handle for command submission.
  *
  * \param   dev			- \c [in] Device handle.
  *				   See #amdgpu_device_initialize()
  * \param   number_of_resources	- \c [in] Number of BOs in the list
  * \param   resources		- \c [in] List of BO handles
  * \param   resource_prios	- \c [in] Optional priority for each handle
  * \param   result		- \c [out] Created BO list handle
  *
  * \return   0 on success\n
  *          <0 - Negative POSIX Error code
  *
- * \sa amdgpu_bo_list_destroy()
+ * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
 */
 int amdgpu_bo_list_create(amdgpu_device_handle dev,
 			  uint32_t number_of_resources,
 			  amdgpu_bo_handle *resources,
 			  uint8_t *resource_prios,
 			  amdgpu_bo_list_handle *result);
 
 /**
  * Destroys a BO list handle.
  *
@@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
 struct drm_amdgpu_cs_chunk_dep;
 struct drm_amdgpu_cs_chunk_data;
 
 int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
 			 amdgpu_context_handle context,
 			 amdgpu_bo_list_handle bo_list_handle,
 			 int num_chunks,
 			 struct drm_amdgpu_cs_chunk *chunks,
 			 uint64_t *seq_no);
 
+/**
+ * Submit raw command submission to the kernel with a raw BO list handle.
+ *
+ * \param   dev	       - \c [in] device handle
+ * \param   context    - \c [in] context handle for context id
+ * \param   bo_list_handle - \c [in] raw bo list handle (0 for none)
+ * \param   num_chunks - \c [in] number of CS chunks to submit
+ * \param   chunks     - \c [in] array of CS chunks
+ * \param   seq_no     - \c [out] output sequence number for submission.
+ *
+ * \return   0 on success\n
+ *          <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
+ */
+int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
+			  amdgpu_context_handle context,
+			  uint32_t bo_list_handle,
+			  int num_chunks,
+			  struct drm_amdgpu_cs_chunk *chunks,
+			  uint64_t *seq_no);
+
 void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
 				  struct drm_amdgpu_cs_chunk_dep *dep);
 void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
 					struct drm_amdgpu_cs_chunk_data *data);
 
 /**
  * Reserve VMID
  * \param   context - \c [in]  GPU Context
  * \param   flags - \c [in]  TBD
  *
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index c0f42e81..21bc73aa 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -611,20 +611,56 @@ drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
 	pthread_mutex_lock(&dev->bo_table_mutex);
 	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
 				*buf_handle);
 	pthread_mutex_unlock(&dev->bo_table_mutex);
 	if (r)
 		amdgpu_bo_free(*buf_handle);
 out:
 	return r;
 }
 
+drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
+					 uint32_t number_of_buffers,
+					 struct drm_amdgpu_bo_list_entry *buffers,
+					 uint32_t *result)
+{
+	union drm_amdgpu_bo_list args;
+	int r;
+
+	memset(&args, 0, sizeof(args));
+	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
+	args.in.bo_number = number_of_buffers;
+	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+	args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
+
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+				&args, sizeof(args));
+	if (r)
+		return r;
+
+	*result = args.out.list_handle;
+	return 0;
+}
+
+drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
+					  uint32_t bo_list)
+{
+	union drm_amdgpu_bo_list args;
+
+	memset(&args, 0, sizeof(args));
+	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
+	args.in.list_handle = bo_list;
+
+	return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+				   &args, sizeof(args));
+}
+
 drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
 				     uint32_t number_of_resources,
 				     amdgpu_bo_handle *resources,
 				     uint8_t *resource_prios,
 				     amdgpu_bo_list_handle *result)
 {
 	struct drm_amdgpu_bo_list_entry *list;
 	union drm_amdgpu_bo_list args;
 	unsigned i;
 	int r;
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index 3b8231aa..5bedf748 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -724,20 +724,45 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
 	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
 				&cs, sizeof(cs));
 	if (r)
 		return r;
 
 	if (seq_no)
 		*seq_no = cs.out.handle;
 	return 0;
 }
 
+drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
+				     amdgpu_context_handle context,
+				     uint32_t bo_list_handle,
+				     int num_chunks,
+				     struct drm_amdgpu_cs_chunk *chunks,
+				     uint64_t *seq_no)
+{
+	union drm_amdgpu_cs cs = {0};
+	uint64_t *chunk_array;
+	int i, r;
+
+	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
+	for (i = 0; i < num_chunks; i++)
+		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
+	cs.in.ctx_id = context->id;
+	cs.in.bo_list_handle = bo_list_handle;
+	cs.in.num_chunks = num_chunks;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
+				&cs, sizeof(cs));
+	if (!r && seq_no)
+		*seq_no = cs.out.handle;
+	return r;
+}
+
 drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
 					struct drm_amdgpu_cs_chunk_data *data)
 {
 	data->fence_data.handle = fence_info->handle->handle;
 	data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
 }
 
 drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
 					struct drm_amdgpu_cs_chunk_dep *dep)
 {
-- 
2.17.1


* RE: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found] ` <20190107193104.4361-1-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-01-08  3:16   ` Zhou, David(ChunMing)
  2019-01-09 10:28   ` Christian König
  2019-01-16 12:46   ` Bas Nieuwenhuizen
  2 siblings, 0 replies; 23+ messages in thread
From: Zhou, David(ChunMing) @ 2019-01-08  3:16 UTC (permalink / raw)
  To: Marek Olšák, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Looks good to me, Reviewed-by: Chunming Zhou <david1.zhou@amd.com>


* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found] ` <20190107193104.4361-1-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2019-01-08  3:16   ` Zhou, David(ChunMing)
@ 2019-01-09 10:28   ` Christian König
       [not found]     ` <a0a15ed6-eb1a-fbbe-7c1b-e3b9a64c1008-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2019-01-16 12:46   ` Bas Nieuwenhuizen
  2 siblings, 1 reply; 23+ messages in thread
From: Christian König @ 2019-01-09 10:28 UTC (permalink / raw)
  To: Marek Olšák, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Looks good, but I'm wondering what's the actual improvement?

Christian.


* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]     ` <a0a15ed6-eb1a-fbbe-7c1b-e3b9a64c1008-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-01-09 12:36       ` Marek Olšák
       [not found]         ` <CAAxE2A5M2WW6uPFo0a=+6ukbtgx5xHfkKUKOB9dgtB=qH88htQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Marek Olšák @ 2019-01-09 12:36 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx mailing list



On Wed, Jan 9, 2019, 5:28 AM Christian König
<ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> Looks good, but I'm wondering what's the actual improvement?
>

No malloc calls, and one fewer for-loop copying the BO list.

Marek
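
For illustration (not part of the patch): a minimal caller-side sketch of the
raw path, using only the entry points added by the patch plus struct
drm_amdgpu_bo_list_entry from the kernel's amdgpu_drm.h. The caller fills the
kernel-visible entries itself, which is the allocation and copy that the
existing amdgpu_bo_list_create() path otherwise performs on the caller's
behalf. The helper name and the minimal error handling here are assumptions
for the example, not part of the patch.

#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Sketch: create a raw BO list, submit with it, then destroy it.
 * entries[i].bo_handle is the KMS handle of each BO (e.g. obtained once via
 * amdgpu_bo_export() with amdgpu_bo_handle_type_kms, assuming the caller
 * keeps those handles around) and entries[i].bo_priority is the optional
 * per-BO priority. */
static int submit_with_raw_list(amdgpu_device_handle dev,
				amdgpu_context_handle ctx,
				struct drm_amdgpu_bo_list_entry *entries,
				uint32_t num_entries,
				struct drm_amdgpu_cs_chunk *chunks,
				int num_chunks,
				uint64_t *seq_no)
{
	uint32_t bo_list;	/* raw kernel handle, not amdgpu_bo_list_handle */
	int r;

	r = amdgpu_bo_list_create_raw(dev, num_entries, entries, &bo_list);
	if (r)
		return r;

	r = amdgpu_cs_submit_raw2(dev, ctx, bo_list, num_chunks, chunks, seq_no);

	amdgpu_bo_list_destroy_raw(dev, bo_list);
	return r;
}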



* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]         ` <CAAxE2A5M2WW6uPFo0a=+6ukbtgx5xHfkKUKOB9dgtB=qH88htQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-09 13:08           ` Christian König
       [not found]             ` <513ee137-7e99-c8fc-9e3b-e9077ead60a3-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Christian König @ 2019-01-09 13:08 UTC (permalink / raw)
  To: Marek Olšák, Christian König; +Cc: amd-gfx mailing list



On 09.01.19 at 13:36, Marek Olšák wrote:
>
>
> On Wed, Jan 9, 2019, 5:28 AM Christian König
> <ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>
>     Looks good, but I'm wondering what's the actual improvement?
>
>
> No malloc calls, and one fewer for-loop copying the BO list.

Yeah, but didn't we want to get completely rid of the bo list?

Christian.

>
> Marek
>
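
For reference: the amdgpu_cs_submit_raw2() documentation in the patch already
allows passing 0 as the raw BO list handle ("0 for none"). A minimal sketch of
that case, reusing the parameter names from the declaration; how the kernel
learns which buffers a submission references when no list is attached is
outside the scope of this patch:

	/* Submit without attaching any BO list (raw handle 0 = none). */
	int r = amdgpu_cs_submit_raw2(dev, context, 0 /* bo_list_handle */,
				      num_chunks, chunks, seq_no);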
>
>     Christian.
>
>     Am 07.01.19 um 20:31 schrieb Marek Olšák:
>     > From: Marek Olšák <marek.olsak-5C7GfCeVMHo@public.gmane.org <mailto:marek.olsak-5C7GfCeVMHo@public.gmane.org>>
>     >
>     > ---
>     >   amdgpu/amdgpu-symbol-check |  3 ++
>     >   amdgpu/amdgpu.h            | 56
>     +++++++++++++++++++++++++++++++++++++-
>     >   amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
>     >   amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
>     >   4 files changed, 119 insertions(+), 1 deletion(-)
>     >
>     > diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
>     > index 6f5e0f95..96a44b40 100755
>     > --- a/amdgpu/amdgpu-symbol-check
>     > +++ b/amdgpu/amdgpu-symbol-check
>     > @@ -12,20 +12,22 @@ _edata
>     >   _end
>     >   _fini
>     >   _init
>     >   amdgpu_bo_alloc
>     >   amdgpu_bo_cpu_map
>     >   amdgpu_bo_cpu_unmap
>     >   amdgpu_bo_export
>     >   amdgpu_bo_free
>     >   amdgpu_bo_import
>     >   amdgpu_bo_inc_ref
>     > +amdgpu_bo_list_create_raw
>     > +amdgpu_bo_list_destroy_raw
>     >   amdgpu_bo_list_create
>     >   amdgpu_bo_list_destroy
>     >   amdgpu_bo_list_update
>     >   amdgpu_bo_query_info
>     >   amdgpu_bo_set_metadata
>     >   amdgpu_bo_va_op
>     >   amdgpu_bo_va_op_raw
>     >   amdgpu_bo_wait_for_idle
>     >   amdgpu_create_bo_from_user_mem
>     >   amdgpu_cs_chunk_fence_info_to_data
>     > @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
>     >   amdgpu_cs_destroy_syncobj
>     >   amdgpu_cs_export_syncobj
>     >   amdgpu_cs_fence_to_handle
>     >   amdgpu_cs_import_syncobj
>     >   amdgpu_cs_query_fence_status
>     >   amdgpu_cs_query_reset_state
>     >   amdgpu_query_sw_info
>     >   amdgpu_cs_signal_semaphore
>     >   amdgpu_cs_submit
>     >   amdgpu_cs_submit_raw
>     > +amdgpu_cs_submit_raw2
>     >   amdgpu_cs_syncobj_export_sync_file
>     >   amdgpu_cs_syncobj_import_sync_file
>     >   amdgpu_cs_syncobj_reset
>     >   amdgpu_cs_syncobj_signal
>     >   amdgpu_cs_syncobj_wait
>     >   amdgpu_cs_wait_fences
>     >   amdgpu_cs_wait_semaphore
>     >   amdgpu_device_deinitialize
>     >   amdgpu_device_initialize
>     >   amdgpu_find_bo_by_cpu_mapping
>     > diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
>     > index dc51659a..5b800033 100644
>     > --- a/amdgpu/amdgpu.h
>     > +++ b/amdgpu/amdgpu.h
>     > @@ -35,20 +35,21 @@
>     >   #define _AMDGPU_H_
>     >
>     >   #include <stdint.h>
>     >   #include <stdbool.h>
>     >
>     >   #ifdef __cplusplus
>     >   extern "C" {
>     >   #endif
>     >
>     >   struct drm_amdgpu_info_hw_ip;
>     > +struct drm_amdgpu_bo_list_entry;
>     >
>     >
>      /*--------------------------------------------------------------------------*/
>     >   /* --------------------------- Defines
>     ------------------------------------ */
>     >
>      /*--------------------------------------------------------------------------*/
>     >
>     >   /**
>     >    * Define max. number of Command Buffers (IB) which could be
>     sent to the single
>     >    * hardware IP to accommodate CE/DE requirements
>     >    *
>     >    * \sa amdgpu_cs_ib_info
>     > @@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle
>     buf_handle);
>     >    *                            and no GPU access is scheduled.
>     >    *                          1 GPU access is in fly or scheduled
>     >    *
>     >    * \return   0 - on success
>     >    *          <0 - Negative POSIX Error code
>     >    */
>     >   int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
>     >                           uint64_t timeout_ns,
>     >                           bool *buffer_busy);
>     >
>     > +/**
>     > + * Creates a BO list handle for command submission.
>     > + *
>     > + * \param   dev                      - \c [in] Device handle.
>     > + *                              See #amdgpu_device_initialize()
>     > + * \param   number_of_buffers        - \c [in] Number of BOs in
>     the list
>     > + * \param   buffers          - \c [in] List of BO handles
>     > + * \param   result           - \c [out] Created BO list handle
>     > + *
>     > + * \return   0 on success\n
>     > + *          <0 - Negative POSIX Error code
>     > + *
>     > + * \sa amdgpu_bo_list_destroy_raw()
>     > +*/
>     > +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>     > +                           uint32_t number_of_buffers,
>     > +                           struct drm_amdgpu_bo_list_entry
>     *buffers,
>     > +                           uint32_t *result);
>     > +
>     > +/**
>     > + * Destroys a BO list handle.
>     > + *
>     > + * \param   bo_list  - \c [in] BO list handle.
>     > + *
>     > + * \return   0 on success\n
>     > + *          <0 - Negative POSIX Error code
>     > + *
>     > + * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
>     > +*/
>     > +int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
>     uint32_t bo_list);
>     > +
>     >   /**
>     >    * Creates a BO list handle for command submission.
>     >    *
>     >    * \param   dev                     - \c [in] Device handle.
>     >    *                             See #amdgpu_device_initialize()
>     >    * \param   number_of_resources     - \c [in] Number of BOs in
>     the list
>     >    * \param   resources               - \c [in] List of BO handles
>     >    * \param   resource_prios  - \c [in] Optional priority for
>     each handle
>     >    * \param   result          - \c [out] Created BO list handle
>     >    *
>     >    * \return   0 on success\n
>     >    *          <0 - Negative POSIX Error code
>     >    *
>     > - * \sa amdgpu_bo_list_destroy()
>     > + * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
>     >   */
>     >   int amdgpu_bo_list_create(amdgpu_device_handle dev,
>     >                         uint32_t number_of_resources,
>     >                         amdgpu_bo_handle *resources,
>     >                         uint8_t *resource_prios,
>     >                         amdgpu_bo_list_handle *result);
>     >
>     >   /**
>     >    * Destroys a BO list handle.
>     >    *
>     > @@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
>     >   struct drm_amdgpu_cs_chunk_dep;
>     >   struct drm_amdgpu_cs_chunk_data;
>     >
>     >   int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>     >                        amdgpu_context_handle context,
>     >                        amdgpu_bo_list_handle bo_list_handle,
>     >                        int num_chunks,
>     >                        struct drm_amdgpu_cs_chunk *chunks,
>     >                        uint64_t *seq_no);
>     >
>     > +/**
>     > + * Submit raw command submission to the kernel with a raw BO
>     list handle.
>     > + *
>     > + * \param   dev             - \c [in] device handle
>     > + * \param   context    - \c [in] context handle for context id
>     > + * \param   bo_list_handle - \c [in] raw bo list handle (0 for
>     none)
>     > + * \param   num_chunks - \c [in] number of CS chunks to submit
>     > + * \param   chunks     - \c [in] array of CS chunks
>     > + * \param   seq_no     - \c [out] output sequence number for
>     submission.
>     > + *
>     > + * \return   0 on success\n
>     > + *          <0 - Negative POSIX Error code
>     > + *
>     > + * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
>     > + */
>     > +int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>     > +                       amdgpu_context_handle context,
>     > +                       uint32_t bo_list_handle,
>     > +                       int num_chunks,
>     > +                       struct drm_amdgpu_cs_chunk *chunks,
>     > +                       uint64_t *seq_no);
>     > +
>     >   void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>     >                                 struct drm_amdgpu_cs_chunk_dep
>     *dep);
>     >   void amdgpu_cs_chunk_fence_info_to_data(struct
>     amdgpu_cs_fence_info *fence_info,
>     >                                       struct
>     drm_amdgpu_cs_chunk_data *data);
>     >
>     >   /**
>     >    * Reserve VMID
>     >    * \param   context - \c [in]  GPU Context
>     >    * \param   flags - \c [in]  TBD
>     >    *
>     > diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
>     > index c0f42e81..21bc73aa 100644
>     > --- a/amdgpu/amdgpu_bo.c
>     > +++ b/amdgpu/amdgpu_bo.c
>     > @@ -611,20 +611,56 @@ drm_public int
>     amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
>     >  pthread_mutex_lock(&dev->bo_table_mutex);
>     >       r = handle_table_insert(&dev->bo_handles,
>     (*buf_handle)->handle,
>     >                               *buf_handle);
>     >  pthread_mutex_unlock(&dev->bo_table_mutex);
>     >       if (r)
>     >               amdgpu_bo_free(*buf_handle);
>     >   out:
>     >       return r;
>     >   }
>     >
>     > +drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>     > +                                      uint32_t number_of_buffers,
>     > +                                      struct
>     drm_amdgpu_bo_list_entry *buffers,
>     > +                                      uint32_t *result)
>     > +{
>     > +     union drm_amdgpu_bo_list args;
>     > +     int r;
>     > +
>     > +     memset(&args, 0, sizeof(args));
>     > +     args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
>     > +     args.in.bo_number = number_of_buffers;
>     > +     args.in.bo_info_size = sizeof(struct
>     drm_amdgpu_bo_list_entry);
>     > +     args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
>     > +
>     > +     r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>     > +                             &args, sizeof(args));
>     > +     if (r)
>     > +             return r;
>     > +
>     > +     *result = args.out.list_handle;
>     > +     return 0;
>     > +}
>     > +
>     > +drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
>     > +                                       uint32_t bo_list)
>     > +{
>     > +     union drm_amdgpu_bo_list args;
>     > +
>     > +     memset(&args, 0, sizeof(args));
>     > +     args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
>     > +     args.in.list_handle = bo_list;
>     > +
>     > +     return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>     > +                                &args, sizeof(args));
>     > +}
>     > +
>     >   drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
>     >                                    uint32_t number_of_resources,
>     >                                    amdgpu_bo_handle *resources,
>     >                                    uint8_t *resource_prios,
>     > amdgpu_bo_list_handle *result)
>     >   {
>     >       struct drm_amdgpu_bo_list_entry *list;
>     >       union drm_amdgpu_bo_list args;
>     >       unsigned i;
>     >       int r;
>     > diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
>     > index 3b8231aa..5bedf748 100644
>     > --- a/amdgpu/amdgpu_cs.c
>     > +++ b/amdgpu/amdgpu_cs.c
>     > @@ -724,20 +724,45 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>     >       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>     >                               &cs, sizeof(cs));
>     >       if (r)
>     >               return r;
>     >
>     >       if (seq_no)
>     >               *seq_no = cs.out.handle;
>     >       return 0;
>     >   }
>     >
>     > +drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>     > +                                  amdgpu_context_handle context,
>     > +                                  uint32_t bo_list_handle,
>     > +                                  int num_chunks,
>     > +                                  struct drm_amdgpu_cs_chunk *chunks,
>     > +                                  uint64_t *seq_no)
>     > +{
>     > +     union drm_amdgpu_cs cs = {0};
>     > +     uint64_t *chunk_array;
>     > +     int i, r;
>     > +
>     > +     chunk_array = alloca(sizeof(uint64_t) * num_chunks);
>     > +     for (i = 0; i < num_chunks; i++)
>     > +             chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
>     > +     cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
>     > +     cs.in.ctx_id = context->id;
>     > +     cs.in.bo_list_handle = bo_list_handle;
>     > +     cs.in.num_chunks = num_chunks;
>     > +     r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>     > +                             &cs, sizeof(cs));
>     > +     if (!r && seq_no)
>     > +             *seq_no = cs.out.handle;
>     > +     return r;
>     > +}
>     > +
>     >   drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>     >                                       struct drm_amdgpu_cs_chunk_data *data)
>     >   {
>     >       data->fence_data.handle = fence_info->handle->handle;
>     >       data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
>     >   }
>     >
>     >   drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>     >                                       struct drm_amdgpu_cs_chunk_dep *dep)
>     >   {
>
>

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]             ` <513ee137-7e99-c8fc-9e3b-e9077ead60a3-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-01-09 16:14               ` Marek Olšák
       [not found]                 ` <CAAxE2A5WYWCWAPA0K+vYDirtT6BV7QJoZSbEhh0Z57OF860mWQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Marek Olšák @ 2019-01-09 16:14 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx mailing list



On Wed, Jan 9, 2019 at 8:09 AM Christian König <
ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> Am 09.01.19 um 13:36 schrieb Marek Olšák:
>
>
>
> On Wed, Jan 9, 2019, 5:28 AM Christian König <
> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org wrote:
>
>> Looks good, but I'm wondering what's the actual improvement?
>>
>
> No malloc calls and 1 less for loop copying the bo list.
>
>
> Yeah, but didn't we want to get completely rid of the bo list?
>

If we have multiple IBs (e.g. gfx + compute) that share a BO list, I think
it's faster to send the BO list to the kernel only once.
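
Roughly this pattern (a minimal sketch using the new entry points from this
patch; error handling is omitted, and dev, ctx and the bo_handle values are
assumed to come from wherever the driver already tracks its raw KMS handles):

    struct drm_amdgpu_bo_list_entry entries[2] = {
        { .bo_handle = gfx_bo_kms_handle,     .bo_priority = 0 },
        { .bo_handle = compute_bo_kms_handle, .bo_priority = 0 },
    };
    uint32_t list;
    uint64_t gfx_seq, compute_seq;

    /* Send the shared BO list to the kernel once... */
    amdgpu_bo_list_create_raw(dev, 2, entries, &list);

    /* ...and reuse the same raw handle for both submissions. */
    amdgpu_cs_submit_raw2(dev, ctx, list, num_gfx_chunks, gfx_chunks, &gfx_seq);
    amdgpu_cs_submit_raw2(dev, ctx, list, num_compute_chunks, compute_chunks,
                          &compute_seq);

    amdgpu_bo_list_destroy_raw(dev, list);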

Marek


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                 ` <CAAxE2A5WYWCWAPA0K+vYDirtT6BV7QJoZSbEhh0Z57OF860mWQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-09 18:41                   ` Christian König
       [not found]                     ` <7f85afd6-b17b-1c50-ba03-c03dd6e9a362-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Christian König @ 2019-01-09 18:41 UTC (permalink / raw)
  To: Marek Olšák, Christian König; +Cc: amd-gfx mailing list



On 09.01.19 at 17:14, Marek Olšák wrote:
> On Wed, Jan 9, 2019 at 8:09 AM Christian König 
> <ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org 
> <mailto:ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>> wrote:
>
>     Am 09.01.19 um 13:36 schrieb Marek Olšák:
>>
>>
>>     On Wed, Jan 9, 2019, 5:28 AM Christian König
>>     <ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org
>>     <mailto:ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>>
>>         Looks good, but I'm wondering what's the actual improvement?
>>
>>
>>     No malloc calls and 1 less for loop copying the bo list.
>
>     Yeah, but didn't we want to get completely rid of the bo list?
>
>
> If we have multiple IBs (e.g. gfx + compute) that share a BO list, I 
> think it's faster to send the BO list to the kernel only once.

That's not really faster.

The only thing we save is a single loop over all BOs to look up the
handle into a pointer, and that is only a tiny fraction of the overhead.

The majority of the overhead is locking the BOs and reserving space for
the submission.

What could really help here is to submit gfx+compute together in just one
CS IOCTL. This way we would need the locking and space reservation only
once.

It's a bit of work on the kernel side, but certainly doable.
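
Just to sketch the idea (purely hypothetical, neither the current kernel nor
this patch accepts IB chunks for different IPs in one submission): with the
chunk interface it could end up as a single submission carrying one IB chunk
per IP, e.g.

    struct drm_amdgpu_cs_chunk_ib gfx_ib = {
        .ip_type  = AMDGPU_HW_IP_GFX,
        .va_start = gfx_ib_va,
        .ib_bytes = gfx_ib_size,
    };
    struct drm_amdgpu_cs_chunk_ib compute_ib = {
        .ip_type  = AMDGPU_HW_IP_COMPUTE,
        .va_start = compute_ib_va,
        .ib_bytes = compute_ib_size,
    };
    struct drm_amdgpu_cs_chunk chunks[2] = {
        { .chunk_id = AMDGPU_CHUNK_ID_IB, .length_dw = sizeof(gfx_ib) / 4,
          .chunk_data = (uint64_t)(uintptr_t)&gfx_ib },
        { .chunk_id = AMDGPU_CHUNK_ID_IB, .length_dw = sizeof(compute_ib) / 4,
          .chunk_data = (uint64_t)(uintptr_t)&compute_ib },
    };

    /* One ioctl, so BO locking and space reservation happen only once. */
    amdgpu_cs_submit_raw2(dev, ctx, bo_list, 2, chunks, &seq_no);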

Christian.

>
> Marek
>

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                     ` <7f85afd6-b17b-1c50-ba03-c03dd6e9a362-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-01-09 23:39                       ` Marek Olšák
       [not found]                         ` <CAAxE2A5RjR=+2Rs5HDx1rV0ftdkZJX=6TQDkvRQSxfo++vnXOA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Marek Olšák @ 2019-01-09 23:39 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx mailing list



On Wed, Jan 9, 2019 at 1:41 PM Christian König <
ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> Am 09.01.19 um 17:14 schrieb Marek Olšák:
>
> On Wed, Jan 9, 2019 at 8:09 AM Christian König <
> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>
>> Am 09.01.19 um 13:36 schrieb Marek Olšák:
>>
>>
>>
>> On Wed, Jan 9, 2019, 5:28 AM Christian König <
>> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org wrote:
>>
>>> Looks good, but I'm wondering what's the actual improvement?
>>>
>>
>> No malloc calls and 1 less for loop copying the bo list.
>>
>>
>> Yeah, but didn't we want to get completely rid of the bo list?
>>
>
> If we have multiple IBs (e.g. gfx + compute) that share a BO list, I think
> it's faster to send the BO list to the kernel only once.
>
>
> That's not really faster.
>
> The only thing we safe us is a single loop over all BOs to lockup the
> handle into a pointer and that is only a tiny fraction of the overhead.
>
> The majority of the overhead is locking the BOs and reserving space for
> the submission.
>
> What could really help here is to submit gfx+comput together in just one
> CS IOCTL. This way we would need the locking and space reservation only
> once.
>
> It's a bit of work in the kernel side, but certainly doable.
>

OK. Any objections to this patch?

Thanks,
Marek


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                         ` <CAAxE2A5RjR=+2Rs5HDx1rV0ftdkZJX=6TQDkvRQSxfo++vnXOA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-10  9:15                           ` Koenig, Christian
       [not found]                             ` <e23ecf17-dbd4-ecef-f8fc-4dc849e7bddf-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Koenig, Christian @ 2019-01-10  9:15 UTC (permalink / raw)
  To: Marek Olšák; +Cc: amd-gfx mailing list



On 10.01.19 at 00:39, Marek Olšák wrote:
On Wed, Jan 9, 2019 at 1:41 PM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
On 09.01.19 at 17:14, Marek Olšák wrote:
On Wed, Jan 9, 2019 at 8:09 AM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
On 09.01.19 at 13:36, Marek Olšák wrote:


On Wed, Jan 9, 2019, 5:28 AM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
Looks good, but I'm wondering what's the actual improvement?

No malloc calls and 1 less for loop copying the bo list.

Yeah, but didn't we want to get completely rid of the bo list?

If we have multiple IBs (e.g. gfx + compute) that share a BO list, I think it's faster to send the BO list to the kernel only once.

That's not really faster.

The only thing we safe us is a single loop over all BOs to lockup the handle into a pointer and that is only a tiny fraction of the overhead.

The majority of the overhead is locking the BOs and reserving space for the submission.

What could really help here is to submit gfx+comput together in just one CS IOCTL. This way we would need the locking and space reservation only once.

It's a bit of work in the kernel side, but certainly doable.

OK. Any objections to this patch?

In general I'm wondering if we couldn't avoid adding so much new interface.

For example we can avoid the malloc() when we just cache the last freed bo_list structure in the device. We would just need an atomic pointer exchange operation for that.

This way we even don't need to change mesa at all.
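
As a rough sketch of what I mean (C11 atomics; the cached_bo_list member is
invented here for illustration and does not exist in the device structure
today, it would have to be an _Atomic pointer):

    /* In amdgpu_bo_list_destroy(): park the structure instead of freeing it. */
    struct amdgpu_bo_list *old = atomic_exchange(&dev->cached_bo_list, list);
    free(old);  /* NULL or an older parked list */

    /* In amdgpu_bo_list_create(): try to reuse the parked structure first. */
    struct amdgpu_bo_list *list = atomic_exchange(&dev->cached_bo_list, NULL);
    if (!list)
        list = calloc(1, sizeof(*list));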

Regarding optimization, this chunk can be replaced by a cast on 64bit:

+       chunk_array = alloca(sizeof(uint64_t) * num_chunks);
+       for (i = 0; i < num_chunks; i++)
+               chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];

Regards,
Christian.


Thanks,
Marek



^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                             ` <e23ecf17-dbd4-ecef-f8fc-4dc849e7bddf-5C7GfCeVMHo@public.gmane.org>
@ 2019-01-10 11:41                               ` Marek Olšák
       [not found]                                 ` <CAAxE2A6z_LLzzsLqsBtLyXcFTsLG_8FQc7=oN2p_nLJGoXbmgg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Marek Olšák @ 2019-01-10 11:41 UTC (permalink / raw)
  To: Koenig, Christian; +Cc: amd-gfx mailing list



On Thu, Jan 10, 2019, 4:15 AM Koenig, Christian <Christian.Koenig-5C7GfCeVMHo@public.gmane.org
wrote:

> Am 10.01.19 um 00:39 schrieb Marek Olšák:
>
> On Wed, Jan 9, 2019 at 1:41 PM Christian König <
> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>
>> Am 09.01.19 um 17:14 schrieb Marek Olšák:
>>
>> On Wed, Jan 9, 2019 at 8:09 AM Christian König <
>> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>>
>>> Am 09.01.19 um 13:36 schrieb Marek Olšák:
>>>
>>>
>>>
>>> On Wed, Jan 9, 2019, 5:28 AM Christian König <
>>> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org wrote:
>>>
>>>> Looks good, but I'm wondering what's the actual improvement?
>>>>
>>>
>>> No malloc calls and 1 less for loop copying the bo list.
>>>
>>>
>>> Yeah, but didn't we want to get completely rid of the bo list?
>>>
>>
>> If we have multiple IBs (e.g. gfx + compute) that share a BO list, I
>> think it's faster to send the BO list to the kernel only once.
>>
>>
>> That's not really faster.
>>
>> The only thing we safe us is a single loop over all BOs to lockup the
>> handle into a pointer and that is only a tiny fraction of the overhead.
>>
>> The majority of the overhead is locking the BOs and reserving space for
>> the submission.
>>
>> What could really help here is to submit gfx+comput together in just one
>> CS IOCTL. This way we would need the locking and space reservation only
>> once.
>>
>> It's a bit of work in the kernel side, but certainly doable.
>>
>
> OK. Any objections to this patch?
>
>
> In general I'm wondering if we couldn't avoid adding so much new interface.
>

There are Vulkan drivers that still use the bo_list interface.


> For example we can avoid the malloc() when we just cache the last freed
> bo_list structure in the device. We would just need an atomic pointer
> exchange operation for that.
>

> This way we even don't need to change mesa at all.
>

There is still the for loop that we need to get rid of.


> Regarding optimization, this chunk can be replaced by a cast on 64bit:
>
> +	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
> +	for (i = 0; i < num_chunks; i++)
> +		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
>

It can't. The input is an array of structures. The ioctl takes an array of
pointers.
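
Roughly, the two layouts are (just to illustrate the point):

    /* What the caller passes in: one contiguous array of chunk structs. */
    struct drm_amdgpu_cs_chunk chunks[num_chunks];

    /* What cs.in.chunks must point to: an array of uint64_t, where entry i
     * holds the user-space address of chunks[i].  Casting &chunks[0] would
     * make the kernel interpret struct contents as pointers. */
    uint64_t chunk_array[num_chunks];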

Marek


> Regards,
> Christian.
>
>
> Thanks,
> Marek
>
>
>


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                                 ` <CAAxE2A6z_LLzzsLqsBtLyXcFTsLG_8FQc7=oN2p_nLJGoXbmgg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-10 11:51                                   ` Christian König
       [not found]                                     ` <7544c927-8b1f-c7d0-dd9d-21311ffca542-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Christian König @ 2019-01-10 11:51 UTC (permalink / raw)
  To: Marek Olšák, Koenig, Christian; +Cc: amd-gfx mailing list



On 10.01.19 at 12:41, Marek Olšák wrote:
>
>
> On Thu, Jan 10, 2019, 4:15 AM Koenig, Christian 
> <Christian.Koenig-5C7GfCeVMHo@public.gmane.org <mailto:Christian.Koenig-5C7GfCeVMHo@public.gmane.org> wrote:
>
>     Am 10.01.19 um 00:39 schrieb Marek Olšák:
>>     On Wed, Jan 9, 2019 at 1:41 PM Christian König
>>     <ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org
>>     <mailto:ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>> wrote:
>>
>>         Am 09.01.19 um 17:14 schrieb Marek Olšák:
>>>         On Wed, Jan 9, 2019 at 8:09 AM Christian König
>>>         <ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org
>>>         <mailto:ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>> wrote:
>>>
>>>             Am 09.01.19 um 13:36 schrieb Marek Olšák:
>>>>
>>>>
>>>>             On Wed, Jan 9, 2019, 5:28 AM Christian König
>>>>             <ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org
>>>>             <mailto:ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>>>>
>>>>                 Looks good, but I'm wondering what's the actual
>>>>                 improvement?
>>>>
>>>>
>>>>             No malloc calls and 1 less for loop copying the bo list.
>>>
>>>             Yeah, but didn't we want to get completely rid of the bo
>>>             list?
>>>
>>>
>>>         If we have multiple IBs (e.g. gfx + compute) that share a BO
>>>         list, I think it's faster to send the BO list to the kernel
>>>         only once.
>>
>>         That's not really faster.
>>
>>         The only thing we safe us is a single loop over all BOs to
>>         lockup the handle into a pointer and that is only a tiny
>>         fraction of the overhead.
>>
>>         The majority of the overhead is locking the BOs and reserving
>>         space for the submission.
>>
>>         What could really help here is to submit gfx+comput together
>>         in just one CS IOCTL. This way we would need the locking and
>>         space reservation only once.
>>
>>         It's a bit of work in the kernel side, but certainly doable.
>>
>>
>>     OK. Any objections to this patch?
>
>     In general I'm wondering if we couldn't avoid adding so much new
>     interface.
>
>
> There are Vulkan drivers that still use the bo_list interface.
>
>
>     For example we can avoid the malloc() when we just cache the last
>     freed bo_list structure in the device. We would just need an
>     atomic pointer exchange operation for that.
>
>
>     This way we even don't need to change mesa at all.
>
>
> There is still the for loop that we need to get rid of.

Yeah, but I'm fine with handling that through an amdgpu_bo_list_create_raw which
only takes the handles and still returns the amdgpu_bo_list structure we
are used to.

What I'm mostly concerned about is having another CS function to
maintain.

>
>
>     Regarding optimization, this chunk can be replaced by a cast on 64bit:
>>     +	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
>>     +	for (i = 0; i < num_chunks; i++)
>>     +		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
>
> It can't. The input is an array of structures. The ioctl takes an 
> array of pointers.

Ah! Haven't seen this, sorry for the noise.

Christian.

>
> Marek
>
>
>     Regards,
>     Christian.
>
>>
>>     Thanks,
>>     Marek
>
>

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                                     ` <7544c927-8b1f-c7d0-dd9d-21311ffca542-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-01-10 12:25                                       ` Marek Olšák
  0 siblings, 0 replies; 23+ messages in thread
From: Marek Olšák @ 2019-01-10 12:25 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx mailing list



On Thu, Jan 10, 2019, 6:51 AM Christian König <
ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org wrote:

> Am 10.01.19 um 12:41 schrieb Marek Olšák:
>
>
>
> On Thu, Jan 10, 2019, 4:15 AM Koenig, Christian <Christian.Koenig-5C7GfCeVMHo@public.gmane.org
> wrote:
>
>> Am 10.01.19 um 00:39 schrieb Marek Olšák:
>>
>> On Wed, Jan 9, 2019 at 1:41 PM Christian König <
>> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>>
>>> Am 09.01.19 um 17:14 schrieb Marek Olšák:
>>>
>>> On Wed, Jan 9, 2019 at 8:09 AM Christian König <
>>> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>>>
>>>> Am 09.01.19 um 13:36 schrieb Marek Olšák:
>>>>
>>>>
>>>>
>>>> On Wed, Jan 9, 2019, 5:28 AM Christian König <
>>>> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org wrote:
>>>>
>>>>> Looks good, but I'm wondering what's the actual improvement?
>>>>>
>>>>
>>>> No malloc calls and 1 less for loop copying the bo list.
>>>>
>>>>
>>>> Yeah, but didn't we want to get completely rid of the bo list?
>>>>
>>>
>>> If we have multiple IBs (e.g. gfx + compute) that share a BO list, I
>>> think it's faster to send the BO list to the kernel only once.
>>>
>>>
>>> That's not really faster.
>>>
>>> The only thing we safe us is a single loop over all BOs to lockup the
>>> handle into a pointer and that is only a tiny fraction of the overhead.
>>>
>>> The majority of the overhead is locking the BOs and reserving space for
>>> the submission.
>>>
>>> What could really help here is to submit gfx+comput together in just one
>>> CS IOCTL. This way we would need the locking and space reservation only
>>> once.
>>>
>>> It's a bit of work in the kernel side, but certainly doable.
>>>
>>
>> OK. Any objections to this patch?
>>
>>
>> In general I'm wondering if we couldn't avoid adding so much new
>> interface.
>>
>
> There are Vulkan drivers that still use the bo_list interface.
>
>
>> For example we can avoid the malloc() when we just cache the last freed
>> bo_list structure in the device. We would just need an atomic pointer
>> exchange operation for that.
>>
>
>> This way we even don't need to change mesa at all.
>>
>
> There is still the for loop that we need to get rid of.
>
>
> Yeah, but that I'm fine to handle with a amdgpu_bo_list_create_raw which
> only takes the handles and still returns the amdgpu_bo_list structure we
> are used to.
>
> See what I'm mostly concerned about is having another CS function to
> maintain.
>

There is no maintenance cost. It's just a wrapper. Eventually all drivers
will switch to it.
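
I.e. the existing entry point can simply forward to the new one, roughly like
this (a sketch, assuming the amdgpu_bo_list struct keeps exposing its kernel
handle the way it does today):

    drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
                                        amdgpu_context_handle context,
                                        amdgpu_bo_list_handle bo_list_handle,
                                        int num_chunks,
                                        struct drm_amdgpu_cs_chunk *chunks,
                                        uint64_t *seq_no)
    {
        return amdgpu_cs_submit_raw2(dev, context,
                                     bo_list_handle ? bo_list_handle->handle : 0,
                                     num_chunks, chunks, seq_no);
    }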

Marek


>
>
>> Regarding optimization, this chunk can be replaced by a cast on 64bit:
>>
>> +	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
>> +	for (i = 0; i < num_chunks; i++)
>> +		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
>>
>> It can't. The input is an array of structures. The ioctl takes an array
> of pointers.
>
>
> Ah! Haven't seen this, sorry for the noise.
>
> Christian.
>
>
> Marek
>
>
>> Regards,
>> Christian.
>>
>>
>> Thanks,
>> Marek
>>
>>
>>

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found] ` <20190107193104.4361-1-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2019-01-08  3:16   ` Zhou, David(ChunMing)
  2019-01-09 10:28   ` Christian König
@ 2019-01-16 12:46   ` Bas Nieuwenhuizen
       [not found]     ` <CAP+8YyFD+LxEQOLOY+mDC5v3OOyh1De2DcXK0sRtMW0t7z20SQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2 siblings, 1 reply; 23+ messages in thread
From: Bas Nieuwenhuizen @ 2019-01-16 12:46 UTC (permalink / raw)
  To: Marek Olšák; +Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

So random questions:

1) In this discussion it was mentioned that some Vulkan drivers still
use the bo_list interface. I think that implies radv as I think we're
still using bo_list. Is there any other API we should be using? (Also,
with VK_EXT_descriptor_indexing I suspect we'll be moving more towards
a global bo list instead of a cmd buffer one, as we cannot know all
the BOs referenced anymore, but not sure what end state here will be).

2) The other alternative mentioned was adding the buffers directly
into the submit ioctl. Is this the desired end state (though as above
I'm not sure how that works for vulkan)? If yes, what is the timeline
for this that we need something in the interim?

3) Did we measure any performance benefit?

In general I'd like to ack the raw bo list creation function as
this interface seems easier to use. The two-arrays approach has always
been kind of a pain when we want to use e.g. builtin sort functions to
make sure we have no duplicate BOs, but I have some comments below.
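
With the single entry array, deduplication becomes straightforward, e.g.
(a sketch; entries/count are whatever the driver collected for this submission):

    static int entry_cmp(const void *a, const void *b)
    {
        const struct drm_amdgpu_bo_list_entry *ea = a, *eb = b;
        return ea->bo_handle < eb->bo_handle ? -1 : ea->bo_handle > eb->bo_handle;
    }

    /* Sort by handle, then drop duplicates in place before the ioctl. */
    qsort(entries, count, sizeof(entries[0]), entry_cmp);
    unsigned num = 0;
    for (unsigned i = 0; i < count; i++)
        if (num == 0 || entries[num - 1].bo_handle != entries[i].bo_handle)
            entries[num++] = entries[i];

    amdgpu_bo_list_create_raw(dev, num, entries, &list_handle);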

On Mon, Jan 7, 2019 at 8:31 PM Marek Olšák <maraeo@gmail.com> wrote:
>
> From: Marek Olšák <marek.olsak@amd.com>
>
> ---
>  amdgpu/amdgpu-symbol-check |  3 ++
>  amdgpu/amdgpu.h            | 56 +++++++++++++++++++++++++++++++++++++-
>  amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
>  amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
>  4 files changed, 119 insertions(+), 1 deletion(-)
>
> diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
> index 6f5e0f95..96a44b40 100755
> --- a/amdgpu/amdgpu-symbol-check
> +++ b/amdgpu/amdgpu-symbol-check
> @@ -12,20 +12,22 @@ _edata
>  _end
>  _fini
>  _init
>  amdgpu_bo_alloc
>  amdgpu_bo_cpu_map
>  amdgpu_bo_cpu_unmap
>  amdgpu_bo_export
>  amdgpu_bo_free
>  amdgpu_bo_import
>  amdgpu_bo_inc_ref
> +amdgpu_bo_list_create_raw
> +amdgpu_bo_list_destroy_raw
>  amdgpu_bo_list_create
>  amdgpu_bo_list_destroy
>  amdgpu_bo_list_update
>  amdgpu_bo_query_info
>  amdgpu_bo_set_metadata
>  amdgpu_bo_va_op
>  amdgpu_bo_va_op_raw
>  amdgpu_bo_wait_for_idle
>  amdgpu_create_bo_from_user_mem
>  amdgpu_cs_chunk_fence_info_to_data
> @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
>  amdgpu_cs_destroy_syncobj
>  amdgpu_cs_export_syncobj
>  amdgpu_cs_fence_to_handle
>  amdgpu_cs_import_syncobj
>  amdgpu_cs_query_fence_status
>  amdgpu_cs_query_reset_state
>  amdgpu_query_sw_info
>  amdgpu_cs_signal_semaphore
>  amdgpu_cs_submit
>  amdgpu_cs_submit_raw
> +amdgpu_cs_submit_raw2
>  amdgpu_cs_syncobj_export_sync_file
>  amdgpu_cs_syncobj_import_sync_file
>  amdgpu_cs_syncobj_reset
>  amdgpu_cs_syncobj_signal
>  amdgpu_cs_syncobj_wait
>  amdgpu_cs_wait_fences
>  amdgpu_cs_wait_semaphore
>  amdgpu_device_deinitialize
>  amdgpu_device_initialize
>  amdgpu_find_bo_by_cpu_mapping
> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
> index dc51659a..5b800033 100644
> --- a/amdgpu/amdgpu.h
> +++ b/amdgpu/amdgpu.h
> @@ -35,20 +35,21 @@
>  #define _AMDGPU_H_
>
>  #include <stdint.h>
>  #include <stdbool.h>
>
>  #ifdef __cplusplus
>  extern "C" {
>  #endif
>
>  struct drm_amdgpu_info_hw_ip;
> +struct drm_amdgpu_bo_list_entry;
>
>  /*--------------------------------------------------------------------------*/
>  /* --------------------------- Defines ------------------------------------ */
>  /*--------------------------------------------------------------------------*/
>
>  /**
>   * Define max. number of Command Buffers (IB) which could be sent to the single
>   * hardware IP to accommodate CE/DE requirements
>   *
>   * \sa amdgpu_cs_ib_info
> @@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
>   *                            and no GPU access is scheduled.
>   *                          1 GPU access is in fly or scheduled
>   *
>   * \return   0 - on success
>   *          <0 - Negative POSIX Error code
>   */
>  int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
>                             uint64_t timeout_ns,
>                             bool *buffer_busy);
>
> +/**
> + * Creates a BO list handle for command submission.
> + *
> + * \param   dev                        - \c [in] Device handle.
> + *                                See #amdgpu_device_initialize()
> + * \param   number_of_buffers  - \c [in] Number of BOs in the list
> + * \param   buffers            - \c [in] List of BO handles
> + * \param   result             - \c [out] Created BO list handle
> + *
> + * \return   0 on success\n
> + *          <0 - Negative POSIX Error code
> + *
> + * \sa amdgpu_bo_list_destroy_raw()
> +*/
> +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
> +                             uint32_t number_of_buffers,
> +                             struct drm_amdgpu_bo_list_entry *buffers,
> +                             uint32_t *result);

So AFAIU  drm_amdgpu_bo_list_entry takes a raw bo handle while we
never get a raw bo handle from libdrm_amdgpu. How are we supposed to
fill it in?

What do we win by having the raw handle for the bo_list? If we would
not return the raw handle we would not need the submit_raw2.

> +
> +/**
> + * Destroys a BO list handle.
> + *
> + * \param   bo_list    - \c [in] BO list handle.
> + *
> + * \return   0 on success\n
> + *          <0 - Negative POSIX Error code
> + *
> + * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
> +*/
> +int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);
> +
>  /**
>   * Creates a BO list handle for command submission.
>   *
>   * \param   dev                        - \c [in] Device handle.
>   *                                See #amdgpu_device_initialize()
>   * \param   number_of_resources        - \c [in] Number of BOs in the list
>   * \param   resources          - \c [in] List of BO handles
>   * \param   resource_prios     - \c [in] Optional priority for each handle
>   * \param   result             - \c [out] Created BO list handle
>   *
>   * \return   0 on success\n
>   *          <0 - Negative POSIX Error code
>   *
> - * \sa amdgpu_bo_list_destroy()
> + * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
>  */
>  int amdgpu_bo_list_create(amdgpu_device_handle dev,
>                           uint32_t number_of_resources,
>                           amdgpu_bo_handle *resources,
>                           uint8_t *resource_prios,
>                           amdgpu_bo_list_handle *result);
>
>  /**
>   * Destroys a BO list handle.
>   *
> @@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
>  struct drm_amdgpu_cs_chunk_dep;
>  struct drm_amdgpu_cs_chunk_data;
>
>  int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>                          amdgpu_context_handle context,
>                          amdgpu_bo_list_handle bo_list_handle,
>                          int num_chunks,
>                          struct drm_amdgpu_cs_chunk *chunks,
>                          uint64_t *seq_no);
>
> +/**
> + * Submit raw command submission to the kernel with a raw BO list handle.
> + *
> + * \param   dev               - \c [in] device handle
> + * \param   context    - \c [in] context handle for context id
> + * \param   bo_list_handle - \c [in] raw bo list handle (0 for none)
> + * \param   num_chunks - \c [in] number of CS chunks to submit
> + * \param   chunks     - \c [in] array of CS chunks
> + * \param   seq_no     - \c [out] output sequence number for submission.
> + *
> + * \return   0 on success\n
> + *          <0 - Negative POSIX Error code
> + *
> + * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
> + */
> +int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
> +                         amdgpu_context_handle context,
> +                         uint32_t bo_list_handle,
> +                         int num_chunks,
> +                         struct drm_amdgpu_cs_chunk *chunks,
> +                         uint64_t *seq_no);
> +
>  void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>                                   struct drm_amdgpu_cs_chunk_dep *dep);
>  void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>                                         struct drm_amdgpu_cs_chunk_data *data);
>
>  /**
>   * Reserve VMID
>   * \param   context - \c [in]  GPU Context
>   * \param   flags - \c [in]  TBD
>   *
> diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
> index c0f42e81..21bc73aa 100644
> --- a/amdgpu/amdgpu_bo.c
> +++ b/amdgpu/amdgpu_bo.c
> @@ -611,20 +611,56 @@ drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
>         pthread_mutex_lock(&dev->bo_table_mutex);
>         r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
>                                 *buf_handle);
>         pthread_mutex_unlock(&dev->bo_table_mutex);
>         if (r)
>                 amdgpu_bo_free(*buf_handle);
>  out:
>         return r;
>  }
>
> +drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
> +                                        uint32_t number_of_buffers,
> +                                        struct drm_amdgpu_bo_list_entry *buffers,
> +                                        uint32_t *result)
> +{
> +       union drm_amdgpu_bo_list args;
> +       int r;
> +
> +       memset(&args, 0, sizeof(args));
> +       args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
> +       args.in.bo_number = number_of_buffers;
> +       args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
> +       args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
> +
> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
> +                               &args, sizeof(args));
> +       if (r)
> +               return r;
> +
> +       *result = args.out.list_handle;
> +       return 0;
> +}
> +
> +drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
> +                                         uint32_t bo_list)
> +{
> +       union drm_amdgpu_bo_list args;
> +
> +       memset(&args, 0, sizeof(args));
> +       args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
> +       args.in.list_handle = bo_list;
> +
> +       return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
> +                                  &args, sizeof(args));
> +}
> +
>  drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
>                                      uint32_t number_of_resources,
>                                      amdgpu_bo_handle *resources,
>                                      uint8_t *resource_prios,
>                                      amdgpu_bo_list_handle *result)
>  {
>         struct drm_amdgpu_bo_list_entry *list;
>         union drm_amdgpu_bo_list args;
>         unsigned i;
>         int r;
> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
> index 3b8231aa..5bedf748 100644
> --- a/amdgpu/amdgpu_cs.c
> +++ b/amdgpu/amdgpu_cs.c
> @@ -724,20 +724,45 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>         r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>                                 &cs, sizeof(cs));
>         if (r)
>                 return r;
>
>         if (seq_no)
>                 *seq_no = cs.out.handle;
>         return 0;
>  }
>
> +drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
> +                                    amdgpu_context_handle context,
> +                                    uint32_t bo_list_handle,
> +                                    int num_chunks,
> +                                    struct drm_amdgpu_cs_chunk *chunks,
> +                                    uint64_t *seq_no)
> +{
> +       union drm_amdgpu_cs cs = {0};
> +       uint64_t *chunk_array;
> +       int i, r;
> +
> +       chunk_array = alloca(sizeof(uint64_t) * num_chunks);
> +       for (i = 0; i < num_chunks; i++)
> +               chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
> +       cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
> +       cs.in.ctx_id = context->id;
> +       cs.in.bo_list_handle = bo_list_handle;
> +       cs.in.num_chunks = num_chunks;
> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
> +                               &cs, sizeof(cs));
> +       if (!r && seq_no)
> +               *seq_no = cs.out.handle;
> +       return r;
> +}
> +
>  drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>                                         struct drm_amdgpu_cs_chunk_data *data)
>  {
>         data->fence_data.handle = fence_info->handle->handle;
>         data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
>  }
>
>  drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>                                         struct drm_amdgpu_cs_chunk_dep *dep)
>  {
> --
> 2.17.1
>

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]     ` <CAP+8YyFD+LxEQOLOY+mDC5v3OOyh1De2DcXK0sRtMW0t7z20SQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-16 12:55       ` Christian König
       [not found]         ` <74054b1e-5211-3bfc-ab0f-27e8604759d1-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2019-01-16 14:37       ` Marek Olšák
  1 sibling, 1 reply; 23+ messages in thread
From: Christian König @ 2019-01-16 12:55 UTC (permalink / raw)
  To: Bas Nieuwenhuizen, Marek Olšák
  Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Well if you ask me we should have the following interface for 
negotiating memory management with the kernel:

1. We have per process BOs which can't be shared between processes.

Those are always valid and don't need to be mentioned in any BO list 
whatsoever.

If we know that a per-process BO is currently not in use, we can
optionally tell the kernel so that memory management becomes more efficient.

In other words, instead of a list of the BOs which are in use, we send the
kernel a list of BOs which are not used any more, and we do that only when
we know it is necessary, e.g. when a game or application overcommits.

2. We have shared BOs which are used by more than one process.

Those are rare and should be added to the per CS list of BOs in use.


The whole BO list interface Marek tries to optimize here should be 
deprecated and not used any more.

Regards,
Christian.

On 16.01.19 at 13:46, Bas Nieuwenhuizen wrote:
> So random questions:
>
> 1) In this discussion it was mentioned that some Vulkan drivers still
> use the bo_list interface. I think that implies radv as I think we're
> still using bo_list. Is there any other API we should be using? (Also,
> with VK_EXT_descriptor_indexing I suspect we'll be moving more towards
> a global bo list instead of a cmd buffer one, as we cannot know all
> the BOs referenced anymore, but not sure what end state here will be).
>
> 2) The other alternative mentioned was adding the buffers directly
> into the submit ioctl. Is this the desired end state (though as above
> I'm not sure how that works for vulkan)? If yes, what is the timeline
> for this that we need something in the interim?
>
> 3) Did we measure any performance benefit?
>
> In general I'd like to to ack the raw bo list creation function as
> this interface seems easier to use. The two arrays thing has always
> been kind of a pain when we want to use e.g. builtin sort functions to
> make sure we have no duplicate BOs, but have some comments below.
>
> On Mon, Jan 7, 2019 at 8:31 PM Marek Olšák <maraeo@gmail.com> wrote:
>> From: Marek Olšák <marek.olsak@amd.com>
>>
>> ---
>>   amdgpu/amdgpu-symbol-check |  3 ++
>>   amdgpu/amdgpu.h            | 56 +++++++++++++++++++++++++++++++++++++-
>>   amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
>>   amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
>>   4 files changed, 119 insertions(+), 1 deletion(-)
>>
>> diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
>> index 6f5e0f95..96a44b40 100755
>> --- a/amdgpu/amdgpu-symbol-check
>> +++ b/amdgpu/amdgpu-symbol-check
>> @@ -12,20 +12,22 @@ _edata
>>   _end
>>   _fini
>>   _init
>>   amdgpu_bo_alloc
>>   amdgpu_bo_cpu_map
>>   amdgpu_bo_cpu_unmap
>>   amdgpu_bo_export
>>   amdgpu_bo_free
>>   amdgpu_bo_import
>>   amdgpu_bo_inc_ref
>> +amdgpu_bo_list_create_raw
>> +amdgpu_bo_list_destroy_raw
>>   amdgpu_bo_list_create
>>   amdgpu_bo_list_destroy
>>   amdgpu_bo_list_update
>>   amdgpu_bo_query_info
>>   amdgpu_bo_set_metadata
>>   amdgpu_bo_va_op
>>   amdgpu_bo_va_op_raw
>>   amdgpu_bo_wait_for_idle
>>   amdgpu_create_bo_from_user_mem
>>   amdgpu_cs_chunk_fence_info_to_data
>> @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
>>   amdgpu_cs_destroy_syncobj
>>   amdgpu_cs_export_syncobj
>>   amdgpu_cs_fence_to_handle
>>   amdgpu_cs_import_syncobj
>>   amdgpu_cs_query_fence_status
>>   amdgpu_cs_query_reset_state
>>   amdgpu_query_sw_info
>>   amdgpu_cs_signal_semaphore
>>   amdgpu_cs_submit
>>   amdgpu_cs_submit_raw
>> +amdgpu_cs_submit_raw2
>>   amdgpu_cs_syncobj_export_sync_file
>>   amdgpu_cs_syncobj_import_sync_file
>>   amdgpu_cs_syncobj_reset
>>   amdgpu_cs_syncobj_signal
>>   amdgpu_cs_syncobj_wait
>>   amdgpu_cs_wait_fences
>>   amdgpu_cs_wait_semaphore
>>   amdgpu_device_deinitialize
>>   amdgpu_device_initialize
>>   amdgpu_find_bo_by_cpu_mapping
>> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
>> index dc51659a..5b800033 100644
>> --- a/amdgpu/amdgpu.h
>> +++ b/amdgpu/amdgpu.h
>> @@ -35,20 +35,21 @@
>>   #define _AMDGPU_H_
>>
>>   #include <stdint.h>
>>   #include <stdbool.h>
>>
>>   #ifdef __cplusplus
>>   extern "C" {
>>   #endif
>>
>>   struct drm_amdgpu_info_hw_ip;
>> +struct drm_amdgpu_bo_list_entry;
>>
>>   /*--------------------------------------------------------------------------*/
>>   /* --------------------------- Defines ------------------------------------ */
>>   /*--------------------------------------------------------------------------*/
>>
>>   /**
>>    * Define max. number of Command Buffers (IB) which could be sent to the single
>>    * hardware IP to accommodate CE/DE requirements
>>    *
>>    * \sa amdgpu_cs_ib_info
>> @@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
>>    *                            and no GPU access is scheduled.
>>    *                          1 GPU access is in fly or scheduled
>>    *
>>    * \return   0 - on success
>>    *          <0 - Negative POSIX Error code
>>    */
>>   int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
>>                              uint64_t timeout_ns,
>>                              bool *buffer_busy);
>>
>> +/**
>> + * Creates a BO list handle for command submission.
>> + *
>> + * \param   dev                        - \c [in] Device handle.
>> + *                                See #amdgpu_device_initialize()
>> + * \param   number_of_buffers  - \c [in] Number of BOs in the list
>> + * \param   buffers            - \c [in] List of BO handles
>> + * \param   result             - \c [out] Created BO list handle
>> + *
>> + * \return   0 on success\n
>> + *          <0 - Negative POSIX Error code
>> + *
>> + * \sa amdgpu_bo_list_destroy_raw()
>> +*/
>> +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>> +                             uint32_t number_of_buffers,
>> +                             struct drm_amdgpu_bo_list_entry *buffers,
>> +                             uint32_t *result);
> So AFAIU  drm_amdgpu_bo_list_entry takes a raw bo handle while we
> never get a raw bo handle from libdrm_amdgpu. How are we supposed to
> fill it in?
>
> What do we win by having the raw handle for the bo_list? If we would
> not return the raw handle we would not need the submit_raw2.
>
>> +
>> +/**
>> + * Destroys a BO list handle.
>> + *
>> + * \param   bo_list    - \c [in] BO list handle.
>> + *
>> + * \return   0 on success\n
>> + *          <0 - Negative POSIX Error code
>> + *
>> + * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
>> +*/
>> +int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);
>> +
>>   /**
>>    * Creates a BO list handle for command submission.
>>    *
>>    * \param   dev                        - \c [in] Device handle.
>>    *                                See #amdgpu_device_initialize()
>>    * \param   number_of_resources        - \c [in] Number of BOs in the list
>>    * \param   resources          - \c [in] List of BO handles
>>    * \param   resource_prios     - \c [in] Optional priority for each handle
>>    * \param   result             - \c [out] Created BO list handle
>>    *
>>    * \return   0 on success\n
>>    *          <0 - Negative POSIX Error code
>>    *
>> - * \sa amdgpu_bo_list_destroy()
>> + * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
>>   */
>>   int amdgpu_bo_list_create(amdgpu_device_handle dev,
>>                            uint32_t number_of_resources,
>>                            amdgpu_bo_handle *resources,
>>                            uint8_t *resource_prios,
>>                            amdgpu_bo_list_handle *result);
>>
>>   /**
>>    * Destroys a BO list handle.
>>    *
>> @@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
>>   struct drm_amdgpu_cs_chunk_dep;
>>   struct drm_amdgpu_cs_chunk_data;
>>
>>   int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>>                           amdgpu_context_handle context,
>>                           amdgpu_bo_list_handle bo_list_handle,
>>                           int num_chunks,
>>                           struct drm_amdgpu_cs_chunk *chunks,
>>                           uint64_t *seq_no);
>>
>> +/**
>> + * Submit raw command submission to the kernel with a raw BO list handle.
>> + *
>> + * \param   dev               - \c [in] device handle
>> + * \param   context    - \c [in] context handle for context id
>> + * \param   bo_list_handle - \c [in] raw bo list handle (0 for none)
>> + * \param   num_chunks - \c [in] number of CS chunks to submit
>> + * \param   chunks     - \c [in] array of CS chunks
>> + * \param   seq_no     - \c [out] output sequence number for submission.
>> + *
>> + * \return   0 on success\n
>> + *          <0 - Negative POSIX Error code
>> + *
>> + * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
>> + */
>> +int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>> +                         amdgpu_context_handle context,
>> +                         uint32_t bo_list_handle,
>> +                         int num_chunks,
>> +                         struct drm_amdgpu_cs_chunk *chunks,
>> +                         uint64_t *seq_no);
>> +
>>   void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>>                                    struct drm_amdgpu_cs_chunk_dep *dep);
>>   void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>>                                          struct drm_amdgpu_cs_chunk_data *data);
>>
>>   /**
>>    * Reserve VMID
>>    * \param   context - \c [in]  GPU Context
>>    * \param   flags - \c [in]  TBD
>>    *
>> diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
>> index c0f42e81..21bc73aa 100644
>> --- a/amdgpu/amdgpu_bo.c
>> +++ b/amdgpu/amdgpu_bo.c
>> @@ -611,20 +611,56 @@ drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
>>          pthread_mutex_lock(&dev->bo_table_mutex);
>>          r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
>>                                  *buf_handle);
>>          pthread_mutex_unlock(&dev->bo_table_mutex);
>>          if (r)
>>                  amdgpu_bo_free(*buf_handle);
>>   out:
>>          return r;
>>   }
>>
>> +drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>> +                                        uint32_t number_of_buffers,
>> +                                        struct drm_amdgpu_bo_list_entry *buffers,
>> +                                        uint32_t *result)
>> +{
>> +       union drm_amdgpu_bo_list args;
>> +       int r;
>> +
>> +       memset(&args, 0, sizeof(args));
>> +       args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
>> +       args.in.bo_number = number_of_buffers;
>> +       args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
>> +       args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
>> +
>> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>> +                               &args, sizeof(args));
>> +       if (r)
>> +               return r;
>> +
>> +       *result = args.out.list_handle;
>> +       return 0;
>> +}
>> +
>> +drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
>> +                                         uint32_t bo_list)
>> +{
>> +       union drm_amdgpu_bo_list args;
>> +
>> +       memset(&args, 0, sizeof(args));
>> +       args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
>> +       args.in.list_handle = bo_list;
>> +
>> +       return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>> +                                  &args, sizeof(args));
>> +}
>> +
>>   drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
>>                                       uint32_t number_of_resources,
>>                                       amdgpu_bo_handle *resources,
>>                                       uint8_t *resource_prios,
>>                                       amdgpu_bo_list_handle *result)
>>   {
>>          struct drm_amdgpu_bo_list_entry *list;
>>          union drm_amdgpu_bo_list args;
>>          unsigned i;
>>          int r;
>> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
>> index 3b8231aa..5bedf748 100644
>> --- a/amdgpu/amdgpu_cs.c
>> +++ b/amdgpu/amdgpu_cs.c
>> @@ -724,20 +724,45 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>>          r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>>                                  &cs, sizeof(cs));
>>          if (r)
>>                  return r;
>>
>>          if (seq_no)
>>                  *seq_no = cs.out.handle;
>>          return 0;
>>   }
>>
>> +drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>> +                                    amdgpu_context_handle context,
>> +                                    uint32_t bo_list_handle,
>> +                                    int num_chunks,
>> +                                    struct drm_amdgpu_cs_chunk *chunks,
>> +                                    uint64_t *seq_no)
>> +{
>> +       union drm_amdgpu_cs cs = {0};
>> +       uint64_t *chunk_array;
>> +       int i, r;
>> +
>> +       chunk_array = alloca(sizeof(uint64_t) * num_chunks);
>> +       for (i = 0; i < num_chunks; i++)
>> +               chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
>> +       cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
>> +       cs.in.ctx_id = context->id;
>> +       cs.in.bo_list_handle = bo_list_handle;
>> +       cs.in.num_chunks = num_chunks;
>> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>> +                               &cs, sizeof(cs));
>> +       if (!r && seq_no)
>> +               *seq_no = cs.out.handle;
>> +       return r;
>> +}
>> +
>>   drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>>                                          struct drm_amdgpu_cs_chunk_data *data)
>>   {
>>          data->fence_data.handle = fence_info->handle->handle;
>>          data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
>>   }
>>
>>   drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>>                                          struct drm_amdgpu_cs_chunk_dep *dep)
>>   {
>> --
>> 2.17.1
>>

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]         ` <74054b1e-5211-3bfc-ab0f-27e8604759d1-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-01-16 14:31           ` Marek Olšák
       [not found]             ` <CAAxE2A5ywFkNMtPbesU_kuSwKCmsPJ0D8wRFuSp14mpORcwYhg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Marek Olšák @ 2019-01-16 14:31 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx mailing list, Bas Nieuwenhuizen



On Wed, Jan 16, 2019, 7:55 AM Christian König <
ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org wrote:

> Well if you ask me we should have the following interface for
> negotiating memory management with the kernel:
>
> 1. We have per process BOs which can't be shared between processes.
>
> Those are always valid and don't need to be mentioned in any BO list
> whatsoever.
>
> If we knew that a per process BO is currently not in use we can
> optionally tell that to the kernel to make memory management more
> efficient.
>
> In other words instead of a list of stuff which is used we send down to
> the kernel a list of stuff which is not used any more and that only when
> we know that it is necessary, e.g. when a game or application overcommits.
>

Radeonsi doesn't use this because this approach caused a performance
degradation and it also drops BO priorities.

Marek


> 2. We have shared BOs which are used by more than one process.
>
> Those are rare and should be added to the per CS list of BOs in use.
>
>
> The whole BO list interface Marek tries to optimize here should be
> deprecated and not used any more.
>
> Regards,
> Christian.
>
> Am 16.01.19 um 13:46 schrieb Bas Nieuwenhuizen:
> > So random questions:
> >
> > 1) In this discussion it was mentioned that some Vulkan drivers still
> > use the bo_list interface. I think that implies radv as I think we're
> > still using bo_list. Is there any other API we should be using? (Also,
> > with VK_EXT_descriptor_indexing I suspect we'll be moving more towards
> > a global bo list instead of a cmd buffer one, as we cannot know all
> > the BOs referenced anymore, but not sure what end state here will be).
> >
> > 2) The other alternative mentioned was adding the buffers directly
> > into the submit ioctl. Is this the desired end state (though as above
> > I'm not sure how that works for vulkan)? If yes, what is the timeline
> > for this that we need something in the interim?
> >
> > 3) Did we measure any performance benefit?
> >
> > In general I'd like to to ack the raw bo list creation function as
> > this interface seems easier to use. The two arrays thing has always
> > been kind of a pain when we want to use e.g. builtin sort functions to
> > make sure we have no duplicate BOs, but have some comments below.
> >
> > On Mon, Jan 7, 2019 at 8:31 PM Marek Olšák <maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
> >> From: Marek Olšák <marek.olsak-5C7GfCeVMHo@public.gmane.org>
> >>
> >> ---
> >>   amdgpu/amdgpu-symbol-check |  3 ++
> >>   amdgpu/amdgpu.h            | 56 +++++++++++++++++++++++++++++++++++++-
> >>   amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
> >>   amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
> >>   4 files changed, 119 insertions(+), 1 deletion(-)
> >>
> >> diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
> >> index 6f5e0f95..96a44b40 100755
> >> --- a/amdgpu/amdgpu-symbol-check
> >> +++ b/amdgpu/amdgpu-symbol-check
> >> @@ -12,20 +12,22 @@ _edata
> >>   _end
> >>   _fini
> >>   _init
> >>   amdgpu_bo_alloc
> >>   amdgpu_bo_cpu_map
> >>   amdgpu_bo_cpu_unmap
> >>   amdgpu_bo_export
> >>   amdgpu_bo_free
> >>   amdgpu_bo_import
> >>   amdgpu_bo_inc_ref
> >> +amdgpu_bo_list_create_raw
> >> +amdgpu_bo_list_destroy_raw
> >>   amdgpu_bo_list_create
> >>   amdgpu_bo_list_destroy
> >>   amdgpu_bo_list_update
> >>   amdgpu_bo_query_info
> >>   amdgpu_bo_set_metadata
> >>   amdgpu_bo_va_op
> >>   amdgpu_bo_va_op_raw
> >>   amdgpu_bo_wait_for_idle
> >>   amdgpu_create_bo_from_user_mem
> >>   amdgpu_cs_chunk_fence_info_to_data
> >> @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
> >>   amdgpu_cs_destroy_syncobj
> >>   amdgpu_cs_export_syncobj
> >>   amdgpu_cs_fence_to_handle
> >>   amdgpu_cs_import_syncobj
> >>   amdgpu_cs_query_fence_status
> >>   amdgpu_cs_query_reset_state
> >>   amdgpu_query_sw_info
> >>   amdgpu_cs_signal_semaphore
> >>   amdgpu_cs_submit
> >>   amdgpu_cs_submit_raw
> >> +amdgpu_cs_submit_raw2
> >>   amdgpu_cs_syncobj_export_sync_file
> >>   amdgpu_cs_syncobj_import_sync_file
> >>   amdgpu_cs_syncobj_reset
> >>   amdgpu_cs_syncobj_signal
> >>   amdgpu_cs_syncobj_wait
> >>   amdgpu_cs_wait_fences
> >>   amdgpu_cs_wait_semaphore
> >>   amdgpu_device_deinitialize
> >>   amdgpu_device_initialize
> >>   amdgpu_find_bo_by_cpu_mapping
> >> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
> >> index dc51659a..5b800033 100644
> >> --- a/amdgpu/amdgpu.h
> >> +++ b/amdgpu/amdgpu.h
> >> @@ -35,20 +35,21 @@
> >>   #define _AMDGPU_H_
> >>
> >>   #include <stdint.h>
> >>   #include <stdbool.h>
> >>
> >>   #ifdef __cplusplus
> >>   extern "C" {
> >>   #endif
> >>
> >>   struct drm_amdgpu_info_hw_ip;
> >> +struct drm_amdgpu_bo_list_entry;
> >>
> >>
>  /*--------------------------------------------------------------------------*/
> >>   /* --------------------------- Defines
> ------------------------------------ */
> >>
>  /*--------------------------------------------------------------------------*/
> >>
> >>   /**
> >>    * Define max. number of Command Buffers (IB) which could be sent to
> the single
> >>    * hardware IP to accommodate CE/DE requirements
> >>    *
> >>    * \sa amdgpu_cs_ib_info
> >> @@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle
> buf_handle);
> >>    *                            and no GPU access is scheduled.
> >>    *                          1 GPU access is in fly or scheduled
> >>    *
> >>    * \return   0 - on success
> >>    *          <0 - Negative POSIX Error code
> >>    */
> >>   int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
> >>                              uint64_t timeout_ns,
> >>                              bool *buffer_busy);
> >>
> >> +/**
> >> + * Creates a BO list handle for command submission.
> >> + *
> >> + * \param   dev                        - \c [in] Device handle.
> >> + *                                See #amdgpu_device_initialize()
> >> + * \param   number_of_buffers  - \c [in] Number of BOs in the list
> >> + * \param   buffers            - \c [in] List of BO handles
> >> + * \param   result             - \c [out] Created BO list handle
> >> + *
> >> + * \return   0 on success\n
> >> + *          <0 - Negative POSIX Error code
> >> + *
> >> + * \sa amdgpu_bo_list_destroy_raw()
> >> +*/
> >> +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
> >> +                             uint32_t number_of_buffers,
> >> +                             struct drm_amdgpu_bo_list_entry *buffers,
> >> +                             uint32_t *result);
> > So AFAIU  drm_amdgpu_bo_list_entry takes a raw bo handle while we
> > never get a raw bo handle from libdrm_amdgpu. How are we supposed to
> > fill it in?
> >
> > What do we win by having the raw handle for the bo_list? If we would
> > not return the raw handle we would not need the submit_raw2.
> >
> >> +
> >> +/**
> >> + * Destroys a BO list handle.
> >> + *
> >> + * \param   bo_list    - \c [in] BO list handle.
> >> + *
> >> + * \return   0 on success\n
> >> + *          <0 - Negative POSIX Error code
> >> + *
> >> + * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
> >> +*/
> >> +int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t
> bo_list);
> >> +
> >>   /**
> >>    * Creates a BO list handle for command submission.
> >>    *
> >>    * \param   dev                        - \c [in] Device handle.
> >>    *                                See #amdgpu_device_initialize()
> >>    * \param   number_of_resources        - \c [in] Number of BOs in the
> list
> >>    * \param   resources          - \c [in] List of BO handles
> >>    * \param   resource_prios     - \c [in] Optional priority for each
> handle
> >>    * \param   result             - \c [out] Created BO list handle
> >>    *
> >>    * \return   0 on success\n
> >>    *          <0 - Negative POSIX Error code
> >>    *
> >> - * \sa amdgpu_bo_list_destroy()
> >> + * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
> >>   */
> >>   int amdgpu_bo_list_create(amdgpu_device_handle dev,
> >>                            uint32_t number_of_resources,
> >>                            amdgpu_bo_handle *resources,
> >>                            uint8_t *resource_prios,
> >>                            amdgpu_bo_list_handle *result);
> >>
> >>   /**
> >>    * Destroys a BO list handle.
> >>    *
> >> @@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
> >>   struct drm_amdgpu_cs_chunk_dep;
> >>   struct drm_amdgpu_cs_chunk_data;
> >>
> >>   int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
> >>                           amdgpu_context_handle context,
> >>                           amdgpu_bo_list_handle bo_list_handle,
> >>                           int num_chunks,
> >>                           struct drm_amdgpu_cs_chunk *chunks,
> >>                           uint64_t *seq_no);
> >>
> >> +/**
> >> + * Submit raw command submission to the kernel with a raw BO list
> handle.
> >> + *
> >> + * \param   dev               - \c [in] device handle
> >> + * \param   context    - \c [in] context handle for context id
> >> + * \param   bo_list_handle - \c [in] raw bo list handle (0 for none)
> >> + * \param   num_chunks - \c [in] number of CS chunks to submit
> >> + * \param   chunks     - \c [in] array of CS chunks
> >> + * \param   seq_no     - \c [out] output sequence number for
> submission.
> >> + *
> >> + * \return   0 on success\n
> >> + *          <0 - Negative POSIX Error code
> >> + *
> >> + * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
> >> + */
> >> +int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
> >> +                         amdgpu_context_handle context,
> >> +                         uint32_t bo_list_handle,
> >> +                         int num_chunks,
> >> +                         struct drm_amdgpu_cs_chunk *chunks,
> >> +                         uint64_t *seq_no);
> >> +
> >>   void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
> >>                                    struct drm_amdgpu_cs_chunk_dep *dep);
> >>   void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info
> *fence_info,
> >>                                          struct
> drm_amdgpu_cs_chunk_data *data);
> >>
> >>   /**
> >>    * Reserve VMID
> >>    * \param   context - \c [in]  GPU Context
> >>    * \param   flags - \c [in]  TBD
> >>    *
> >> diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
> >> index c0f42e81..21bc73aa 100644
> >> --- a/amdgpu/amdgpu_bo.c
> >> +++ b/amdgpu/amdgpu_bo.c
> >> @@ -611,20 +611,56 @@ drm_public int
> amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
> >>          pthread_mutex_lock(&dev->bo_table_mutex);
> >>          r = handle_table_insert(&dev->bo_handles,
> (*buf_handle)->handle,
> >>                                  *buf_handle);
> >>          pthread_mutex_unlock(&dev->bo_table_mutex);
> >>          if (r)
> >>                  amdgpu_bo_free(*buf_handle);
> >>   out:
> >>          return r;
> >>   }
> >>
> >> +drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
> >> +                                        uint32_t number_of_buffers,
> >> +                                        struct
> drm_amdgpu_bo_list_entry *buffers,
> >> +                                        uint32_t *result)
> >> +{
> >> +       union drm_amdgpu_bo_list args;
> >> +       int r;
> >> +
> >> +       memset(&args, 0, sizeof(args));
> >> +       args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
> >> +       args.in.bo_number = number_of_buffers;
> >> +       args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
> >> +       args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
> >> +
> >> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
> >> +                               &args, sizeof(args));
> >> +       if (r)
> >> +               return r;
> >> +
> >> +       *result = args.out.list_handle;
> >> +       return 0;
> >> +}
> >> +
> >> +drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
> >> +                                         uint32_t bo_list)
> >> +{
> >> +       union drm_amdgpu_bo_list args;
> >> +
> >> +       memset(&args, 0, sizeof(args));
> >> +       args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
> >> +       args.in.list_handle = bo_list;
> >> +
> >> +       return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
> >> +                                  &args, sizeof(args));
> >> +}
> >> +
> >>   drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
> >>                                       uint32_t number_of_resources,
> >>                                       amdgpu_bo_handle *resources,
> >>                                       uint8_t *resource_prios,
> >>                                       amdgpu_bo_list_handle *result)
> >>   {
> >>          struct drm_amdgpu_bo_list_entry *list;
> >>          union drm_amdgpu_bo_list args;
> >>          unsigned i;
> >>          int r;
> >> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
> >> index 3b8231aa..5bedf748 100644
> >> --- a/amdgpu/amdgpu_cs.c
> >> +++ b/amdgpu/amdgpu_cs.c
> >> @@ -724,20 +724,45 @@ drm_public int
> amdgpu_cs_submit_raw(amdgpu_device_handle dev,
> >>          r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
> >>                                  &cs, sizeof(cs));
> >>          if (r)
> >>                  return r;
> >>
> >>          if (seq_no)
> >>                  *seq_no = cs.out.handle;
> >>          return 0;
> >>   }
> >>
> >> +drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
> >> +                                    amdgpu_context_handle context,
> >> +                                    uint32_t bo_list_handle,
> >> +                                    int num_chunks,
> >> +                                    struct drm_amdgpu_cs_chunk *chunks,
> >> +                                    uint64_t *seq_no)
> >> +{
> >> +       union drm_amdgpu_cs cs = {0};
> >> +       uint64_t *chunk_array;
> >> +       int i, r;
> >> +
> >> +       chunk_array = alloca(sizeof(uint64_t) * num_chunks);
> >> +       for (i = 0; i < num_chunks; i++)
> >> +               chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
> >> +       cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
> >> +       cs.in.ctx_id = context->id;
> >> +       cs.in.bo_list_handle = bo_list_handle;
> >> +       cs.in.num_chunks = num_chunks;
> >> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
> >> +                               &cs, sizeof(cs));
> >> +       if (!r && seq_no)
> >> +               *seq_no = cs.out.handle;
> >> +       return r;
> >> +}
> >> +
> >>   drm_public void amdgpu_cs_chunk_fence_info_to_data(struct
> amdgpu_cs_fence_info *fence_info,
> >>                                          struct
> drm_amdgpu_cs_chunk_data *data)
> >>   {
> >>          data->fence_data.handle = fence_info->handle->handle;
> >>          data->fence_data.offset = fence_info->offset *
> sizeof(uint64_t);
> >>   }
> >>
> >>   drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence
> *fence,
> >>                                          struct drm_amdgpu_cs_chunk_dep
> *dep)
> >>   {
> >> --
> >> 2.17.1
> >>
> >> _______________________________________________
> >> amd-gfx mailing list
> >> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> >> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
>

[-- Attachment #1.2: Type: text/html, Size: 21368 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]             ` <CAAxE2A5ywFkNMtPbesU_kuSwKCmsPJ0D8wRFuSp14mpORcwYhg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-16 14:34               ` Koenig, Christian
       [not found]                 ` <a550562a-7d36-9acf-3143-217c507e667a-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Koenig, Christian @ 2019-01-16 14:34 UTC (permalink / raw)
  To: Marek Olšák; +Cc: amd-gfx mailing list, Bas Nieuwenhuizen


[-- Attachment #1.1: Type: text/plain, Size: 15463 bytes --]

On 16.01.19 at 15:31, Marek Olšák wrote:


On Wed, Jan 16, 2019, 7:55 AM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
Well if you ask me we should have the following interface for
negotiating memory management with the kernel:

1. We have per process BOs which can't be shared between processes.

Those are always valid and don't need to be mentioned in any BO list
whatsoever.

If we know that a per process BO is currently not in use, we can
optionally tell that to the kernel to make memory management more efficient.

In other words, instead of a list of stuff which is used, we send down to
the kernel a list of stuff which is not used any more, and we do that only
when we know that it is necessary, e.g. when a game or application overcommits.

Radeonsi doesn't use this because this approach caused performance degradation and also drops BO priorities.

The performance degradation was mostly due to shortcomings in the LRU, which have been fixed by now.

BO priorities are a different topic, but could be added to per VM BOs as well.
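
For illustration, a minimal sketch of the allocation side of such a per-VM BO, assuming the AMDGPU_GEM_CREATE_VM_ALWAYS_VALID flag is the mechanism meant here (not part of the patch under discussion; the usual amdgpu_bo_va_op() mapping step is left out):

#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Allocate a per-process (per-VM) BO. With the flag below the kernel
 * keeps the BO valid for this VM at all times, so it never has to be
 * listed in a per-CS BO list. */
static int alloc_per_vm_bo(amdgpu_device_handle dev, uint64_t size,
                           amdgpu_bo_handle *bo)
{
    struct amdgpu_bo_alloc_request req = {0};

    req.alloc_size = size;
    req.phys_alignment = 4096;
    req.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
    req.flags = AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;

    return amdgpu_bo_alloc(dev, &req, bo);
}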

Christian.


Marek


2. We have shared BOs which are used by more than one process.

Those are rare and should be added to the per CS list of BOs in use.


The whole BO list interface Marek tries to optimize here should be
deprecated and not used any more.

Regards,
Christian.

On 16.01.19 at 13:46, Bas Nieuwenhuizen wrote:
> So random questions:
>
> 1) In this discussion it was mentioned that some Vulkan drivers still
> use the bo_list interface. I think that implies radv as I think we're
> still using bo_list. Is there any other API we should be using? (Also,
> with VK_EXT_descriptor_indexing I suspect we'll be moving more towards
> a global bo list instead of a cmd buffer one, as we cannot know all
> the BOs referenced anymore, but not sure what end state here will be).
>
> 2) The other alternative mentioned was adding the buffers directly
> into the submit ioctl. Is this the desired end state (though as above
> I'm not sure how that works for vulkan)? If yes, what is the timeline
> for this that we need something in the interim?
>
> 3) Did we measure any performance benefit?
>
> In general I'd like to ack the raw bo list creation function as
> this interface seems easier to use. The two arrays thing has always
> been kind of a pain when we want to use e.g. builtin sort functions to
> make sure we have no duplicate BOs, but have some comments below.
>
> On Mon, Jan 7, 2019 at 8:31 PM Marek Olšák <maraeo@gmail.com> wrote:
>> From: Marek Olšák <marek.olsak@amd.com>
>>
>> ---
>>   amdgpu/amdgpu-symbol-check |  3 ++
>>   amdgpu/amdgpu.h            | 56 +++++++++++++++++++++++++++++++++++++-
>>   amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
>>   amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
>>   4 files changed, 119 insertions(+), 1 deletion(-)
>>
>> diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
>> index 6f5e0f95..96a44b40 100755
>> --- a/amdgpu/amdgpu-symbol-check
>> +++ b/amdgpu/amdgpu-symbol-check
>> @@ -12,20 +12,22 @@ _edata
>>   _end
>>   _fini
>>   _init
>>   amdgpu_bo_alloc
>>   amdgpu_bo_cpu_map
>>   amdgpu_bo_cpu_unmap
>>   amdgpu_bo_export
>>   amdgpu_bo_free
>>   amdgpu_bo_import
>>   amdgpu_bo_inc_ref
>> +amdgpu_bo_list_create_raw
>> +amdgpu_bo_list_destroy_raw
>>   amdgpu_bo_list_create
>>   amdgpu_bo_list_destroy
>>   amdgpu_bo_list_update
>>   amdgpu_bo_query_info
>>   amdgpu_bo_set_metadata
>>   amdgpu_bo_va_op
>>   amdgpu_bo_va_op_raw
>>   amdgpu_bo_wait_for_idle
>>   amdgpu_create_bo_from_user_mem
>>   amdgpu_cs_chunk_fence_info_to_data
>> @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
>>   amdgpu_cs_destroy_syncobj
>>   amdgpu_cs_export_syncobj
>>   amdgpu_cs_fence_to_handle
>>   amdgpu_cs_import_syncobj
>>   amdgpu_cs_query_fence_status
>>   amdgpu_cs_query_reset_state
>>   amdgpu_query_sw_info
>>   amdgpu_cs_signal_semaphore
>>   amdgpu_cs_submit
>>   amdgpu_cs_submit_raw
>> +amdgpu_cs_submit_raw2
>>   amdgpu_cs_syncobj_export_sync_file
>>   amdgpu_cs_syncobj_import_sync_file
>>   amdgpu_cs_syncobj_reset
>>   amdgpu_cs_syncobj_signal
>>   amdgpu_cs_syncobj_wait
>>   amdgpu_cs_wait_fences
>>   amdgpu_cs_wait_semaphore
>>   amdgpu_device_deinitialize
>>   amdgpu_device_initialize
>>   amdgpu_find_bo_by_cpu_mapping
>> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
>> index dc51659a..5b800033 100644
>> --- a/amdgpu/amdgpu.h
>> +++ b/amdgpu/amdgpu.h
>> @@ -35,20 +35,21 @@
>>   #define _AMDGPU_H_
>>
>>   #include <stdint.h>
>>   #include <stdbool.h>
>>
>>   #ifdef __cplusplus
>>   extern "C" {
>>   #endif
>>
>>   struct drm_amdgpu_info_hw_ip;
>> +struct drm_amdgpu_bo_list_entry;
>>
>>   /*--------------------------------------------------------------------------*/
>>   /* --------------------------- Defines ------------------------------------ */
>>   /*--------------------------------------------------------------------------*/
>>
>>   /**
>>    * Define max. number of Command Buffers (IB) which could be sent to the single
>>    * hardware IP to accommodate CE/DE requirements
>>    *
>>    * \sa amdgpu_cs_ib_info
>> @@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
>>    *                            and no GPU access is scheduled.
>>    *                          1 GPU access is in fly or scheduled
>>    *
>>    * \return   0 - on success
>>    *          <0 - Negative POSIX Error code
>>    */
>>   int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
>>                              uint64_t timeout_ns,
>>                              bool *buffer_busy);
>>
>> +/**
>> + * Creates a BO list handle for command submission.
>> + *
>> + * \param   dev                        - \c [in] Device handle.
>> + *                                See #amdgpu_device_initialize()
>> + * \param   number_of_buffers  - \c [in] Number of BOs in the list
>> + * \param   buffers            - \c [in] List of BO handles
>> + * \param   result             - \c [out] Created BO list handle
>> + *
>> + * \return   0 on success\n
>> + *          <0 - Negative POSIX Error code
>> + *
>> + * \sa amdgpu_bo_list_destroy_raw()
>> +*/
>> +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>> +                             uint32_t number_of_buffers,
>> +                             struct drm_amdgpu_bo_list_entry *buffers,
>> +                             uint32_t *result);
> So AFAIU  drm_amdgpu_bo_list_entry takes a raw bo handle while we
> never get a raw bo handle from libdrm_amdgpu. How are we supposed to
> fill it in?
>
> What do we win by having the raw handle for the bo_list? If we would
> not return the raw handle we would not need the submit_raw2.
>
>> +
>> +/**
>> + * Destroys a BO list handle.
>> + *
>> + * \param   bo_list    - \c [in] BO list handle.
>> + *
>> + * \return   0 on success\n
>> + *          <0 - Negative POSIX Error code
>> + *
>> + * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
>> +*/
>> +int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);
>> +
>>   /**
>>    * Creates a BO list handle for command submission.
>>    *
>>    * \param   dev                        - \c [in] Device handle.
>>    *                                See #amdgpu_device_initialize()
>>    * \param   number_of_resources        - \c [in] Number of BOs in the list
>>    * \param   resources          - \c [in] List of BO handles
>>    * \param   resource_prios     - \c [in] Optional priority for each handle
>>    * \param   result             - \c [out] Created BO list handle
>>    *
>>    * \return   0 on success\n
>>    *          <0 - Negative POSIX Error code
>>    *
>> - * \sa amdgpu_bo_list_destroy()
>> + * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
>>   */
>>   int amdgpu_bo_list_create(amdgpu_device_handle dev,
>>                            uint32_t number_of_resources,
>>                            amdgpu_bo_handle *resources,
>>                            uint8_t *resource_prios,
>>                            amdgpu_bo_list_handle *result);
>>
>>   /**
>>    * Destroys a BO list handle.
>>    *
>> @@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
>>   struct drm_amdgpu_cs_chunk_dep;
>>   struct drm_amdgpu_cs_chunk_data;
>>
>>   int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>>                           amdgpu_context_handle context,
>>                           amdgpu_bo_list_handle bo_list_handle,
>>                           int num_chunks,
>>                           struct drm_amdgpu_cs_chunk *chunks,
>>                           uint64_t *seq_no);
>>
>> +/**
>> + * Submit raw command submission to the kernel with a raw BO list handle.
>> + *
>> + * \param   dev               - \c [in] device handle
>> + * \param   context    - \c [in] context handle for context id
>> + * \param   bo_list_handle - \c [in] raw bo list handle (0 for none)
>> + * \param   num_chunks - \c [in] number of CS chunks to submit
>> + * \param   chunks     - \c [in] array of CS chunks
>> + * \param   seq_no     - \c [out] output sequence number for submission.
>> + *
>> + * \return   0 on success\n
>> + *          <0 - Negative POSIX Error code
>> + *
>> + * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
>> + */
>> +int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>> +                         amdgpu_context_handle context,
>> +                         uint32_t bo_list_handle,
>> +                         int num_chunks,
>> +                         struct drm_amdgpu_cs_chunk *chunks,
>> +                         uint64_t *seq_no);
>> +
>>   void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>>                                    struct drm_amdgpu_cs_chunk_dep *dep);
>>   void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>>                                          struct drm_amdgpu_cs_chunk_data *data);
>>
>>   /**
>>    * Reserve VMID
>>    * \param   context - \c [in]  GPU Context
>>    * \param   flags - \c [in]  TBD
>>    *
>> diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
>> index c0f42e81..21bc73aa 100644
>> --- a/amdgpu/amdgpu_bo.c
>> +++ b/amdgpu/amdgpu_bo.c
>> @@ -611,20 +611,56 @@ drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
>>          pthread_mutex_lock(&dev->bo_table_mutex);
>>          r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
>>                                  *buf_handle);
>>          pthread_mutex_unlock(&dev->bo_table_mutex);
>>          if (r)
>>                  amdgpu_bo_free(*buf_handle);
>>   out:
>>          return r;
>>   }
>>
>> +drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>> +                                        uint32_t number_of_buffers,
>> +                                        struct drm_amdgpu_bo_list_entry *buffers,
>> +                                        uint32_t *result)
>> +{
>> +       union drm_amdgpu_bo_list args;
>> +       int r;
>> +
>> +       memset(&args, 0, sizeof(args));
>> +       args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
>> +       args.in.bo_number = number_of_buffers;
>> +       args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
>> +       args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
>> +
>> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>> +                               &args, sizeof(args));
>> +       if (r)
>> +               return r;
>> +
>> +       *result = args.out.list_handle;
>> +       return 0;
>> +}
>> +
>> +drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
>> +                                         uint32_t bo_list)
>> +{
>> +       union drm_amdgpu_bo_list args;
>> +
>> +       memset(&args, 0, sizeof(args));
>> +       args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
>> +       args.in.list_handle = bo_list;
>> +
>> +       return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>> +                                  &args, sizeof(args));
>> +}
>> +
>>   drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
>>                                       uint32_t number_of_resources,
>>                                       amdgpu_bo_handle *resources,
>>                                       uint8_t *resource_prios,
>>                                       amdgpu_bo_list_handle *result)
>>   {
>>          struct drm_amdgpu_bo_list_entry *list;
>>          union drm_amdgpu_bo_list args;
>>          unsigned i;
>>          int r;
>> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
>> index 3b8231aa..5bedf748 100644
>> --- a/amdgpu/amdgpu_cs.c
>> +++ b/amdgpu/amdgpu_cs.c
>> @@ -724,20 +724,45 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>>          r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>>                                  &cs, sizeof(cs));
>>          if (r)
>>                  return r;
>>
>>          if (seq_no)
>>                  *seq_no = cs.out.handle;
>>          return 0;
>>   }
>>
>> +drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>> +                                    amdgpu_context_handle context,
>> +                                    uint32_t bo_list_handle,
>> +                                    int num_chunks,
>> +                                    struct drm_amdgpu_cs_chunk *chunks,
>> +                                    uint64_t *seq_no)
>> +{
>> +       union drm_amdgpu_cs cs = {0};
>> +       uint64_t *chunk_array;
>> +       int i, r;
>> +
>> +       chunk_array = alloca(sizeof(uint64_t) * num_chunks);
>> +       for (i = 0; i < num_chunks; i++)
>> +               chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
>> +       cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
>> +       cs.in.ctx_id = context->id;
>> +       cs.in.bo_list_handle = bo_list_handle;
>> +       cs.in.num_chunks = num_chunks;
>> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>> +                               &cs, sizeof(cs));
>> +       if (!r && seq_no)
>> +               *seq_no = cs.out.handle;
>> +       return r;
>> +}
>> +
>>   drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>>                                          struct drm_amdgpu_cs_chunk_data *data)
>>   {
>>          data->fence_data.handle = fence_info->handle->handle;
>>          data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
>>   }
>>
>>   drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>>                                          struct drm_amdgpu_cs_chunk_dep *dep)
>>   {
>> --
>> 2.17.1
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx



[-- Attachment #1.2: Type: text/html, Size: 28379 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]     ` <CAP+8YyFD+LxEQOLOY+mDC5v3OOyh1De2DcXK0sRtMW0t7z20SQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2019-01-16 12:55       ` Christian König
@ 2019-01-16 14:37       ` Marek Olšák
       [not found]         ` <CAAxE2A5chwbGmQN2yqVCfvF=TPvFMN6Qu-iFUuRW-zBVm=AN9w-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  1 sibling, 1 reply; 23+ messages in thread
From: Marek Olšák @ 2019-01-16 14:37 UTC (permalink / raw)
  To: Bas Nieuwenhuizen; +Cc: amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 14361 bytes --]

On Wed, Jan 16, 2019, 7:46 AM Bas Nieuwenhuizen <bas-dldO88ZXqoXqqjsSq9zF6IRWq/SkRNHw@public.gmane.org
wrote:

> So random questions:
>
> 1) In this discussion it was mentioned that some Vulkan drivers still
> use the bo_list interface. I think that implies radv as I think we're
> still using bo_list. Is there any other API we should be using? (Also,
> with VK_EXT_descriptor_indexing I suspect we'll be moving more towards
> a global bo list instead of a cmd buffer one, as we cannot know all
> the BOs referenced anymore, but not sure what end state here will be).
>
> 2) The other alternative mentioned was adding the buffers directly
> into the submit ioctl. Is this the desired end state (though as above
> I'm not sure how that works for vulkan)? If yes, what is the timeline
> for this that we need something in the interim?
>

Radeonsi already uses this.
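
For context, a rough sketch of that path: the BOs go directly into the submit ioctl as a CS chunk, and the amdgpu_cs_submit_raw2() from this patch is called with no BO list handle at all. This assumes kernel support for AMDGPU_CHUNK_ID_BO_HANDLES; the ~0 field values follow what I believe radeonsi does, and ib_chunk/entries are assumed to be prepared by the caller:

#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Submit with the BO list passed inline as a CS chunk. 'ib_chunk' is an
 * already-filled AMDGPU_CHUNK_ID_IB chunk; 'entries' hold kernel (KMS)
 * BO handles. */
static int submit_with_inline_bo_list(amdgpu_device_handle dev,
                                      amdgpu_context_handle ctx,
                                      struct drm_amdgpu_cs_chunk ib_chunk,
                                      struct drm_amdgpu_bo_list_entry *entries,
                                      unsigned num_bos, uint64_t *seq_no)
{
    struct drm_amdgpu_bo_list_in bo_list_in = {
        .operation = ~0u,       /* not a create/destroy operation */
        .list_handle = ~0u,     /* no pre-created list handle */
        .bo_number = num_bos,
        .bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry),
        .bo_info_ptr = (uint64_t)(uintptr_t)entries,
    };
    struct drm_amdgpu_cs_chunk chunks[2];

    chunks[0] = ib_chunk;
    chunks[1].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
    chunks[1].length_dw = sizeof(bo_list_in) / 4;
    chunks[1].chunk_data = (uint64_t)(uintptr_t)&bo_list_in;

    return amdgpu_cs_submit_raw2(dev, ctx, 0 /* no BO list handle */,
                                 2, chunks, seq_no);
}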


> 3) Did we measure any performance benefit?
>
> In general I'd like to ack the raw bo list creation function as
> this interface seems easier to use. The two arrays thing has always
> been kind of a pain when we want to use e.g. builtin sort functions to
> make sure we have no duplicate BOs, but have some comments below.
>

The reason amdgpu was slower than radeon was this inefficient bo list
interface.


> On Mon, Jan 7, 2019 at 8:31 PM Marek Olšák <maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
> >
> > From: Marek Olšák <marek.olsak-5C7GfCeVMHo@public.gmane.org>
> >
> > ---
> >  amdgpu/amdgpu-symbol-check |  3 ++
> >  amdgpu/amdgpu.h            | 56 +++++++++++++++++++++++++++++++++++++-
> >  amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
> >  amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
> >  4 files changed, 119 insertions(+), 1 deletion(-)
> >
> > diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
> > index 6f5e0f95..96a44b40 100755
> > --- a/amdgpu/amdgpu-symbol-check
> > +++ b/amdgpu/amdgpu-symbol-check
> > @@ -12,20 +12,22 @@ _edata
> >  _end
> >  _fini
> >  _init
> >  amdgpu_bo_alloc
> >  amdgpu_bo_cpu_map
> >  amdgpu_bo_cpu_unmap
> >  amdgpu_bo_export
> >  amdgpu_bo_free
> >  amdgpu_bo_import
> >  amdgpu_bo_inc_ref
> > +amdgpu_bo_list_create_raw
> > +amdgpu_bo_list_destroy_raw
> >  amdgpu_bo_list_create
> >  amdgpu_bo_list_destroy
> >  amdgpu_bo_list_update
> >  amdgpu_bo_query_info
> >  amdgpu_bo_set_metadata
> >  amdgpu_bo_va_op
> >  amdgpu_bo_va_op_raw
> >  amdgpu_bo_wait_for_idle
> >  amdgpu_create_bo_from_user_mem
> >  amdgpu_cs_chunk_fence_info_to_data
> > @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
> >  amdgpu_cs_destroy_syncobj
> >  amdgpu_cs_export_syncobj
> >  amdgpu_cs_fence_to_handle
> >  amdgpu_cs_import_syncobj
> >  amdgpu_cs_query_fence_status
> >  amdgpu_cs_query_reset_state
> >  amdgpu_query_sw_info
> >  amdgpu_cs_signal_semaphore
> >  amdgpu_cs_submit
> >  amdgpu_cs_submit_raw
> > +amdgpu_cs_submit_raw2
> >  amdgpu_cs_syncobj_export_sync_file
> >  amdgpu_cs_syncobj_import_sync_file
> >  amdgpu_cs_syncobj_reset
> >  amdgpu_cs_syncobj_signal
> >  amdgpu_cs_syncobj_wait
> >  amdgpu_cs_wait_fences
> >  amdgpu_cs_wait_semaphore
> >  amdgpu_device_deinitialize
> >  amdgpu_device_initialize
> >  amdgpu_find_bo_by_cpu_mapping
> > diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
> > index dc51659a..5b800033 100644
> > --- a/amdgpu/amdgpu.h
> > +++ b/amdgpu/amdgpu.h
> > @@ -35,20 +35,21 @@
> >  #define _AMDGPU_H_
> >
> >  #include <stdint.h>
> >  #include <stdbool.h>
> >
> >  #ifdef __cplusplus
> >  extern "C" {
> >  #endif
> >
> >  struct drm_amdgpu_info_hw_ip;
> > +struct drm_amdgpu_bo_list_entry;
> >
> >
> /*--------------------------------------------------------------------------*/
> >  /* --------------------------- Defines
> ------------------------------------ */
> >
> /*--------------------------------------------------------------------------*/
> >
> >  /**
> >   * Define max. number of Command Buffers (IB) which could be sent to
> the single
> >   * hardware IP to accommodate CE/DE requirements
> >   *
> >   * \sa amdgpu_cs_ib_info
> > @@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle
> buf_handle);
> >   *                            and no GPU access is scheduled.
> >   *                          1 GPU access is in fly or scheduled
> >   *
> >   * \return   0 - on success
> >   *          <0 - Negative POSIX Error code
> >   */
> >  int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
> >                             uint64_t timeout_ns,
> >                             bool *buffer_busy);
> >
> > +/**
> > + * Creates a BO list handle for command submission.
> > + *
> > + * \param   dev                        - \c [in] Device handle.
> > + *                                See #amdgpu_device_initialize()
> > + * \param   number_of_buffers  - \c [in] Number of BOs in the list
> > + * \param   buffers            - \c [in] List of BO handles
> > + * \param   result             - \c [out] Created BO list handle
> > + *
> > + * \return   0 on success\n
> > + *          <0 - Negative POSIX Error code
> > + *
> > + * \sa amdgpu_bo_list_destroy_raw()
> > +*/
> > +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
> > +                             uint32_t number_of_buffers,
> > +                             struct drm_amdgpu_bo_list_entry *buffers,
> > +                             uint32_t *result);
>
> So AFAIU  drm_amdgpu_bo_list_entry takes a raw bo handle while we
> never get a raw bo handle from libdrm_amdgpu. How are we supposed to
> fill it in?
>

This function returns it.
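
To make that concrete, a sketch of how the entries could be filled today, assuming the kernel (KMS) handle returned by amdgpu_bo_export() is the raw handle the ioctl expects (hypothetical helper, not part of the patch):

#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Build the raw entry array from existing amdgpu_bo_handle objects and
 * create a raw list from it. */
static int create_raw_list(amdgpu_device_handle dev, amdgpu_bo_handle *bos,
                           unsigned num_bos, uint32_t *list_handle)
{
    struct drm_amdgpu_bo_list_entry entries[num_bos]; /* VLA, sketch only */
    unsigned i;
    int r;

    for (i = 0; i < num_bos; i++) {
        uint32_t kms_handle;

        r = amdgpu_bo_export(bos[i], amdgpu_bo_handle_type_kms, &kms_handle);
        if (r)
            return r;

        entries[i].bo_handle = kms_handle;
        entries[i].bo_priority = 0;
    }

    return amdgpu_bo_list_create_raw(dev, num_bos, entries, list_handle);
}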


> What do we win by having the raw handle for the bo_list? If we would
> not return the raw handle we would not need the submit_raw2.
>

One less malloc call and pointer indirection.
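
Put differently, the whole fast path then looks roughly like this (a sketch; entries and chunks are assumed to be built as usual):

/* One ioctl to create the list, one to submit, one to destroy; the list
 * handle is a plain uint32_t, so there is no heap-allocated wrapper. */
static int submit_with_raw_list(amdgpu_device_handle dev,
                                amdgpu_context_handle ctx,
                                struct drm_amdgpu_bo_list_entry *entries,
                                unsigned num_bos,
                                struct drm_amdgpu_cs_chunk *chunks,
                                int num_chunks, uint64_t *seq_no)
{
    uint32_t bo_list;
    int r;

    r = amdgpu_bo_list_create_raw(dev, num_bos, entries, &bo_list);
    if (r)
        return r;

    r = amdgpu_cs_submit_raw2(dev, ctx, bo_list, num_chunks, chunks, seq_no);

    amdgpu_bo_list_destroy_raw(dev, bo_list);
    return r;
}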

Marek


> > +
> > +/**
> > + * Destroys a BO list handle.
> > + *
> > + * \param   bo_list    - \c [in] BO list handle.
> > + *
> > + * \return   0 on success\n
> > + *          <0 - Negative POSIX Error code
> > + *
> > + * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
> > +*/
> > +int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t
> bo_list);
> > +
> >  /**
> >   * Creates a BO list handle for command submission.
> >   *
> >   * \param   dev                        - \c [in] Device handle.
> >   *                                See #amdgpu_device_initialize()
> >   * \param   number_of_resources        - \c [in] Number of BOs in the
> list
> >   * \param   resources          - \c [in] List of BO handles
> >   * \param   resource_prios     - \c [in] Optional priority for each
> handle
> >   * \param   result             - \c [out] Created BO list handle
> >   *
> >   * \return   0 on success\n
> >   *          <0 - Negative POSIX Error code
> >   *
> > - * \sa amdgpu_bo_list_destroy()
> > + * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
> >  */
> >  int amdgpu_bo_list_create(amdgpu_device_handle dev,
> >                           uint32_t number_of_resources,
> >                           amdgpu_bo_handle *resources,
> >                           uint8_t *resource_prios,
> >                           amdgpu_bo_list_handle *result);
> >
> >  /**
> >   * Destroys a BO list handle.
> >   *
> > @@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
> >  struct drm_amdgpu_cs_chunk_dep;
> >  struct drm_amdgpu_cs_chunk_data;
> >
> >  int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
> >                          amdgpu_context_handle context,
> >                          amdgpu_bo_list_handle bo_list_handle,
> >                          int num_chunks,
> >                          struct drm_amdgpu_cs_chunk *chunks,
> >                          uint64_t *seq_no);
> >
> > +/**
> > + * Submit raw command submission to the kernel with a raw BO list
> handle.
> > + *
> > + * \param   dev               - \c [in] device handle
> > + * \param   context    - \c [in] context handle for context id
> > + * \param   bo_list_handle - \c [in] raw bo list handle (0 for none)
> > + * \param   num_chunks - \c [in] number of CS chunks to submit
> > + * \param   chunks     - \c [in] array of CS chunks
> > + * \param   seq_no     - \c [out] output sequence number for submission.
> > + *
> > + * \return   0 on success\n
> > + *          <0 - Negative POSIX Error code
> > + *
> > + * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
> > + */
> > +int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
> > +                         amdgpu_context_handle context,
> > +                         uint32_t bo_list_handle,
> > +                         int num_chunks,
> > +                         struct drm_amdgpu_cs_chunk *chunks,
> > +                         uint64_t *seq_no);
> > +
> >  void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
> >                                   struct drm_amdgpu_cs_chunk_dep *dep);
> >  void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info
> *fence_info,
> >                                         struct drm_amdgpu_cs_chunk_data
> *data);
> >
> >  /**
> >   * Reserve VMID
> >   * \param   context - \c [in]  GPU Context
> >   * \param   flags - \c [in]  TBD
> >   *
> > diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
> > index c0f42e81..21bc73aa 100644
> > --- a/amdgpu/amdgpu_bo.c
> > +++ b/amdgpu/amdgpu_bo.c
> > @@ -611,20 +611,56 @@ drm_public int
> amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
> >         pthread_mutex_lock(&dev->bo_table_mutex);
> >         r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
> >                                 *buf_handle);
> >         pthread_mutex_unlock(&dev->bo_table_mutex);
> >         if (r)
> >                 amdgpu_bo_free(*buf_handle);
> >  out:
> >         return r;
> >  }
> >
> > +drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
> > +                                        uint32_t number_of_buffers,
> > +                                        struct drm_amdgpu_bo_list_entry
> *buffers,
> > +                                        uint32_t *result)
> > +{
> > +       union drm_amdgpu_bo_list args;
> > +       int r;
> > +
> > +       memset(&args, 0, sizeof(args));
> > +       args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
> > +       args.in.bo_number = number_of_buffers;
> > +       args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
> > +       args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
> > +
> > +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
> > +                               &args, sizeof(args));
> > +       if (r)
> > +               return r;
> > +
> > +       *result = args.out.list_handle;
> > +       return 0;
> > +}
> > +
> > +drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
> > +                                         uint32_t bo_list)
> > +{
> > +       union drm_amdgpu_bo_list args;
> > +
> > +       memset(&args, 0, sizeof(args));
> > +       args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
> > +       args.in.list_handle = bo_list;
> > +
> > +       return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
> > +                                  &args, sizeof(args));
> > +}
> > +
> >  drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
> >                                      uint32_t number_of_resources,
> >                                      amdgpu_bo_handle *resources,
> >                                      uint8_t *resource_prios,
> >                                      amdgpu_bo_list_handle *result)
> >  {
> >         struct drm_amdgpu_bo_list_entry *list;
> >         union drm_amdgpu_bo_list args;
> >         unsigned i;
> >         int r;
> > diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
> > index 3b8231aa..5bedf748 100644
> > --- a/amdgpu/amdgpu_cs.c
> > +++ b/amdgpu/amdgpu_cs.c
> > @@ -724,20 +724,45 @@ drm_public int
> amdgpu_cs_submit_raw(amdgpu_device_handle dev,
> >         r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
> >                                 &cs, sizeof(cs));
> >         if (r)
> >                 return r;
> >
> >         if (seq_no)
> >                 *seq_no = cs.out.handle;
> >         return 0;
> >  }
> >
> > +drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
> > +                                    amdgpu_context_handle context,
> > +                                    uint32_t bo_list_handle,
> > +                                    int num_chunks,
> > +                                    struct drm_amdgpu_cs_chunk *chunks,
> > +                                    uint64_t *seq_no)
> > +{
> > +       union drm_amdgpu_cs cs = {0};
> > +       uint64_t *chunk_array;
> > +       int i, r;
> > +
> > +       chunk_array = alloca(sizeof(uint64_t) * num_chunks);
> > +       for (i = 0; i < num_chunks; i++)
> > +               chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
> > +       cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
> > +       cs.in.ctx_id = context->id;
> > +       cs.in.bo_list_handle = bo_list_handle;
> > +       cs.in.num_chunks = num_chunks;
> > +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
> > +                               &cs, sizeof(cs));
> > +       if (!r && seq_no)
> > +               *seq_no = cs.out.handle;
> > +       return r;
> > +}
> > +
> >  drm_public void amdgpu_cs_chunk_fence_info_to_data(struct
> amdgpu_cs_fence_info *fence_info,
> >                                         struct drm_amdgpu_cs_chunk_data
> *data)
> >  {
> >         data->fence_data.handle = fence_info->handle->handle;
> >         data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
> >  }
> >
> >  drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence
> *fence,
> >                                         struct drm_amdgpu_cs_chunk_dep
> *dep)
> >  {
> > --
> > 2.17.1
> >
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>

[-- Attachment #1.2: Type: text/html, Size: 19022 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                 ` <a550562a-7d36-9acf-3143-217c507e667a-5C7GfCeVMHo@public.gmane.org>
@ 2019-01-16 14:39                   ` Marek Olšák
       [not found]                     ` <CAAxE2A4k8JtkrS2XfgRdmYY3NVR4ges=Yqfh-TH9O=LnaVv02g-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Marek Olšák @ 2019-01-16 14:39 UTC (permalink / raw)
  To: Koenig, Christian; +Cc: amd-gfx mailing list, Bas Nieuwenhuizen


[-- Attachment #1.1: Type: text/plain, Size: 16867 bytes --]

On Wed, Jan 16, 2019, 9:34 AM Koenig, Christian <Christian.Koenig-5C7GfCeVMHo@public.gmane.org
wrote:

> On 16.01.19 at 15:31, Marek Olšák wrote:
>
>
>
> On Wed, Jan 16, 2019, 7:55 AM Christian König <
> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org wrote:
>
>> Well if you ask me we should have the following interface for
>> negotiating memory management with the kernel:
>>
>> 1. We have per process BOs which can't be shared between processes.
>>
>> Those are always valid and don't need to be mentioned in any BO list
>> whatsoever.
>>
>> If we know that a per process BO is currently not in use, we can
>> optionally tell that to the kernel to make memory management more
>> efficient.
>>
>> In other words, instead of a list of stuff which is used, we send down to
>> the kernel a list of stuff which is not used any more, and we do that only
>> when we know that it is necessary, e.g. when a game or application overcommits.
>>
>
> Radeonsi doesn't use this because this approach caused performance
> degradation and also drops BO priorities.
>
>
> The performance degradation was mostly due to shortcomings in the LRU,
> which have been fixed by now.
>
> BO priorities are a different topic, but could be added to per VM BOs as
> well.
>

What's the minimum drm version that contains the fixes?
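
For reference, this is the sort of check userspace could use once that number is known; it assumes the amdgpu kernel driver version (3.x) reported by drmGetVersion() is the right thing to key off, and the minimum minor stays a parameter because it is exactly what is being asked for:

#include <stdbool.h>
#include <xf86drm.h>

/* Return true if the amdgpu kernel driver on 'fd' is at least 3.min_minor. */
static bool kernel_amdgpu_at_least(int fd, int min_minor)
{
    drmVersionPtr ver = drmGetVersion(fd);
    bool ok;

    if (!ver)
        return false;

    ok = ver->version_major > 3 ||
         (ver->version_major == 3 && ver->version_minor >= min_minor);
    drmFreeVersion(ver);
    return ok;
}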

Marek


> Christian.
>
>
> Marek
>
>
>> 2. We have shared BOs which are used by more than one process.
>>
>> Those are rare and should be added to the per CS list of BOs in use.
>>
>>
>> The whole BO list interface Marek tries to optimize here should be
>> deprecated and not used any more.
>>
>> Regards,
>> Christian.
>>
>> On 16.01.19 at 13:46, Bas Nieuwenhuizen wrote:
>> > So random questions:
>> >
>> > 1) In this discussion it was mentioned that some Vulkan drivers still
>> > use the bo_list interface. I think that implies radv as I think we're
>> > still using bo_list. Is there any other API we should be using? (Also,
>> > with VK_EXT_descriptor_indexing I suspect we'll be moving more towards
>> > a global bo list instead of a cmd buffer one, as we cannot know all
>> > the BOs referenced anymore, but not sure what end state here will be).
>> >
>> > 2) The other alternative mentioned was adding the buffers directly
>> > into the submit ioctl. Is this the desired end state (though as above
>> > I'm not sure how that works for vulkan)? If yes, what is the timeline
>> > for this that we need something in the interim?
>> >
>> > 3) Did we measure any performance benefit?
>> >
>> > In general I'd like to ack the raw bo list creation function as
>> > this interface seems easier to use. The two arrays thing has always
>> > been kind of a pain when we want to use e.g. builtin sort functions to
>> > make sure we have no duplicate BOs, but have some comments below.
>> >
>> > On Mon, Jan 7, 2019 at 8:31 PM Marek Olšák <maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>> >> From: Marek Olšák <marek.olsak-5C7GfCeVMHo@public.gmane.org>
>> >>
>> >> ---
>> >>   amdgpu/amdgpu-symbol-check |  3 ++
>> >>   amdgpu/amdgpu.h            | 56
>> +++++++++++++++++++++++++++++++++++++-
>> >>   amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
>> >>   amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
>> >>   4 files changed, 119 insertions(+), 1 deletion(-)
>> >>
>> >> diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
>> >> index 6f5e0f95..96a44b40 100755
>> >> --- a/amdgpu/amdgpu-symbol-check
>> >> +++ b/amdgpu/amdgpu-symbol-check
>> >> @@ -12,20 +12,22 @@ _edata
>> >>   _end
>> >>   _fini
>> >>   _init
>> >>   amdgpu_bo_alloc
>> >>   amdgpu_bo_cpu_map
>> >>   amdgpu_bo_cpu_unmap
>> >>   amdgpu_bo_export
>> >>   amdgpu_bo_free
>> >>   amdgpu_bo_import
>> >>   amdgpu_bo_inc_ref
>> >> +amdgpu_bo_list_create_raw
>> >> +amdgpu_bo_list_destroy_raw
>> >>   amdgpu_bo_list_create
>> >>   amdgpu_bo_list_destroy
>> >>   amdgpu_bo_list_update
>> >>   amdgpu_bo_query_info
>> >>   amdgpu_bo_set_metadata
>> >>   amdgpu_bo_va_op
>> >>   amdgpu_bo_va_op_raw
>> >>   amdgpu_bo_wait_for_idle
>> >>   amdgpu_create_bo_from_user_mem
>> >>   amdgpu_cs_chunk_fence_info_to_data
>> >> @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
>> >>   amdgpu_cs_destroy_syncobj
>> >>   amdgpu_cs_export_syncobj
>> >>   amdgpu_cs_fence_to_handle
>> >>   amdgpu_cs_import_syncobj
>> >>   amdgpu_cs_query_fence_status
>> >>   amdgpu_cs_query_reset_state
>> >>   amdgpu_query_sw_info
>> >>   amdgpu_cs_signal_semaphore
>> >>   amdgpu_cs_submit
>> >>   amdgpu_cs_submit_raw
>> >> +amdgpu_cs_submit_raw2
>> >>   amdgpu_cs_syncobj_export_sync_file
>> >>   amdgpu_cs_syncobj_import_sync_file
>> >>   amdgpu_cs_syncobj_reset
>> >>   amdgpu_cs_syncobj_signal
>> >>   amdgpu_cs_syncobj_wait
>> >>   amdgpu_cs_wait_fences
>> >>   amdgpu_cs_wait_semaphore
>> >>   amdgpu_device_deinitialize
>> >>   amdgpu_device_initialize
>> >>   amdgpu_find_bo_by_cpu_mapping
>> >> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
>> >> index dc51659a..5b800033 100644
>> >> --- a/amdgpu/amdgpu.h
>> >> +++ b/amdgpu/amdgpu.h
>> >> @@ -35,20 +35,21 @@
>> >>   #define _AMDGPU_H_
>> >>
>> >>   #include <stdint.h>
>> >>   #include <stdbool.h>
>> >>
>> >>   #ifdef __cplusplus
>> >>   extern "C" {
>> >>   #endif
>> >>
>> >>   struct drm_amdgpu_info_hw_ip;
>> >> +struct drm_amdgpu_bo_list_entry;
>> >>
>> >>
>>  /*--------------------------------------------------------------------------*/
>> >>   /* --------------------------- Defines
>> ------------------------------------ */
>> >>
>>  /*--------------------------------------------------------------------------*/
>> >>
>> >>   /**
>> >>    * Define max. number of Command Buffers (IB) which could be sent to
>> the single
>> >>    * hardware IP to accommodate CE/DE requirements
>> >>    *
>> >>    * \sa amdgpu_cs_ib_info
>> >> @@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle
>> buf_handle);
>> >>    *                            and no GPU access is scheduled.
>> >>    *                          1 GPU access is in fly or scheduled
>> >>    *
>> >>    * \return   0 - on success
>> >>    *          <0 - Negative POSIX Error code
>> >>    */
>> >>   int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
>> >>                              uint64_t timeout_ns,
>> >>                              bool *buffer_busy);
>> >>
>> >> +/**
>> >> + * Creates a BO list handle for command submission.
>> >> + *
>> >> + * \param   dev                        - \c [in] Device handle.
>> >> + *                                See #amdgpu_device_initialize()
>> >> + * \param   number_of_buffers  - \c [in] Number of BOs in the list
>> >> + * \param   buffers            - \c [in] List of BO handles
>> >> + * \param   result             - \c [out] Created BO list handle
>> >> + *
>> >> + * \return   0 on success\n
>> >> + *          <0 - Negative POSIX Error code
>> >> + *
>> >> + * \sa amdgpu_bo_list_destroy_raw()
>> >> +*/
>> >> +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>> >> +                             uint32_t number_of_buffers,
>> >> +                             struct drm_amdgpu_bo_list_entry *buffers,
>> >> +                             uint32_t *result);
>> > So AFAIU  drm_amdgpu_bo_list_entry takes a raw bo handle while we
>> > never get a raw bo handle from libdrm_amdgpu. How are we supposed to
>> > fill it in?
>> >
>> > What do we win by having the raw handle for the bo_list? If we would
>> > not return the raw handle we would not need the submit_raw2.
>> >
>> >> +
>> >> +/**
>> >> + * Destroys a BO list handle.
>> >> + *
>> >> + * \param   bo_list    - \c [in] BO list handle.
>> >> + *
>> >> + * \return   0 on success\n
>> >> + *          <0 - Negative POSIX Error code
>> >> + *
>> >> + * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
>> >> +*/
>> >> +int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t
>> bo_list);
>> >> +
>> >>   /**
>> >>    * Creates a BO list handle for command submission.
>> >>    *
>> >>    * \param   dev                        - \c [in] Device handle.
>> >>    *                                See #amdgpu_device_initialize()
>> >>    * \param   number_of_resources        - \c [in] Number of BOs in
>> the list
>> >>    * \param   resources          - \c [in] List of BO handles
>> >>    * \param   resource_prios     - \c [in] Optional priority for each
>> handle
>> >>    * \param   result             - \c [out] Created BO list handle
>> >>    *
>> >>    * \return   0 on success\n
>> >>    *          <0 - Negative POSIX Error code
>> >>    *
>> >> - * \sa amdgpu_bo_list_destroy()
>> >> + * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
>> >>   */
>> >>   int amdgpu_bo_list_create(amdgpu_device_handle dev,
>> >>                            uint32_t number_of_resources,
>> >>                            amdgpu_bo_handle *resources,
>> >>                            uint8_t *resource_prios,
>> >>                            amdgpu_bo_list_handle *result);
>> >>
>> >>   /**
>> >>    * Destroys a BO list handle.
>> >>    *
>> >> @@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
>> >>   struct drm_amdgpu_cs_chunk_dep;
>> >>   struct drm_amdgpu_cs_chunk_data;
>> >>
>> >>   int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>> >>                           amdgpu_context_handle context,
>> >>                           amdgpu_bo_list_handle bo_list_handle,
>> >>                           int num_chunks,
>> >>                           struct drm_amdgpu_cs_chunk *chunks,
>> >>                           uint64_t *seq_no);
>> >>
>> >> +/**
>> >> + * Submit raw command submission to the kernel with a raw BO list
>> handle.
>> >> + *
>> >> + * \param   dev               - \c [in] device handle
>> >> + * \param   context    - \c [in] context handle for context id
>> >> + * \param   bo_list_handle - \c [in] raw bo list handle (0 for none)
>> >> + * \param   num_chunks - \c [in] number of CS chunks to submit
>> >> + * \param   chunks     - \c [in] array of CS chunks
>> >> + * \param   seq_no     - \c [out] output sequence number for
>> submission.
>> >> + *
>> >> + * \return   0 on success\n
>> >> + *          <0 - Negative POSIX Error code
>> >> + *
>> >> + * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
>> >> + */
>> >> +int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>> >> +                         amdgpu_context_handle context,
>> >> +                         uint32_t bo_list_handle,
>> >> +                         int num_chunks,
>> >> +                         struct drm_amdgpu_cs_chunk *chunks,
>> >> +                         uint64_t *seq_no);
>> >> +
>> >>   void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>> >>                                    struct drm_amdgpu_cs_chunk_dep
>> *dep);
>> >>   void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info
>> *fence_info,
>> >>                                          struct
>> drm_amdgpu_cs_chunk_data *data);
>> >>
>> >>   /**
>> >>    * Reserve VMID
>> >>    * \param   context - \c [in]  GPU Context
>> >>    * \param   flags - \c [in]  TBD
>> >>    *
>> >> diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
>> >> index c0f42e81..21bc73aa 100644
>> >> --- a/amdgpu/amdgpu_bo.c
>> >> +++ b/amdgpu/amdgpu_bo.c
>> >> @@ -611,20 +611,56 @@ drm_public int
>> amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
>> >>          pthread_mutex_lock(&dev->bo_table_mutex);
>> >>          r = handle_table_insert(&dev->bo_handles,
>> (*buf_handle)->handle,
>> >>                                  *buf_handle);
>> >>          pthread_mutex_unlock(&dev->bo_table_mutex);
>> >>          if (r)
>> >>                  amdgpu_bo_free(*buf_handle);
>> >>   out:
>> >>          return r;
>> >>   }
>> >>
>> >> +drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>> >> +                                        uint32_t number_of_buffers,
>> >> +                                        struct
>> drm_amdgpu_bo_list_entry *buffers,
>> >> +                                        uint32_t *result)
>> >> +{
>> >> +       union drm_amdgpu_bo_list args;
>> >> +       int r;
>> >> +
>> >> +       memset(&args, 0, sizeof(args));
>> >> +       args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
>> >> +       args.in.bo_number = number_of_buffers;
>> >> +       args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
>> >> +       args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
>> >> +
>> >> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>> >> +                               &args, sizeof(args));
>> >> +       if (r)
>> >> +               return r;
>> >> +
>> >> +       *result = args.out.list_handle;
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
>> >> +                                         uint32_t bo_list)
>> >> +{
>> >> +       union drm_amdgpu_bo_list args;
>> >> +
>> >> +       memset(&args, 0, sizeof(args));
>> >> +       args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
>> >> +       args.in.list_handle = bo_list;
>> >> +
>> >> +       return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>> >> +                                  &args, sizeof(args));
>> >> +}
>> >> +
>> >>   drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
>> >>                                       uint32_t number_of_resources,
>> >>                                       amdgpu_bo_handle *resources,
>> >>                                       uint8_t *resource_prios,
>> >>                                       amdgpu_bo_list_handle *result)
>> >>   {
>> >>          struct drm_amdgpu_bo_list_entry *list;
>> >>          union drm_amdgpu_bo_list args;
>> >>          unsigned i;
>> >>          int r;
>> >> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
>> >> index 3b8231aa..5bedf748 100644
>> >> --- a/amdgpu/amdgpu_cs.c
>> >> +++ b/amdgpu/amdgpu_cs.c
>> >> @@ -724,20 +724,45 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>> >>          r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>> >>                                  &cs, sizeof(cs));
>> >>          if (r)
>> >>                  return r;
>> >>
>> >>          if (seq_no)
>> >>                  *seq_no = cs.out.handle;
>> >>          return 0;
>> >>   }
>> >>
>> >> +drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>> >> +                                    amdgpu_context_handle context,
>> >> +                                    uint32_t bo_list_handle,
>> >> +                                    int num_chunks,
>> >> +                                    struct drm_amdgpu_cs_chunk *chunks,
>> >> +                                    uint64_t *seq_no)
>> >> +{
>> >> +       union drm_amdgpu_cs cs = {0};
>> >> +       uint64_t *chunk_array;
>> >> +       int i, r;
>> >> +
>> >> +       chunk_array = alloca(sizeof(uint64_t) * num_chunks);
>> >> +       for (i = 0; i < num_chunks; i++)
>> >> +               chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
>> >> +       cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
>> >> +       cs.in.ctx_id = context->id;
>> >> +       cs.in.bo_list_handle = bo_list_handle;
>> >> +       cs.in.num_chunks = num_chunks;
>> >> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>> >> +                               &cs, sizeof(cs));
>> >> +       if (!r && seq_no)
>> >> +               *seq_no = cs.out.handle;
>> >> +       return r;
>> >> +}
>> >> +
>> >>   drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>> >>                                          struct drm_amdgpu_cs_chunk_data *data)
>> >>   {
>> >>          data->fence_data.handle = fence_info->handle->handle;
>> >>          data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
>> >>   }
>> >>
>> >>   drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>> >>                                          struct drm_amdgpu_cs_chunk_dep *dep)
>> >>   {
>> >> --
>> >> 2.17.1
>> >>

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                     ` <CAAxE2A4k8JtkrS2XfgRdmYY3NVR4ges=Yqfh-TH9O=LnaVv02g-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-16 14:43                       ` Christian König
       [not found]                         ` <3d525127-825b-efab-b0c8-76550634d1c1-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Christian König @ 2019-01-16 14:43 UTC (permalink / raw)
  To: Marek Olšák, Koenig, Christian
  Cc: amd-gfx mailing list, Bas Nieuwenhuizen



Am 16.01.19 um 15:39 schrieb Marek Olšák:
>
>
> On Wed, Jan 16, 2019, 9:34 AM Koenig, Christian 
> <Christian.Koenig-5C7GfCeVMHo@public.gmane.org <mailto:Christian.Koenig-5C7GfCeVMHo@public.gmane.org> wrote:
>
>     Am 16.01.19 um 15:31 schrieb Marek Olšák:
>>
>>
>>     On Wed, Jan 16, 2019, 7:55 AM Christian König
>>     <ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org
>>     <mailto:ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>>
>>         Well if you ask me we should have the following interface for
>>         negotiating memory management with the kernel:
>>
>>         1. We have per process BOs which can't be shared between
>>         processes.
>>
>>         Those are always valid and don't need to be mentioned in any
>>         BO list
>>         whatsoever.
>>
>>         If we know that a per process BO is currently not in use, we can
>>         optionally tell the kernel to make memory management
>>         more efficient.
>>
>>         In other words instead of a list of stuff which is used we
>>         send down to
>>         the kernel a list of stuff which is not used any more and
>>         that only when
>>         we know that it is necessary, e.g. when a game or application
>>         overcommits.
>>
>>
>>     Radeonsi doesn't use this because this approach caused
>>     performance degradation and also drops BO priorities.
>
>     The performance degradation was mostly due to shortcomings in the LRU
>     which have by now been fixed.
>
>     BO priorities are a different topic, but could be added to per VM
>     BOs as well.
>
>
> What's the minimum drm version that contains the fixes?

I've pushed the last optimization this morning. No idea when it really 
became useful, but the numbers from the closed source clients now look 
much better.

We should probably test and bump the drm version when we are sure that 
this now works as expected.

Christian.

>
> Marek
>
>
>     Christian.
>
>>
>>     Marek
>>
>>
>>         2. We have shared BOs which are used by more than one process.
>>
>>         Those are rare and should be added to the per CS list of BOs
>>         in use.
>>
>>
>>         The whole BO list interface Marek tries to optimize here
>>         should be
>>         deprecated and not used any more.
>>
>>         Regards,
>>         Christian.
>>
>>         Am 16.01.19 um 13:46 schrieb Bas Nieuwenhuizen:
>>         > So random questions:
>>         >
>>         > 1) In this discussion it was mentioned that some Vulkan
>>         drivers still
>>         > use the bo_list interface. I think that implies radv as I
>>         think we're
>>         > still using bo_list. Is there any other API we should be
>>         using? (Also,
>>         > with VK_EXT_descriptor_indexing I suspect we'll be moving
>>         more towards
>>         > a global bo list instead of a cmd buffer one, as we cannot
>>         know all
>>         > the BOs referenced anymore, but not sure what end state
>>         here will be).
>>         >
>>         > 2) The other alternative mentioned was adding the buffers
>>         directly
>>         > into the submit ioctl. Is this the desired end state
>>         (though as above
>>         > I'm not sure how that works for vulkan)? If yes, what is
>>         the timeline
>>         > for this that we need something in the interim?
>>         >
>>         > 3) Did we measure any performance benefit?
>>         >
>>         > In general I'd like to ack the raw bo list creation
>>         function as
>>         > this interface seems easier to use. The two arrays thing
>>         has always
>>         > been kind of a pain when we want to use e.g. builtin sort
>>         functions to
>>         > make sure we have no duplicate BOs, but have some comments
>>         below.
>>         >
>>         > On Mon, Jan 7, 2019 at 8:31 PM Marek Olšák
>>         <maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org <mailto:maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>> wrote:
>>         >> From: Marek Olšák <marek.olsak-5C7GfCeVMHo@public.gmane.org
>>         <mailto:marek.olsak-5C7GfCeVMHo@public.gmane.org>>
>>         >>
>>         >> ---
>>         >>   amdgpu/amdgpu-symbol-check |  3 ++
>>         >>   amdgpu/amdgpu.h            | 56
>>         +++++++++++++++++++++++++++++++++++++-
>>         >>   amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
>>         >>   amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
>>         >>   4 files changed, 119 insertions(+), 1 deletion(-)
>>         >>
>>         >> diff --git a/amdgpu/amdgpu-symbol-check
>>         b/amdgpu/amdgpu-symbol-check
>>         >> index 6f5e0f95..96a44b40 100755
>>         >> --- a/amdgpu/amdgpu-symbol-check
>>         >> +++ b/amdgpu/amdgpu-symbol-check
>>         >> @@ -12,20 +12,22 @@ _edata
>>         >>   _end
>>         >>   _fini
>>         >>   _init
>>         >>   amdgpu_bo_alloc
>>         >>   amdgpu_bo_cpu_map
>>         >>   amdgpu_bo_cpu_unmap
>>         >>   amdgpu_bo_export
>>         >>   amdgpu_bo_free
>>         >>   amdgpu_bo_import
>>         >>   amdgpu_bo_inc_ref
>>         >> +amdgpu_bo_list_create_raw
>>         >> +amdgpu_bo_list_destroy_raw
>>         >>   amdgpu_bo_list_create
>>         >>   amdgpu_bo_list_destroy
>>         >>   amdgpu_bo_list_update
>>         >>   amdgpu_bo_query_info
>>         >>   amdgpu_bo_set_metadata
>>         >>   amdgpu_bo_va_op
>>         >>   amdgpu_bo_va_op_raw
>>         >>   amdgpu_bo_wait_for_idle
>>         >>   amdgpu_create_bo_from_user_mem
>>         >>   amdgpu_cs_chunk_fence_info_to_data
>>         >> @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
>>         >>   amdgpu_cs_destroy_syncobj
>>         >>   amdgpu_cs_export_syncobj
>>         >>   amdgpu_cs_fence_to_handle
>>         >>   amdgpu_cs_import_syncobj
>>         >>   amdgpu_cs_query_fence_status
>>         >>   amdgpu_cs_query_reset_state
>>         >>   amdgpu_query_sw_info
>>         >>   amdgpu_cs_signal_semaphore
>>         >>   amdgpu_cs_submit
>>         >>   amdgpu_cs_submit_raw
>>         >> +amdgpu_cs_submit_raw2
>>         >>   amdgpu_cs_syncobj_export_sync_file
>>         >>   amdgpu_cs_syncobj_import_sync_file
>>         >>   amdgpu_cs_syncobj_reset
>>         >>   amdgpu_cs_syncobj_signal
>>         >>   amdgpu_cs_syncobj_wait
>>         >>   amdgpu_cs_wait_fences
>>         >>   amdgpu_cs_wait_semaphore
>>         >>   amdgpu_device_deinitialize
>>         >>   amdgpu_device_initialize
>>         >>   amdgpu_find_bo_by_cpu_mapping
>>         >> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
>>         >> index dc51659a..5b800033 100644
>>         >> --- a/amdgpu/amdgpu.h
>>         >> +++ b/amdgpu/amdgpu.h
>>         >> @@ -35,20 +35,21 @@
>>         >>   #define _AMDGPU_H_
>>         >>
>>         >>   #include <stdint.h>
>>         >>   #include <stdbool.h>
>>         >>
>>         >>   #ifdef __cplusplus
>>         >>   extern "C" {
>>         >>   #endif
>>         >>
>>         >>   struct drm_amdgpu_info_hw_ip;
>>         >> +struct drm_amdgpu_bo_list_entry;
>>         >>
>>         >>
>>          /*--------------------------------------------------------------------------*/
>>         >>   /* --------------------------- Defines
>>         ------------------------------------ */
>>         >>
>>          /*--------------------------------------------------------------------------*/
>>         >>
>>         >>   /**
>>         >>    * Define max. number of Command Buffers (IB) which
>>         could be sent to the single
>>         >>    * hardware IP to accommodate CE/DE requirements
>>         >>    *
>>         >>    * \sa amdgpu_cs_ib_info
>>         >> @@ -767,34 +768,65 @@ int
>>         amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
>>         >>    *                            and no GPU access is
>>         scheduled.
>>         >>    *                          1 GPU access is in fly or
>>         scheduled
>>         >>    *
>>         >>    * \return   0 - on success
>>         >>    *          <0 - Negative POSIX Error code
>>         >>    */
>>         >>   int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
>>         >>                              uint64_t timeout_ns,
>>         >>                              bool *buffer_busy);
>>         >>
>>         >> +/**
>>         >> + * Creates a BO list handle for command submission.
>>         >> + *
>>         >> + * \param   dev   - \c [in] Device handle.
>>         >> + * See #amdgpu_device_initialize()
>>         >> + * \param   number_of_buffers  - \c [in] Number of BOs in
>>         the list
>>         >> + * \param   buffers            - \c [in] List of BO handles
>>         >> + * \param   result             - \c [out] Created BO list
>>         handle
>>         >> + *
>>         >> + * \return   0 on success\n
>>         >> + *          <0 - Negative POSIX Error code
>>         >> + *
>>         >> + * \sa amdgpu_bo_list_destroy_raw()
>>         >> +*/
>>         >> +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>>         >> +  uint32_t number_of_buffers,
>>         >> +                             struct
>>         drm_amdgpu_bo_list_entry *buffers,
>>         >> +  uint32_t *result);
>>         > So AFAIU  drm_amdgpu_bo_list_entry takes a raw bo handle
>>         while we
>>         > never get a raw bo handle from libdrm_amdgpu. How are we
>>         supposed to
>>         > fill it in?
>>         >
>>         > What do we win by having the raw handle for the bo_list? If
>>         we would
>>         > not return the raw handle we would not need the submit_raw2.
>>         >
>>         >> +
>>         >> +/**
>>         >> + * Destroys a BO list handle.
>>         >> + *
>>         >> + * \param   bo_list    - \c [in] BO list handle.
>>         >> + *
>>         >> + * \return   0 on success\n
>>         >> + *          <0 - Negative POSIX Error code
>>         >> + *
>>         >> + * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
>>         >> +*/
>>         >> +int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
>>         uint32_t bo_list);
>>         >> +
>>         >>   /**
>>         >>    * Creates a BO list handle for command submission.
>>         >>    *
>>         >>    * \param   dev     - \c [in] Device handle.
>>         >>    * See #amdgpu_device_initialize()
>>         >>    * \param   number_of_resources     - \c [in] Number of
>>         BOs in the list
>>         >>    * \param   resources          - \c [in] List of BO handles
>>         >>    * \param   resource_prios     - \c [in] Optional
>>         priority for each handle
>>         >>    * \param   result             - \c [out] Created BO
>>         list handle
>>         >>    *
>>         >>    * \return   0 on success\n
>>         >>    *          <0 - Negative POSIX Error code
>>         >>    *
>>         >> - * \sa amdgpu_bo_list_destroy()
>>         >> + * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
>>         >>   */
>>         >>   int amdgpu_bo_list_create(amdgpu_device_handle dev,
>>         >>                            uint32_t number_of_resources,
>>         >> amdgpu_bo_handle *resources,
>>         >>                            uint8_t *resource_prios,
>>         >> amdgpu_bo_list_handle *result);
>>         >>
>>         >>   /**
>>         >>    * Destroys a BO list handle.
>>         >>    *
>>         >> @@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
>>         >>   struct drm_amdgpu_cs_chunk_dep;
>>         >>   struct drm_amdgpu_cs_chunk_data;
>>         >>
>>         >>   int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>>         >>  amdgpu_context_handle context,
>>         >>  amdgpu_bo_list_handle bo_list_handle,
>>         >>                           int num_chunks,
>>         >>                           struct drm_amdgpu_cs_chunk *chunks,
>>         >>                           uint64_t *seq_no);
>>         >>
>>         >> +/**
>>         >> + * Submit raw command submission to the kernel with a raw
>>         BO list handle.
>>         >> + *
>>         >> + * \param   dev               - \c [in] device handle
>>         >> + * \param   context    - \c [in] context handle for
>>         context id
>>         >> + * \param   bo_list_handle - \c [in] raw bo list handle
>>         (0 for none)
>>         >> + * \param   num_chunks - \c [in] number of CS chunks to
>>         submit
>>         >> + * \param   chunks     - \c [in] array of CS chunks
>>         >> + * \param   seq_no     - \c [out] output sequence number
>>         for submission.
>>         >> + *
>>         >> + * \return   0 on success\n
>>         >> + *          <0 - Negative POSIX Error code
>>         >> + *
>>         >> + * \sa amdgpu_bo_list_create_raw(),
>>         amdgpu_bo_list_destroy_raw()
>>         >> + */
>>         >> +int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>>         >> +  amdgpu_context_handle context,
>>         >> +                         uint32_t bo_list_handle,
>>         >> +                         int num_chunks,
>>         >> +                         struct drm_amdgpu_cs_chunk *chunks,
>>         >> +                         uint64_t *seq_no);
>>         >> +
>>         >>   void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence
>>         *fence,
>>         >> struct drm_amdgpu_cs_chunk_dep *dep);
>>         >>   void amdgpu_cs_chunk_fence_info_to_data(struct
>>         amdgpu_cs_fence_info *fence_info,
>>         >>     struct drm_amdgpu_cs_chunk_data *data);
>>         >>
>>         >>   /**
>>         >>    * Reserve VMID
>>         >>    * \param   context - \c [in]  GPU Context
>>         >>    * \param   flags - \c [in]  TBD
>>         >>    *
>>         >> diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
>>         >> index c0f42e81..21bc73aa 100644
>>         >> --- a/amdgpu/amdgpu_bo.c
>>         >> +++ b/amdgpu/amdgpu_bo.c
>>         >> @@ -611,20 +611,56 @@ drm_public int
>>         amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
>>         >> pthread_mutex_lock(&dev->bo_table_mutex);
>>         >>          r = handle_table_insert(&dev->bo_handles,
>>         (*buf_handle)->handle,
>>         >> *buf_handle);
>>         >> pthread_mutex_unlock(&dev->bo_table_mutex);
>>         >>          if (r)
>>         >> amdgpu_bo_free(*buf_handle);
>>         >>   out:
>>         >>          return r;
>>         >>   }
>>         >>
>>         >> +drm_public int
>>         amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>>         >> +     uint32_t number_of_buffers,
>>         >> +     struct drm_amdgpu_bo_list_entry *buffers,
>>         >> +     uint32_t *result)
>>         >> +{
>>         >> +       union drm_amdgpu_bo_list args;
>>         >> +       int r;
>>         >> +
>>         >> +       memset(&args, 0, sizeof(args));
>>         >> +       args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
>>         >> +       args.in.bo_number = number_of_buffers;
>>         >> +       args.in.bo_info_size = sizeof(struct
>>         drm_amdgpu_bo_list_entry);
>>         >> +       args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
>>         >> +
>>         >> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>>         >> +  &args, sizeof(args));
>>         >> +       if (r)
>>         >> +               return r;
>>         >> +
>>         >> +       *result = args.out.list_handle;
>>         >> +       return 0;
>>         >> +}
>>         >> +
>>         >> +drm_public int
>>         amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
>>         >> +      uint32_t bo_list)
>>         >> +{
>>         >> +       union drm_amdgpu_bo_list args;
>>         >> +
>>         >> +       memset(&args, 0, sizeof(args));
>>         >> +       args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
>>         >> +       args.in.list_handle = bo_list;
>>         >> +
>>         >> +       return drmCommandWriteRead(dev->fd,
>>         DRM_AMDGPU_BO_LIST,
>>         >> + &args, sizeof(args));
>>         >> +}
>>         >> +
>>         >>   drm_public int
>>         amdgpu_bo_list_create(amdgpu_device_handle dev,
>>         >>  uint32_t number_of_resources,
>>         >>  amdgpu_bo_handle *resources,
>>         >>  uint8_t *resource_prios,
>>         >>  amdgpu_bo_list_handle *result)
>>         >>   {
>>         >>          struct drm_amdgpu_bo_list_entry *list;
>>         >>          union drm_amdgpu_bo_list args;
>>         >>          unsigned i;
>>         >>          int r;
>>         >> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
>>         >> index 3b8231aa..5bedf748 100644
>>         >> --- a/amdgpu/amdgpu_cs.c
>>         >> +++ b/amdgpu/amdgpu_cs.c
>>         >> @@ -724,20 +724,45 @@ drm_public int
>>         amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>>         >>          r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>>         >> &cs, sizeof(cs));
>>         >>          if (r)
>>         >>                  return r;
>>         >>
>>         >>          if (seq_no)
>>         >>                  *seq_no = cs.out.handle;
>>         >>          return 0;
>>         >>   }
>>         >>
>>         >> +drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle
>>         dev,
>>         >> + amdgpu_context_handle context,
>>         >> + uint32_t bo_list_handle,
>>         >> + int num_chunks,
>>         >> + struct drm_amdgpu_cs_chunk *chunks,
>>         >> + uint64_t *seq_no)
>>         >> +{
>>         >> +       union drm_amdgpu_cs cs = {0};
>>         >> +       uint64_t *chunk_array;
>>         >> +       int i, r;
>>         >> +
>>         >> +       chunk_array = alloca(sizeof(uint64_t) * num_chunks);
>>         >> +       for (i = 0; i < num_chunks; i++)
>>         >> +               chunk_array[i] =
>>         (uint64_t)(uintptr_t)&chunks[i];
>>         >> +       cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
>>         >> +       cs.in.ctx_id = context->id;
>>         >> +       cs.in.bo_list_handle = bo_list_handle;
>>         >> +       cs.in.num_chunks = num_chunks;
>>         >> +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>>         >> +  &cs, sizeof(cs));
>>         >> +       if (!r && seq_no)
>>         >> +               *seq_no = cs.out.handle;
>>         >> +       return r;
>>         >> +}
>>         >> +
>>         >>   drm_public void
>>         amdgpu_cs_chunk_fence_info_to_data(struct
>>         amdgpu_cs_fence_info *fence_info,
>>         >>     struct drm_amdgpu_cs_chunk_data *data)
>>         >>   {
>>         >>          data->fence_data.handle = fence_info->handle->handle;
>>         >>          data->fence_data.offset = fence_info->offset *
>>         sizeof(uint64_t);
>>         >>   }
>>         >>
>>         >>   drm_public void amdgpu_cs_chunk_fence_to_dep(struct
>>         amdgpu_cs_fence *fence,
>>         >>     struct drm_amdgpu_cs_chunk_dep *dep)
>>         >>   {
>>         >> --
>>         >> 2.17.1
>>         >>

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]         ` <CAAxE2A5chwbGmQN2yqVCfvF=TPvFMN6Qu-iFUuRW-zBVm=AN9w-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-16 15:15           ` Bas Nieuwenhuizen
       [not found]             ` <CAP+8YyFhXpM8eHEjWwy+yAs4s7A7FyrkYO8=FA0tf6M6n-ka+g-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Bas Nieuwenhuizen @ 2019-01-16 15:15 UTC (permalink / raw)
  To: Marek Olšák; +Cc: amd-gfx mailing list

On Wed, Jan 16, 2019 at 3:38 PM Marek Olšák <maraeo@gmail.com> wrote:
>
>
>
> On Wed, Jan 16, 2019, 7:46 AM Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl wrote:
>>
>> So random questions:
>>
>> 1) In this discussion it was mentioned that some Vulkan drivers still
>> use the bo_list interface. I think that implies radv as I think we're
>> still using bo_list. Is there any other API we should be using? (Also,
>> with VK_EXT_descriptor_indexing I suspect we'll be moving more towards
>> a global bo list instead of a cmd buffer one, as we cannot know all
>> the BOs referenced anymore, but not sure what end state here will be).
>>
>> 2) The other alternative mentioned was adding the buffers directly
>> into the submit ioctl. Is this the desired end state (though as above
>> I'm not sure how that works for vulkan)? If yes, what is the timeline
>> for this that we need something in the interim?
>
>
> Radeonsi already uses this.
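
For reference, the chunk-based path mentioned above passes the BO handles
inline with the submit ioctl through an AMDGPU_CHUNK_ID_BO_HANDLES chunk.
A rough, illustrative sketch, assuming the struct drm_amdgpu_bo_list_in
uAPI from amdgpu_drm.h (not part of this patch, error handling omitted):

#include <stdint.h>
#include <string.h>
#include <amdgpu_drm.h>

/* Describe the BO list inline in the CS ioctl instead of creating a
 * separate BO list object first. */
static void fill_bo_handles_chunk(struct drm_amdgpu_cs_chunk *chunk,
                                  struct drm_amdgpu_bo_list_in *list_in,
                                  struct drm_amdgpu_bo_list_entry *entries,
                                  uint32_t num_entries)
{
        memset(list_in, 0, sizeof(*list_in));
        list_in->operation = ~0;        /* no separate create/destroy */
        list_in->list_handle = ~0;
        list_in->bo_number = num_entries;
        list_in->bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        list_in->bo_info_ptr = (uint64_t)(uintptr_t)entries;

        chunk->chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
        chunk->length_dw = sizeof(*list_in) / 4;
        chunk->chunk_data = (uint64_t)(uintptr_t)list_in;
}
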
>
>>
>> 3) Did we measure any performance benefit?
>>
>> In general I'd like to ack the raw bo list creation function as
>> this interface seems easier to use. The two arrays thing has always
>> been kind of a pain when we want to use e.g. builtin sort functions to
>> make sure we have no duplicate BOs, but have some comments below.
>
>
> The reason amdgpu was slower than radeon was because of this inefficient bo list interface.
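
As an illustration of why a single entry array is easier to handle than the
two parallel arrays, duplicates can be weeded out with plain qsort() on the
drm_amdgpu_bo_list_entry array before creating the list (sketch only,
comparing on the raw KMS handle):

#include <stdint.h>
#include <stdlib.h>
#include <amdgpu_drm.h>

/* Sort entries by handle so duplicates become adjacent and easy to drop. */
static int compare_entries(const void *a, const void *b)
{
        const struct drm_amdgpu_bo_list_entry *ea = a;
        const struct drm_amdgpu_bo_list_entry *eb = b;

        return ea->bo_handle < eb->bo_handle ? -1 :
               ea->bo_handle > eb->bo_handle ?  1 : 0;
}

/* Returns the new entry count with duplicates removed. */
static uint32_t dedup_entries(struct drm_amdgpu_bo_list_entry *entries,
                              uint32_t count)
{
        uint32_t i, n;

        if (!count)
                return 0;

        qsort(entries, count, sizeof(*entries), compare_entries);
        for (i = 1, n = 1; i < count; i++) {
                if (entries[i].bo_handle != entries[n - 1].bo_handle)
                        entries[n++] = entries[i];
        }
        return n;
}
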
>
>>
>> On Mon, Jan 7, 2019 at 8:31 PM Marek Olšák <maraeo@gmail.com> wrote:
>> >
>> > From: Marek Olšák <marek.olsak@amd.com>
>> >
>> > ---
>> >  amdgpu/amdgpu-symbol-check |  3 ++
>> >  amdgpu/amdgpu.h            | 56 +++++++++++++++++++++++++++++++++++++-
>> >  amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
>> >  amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
>> >  4 files changed, 119 insertions(+), 1 deletion(-)
>> >
>> > diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
>> > index 6f5e0f95..96a44b40 100755
>> > --- a/amdgpu/amdgpu-symbol-check
>> > +++ b/amdgpu/amdgpu-symbol-check
>> > @@ -12,20 +12,22 @@ _edata
>> >  _end
>> >  _fini
>> >  _init
>> >  amdgpu_bo_alloc
>> >  amdgpu_bo_cpu_map
>> >  amdgpu_bo_cpu_unmap
>> >  amdgpu_bo_export
>> >  amdgpu_bo_free
>> >  amdgpu_bo_import
>> >  amdgpu_bo_inc_ref
>> > +amdgpu_bo_list_create_raw
>> > +amdgpu_bo_list_destroy_raw
>> >  amdgpu_bo_list_create
>> >  amdgpu_bo_list_destroy
>> >  amdgpu_bo_list_update
>> >  amdgpu_bo_query_info
>> >  amdgpu_bo_set_metadata
>> >  amdgpu_bo_va_op
>> >  amdgpu_bo_va_op_raw
>> >  amdgpu_bo_wait_for_idle
>> >  amdgpu_create_bo_from_user_mem
>> >  amdgpu_cs_chunk_fence_info_to_data
>> > @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
>> >  amdgpu_cs_destroy_syncobj
>> >  amdgpu_cs_export_syncobj
>> >  amdgpu_cs_fence_to_handle
>> >  amdgpu_cs_import_syncobj
>> >  amdgpu_cs_query_fence_status
>> >  amdgpu_cs_query_reset_state
>> >  amdgpu_query_sw_info
>> >  amdgpu_cs_signal_semaphore
>> >  amdgpu_cs_submit
>> >  amdgpu_cs_submit_raw
>> > +amdgpu_cs_submit_raw2
>> >  amdgpu_cs_syncobj_export_sync_file
>> >  amdgpu_cs_syncobj_import_sync_file
>> >  amdgpu_cs_syncobj_reset
>> >  amdgpu_cs_syncobj_signal
>> >  amdgpu_cs_syncobj_wait
>> >  amdgpu_cs_wait_fences
>> >  amdgpu_cs_wait_semaphore
>> >  amdgpu_device_deinitialize
>> >  amdgpu_device_initialize
>> >  amdgpu_find_bo_by_cpu_mapping
>> > diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
>> > index dc51659a..5b800033 100644
>> > --- a/amdgpu/amdgpu.h
>> > +++ b/amdgpu/amdgpu.h
>> > @@ -35,20 +35,21 @@
>> >  #define _AMDGPU_H_
>> >
>> >  #include <stdint.h>
>> >  #include <stdbool.h>
>> >
>> >  #ifdef __cplusplus
>> >  extern "C" {
>> >  #endif
>> >
>> >  struct drm_amdgpu_info_hw_ip;
>> > +struct drm_amdgpu_bo_list_entry;
>> >
>> >  /*--------------------------------------------------------------------------*/
>> >  /* --------------------------- Defines ------------------------------------ */
>> >  /*--------------------------------------------------------------------------*/
>> >
>> >  /**
>> >   * Define max. number of Command Buffers (IB) which could be sent to the single
>> >   * hardware IP to accommodate CE/DE requirements
>> >   *
>> >   * \sa amdgpu_cs_ib_info
>> > @@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
>> >   *                            and no GPU access is scheduled.
>> >   *                          1 GPU access is in fly or scheduled
>> >   *
>> >   * \return   0 - on success
>> >   *          <0 - Negative POSIX Error code
>> >   */
>> >  int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
>> >                             uint64_t timeout_ns,
>> >                             bool *buffer_busy);
>> >
>> > +/**
>> > + * Creates a BO list handle for command submission.
>> > + *
>> > + * \param   dev                        - \c [in] Device handle.
>> > + *                                See #amdgpu_device_initialize()
>> > + * \param   number_of_buffers  - \c [in] Number of BOs in the list
>> > + * \param   buffers            - \c [in] List of BO handles
>> > + * \param   result             - \c [out] Created BO list handle
>> > + *
>> > + * \return   0 on success\n
>> > + *          <0 - Negative POSIX Error code
>> > + *
>> > + * \sa amdgpu_bo_list_destroy_raw()
>> > +*/
>> > +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>> > +                             uint32_t number_of_buffers,
>> > +                             struct drm_amdgpu_bo_list_entry *buffers,
>> > +                             uint32_t *result);
>>
>> So AFAIU  drm_amdgpu_bo_list_entry takes a raw bo handle while we
>> never get a raw bo handle from libdrm_amdgpu. How are we supposed to
>> fill it in?
>
>
> This function returns it.

This function returns a bo_list handle right? I'm talking about the BO
handles in `buffers`, where do we get them?

>
>>
>> What do we win by having the raw handle for the bo_list? If we would
>> not return the raw handle we would not need the submit_raw2.
>
>
> One less malloc call and pointer indirection.
>
> Marek
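
To make the intended flow concrete, here is a minimal sketch of the raw path
built from the functions this patch adds; chunk setup is the same as for
amdgpu_cs_submit_raw(), and error handling is shortened:

#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Create a raw BO list, submit with it, destroy it again. The entries
 * array already holds KMS handles and priorities. */
static int submit_with_raw_list(amdgpu_device_handle dev,
                                amdgpu_context_handle ctx,
                                struct drm_amdgpu_bo_list_entry *entries,
                                uint32_t num_entries,
                                struct drm_amdgpu_cs_chunk *chunks,
                                int num_chunks,
                                uint64_t *seq_no)
{
        uint32_t bo_list;
        int r;

        r = amdgpu_bo_list_create_raw(dev, num_entries, entries, &bo_list);
        if (r)
                return r;

        r = amdgpu_cs_submit_raw2(dev, ctx, bo_list, num_chunks, chunks, seq_no);

        /* The list is only needed for the submit ioctl itself, so it can
         * be destroyed right after submission. */
        amdgpu_bo_list_destroy_raw(dev, bo_list);
        return r;
}
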
>
>>
>> > +
>> > +/**
>> > + * Destroys a BO list handle.
>> > + *
>> > + * \param   bo_list    - \c [in] BO list handle.
>> > + *
>> > + * \return   0 on success\n
>> > + *          <0 - Negative POSIX Error code
>> > + *
>> > + * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
>> > +*/
>> > +int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);
>> > +
>> >  /**
>> >   * Creates a BO list handle for command submission.
>> >   *
>> >   * \param   dev                        - \c [in] Device handle.
>> >   *                                See #amdgpu_device_initialize()
>> >   * \param   number_of_resources        - \c [in] Number of BOs in the list
>> >   * \param   resources          - \c [in] List of BO handles
>> >   * \param   resource_prios     - \c [in] Optional priority for each handle
>> >   * \param   result             - \c [out] Created BO list handle
>> >   *
>> >   * \return   0 on success\n
>> >   *          <0 - Negative POSIX Error code
>> >   *
>> > - * \sa amdgpu_bo_list_destroy()
>> > + * \sa amdgpu_bo_list_destroy(), amdgpu_cs_submit_raw2()
>> >  */
>> >  int amdgpu_bo_list_create(amdgpu_device_handle dev,
>> >                           uint32_t number_of_resources,
>> >                           amdgpu_bo_handle *resources,
>> >                           uint8_t *resource_prios,
>> >                           amdgpu_bo_list_handle *result);
>> >
>> >  /**
>> >   * Destroys a BO list handle.
>> >   *
>> > @@ -1580,20 +1612,42 @@ struct drm_amdgpu_cs_chunk;
>> >  struct drm_amdgpu_cs_chunk_dep;
>> >  struct drm_amdgpu_cs_chunk_data;
>> >
>> >  int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>> >                          amdgpu_context_handle context,
>> >                          amdgpu_bo_list_handle bo_list_handle,
>> >                          int num_chunks,
>> >                          struct drm_amdgpu_cs_chunk *chunks,
>> >                          uint64_t *seq_no);
>> >
>> > +/**
>> > + * Submit raw command submission to the kernel with a raw BO list handle.
>> > + *
>> > + * \param   dev               - \c [in] device handle
>> > + * \param   context    - \c [in] context handle for context id
>> > + * \param   bo_list_handle - \c [in] raw bo list handle (0 for none)
>> > + * \param   num_chunks - \c [in] number of CS chunks to submit
>> > + * \param   chunks     - \c [in] array of CS chunks
>> > + * \param   seq_no     - \c [out] output sequence number for submission.
>> > + *
>> > + * \return   0 on success\n
>> > + *          <0 - Negative POSIX Error code
>> > + *
>> > + * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
>> > + */
>> > +int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>> > +                         amdgpu_context_handle context,
>> > +                         uint32_t bo_list_handle,
>> > +                         int num_chunks,
>> > +                         struct drm_amdgpu_cs_chunk *chunks,
>> > +                         uint64_t *seq_no);
>> > +
>> >  void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>> >                                   struct drm_amdgpu_cs_chunk_dep *dep);
>> >  void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>> >                                         struct drm_amdgpu_cs_chunk_data *data);
>> >
>> >  /**
>> >   * Reserve VMID
>> >   * \param   context - \c [in]  GPU Context
>> >   * \param   flags - \c [in]  TBD
>> >   *
>> > diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
>> > index c0f42e81..21bc73aa 100644
>> > --- a/amdgpu/amdgpu_bo.c
>> > +++ b/amdgpu/amdgpu_bo.c
>> > @@ -611,20 +611,56 @@ drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
>> >         pthread_mutex_lock(&dev->bo_table_mutex);
>> >         r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
>> >                                 *buf_handle);
>> >         pthread_mutex_unlock(&dev->bo_table_mutex);
>> >         if (r)
>> >                 amdgpu_bo_free(*buf_handle);
>> >  out:
>> >         return r;
>> >  }
>> >
>> > +drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
>> > +                                        uint32_t number_of_buffers,
>> > +                                        struct drm_amdgpu_bo_list_entry *buffers,
>> > +                                        uint32_t *result)
>> > +{
>> > +       union drm_amdgpu_bo_list args;
>> > +       int r;
>> > +
>> > +       memset(&args, 0, sizeof(args));
>> > +       args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
>> > +       args.in.bo_number = number_of_buffers;
>> > +       args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
>> > +       args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
>> > +
>> > +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>> > +                               &args, sizeof(args));
>> > +       if (r)
>> > +               return r;
>> > +
>> > +       *result = args.out.list_handle;
>> > +       return 0;
>> > +}
>> > +
>> > +drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
>> > +                                         uint32_t bo_list)
>> > +{
>> > +       union drm_amdgpu_bo_list args;
>> > +
>> > +       memset(&args, 0, sizeof(args));
>> > +       args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
>> > +       args.in.list_handle = bo_list;
>> > +
>> > +       return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
>> > +                                  &args, sizeof(args));
>> > +}
>> > +
>> >  drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
>> >                                      uint32_t number_of_resources,
>> >                                      amdgpu_bo_handle *resources,
>> >                                      uint8_t *resource_prios,
>> >                                      amdgpu_bo_list_handle *result)
>> >  {
>> >         struct drm_amdgpu_bo_list_entry *list;
>> >         union drm_amdgpu_bo_list args;
>> >         unsigned i;
>> >         int r;
>> > diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
>> > index 3b8231aa..5bedf748 100644
>> > --- a/amdgpu/amdgpu_cs.c
>> > +++ b/amdgpu/amdgpu_cs.c
>> > @@ -724,20 +724,45 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
>> >         r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>> >                                 &cs, sizeof(cs));
>> >         if (r)
>> >                 return r;
>> >
>> >         if (seq_no)
>> >                 *seq_no = cs.out.handle;
>> >         return 0;
>> >  }
>> >
>> > +drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
>> > +                                    amdgpu_context_handle context,
>> > +                                    uint32_t bo_list_handle,
>> > +                                    int num_chunks,
>> > +                                    struct drm_amdgpu_cs_chunk *chunks,
>> > +                                    uint64_t *seq_no)
>> > +{
>> > +       union drm_amdgpu_cs cs = {0};
>> > +       uint64_t *chunk_array;
>> > +       int i, r;
>> > +
>> > +       chunk_array = alloca(sizeof(uint64_t) * num_chunks);
>> > +       for (i = 0; i < num_chunks; i++)
>> > +               chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
>> > +       cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
>> > +       cs.in.ctx_id = context->id;
>> > +       cs.in.bo_list_handle = bo_list_handle;
>> > +       cs.in.num_chunks = num_chunks;
>> > +       r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
>> > +                               &cs, sizeof(cs));
>> > +       if (!r && seq_no)
>> > +               *seq_no = cs.out.handle;
>> > +       return r;
>> > +}
>> > +
>> >  drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
>> >                                         struct drm_amdgpu_cs_chunk_data *data)
>> >  {
>> >         data->fence_data.handle = fence_info->handle->handle;
>> >         data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
>> >  }
>> >
>> >  drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
>> >                                         struct drm_amdgpu_cs_chunk_dep *dep)
>> >  {
>> > --
>> > 2.17.1
>> >

* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]             ` <CAP+8YyFhXpM8eHEjWwy+yAs4s7A7FyrkYO8=FA0tf6M6n-ka+g-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-16 16:14               ` Marek Olšák
  0 siblings, 0 replies; 23+ messages in thread
From: Marek Olšák @ 2019-01-16 16:14 UTC (permalink / raw)
  To: Bas Nieuwenhuizen; +Cc: amd-gfx mailing list



On Wed, Jan 16, 2019 at 10:15 AM Bas Nieuwenhuizen <bas-dldO88ZXqoXqqjsSq9zF6IRWq/SkRNHw@public.gmane.org>
wrote:

> On Wed, Jan 16, 2019 at 3:38 PM Marek Olšák <maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
> >
> >
> >
> > On Wed, Jan 16, 2019, 7:46 AM Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl
> wrote:
> >>
> >> So random questions:
> >>
> >> 1) In this discussion it was mentioned that some Vulkan drivers still
> >> use the bo_list interface. I think that implies radv as I think we're
> >> still using bo_list. Is there any other API we should be using? (Also,
> >> with VK_EXT_descriptor_indexing I suspect we'll be moving more towards
> >> a global bo list instead of a cmd buffer one, as we cannot know all
> >> the BOs referenced anymore, but not sure what end state here will be).
> >>
> >> 2) The other alternative mentioned was adding the buffers directly
> >> into the submit ioctl. Is this the desired end state (though as above
> >> I'm not sure how that works for vulkan)? If yes, what is the timeline
> >> for this that we need something in the interim?
> >
> >
> > Radeonsi already uses this.
> >
> >>
> >> 3) Did we measure any performance benefit?
> >>
> >> In general I'd like to ack the raw bo list creation function as
> >> this interface seems easier to use. The two arrays thing has always
> >> been kind of a pain when we want to use e.g. builtin sort functions to
> >> make sure we have no duplicate BOs, but have some comments below.
> >
> >
> > The reason amdgpu was slower than radeon was because of this inefficient
> bo list interface.
> >
> >>
> >> On Mon, Jan 7, 2019 at 8:31 PM Marek Olšák <maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
> >> >
> >> > From: Marek Olšák <marek.olsak-5C7GfCeVMHo@public.gmane.org>
> >> >
> >> > ---
> >> >  amdgpu/amdgpu-symbol-check |  3 ++
> >> >  amdgpu/amdgpu.h            | 56
> +++++++++++++++++++++++++++++++++++++-
> >> >  amdgpu/amdgpu_bo.c         | 36 ++++++++++++++++++++++++
> >> >  amdgpu/amdgpu_cs.c         | 25 +++++++++++++++++
> >> >  4 files changed, 119 insertions(+), 1 deletion(-)
> >> >
> >> > diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
> >> > index 6f5e0f95..96a44b40 100755
> >> > --- a/amdgpu/amdgpu-symbol-check
> >> > +++ b/amdgpu/amdgpu-symbol-check
> >> > @@ -12,20 +12,22 @@ _edata
> >> >  _end
> >> >  _fini
> >> >  _init
> >> >  amdgpu_bo_alloc
> >> >  amdgpu_bo_cpu_map
> >> >  amdgpu_bo_cpu_unmap
> >> >  amdgpu_bo_export
> >> >  amdgpu_bo_free
> >> >  amdgpu_bo_import
> >> >  amdgpu_bo_inc_ref
> >> > +amdgpu_bo_list_create_raw
> >> > +amdgpu_bo_list_destroy_raw
> >> >  amdgpu_bo_list_create
> >> >  amdgpu_bo_list_destroy
> >> >  amdgpu_bo_list_update
> >> >  amdgpu_bo_query_info
> >> >  amdgpu_bo_set_metadata
> >> >  amdgpu_bo_va_op
> >> >  amdgpu_bo_va_op_raw
> >> >  amdgpu_bo_wait_for_idle
> >> >  amdgpu_create_bo_from_user_mem
> >> >  amdgpu_cs_chunk_fence_info_to_data
> >> > @@ -40,20 +42,21 @@ amdgpu_cs_destroy_semaphore
> >> >  amdgpu_cs_destroy_syncobj
> >> >  amdgpu_cs_export_syncobj
> >> >  amdgpu_cs_fence_to_handle
> >> >  amdgpu_cs_import_syncobj
> >> >  amdgpu_cs_query_fence_status
> >> >  amdgpu_cs_query_reset_state
> >> >  amdgpu_query_sw_info
> >> >  amdgpu_cs_signal_semaphore
> >> >  amdgpu_cs_submit
> >> >  amdgpu_cs_submit_raw
> >> > +amdgpu_cs_submit_raw2
> >> >  amdgpu_cs_syncobj_export_sync_file
> >> >  amdgpu_cs_syncobj_import_sync_file
> >> >  amdgpu_cs_syncobj_reset
> >> >  amdgpu_cs_syncobj_signal
> >> >  amdgpu_cs_syncobj_wait
> >> >  amdgpu_cs_wait_fences
> >> >  amdgpu_cs_wait_semaphore
> >> >  amdgpu_device_deinitialize
> >> >  amdgpu_device_initialize
> >> >  amdgpu_find_bo_by_cpu_mapping
> >> > diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
> >> > index dc51659a..5b800033 100644
> >> > --- a/amdgpu/amdgpu.h
> >> > +++ b/amdgpu/amdgpu.h
> >> > @@ -35,20 +35,21 @@
> >> >  #define _AMDGPU_H_
> >> >
> >> >  #include <stdint.h>
> >> >  #include <stdbool.h>
> >> >
> >> >  #ifdef __cplusplus
> >> >  extern "C" {
> >> >  #endif
> >> >
> >> >  struct drm_amdgpu_info_hw_ip;
> >> > +struct drm_amdgpu_bo_list_entry;
> >> >
> >> >
> /*--------------------------------------------------------------------------*/
> >> >  /* --------------------------- Defines
> ------------------------------------ */
> >> >
> /*--------------------------------------------------------------------------*/
> >> >
> >> >  /**
> >> >   * Define max. number of Command Buffers (IB) which could be sent to
> the single
> >> >   * hardware IP to accommodate CE/DE requirements
> >> >   *
> >> >   * \sa amdgpu_cs_ib_info
> >> > @@ -767,34 +768,65 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle
> buf_handle);
> >> >   *                            and no GPU access is scheduled.
> >> >   *                          1 GPU access is in fly or scheduled
> >> >   *
> >> >   * \return   0 - on success
> >> >   *          <0 - Negative POSIX Error code
> >> >   */
> >> >  int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
> >> >                             uint64_t timeout_ns,
> >> >                             bool *buffer_busy);
> >> >
> >> > +/**
> >> > + * Creates a BO list handle for command submission.
> >> > + *
> >> > + * \param   dev                        - \c [in] Device handle.
> >> > + *                                See #amdgpu_device_initialize()
> >> > + * \param   number_of_buffers  - \c [in] Number of BOs in the list
> >> > + * \param   buffers            - \c [in] List of BO handles
> >> > + * \param   result             - \c [out] Created BO list handle
> >> > + *
> >> > + * \return   0 on success\n
> >> > + *          <0 - Negative POSIX Error code
> >> > + *
> >> > + * \sa amdgpu_bo_list_destroy_raw()
> >> > +*/
> >> > +int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
> >> > +                             uint32_t number_of_buffers,
> >> > +                             struct drm_amdgpu_bo_list_entry
> *buffers,
> >> > +                             uint32_t *result);
> >>
> >> So AFAIU  drm_amdgpu_bo_list_entry takes a raw bo handle while we
> >> never get a raw bo handle from libdrm_amdgpu. How are we supposed to
> >> fill it in?
> >
> >
> > This function returns it.
>
> This function returns a bo_list handle right? I'm talking about the BO
> handles in `buffers`, where do we get them?
>

Query KMS handles using the export function.

Marek
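
A minimal sketch of that, assuming the existing amdgpu_bo_export() entry
point with amdgpu_bo_handle_type_kms (the helper name is made up and not
part of this patch):

#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Fill one raw BO list entry from a libdrm_amdgpu BO handle by
 * exporting its KMS (GEM) handle. */
static int fill_bo_list_entry(amdgpu_bo_handle bo, uint32_t priority,
                              struct drm_amdgpu_bo_list_entry *entry)
{
        uint32_t kms_handle;
        int r;

        r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_kms, &kms_handle);
        if (r)
                return r;

        entry->bo_handle = kms_handle;
        entry->bo_priority = priority;
        return 0;
}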


* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                         ` <3d525127-825b-efab-b0c8-76550634d1c1-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-01-16 17:39                           ` Marek Olšák
       [not found]                             ` <CAAxE2A77=9-qpfUmt-PQf5=Gx72SLZ5QvNaSYLJ9D6o0fiEz4Q-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 23+ messages in thread
From: Marek Olšák @ 2019-01-16 17:39 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx mailing list, Bas Nieuwenhuizen



On Wed, Jan 16, 2019 at 9:43 AM Christian König <
ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> Am 16.01.19 um 15:39 schrieb Marek Olšák:
>
>
>
> On Wed, Jan 16, 2019, 9:34 AM Koenig, Christian <Christian.Koenig-5C7GfCeVMHo@public.gmane.org
> wrote:
>
>> Am 16.01.19 um 15:31 schrieb Marek Olšák:
>>
>>
>>
>> On Wed, Jan 16, 2019, 7:55 AM Christian König <
>> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org wrote:
>>
>>> Well if you ask me we should have the following interface for
>>> negotiating memory management with the kernel:
>>>
>>> 1. We have per process BOs which can't be shared between processes.
>>>
>>> Those are always valid and don't need to be mentioned in any BO list
>>> whatsoever.
>>>
>>> If we know that a per process BO is currently not in use, we can
>>> optionally tell the kernel to make memory management more
>>> efficient.
>>>
>>> In other words instead of a list of stuff which is used we send down to
>>> the kernel a list of stuff which is not used any more and that only when
>>> we know that it is necessary, e.g. when a game or application
>>> overcommits.
>>>
>>
>> Radeonsi doesn't use this because this approach caused performance
>> degradation and also drops BO priorities.
>>
>>
>> The performance degradation was mostly due to shortcomings in the LRU which
>> have by now been fixed.
>>
>> BO priorities are a different topic, but could be added to per VM BOs as
>> well.
>>
>
> What's the minimum drm version that contains the fixes?
>
>
> I've pushed the last optimization this morning. No idea when it really
> became useful, but the numbers from the closed source clients now look much
> better.
>
> We should probably test and bump the drm version when we are sure that
> this now works as expected.
>

We should, but AMD Mesa guys don't have any time.

Marek
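
For context, userspace would typically gate on such a bump with a plain DRM
version check; a sketch, where the minor number 30 is only a placeholder
since no value was settled on in this thread:

#include <stdbool.h>
#include <xf86drm.h>

/* The amdgpu KMS driver reports major version 3; "30" stands in for
 * whatever minor version the LRU fixes end up requiring. */
static bool kernel_has_per_vm_bo_lru_fixes(int fd)
{
        drmVersionPtr ver = drmGetVersion(fd);
        bool ok;

        if (!ver)
                return false;

        ok = ver->version_major > 3 ||
             (ver->version_major == 3 && ver->version_minor >= 30);
        drmFreeVersion(ver);
        return ok;
}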


* Re: [PATCH libdrm] amdgpu: add a faster BO list API
       [not found]                             ` <CAAxE2A77=9-qpfUmt-PQf5=Gx72SLZ5QvNaSYLJ9D6o0fiEz4Q-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-01-16 21:41                               ` Marek Olšák
  0 siblings, 0 replies; 23+ messages in thread
From: Marek Olšák @ 2019-01-16 21:41 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx mailing list, Bas Nieuwenhuizen



FYI, I've pushed the patch because it helps simplify our amdgpu winsys
code, and I already have code that depends on it that I don't want to rewrite.

Marek

On Wed, Jan 16, 2019 at 12:39 PM Marek Olšák <maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> On Wed, Jan 16, 2019 at 9:43 AM Christian König <
> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>
>> Am 16.01.19 um 15:39 schrieb Marek Olšák:
>>
>>
>>
>> On Wed, Jan 16, 2019, 9:34 AM Koenig, Christian <Christian.Koenig-urvtwAKJhsc@public.gmane.orgm
>> wrote:
>>
>>> Am 16.01.19 um 15:31 schrieb Marek Olšák:
>>>
>>>
>>>
>>> On Wed, Jan 16, 2019, 7:55 AM Christian König <
>>> ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org wrote:
>>>
>>>> Well if you ask me we should have the following interface for
>>>> negotiating memory management with the kernel:
>>>>
>>>> 1. We have per process BOs which can't be shared between processes.
>>>>
>>>> Those are always valid and don't need to be mentioned in any BO list
>>>> whatsoever.
>>>>
>>>> If we know that a per process BO is currently not in use, we can
>>>> optionally tell the kernel to make memory management more
>>>> efficient.
>>>>
>>>> In other words instead of a list of stuff which is used we send down to
>>>> the kernel a list of stuff which is not used any more and that only
>>>> when
>>>> we know that it is necessary, e.g. when a game or application
>>>> overcommits.
>>>>
>>>
>>> Radeonsi doesn't use this because this approach caused performance
>>> degradation and also drops BO priorities.
>>>
>>>
>>> The performance degradation was mostly due to shortcomings in the LRU which
>>> have by now been fixed.
>>>
>>> BO priorities are a different topic, but could be added to per VM BOs as
>>> well.
>>>
>>
>> What's the minimum drm version that contains the fixes?
>>
>>
>> I've pushed the last optimization this morning. No idea when it really
>> became useful, but the numbers from the closed source clients now look much
>> better.
>>
>> We should probably test and bump the drm version when we are sure that
>> this now works as expected.
>>
>
> We should, but AMD Mesa guys don't have any time.
>
> Marek
>


end of thread

Thread overview: 23+ messages
2019-01-07 19:31 [PATCH libdrm] amdgpu: add a faster BO list API Marek Olšák
     [not found] ` <20190107193104.4361-1-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-01-08  3:16   ` Zhou, David(ChunMing)
2019-01-09 10:28   ` Christian König
     [not found]     ` <a0a15ed6-eb1a-fbbe-7c1b-e3b9a64c1008-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-01-09 12:36       ` Marek Olšák
     [not found]         ` <CAAxE2A5M2WW6uPFo0a=+6ukbtgx5xHfkKUKOB9dgtB=qH88htQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-09 13:08           ` Christian König
     [not found]             ` <513ee137-7e99-c8fc-9e3b-e9077ead60a3-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-01-09 16:14               ` Marek Olšák
     [not found]                 ` <CAAxE2A5WYWCWAPA0K+vYDirtT6BV7QJoZSbEhh0Z57OF860mWQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-09 18:41                   ` Christian König
     [not found]                     ` <7f85afd6-b17b-1c50-ba03-c03dd6e9a362-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-01-09 23:39                       ` Marek Olšák
     [not found]                         ` <CAAxE2A5RjR=+2Rs5HDx1rV0ftdkZJX=6TQDkvRQSxfo++vnXOA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-10  9:15                           ` Koenig, Christian
     [not found]                             ` <e23ecf17-dbd4-ecef-f8fc-4dc849e7bddf-5C7GfCeVMHo@public.gmane.org>
2019-01-10 11:41                               ` Marek Olšák
     [not found]                                 ` <CAAxE2A6z_LLzzsLqsBtLyXcFTsLG_8FQc7=oN2p_nLJGoXbmgg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-10 11:51                                   ` Christian König
     [not found]                                     ` <7544c927-8b1f-c7d0-dd9d-21311ffca542-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-01-10 12:25                                       ` Marek Olšák
2019-01-16 12:46   ` Bas Nieuwenhuizen
     [not found]     ` <CAP+8YyFD+LxEQOLOY+mDC5v3OOyh1De2DcXK0sRtMW0t7z20SQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-16 12:55       ` Christian König
     [not found]         ` <74054b1e-5211-3bfc-ab0f-27e8604759d1-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-01-16 14:31           ` Marek Olšák
     [not found]             ` <CAAxE2A5ywFkNMtPbesU_kuSwKCmsPJ0D8wRFuSp14mpORcwYhg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-16 14:34               ` Koenig, Christian
     [not found]                 ` <a550562a-7d36-9acf-3143-217c507e667a-5C7GfCeVMHo@public.gmane.org>
2019-01-16 14:39                   ` Marek Olšák
     [not found]                     ` <CAAxE2A4k8JtkrS2XfgRdmYY3NVR4ges=Yqfh-TH9O=LnaVv02g-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-16 14:43                       ` Christian König
     [not found]                         ` <3d525127-825b-efab-b0c8-76550634d1c1-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-01-16 17:39                           ` Marek Olšák
     [not found]                             ` <CAAxE2A77=9-qpfUmt-PQf5=Gx72SLZ5QvNaSYLJ9D6o0fiEz4Q-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-16 21:41                               ` Marek Olšák
2019-01-16 14:37       ` Marek Olšák
     [not found]         ` <CAAxE2A5chwbGmQN2yqVCfvF=TPvFMN6Qu-iFUuRW-zBVm=AN9w-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-16 15:15           ` Bas Nieuwenhuizen
     [not found]             ` <CAP+8YyFhXpM8eHEjWwy+yAs4s7A7FyrkYO8=FA0tf6M6n-ka+g-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-01-16 16:14               ` Marek Olšák
