All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/4] share semaphore across process
@ 2016-08-18  7:55 Chunming Zhou
       [not found] ` <1471506959-905-1-git-send-email-David1.Zhou-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 9+ messages in thread
From: Chunming Zhou @ 2016-08-18  7:55 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Chunming Zhou, David.Mao-5C7GfCeVMHo

Now that the ctx id is valid globally, we can share a semaphore across processes based on the BO sharing mechanism.
That means we map the semaphore object to a BO and then share that BO with another process; the other process can
then get the semaphore object back from the shared BO.

Chunming Zhou (4):
  amdgpu: use drm_amdgpu_fence instead of amdgpu_cs_fence in semaphore
    structure
  amdgpu: add export/import semaphore apis
  amdgpu: add mutex for across process reason
  tests/amdgpu: add semaphore across process test

 amdgpu/amdgpu.h            |  40 ++++++++++++++
 amdgpu/amdgpu_cs.c         | 113 ++++++++++++++++++++++++++++++++------
 amdgpu/amdgpu_internal.h   |   6 ++-
 tests/amdgpu/basic_tests.c | 131 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 273 insertions(+), 17 deletions(-)

-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH 1/4] amdgpu: use drm_amdgpu_fence instead of amdgpu_cs_fence in semaphore structure
       [not found] ` <1471506959-905-1-git-send-email-David1.Zhou-5C7GfCeVMHo@public.gmane.org>
@ 2016-08-18  7:55   ` Chunming Zhou
  2016-08-18  7:55   ` [PATCH 2/4] amdgpu: add export/import semaphore apis Chunming Zhou
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 9+ messages in thread
From: Chunming Zhou @ 2016-08-18  7:55 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Chunming Zhou, David.Mao-5C7GfCeVMHo

The semaphore only needs to store the ctx id, not the context itself,
which brings convenience for sharing the semaphore across processes.

Change-Id: I46cf54c61ee6143a77d18510c3591bcc97fa8b24
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 amdgpu/amdgpu_cs.c       | 20 ++++++++++----------
 amdgpu/amdgpu_internal.h |  2 +-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index bb0d79b..c76a675 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -284,13 +284,13 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 		}
 		sem_count = 0;
 		LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, sem_list, list) {
-			struct amdgpu_cs_fence *info = &sem->signal_fence;
+			struct drm_amdgpu_fence *info = &sem->signal_fence;
 			struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
 			dep->ip_type = info->ip_type;
 			dep->ip_instance = info->ip_instance;
 			dep->ring = info->ring;
-			dep->ctx_id = info->context->id;
-			dep->handle = info->fence;
+			dep->ctx_id = info->ctx_id;
+			dep->handle = info->seq_no;
 
 			list_del(&sem->list);
 			amdgpu_cs_reset_sem(sem);
@@ -550,14 +550,14 @@ int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
 	if (NULL == sem)
 		return -EINVAL;
 	/* sem has been signaled */
-	if (sem->signal_fence.context)
+	if (sem->signal_fence.ctx_id)
 		return -EINVAL;
 	pthread_mutex_lock(&ctx->sequence_mutex);
-	sem->signal_fence.context = ctx;
+	sem->signal_fence.ctx_id = ctx->id;
 	sem->signal_fence.ip_type = ip_type;
 	sem->signal_fence.ip_instance = ip_instance;
 	sem->signal_fence.ring = ring;
-	sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
+	sem->signal_fence.seq_no = ctx->last_seq[ip_type][ip_instance][ring];
 	update_references(NULL, &sem->refcount);
 	pthread_mutex_unlock(&ctx->sequence_mutex);
 	return 0;
@@ -578,7 +578,7 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
 	if (NULL == sem)
 		return -EINVAL;
 	/* must signal first */
-	if (NULL == sem->signal_fence.context)
+	if (0 == sem->signal_fence.ctx_id)
 		return -EINVAL;
 
 	pthread_mutex_lock(&ctx->sequence_mutex);
@@ -591,14 +591,14 @@ static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
 {
 	if (NULL == sem)
 		return -EINVAL;
-	if (NULL == sem->signal_fence.context)
+	if (0 == sem->signal_fence.ctx_id)
 		return -EINVAL;
 
-	sem->signal_fence.context = NULL;;
+	sem->signal_fence.ctx_id = 0;
 	sem->signal_fence.ip_type = 0;
 	sem->signal_fence.ip_instance = 0;
 	sem->signal_fence.ring = 0;
-	sem->signal_fence.fence = 0;
+	sem->signal_fence.seq_no = 0;
 
 	return 0;
 }
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 1160a12..ccc85d7 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -133,7 +133,7 @@ struct amdgpu_context {
 struct amdgpu_semaphore {
 	atomic_t refcount;
 	struct list_head list;
-	struct amdgpu_cs_fence signal_fence;
+	struct drm_amdgpu_fence signal_fence;
 };
 
 /**
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 2/4] amdgpu: add export/import semaphore apis
       [not found] ` <1471506959-905-1-git-send-email-David1.Zhou-5C7GfCeVMHo@public.gmane.org>
  2016-08-18  7:55   ` [PATCH 1/4] amdgpu: use drm_amdgpu_fence instead of amdgpu_cs_fence in semaphore structure Chunming Zhou
@ 2016-08-18  7:55   ` Chunming Zhou
       [not found]     ` <1471506959-905-3-git-send-email-David1.Zhou-5C7GfCeVMHo@public.gmane.org>
  2016-08-18  7:55   ` [PATCH 3/4] amdgpu: add mutex for across process reason Chunming Zhou
  2016-08-18  7:55   ` [PATCH 4/4] tests/amdgpu: add semaphore across process test Chunming Zhou
  3 siblings, 1 reply; 9+ messages in thread
From: Chunming Zhou @ 2016-08-18  7:55 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Chunming Zhou, David.Mao-5C7GfCeVMHo

They are used for sharing semaphores across processes.

Change-Id: I262adf10913d365bb93368b492e69140af522c64
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 amdgpu/amdgpu.h          | 40 ++++++++++++++++++++++++++++++
 amdgpu/amdgpu_cs.c       | 63 ++++++++++++++++++++++++++++++++++++++++++++++--
 amdgpu/amdgpu_internal.h |  2 ++
 3 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 693d841..e716855 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -1379,6 +1379,19 @@ int amdgpu_svm_commit(amdgpu_va_handle va_range_handle,
 int amdgpu_svm_uncommit(amdgpu_va_handle va_range_handle);
 
 /**
+ *  create shared semaphore
+ *
+ * \param amdgpu_device_handle
+ * \param   sem	   - \c [out] semaphore handle
+ *
+ * \return   0 on success\n
+ *          <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
+				      amdgpu_semaphore_handle *sem);
+
+/**
  *  create semaphore
  *
  * \param   sem	   - \c [out] semaphore handle
@@ -1439,6 +1452,33 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
 int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
 
 /**
+ *  export semaphore
+ *
+ * \param   sem	    - \c [in] semaphore handle
+ * \param   shared_handle    - \c [out] handle across process
+ *
+ * \return   0 on success\n
+ *          <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
+			       uint32_t *shared_handle);
+/**
+ *  import semaphore
+ *
+ * \param   sem	    - \c [out] semaphore handle
+ * \param   dev	    - \c [in] device handle
+ * \param   shared_handle    - \c [in] handle across process
+ *
+ * \return   0 on success\n
+ *          <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
+			       amdgpu_device_handle dev,
+			       uint32_t shared_handle);
+
+/**
  *  Get the ASIC marketing name
  *
  * \param   dev         - \c [in] Device handle. See #amdgpu_device_initialize()
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index c76a675..a3ff34e 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -518,6 +518,34 @@ int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
 	return r;
 }
 
+int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
+				      amdgpu_semaphore_handle *sem)
+{
+	struct amdgpu_bo_alloc_request req = {0};
+	amdgpu_bo_handle buf_handle;
+	int r;
+
+	if (NULL == sem)
+		return -EINVAL;
+
+	req.alloc_size = sizeof(struct amdgpu_semaphore);
+	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
+
+	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
+	if (r)
+		return r;
+	r = amdgpu_bo_cpu_map(buf_handle, sem);
+	if (r) {
+		amdgpu_bo_free(buf_handle);
+		return r;
+	}
+	(*sem)->buf_handle = buf_handle;
+	atomic_set(&(*sem)->refcount, 1);
+	(*sem)->version = 2;
+
+	return 0;
+}
+
 int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
 {
 	struct amdgpu_semaphore *gpu_semaphore;
@@ -529,6 +557,7 @@ int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
 	if (NULL == gpu_semaphore)
 		return -ENOMEM;
 
+	gpu_semaphore->version = 1;
 	atomic_set(&gpu_semaphore->refcount, 1);
 	*sem = gpu_semaphore;
 
@@ -608,8 +637,15 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
 	if (NULL == sem)
 		return -EINVAL;
 
-	if (update_references(&sem->refcount, NULL))
-		free(sem);
+	if (update_references(&sem->refcount, NULL)) {
+		if (sem->version == 1)
+			free(sem);
+		else if (sem->version == 2) {
+			amdgpu_bo_handle buf_handle = sem->buf_handle;
+			amdgpu_bo_cpu_unmap(buf_handle);
+			amdgpu_bo_free(buf_handle);
+		}
+	}
 	return 0;
 }
 
@@ -618,4 +654,27 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
 	return amdgpu_cs_unreference_sem(sem);
 }
 
+int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
+			       uint32_t *shared_handle)
+{
+	return amdgpu_bo_export(sem->buf_handle,
+				amdgpu_bo_handle_type_dma_buf_fd,
+				shared_handle);
+
+}
+
+int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
+			       amdgpu_device_handle dev, uint32_t shared_handle)
+{
+	struct amdgpu_bo_import_result output;
+	int r;
+
+	r = amdgpu_bo_import(dev,
+			     amdgpu_bo_handle_type_dma_buf_fd,
+			     shared_handle,
+			     &output);
+	if (r)
+		return r;
 
+	return amdgpu_bo_cpu_map(output.buf_handle, sem);
+}
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index ccc85d7..7c422da 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -134,6 +134,8 @@ struct amdgpu_semaphore {
 	atomic_t refcount;
 	struct list_head list;
 	struct drm_amdgpu_fence signal_fence;
+	amdgpu_bo_handle buf_handle;
+	uint32_t version;
 };
 
 /**
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 3/4] amdgpu: add mutex for across process reason
       [not found] ` <1471506959-905-1-git-send-email-David1.Zhou-5C7GfCeVMHo@public.gmane.org>
  2016-08-18  7:55   ` [PATCH 1/4] amdgpu: use drm_amdgpu_fence instead of amdgpu_cs_fence in semaphore structure Chunming Zhou
  2016-08-18  7:55   ` [PATCH 2/4] amdgpu: add export/import semaphore apis Chunming Zhou
@ 2016-08-18  7:55   ` Chunming Zhou
  2016-08-18  7:55   ` [PATCH 4/4] tests/amdgpu: add semaphore across process test Chunming Zhou
  3 siblings, 0 replies; 9+ messages in thread
From: Chunming Zhou @ 2016-08-18  7:55 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Chunming Zhou, David.Mao-5C7GfCeVMHo

Change-Id: I69b5c8d86f9e1ed32bb20e899b74ad4146c0e988
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 amdgpu/amdgpu_cs.c       | 36 +++++++++++++++++++++++++++++-------
 amdgpu/amdgpu_internal.h |  2 ++
 2 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index a3ff34e..ecbd4d7 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -541,6 +541,12 @@ int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
 	}
 	(*sem)->buf_handle = buf_handle;
 	atomic_set(&(*sem)->refcount, 1);
+	r = sem_init(&(*sem)->mutex, 1, 1);
+	if (r) {
+		amdgpu_bo_cpu_unmap(buf_handle);
+		amdgpu_bo_free(buf_handle);
+		return r;
+	}
 	(*sem)->version = 2;
 
 	return 0;
@@ -549,6 +555,7 @@ int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
 int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
 {
 	struct amdgpu_semaphore *gpu_semaphore;
+	int r;
 
 	if (NULL == sem)
 		return -EINVAL;
@@ -559,6 +566,11 @@ int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
 
 	gpu_semaphore->version = 1;
 	atomic_set(&gpu_semaphore->refcount, 1);
+	r = sem_init(&gpu_semaphore->mutex, 1, 1);
+	if (r) {
+		free(gpu_semaphore);
+		return r;
+	}
 	*sem = gpu_semaphore;
 
 	return 0;
@@ -581,6 +593,7 @@ int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
 	/* sem has been signaled */
 	if (sem->signal_fence.ctx_id)
 		return -EINVAL;
+	sem_wait(&sem->mutex);
 	pthread_mutex_lock(&ctx->sequence_mutex);
 	sem->signal_fence.ctx_id = ctx->id;
 	sem->signal_fence.ip_type = ip_type;
@@ -589,6 +602,7 @@ int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
 	sem->signal_fence.seq_no = ctx->last_seq[ip_type][ip_instance][ring];
 	update_references(NULL, &sem->refcount);
 	pthread_mutex_unlock(&ctx->sequence_mutex);
+	sem_post(&sem->mutex);
 	return 0;
 }
 
@@ -609,10 +623,11 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
 	/* must signal first */
 	if (0 == sem->signal_fence.ctx_id)
 		return -EINVAL;
-
+	sem_wait(&sem->mutex);
 	pthread_mutex_lock(&ctx->sequence_mutex);
 	list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
 	pthread_mutex_unlock(&ctx->sequence_mutex);
+	sem_post(&sem->mutex);
 	return 0;
 }
 
@@ -622,13 +637,13 @@ static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
 		return -EINVAL;
 	if (0 == sem->signal_fence.ctx_id)
 		return -EINVAL;
-
+	sem_wait(&sem->mutex);
 	sem->signal_fence.ctx_id = 0;
 	sem->signal_fence.ip_type = 0;
 	sem->signal_fence.ip_instance = 0;
 	sem->signal_fence.ring = 0;
 	sem->signal_fence.seq_no = 0;
-
+	sem_post(&sem->mutex);
 	return 0;
 }
 
@@ -636,8 +651,9 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
 {
 	if (NULL == sem)
 		return -EINVAL;
-
+	sem_wait(&sem->mutex);
 	if (update_references(&sem->refcount, NULL)) {
+		sem_post(&sem->mutex);
 		if (sem->version == 1)
 			free(sem);
 		else if (sem->version == 2) {
@@ -645,7 +661,9 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
 			amdgpu_bo_cpu_unmap(buf_handle);
 			amdgpu_bo_free(buf_handle);
 		}
+		return 0;
 	}
+	sem_post(&sem->mutex);
 	return 0;
 }
 
@@ -657,10 +675,14 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
 int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
 			       uint32_t *shared_handle)
 {
-	return amdgpu_bo_export(sem->buf_handle,
-				amdgpu_bo_handle_type_dma_buf_fd,
-				shared_handle);
+	int r;
 
+	sem_wait(&sem->mutex);
+	r = amdgpu_bo_export(sem->buf_handle,
+			     amdgpu_bo_handle_type_dma_buf_fd,
+			     shared_handle);
+	sem_post(&sem->mutex);
+	return r;
 }
 
 int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 7c422da..f035786 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -31,6 +31,7 @@
 
 #include <assert.h>
 #include <pthread.h>
+#include <semaphore.h>
 
 #include "libdrm_macros.h"
 #include "xf86atomic.h"
@@ -135,6 +136,7 @@ struct amdgpu_semaphore {
 	struct list_head list;
 	struct drm_amdgpu_fence signal_fence;
 	amdgpu_bo_handle buf_handle;
+	sem_t mutex;
 	uint32_t version;
 };
 
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 4/4] tests/amdgpu: add semaphore across process test
       [not found] ` <1471506959-905-1-git-send-email-David1.Zhou-5C7GfCeVMHo@public.gmane.org>
                     ` (2 preceding siblings ...)
  2016-08-18  7:55   ` [PATCH 3/4] amdgpu: add mutex for across process reason Chunming Zhou
@ 2016-08-18  7:55   ` Chunming Zhou
  3 siblings, 0 replies; 9+ messages in thread
From: Chunming Zhou @ 2016-08-18  7:55 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Chunming Zhou, David.Mao-5C7GfCeVMHo

Change-Id: I6e8c8cfa1a05f51f3c03670baea68ed6da94fa11
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 tests/amdgpu/basic_tests.c | 131 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 131 insertions(+)

diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index 02e863a..c7da54d 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -47,6 +47,7 @@ static void amdgpu_command_submission_sdma(void);
 static void amdgpu_command_submission_multi_fence(void);
 static void amdgpu_userptr_test(void);
 static void amdgpu_semaphore_test(void);
+static void amdgpu_semaphore_across_process_test(void);
 static void amdgpu_svm_test(void);
 static void amdgpu_multi_svm_test(void);
 static void amdgpu_va_range_test(void);
@@ -60,6 +61,7 @@ CU_TestInfo basic_tests[] = {
 	{ "Command submission Test (SDMA)", amdgpu_command_submission_sdma },
 	{ "Command submission Test (Multi-fence)", amdgpu_command_submission_multi_fence },
 	{ "SW semaphore Test",  amdgpu_semaphore_test },
+	{ "SW semaphore across process Test",  amdgpu_semaphore_across_process_test },
 	{ "VA range Test", amdgpu_va_range_test},
 	{ "SVM Test", amdgpu_svm_test },
 	{ "SVM Test (multi-GPUs)", amdgpu_multi_svm_test },
@@ -515,6 +517,135 @@ static void amdgpu_command_submission_gfx(void)
 	amdgpu_command_submission_gfx_shared_ib();
 }
 
+static void amdgpu_semaphore_across_process_test(void)
+{
+	struct amdgpu_context  *context_handle;
+	amdgpu_semaphore_handle sem;
+	amdgpu_bo_handle ib_result_handle[2];
+	void *ib_result_cpu[2];
+	uint64_t ib_result_mc_address[2];
+	struct amdgpu_cs_request ibs_request[2] = {0};
+	struct amdgpu_cs_ib_info ib_info[2] = {0};
+	struct amdgpu_cs_fence fence_status = {0};
+	uint32_t *ptr;
+	uint32_t expired;
+	uint32_t shared_handle;
+	amdgpu_bo_list_handle bo_list[2];
+	amdgpu_va_handle va_handle[2];
+	int r, i, pid;
+
+	r = amdgpu_cs_create_semaphore_object(device_handle, &sem);
+	CU_ASSERT_EQUAL(r, 0);
+	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
+				    AMDGPU_GEM_DOMAIN_GTT, 0,
+				    &ib_result_handle[0], &ib_result_cpu[0],
+				    &ib_result_mc_address[0], &va_handle[0]);
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_get_bo_list(device_handle, ib_result_handle[0],
+			       NULL, &bo_list[0]);
+	CU_ASSERT_EQUAL(r, 0);
+
+	ptr = ib_result_cpu[0];
+	ptr[0] = SDMA_NOP;
+	ib_info[0].ib_mc_address = ib_result_mc_address[0];
+	ib_info[0].size = 1;
+
+	ibs_request[0].ip_type = AMDGPU_HW_IP_DMA;
+	ibs_request[0].number_of_ibs = 1;
+	ibs_request[0].ibs = &ib_info[0];
+	ibs_request[0].resources = bo_list[0];
+	ibs_request[0].fence_info.handle = NULL;
+	r = amdgpu_cs_submit(context_handle, 0,&ibs_request[0], 1);
+	CU_ASSERT_EQUAL(r, 0);
+	r = amdgpu_cs_signal_semaphore(context_handle, AMDGPU_HW_IP_DMA, 0, 0, sem);
+	CU_ASSERT_EQUAL(r, 0);
+	r = amdgpu_cs_export_semaphore(sem, &shared_handle);
+	CU_ASSERT_EQUAL(r, 0);
+
+	pid = fork();
+	/* child process */
+	if (pid == 0) {
+		amdgpu_device_handle child_device_handle;
+		uint32_t  child_major_version;
+		uint32_t  child_minor_version;
+		amdgpu_semaphore_handle child_sem;
+		amdgpu_context_handle context_handle1;
+
+		r = amdgpu_device_initialize(drm_amdgpu[0], &child_major_version,
+					     &child_minor_version, &child_device_handle);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_bo_alloc_and_map(child_device_handle, 4096, 4096,
+					    AMDGPU_GEM_DOMAIN_GTT, 0,
+					    &ib_result_handle[1], &ib_result_cpu[1],
+					    &ib_result_mc_address[1], &va_handle[1]);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_get_bo_list(child_device_handle, ib_result_handle[1],
+				       NULL, &bo_list[1]);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_cs_import_semaphore(&child_sem, child_device_handle, shared_handle);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_cs_ctx_create(child_device_handle, &context_handle1);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_cs_wait_semaphore(context_handle1, AMDGPU_HW_IP_GFX, 0, 0, child_sem);
+		CU_ASSERT_EQUAL(r, 0);
+		ptr = ib_result_cpu[1];
+		ptr[0] = GFX_COMPUTE_NOP;
+		ib_info[1].ib_mc_address = ib_result_mc_address[1];
+		ib_info[1].size = 1;
+
+		ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
+		ibs_request[1].number_of_ibs = 1;
+		ibs_request[1].ibs = &ib_info[1];
+		ibs_request[1].resources = bo_list[1];
+		ibs_request[1].fence_info.handle = NULL;
+
+		r = amdgpu_cs_submit(context_handle1, 0,&ibs_request[1], 1);
+		CU_ASSERT_EQUAL(r, 0);
+
+		fence_status.context = context_handle1;
+		fence_status.ip_type = AMDGPU_HW_IP_GFX;
+		fence_status.fence = ibs_request[1].seq_no;
+		r = amdgpu_cs_query_fence_status(&fence_status,
+						 500000000, 0, &expired);
+		CU_ASSERT_EQUAL(r, 0);
+		CU_ASSERT_EQUAL(expired, true);
+
+		r = amdgpu_bo_unmap_and_free(ib_result_handle[1], va_handle[1],
+					     ib_result_mc_address[1], 4096);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_bo_list_destroy(bo_list[1]);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_cs_ctx_free(context_handle1);
+		CU_ASSERT_EQUAL(r, 0);
+
+		amdgpu_device_deinitialize(child_device_handle);
+	} else {
+		r = amdgpu_bo_unmap_and_free(ib_result_handle[0], va_handle[0],
+					     ib_result_mc_address[0], 4096);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_bo_list_destroy(bo_list[0]);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_cs_ctx_free(context_handle);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_cs_destroy_semaphore(sem);
+		CU_ASSERT_EQUAL(r, 0);
+	}
+}
+
 static void amdgpu_semaphore_test(void)
 {
 	amdgpu_context_handle context_handle[2];
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/4] amdgpu: add export/import semaphore apis
       [not found]     ` <1471506959-905-3-git-send-email-David1.Zhou-5C7GfCeVMHo@public.gmane.org>
@ 2016-08-21  6:23       ` Edward O'Callaghan
       [not found]         ` <cf458d97-d40a-d363-b5c4-2ae45dde9179-dczkZgxz+BNUPWh3PAxdjQ@public.gmane.org>
  0 siblings, 1 reply; 9+ messages in thread
From: Edward O'Callaghan @ 2016-08-21  6:23 UTC (permalink / raw)
  To: Chunming Zhou, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: David.Mao-5C7GfCeVMHo


[-- Attachment #1.1.1: Type: text/plain, Size: 5872 bytes --]



On 08/18/2016 05:55 PM, Chunming Zhou wrote:
> They are used for sharing semaphore across process.
> 
> Change-Id: I262adf10913d365bb93368b492e69140af522c64
> Signed-off-by: Chunming Zhou <David1.Zhou-5C7GfCeVMHo@public.gmane.org>
> ---
>  amdgpu/amdgpu.h          | 40 ++++++++++++++++++++++++++++++
>  amdgpu/amdgpu_cs.c       | 63 ++++++++++++++++++++++++++++++++++++++++++++++--
>  amdgpu/amdgpu_internal.h |  2 ++
>  3 files changed, 103 insertions(+), 2 deletions(-)
> 
> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
> index 693d841..e716855 100644
> --- a/amdgpu/amdgpu.h
> +++ b/amdgpu/amdgpu.h
> @@ -1379,6 +1379,19 @@ int amdgpu_svm_commit(amdgpu_va_handle va_range_handle,
>  int amdgpu_svm_uncommit(amdgpu_va_handle va_range_handle);
>  
>  /**
> + *  create shared semaphore
> + *
> + * \param amdgpu_device_handle
> + * \param   sem	   - \c [out] semaphore handle
> + *
> + * \return   0 on success\n
> + *          <0 - Negative POSIX Error code
> + *
> +*/
> +int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
> +				      amdgpu_semaphore_handle *sem);
> +
> +/**
>   *  create semaphore
>   *
>   * \param   sem	   - \c [out] semaphore handle
> @@ -1439,6 +1452,33 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
>  int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
>  
>  /**
> + *  export semaphore
> + *
> + * \param   sem	    - \c [in] semaphore handle
> + * \param   shared_handle    - \c [out] handle across process
> + *
> + * \return   0 on success\n
> + *          <0 - Negative POSIX Error code
> + *
> +*/
> +int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
> +			       uint32_t *shared_handle);
> +/**
> + *  import semaphore
> + *
> + * \param   sem	    - \c [out] semaphore handle
> + * \param   dev	    - \c [in] device handle
> + * \param   shared_handle    - \c [in] handle across process
> + *
> + * \return   0 on success\n
> + *          <0 - Negative POSIX Error code
> + *
> +*/
> +int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
> +			       amdgpu_device_handle dev,
> +			       uint32_t shared_handle);
> +
> +/**
>   *  Get the ASIC marketing name
>   *
>   * \param   dev         - \c [in] Device handle. See #amdgpu_device_initialize()
> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
> index c76a675..a3ff34e 100644
> --- a/amdgpu/amdgpu_cs.c
> +++ b/amdgpu/amdgpu_cs.c
> @@ -518,6 +518,34 @@ int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
>  	return r;
>  }
>  
> +int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
> +				      amdgpu_semaphore_handle *sem)
> +{
> +	struct amdgpu_bo_alloc_request req = {0};
> +	amdgpu_bo_handle buf_handle;
> +	int r;
> +
> +	if (NULL == sem)

Since sem is ** then should we not check that *both* sem && *sem are
non-NULL too? Further you can use the canonical form of (!sem)

> +		return -EINVAL;
> +
> +	req.alloc_size = sizeof(struct amdgpu_semaphore);
> +	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
> +
> +	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
> +	if (r)
> +		return r;
> +	r = amdgpu_bo_cpu_map(buf_handle, sem);
> +	if (r) {
> +		amdgpu_bo_free(buf_handle);
> +		return r;
> +	}
> +	(*sem)->buf_handle = buf_handle;
> +	atomic_set(&(*sem)->refcount, 1);

Hi Chunming,

When/where was 'amdgpu_semaphore_handle' introduced? I am not sure I
like pointers being hidden behind typedef's as opaque types this can
lead to really really bad things.. I only noticed sem was a ** because
of the weird looking deference then address operator application, then
deference again here, &(*sem)->..

Cheers,
Edward.

> +	(*sem)->version = 2;
> +
> +	return 0;
> +}
> +
>  int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
>  {
>  	struct amdgpu_semaphore *gpu_semaphore;
> @@ -529,6 +557,7 @@ int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
>  	if (NULL == gpu_semaphore)
>  		return -ENOMEM;
>  
> +	gpu_semaphore->version = 1;
>  	atomic_set(&gpu_semaphore->refcount, 1);
>  	*sem = gpu_semaphore;
>  
> @@ -608,8 +637,15 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
>  	if (NULL == sem)
>  		return -EINVAL;
>  
> -	if (update_references(&sem->refcount, NULL))
> -		free(sem);
> +	if (update_references(&sem->refcount, NULL)) {
> +		if (sem->version == 1)
> +			free(sem);
> +		else if (sem->version == 2) {
> +			amdgpu_bo_handle buf_handle = sem->buf_handle;
> +			amdgpu_bo_cpu_unmap(buf_handle);
> +			amdgpu_bo_free(buf_handle);
> +		}
> +	}
>  	return 0;
>  }
>  
> @@ -618,4 +654,27 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
>  	return amdgpu_cs_unreference_sem(sem);
>  }
>  
> +int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
> +			       uint32_t *shared_handle)
> +{
> +	return amdgpu_bo_export(sem->buf_handle,
> +				amdgpu_bo_handle_type_dma_buf_fd,
> +				shared_handle);
> +
> +}
> +
> +int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
> +			       amdgpu_device_handle dev, uint32_t shared_handle)
> +{
> +	struct amdgpu_bo_import_result output;
> +	int r;
> +
> +	r = amdgpu_bo_import(dev,
> +			     amdgpu_bo_handle_type_dma_buf_fd,
> +			     shared_handle,
> +			     &output);
> +	if (r)
> +		return r;
>  
> +	return amdgpu_bo_cpu_map(output.buf_handle, sem);
> +}
> diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
> index ccc85d7..7c422da 100644
> --- a/amdgpu/amdgpu_internal.h
> +++ b/amdgpu/amdgpu_internal.h
> @@ -134,6 +134,8 @@ struct amdgpu_semaphore {
>  	atomic_t refcount;
>  	struct list_head list;
>  	struct drm_amdgpu_fence signal_fence;
> +	amdgpu_bo_handle buf_handle;
> +	uint32_t version;
>  };
>  
>  /**
> 


[-- Attachment #1.2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 819 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/4] amdgpu: add export/import semaphore apis
       [not found]         ` <cf458d97-d40a-d363-b5c4-2ae45dde9179-dczkZgxz+BNUPWh3PAxdjQ@public.gmane.org>
@ 2016-08-22  2:42           ` zhoucm1
       [not found]             ` <57BA6689.2010700-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 9+ messages in thread
From: zhoucm1 @ 2016-08-22  2:42 UTC (permalink / raw)
  To: Edward O'Callaghan, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: David.Mao-5C7GfCeVMHo


[-- Attachment #1.1: Type: text/plain, Size: 6425 bytes --]



On 2016年08月21日 14:23, Edward O'Callaghan wrote:
>
> On 08/18/2016 05:55 PM, Chunming Zhou wrote:
>> They are used for sharing semaphore across process.
>>
>> Change-Id: I262adf10913d365bb93368b492e69140af522c64
>> Signed-off-by: Chunming Zhou <David1.Zhou-5C7GfCeVMHo@public.gmane.org>
>> ---
>>   amdgpu/amdgpu.h          | 40 ++++++++++++++++++++++++++++++
>>   amdgpu/amdgpu_cs.c       | 63 ++++++++++++++++++++++++++++++++++++++++++++++--
>>   amdgpu/amdgpu_internal.h |  2 ++
>>   3 files changed, 103 insertions(+), 2 deletions(-)
>>
>> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
>> index 693d841..e716855 100644
>> --- a/amdgpu/amdgpu.h
>> +++ b/amdgpu/amdgpu.h
>> @@ -1379,6 +1379,19 @@ int amdgpu_svm_commit(amdgpu_va_handle va_range_handle,
>>   int amdgpu_svm_uncommit(amdgpu_va_handle va_range_handle);
>>   
>>   /**
>> + *  create shared semaphore
>> + *
>> + * \param amdgpu_device_handle
>> + * \param   sem	   - \c [out] semaphore handle
>> + *
>> + * \return   0 on success\n
>> + *          <0 - Negative POSIX Error code
>> + *
>> +*/
>> +int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
>> +				      amdgpu_semaphore_handle *sem);
>> +
>> +/**
>>    *  create semaphore
>>    *
>>    * \param   sem	   - \c [out] semaphore handle
>> @@ -1439,6 +1452,33 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
>>   int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
>>   
>>   /**
>> + *  export semaphore
>> + *
>> + * \param   sem	    - \c [in] semaphore handle
>> + * \param   shared_handle    - \c [out] handle across process
>> + *
>> + * \return   0 on success\n
>> + *          <0 - Negative POSIX Error code
>> + *
>> +*/
>> +int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
>> +			       uint32_t *shared_handle);
>> +/**
>> + *  import semaphore
>> + *
>> + * \param   sem	    - \c [out] semaphore handle
>> + * \param   dev	    - \c [in] device handle
>> + * \param   shared_handle    - \c [in] handle across process
>> + *
>> + * \return   0 on success\n
>> + *          <0 - Negative POSIX Error code
>> + *
>> +*/
>> +int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
>> +			       amdgpu_device_handle dev,
>> +			       uint32_t shared_handle);
>> +
>> +/**
>>    *  Get the ASIC marketing name
>>    *
>>    * \param   dev         - \c [in] Device handle. See #amdgpu_device_initialize()
>> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
>> index c76a675..a3ff34e 100644
>> --- a/amdgpu/amdgpu_cs.c
>> +++ b/amdgpu/amdgpu_cs.c
>> @@ -518,6 +518,34 @@ int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
>>   	return r;
>>   }
>>   
>> +int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
>> +				      amdgpu_semaphore_handle *sem)
>> +{
>> +	struct amdgpu_bo_alloc_request req = {0};
>> +	amdgpu_bo_handle buf_handle;
>> +	int r;
>> +
>> +	if (NULL == sem)
> Since sem is ** then should we not check that *both* sem && *sem are
> non-NULL too? Further you can use the canonical form of (!sem)
>
>> +		return -EINVAL;
>> +
>> +	req.alloc_size = sizeof(struct amdgpu_semaphore);
>> +	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
>> +
>> +	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
>> +	if (r)
>> +		return r;
>> +	r = amdgpu_bo_cpu_map(buf_handle, sem);
>> +	if (r) {
>> +		amdgpu_bo_free(buf_handle);
>> +		return r;
>> +	}
>> +	(*sem)->buf_handle = buf_handle;
>> +	atomic_set(&(*sem)->refcount, 1);
> Hi Chunming,
>
> When/where was 'amdgpu_semaphore_handle' introduced? I am not sure I
> like pointers being hidden behind typedef's as opaque types this can
> lead to really really bad things.. I only noticed sem was a ** because
> of the weird looking dereference then address operator application, then
> dereference again here, &(*sem)->..
Hi Edward,
the semaphore was introduced last year; it is a wrapper around the existing
dependency mechanism.
Yeah, this whole sharing semaphore approach has been NAKed by Christian.
So we can skip this series now, we are going to use sync_file instead.

Thanks,
David Zhou

>
> Cheers,
> Edward.
>
>> +	(*sem)->version = 2;
>> +
>> +	return 0;
>> +}
>> +
>>   int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
>>   {
>>   	struct amdgpu_semaphore *gpu_semaphore;
>> @@ -529,6 +557,7 @@ int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
>>   	if (NULL == gpu_semaphore)
>>   		return -ENOMEM;
>>   
>> +	gpu_semaphore->version = 1;
>>   	atomic_set(&gpu_semaphore->refcount, 1);
>>   	*sem = gpu_semaphore;
>>   
>> @@ -608,8 +637,15 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
>>   	if (NULL == sem)
>>   		return -EINVAL;
>>   
>> -	if (update_references(&sem->refcount, NULL))
>> -		free(sem);
>> +	if (update_references(&sem->refcount, NULL)) {
>> +		if (sem->version == 1)
>> +			free(sem);
>> +		else if (sem->version == 2) {
>> +			amdgpu_bo_handle buf_handle = sem->buf_handle;
>> +			amdgpu_bo_cpu_unmap(buf_handle);
>> +			amdgpu_bo_free(buf_handle);
>> +		}
>> +	}
>>   	return 0;
>>   }
>>   
>> @@ -618,4 +654,27 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
>>   	return amdgpu_cs_unreference_sem(sem);
>>   }
>>   
>> +int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
>> +			       uint32_t *shared_handle)
>> +{
>> +	return amdgpu_bo_export(sem->buf_handle,
>> +				amdgpu_bo_handle_type_dma_buf_fd,
>> +				shared_handle);
>> +
>> +}
>> +
>> +int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
>> +			       amdgpu_device_handle dev, uint32_t shared_handle)
>> +{
>> +	struct amdgpu_bo_import_result output;
>> +	int r;
>> +
>> +	r = amdgpu_bo_import(dev,
>> +			     amdgpu_bo_handle_type_dma_buf_fd,
>> +			     shared_handle,
>> +			     &output);
>> +	if (r)
>> +		return r;
>>   
>> +	return amdgpu_bo_cpu_map(output.buf_handle, sem);
>> +}
>> diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
>> index ccc85d7..7c422da 100644
>> --- a/amdgpu/amdgpu_internal.h
>> +++ b/amdgpu/amdgpu_internal.h
>> @@ -134,6 +134,8 @@ struct amdgpu_semaphore {
>>   	atomic_t refcount;
>>   	struct list_head list;
>>   	struct drm_amdgpu_fence signal_fence;
>> +	amdgpu_bo_handle buf_handle;
>> +	uint32_t version;
>>   };
>>   
>>   /**
>>
>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[-- Attachment #1.2: Type: text/html, Size: 7295 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/4] amdgpu: add export/import semaphore apis
       [not found]             ` <57BA6689.2010700-5C7GfCeVMHo@public.gmane.org>
@ 2016-08-22 12:41               ` Edward O'Callaghan
       [not found]                 ` <414bf811-0e72-6f79-350f-8995a2b5167c-dczkZgxz+BNUPWh3PAxdjQ@public.gmane.org>
  0 siblings, 1 reply; 9+ messages in thread
From: Edward O'Callaghan @ 2016-08-22 12:41 UTC (permalink / raw)
  To: zhoucm1, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: David.Mao-5C7GfCeVMHo


[-- Attachment #1.1.1: Type: text/plain, Size: 7229 bytes --]



On 08/22/2016 12:42 PM, zhoucm1 wrote:
> 
> 
> On 2016年08月21日 14:23, Edward O'Callaghan wrote:
>>
>> On 08/18/2016 05:55 PM, Chunming Zhou wrote:
>>> They are used for sharing semaphore across process.
>>>
>>> Change-Id: I262adf10913d365bb93368b492e69140af522c64
>>> Signed-off-by: Chunming Zhou <David1.Zhou-5C7GfCeVMHo@public.gmane.org>
>>> ---
>>>  amdgpu/amdgpu.h          | 40 ++++++++++++++++++++++++++++++
>>>  amdgpu/amdgpu_cs.c       | 63 ++++++++++++++++++++++++++++++++++++++++++++++--
>>>  amdgpu/amdgpu_internal.h |  2 ++
>>>  3 files changed, 103 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
>>> index 693d841..e716855 100644
>>> --- a/amdgpu/amdgpu.h
>>> +++ b/amdgpu/amdgpu.h
>>> @@ -1379,6 +1379,19 @@ int amdgpu_svm_commit(amdgpu_va_handle va_range_handle,
>>>  int amdgpu_svm_uncommit(amdgpu_va_handle va_range_handle);
>>>  
>>>  /**
>>> + *  create shared semaphore
>>> + *
>>> + * \param amdgpu_device_handle
>>> + * \param   sem	   - \c [out] semaphore handle
>>> + *
>>> + * \return   0 on success\n
>>> + *          <0 - Negative POSIX Error code
>>> + *
>>> +*/
>>> +int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
>>> +				      amdgpu_semaphore_handle *sem);
>>> +
>>> +/**
>>>   *  create semaphore
>>>   *
>>>   * \param   sem	   - \c [out] semaphore handle
>>> @@ -1439,6 +1452,33 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
>>>  int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
>>>  
>>>  /**
>>> + *  export semaphore
>>> + *
>>> + * \param   sem	    - \c [in] semaphore handle
>>> + * \param   shared_handle    - \c [out] handle across process
>>> + *
>>> + * \return   0 on success\n
>>> + *          <0 - Negative POSIX Error code
>>> + *
>>> +*/
>>> +int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
>>> +			       uint32_t *shared_handle);
>>> +/**
>>> + *  import semaphore
>>> + *
>>> + * \param   sem	    - \c [out] semaphore handle
>>> + * \param   dev	    - \c [in] device handle
>>> + * \param   shared_handle    - \c [in] handle across process
>>> + *
>>> + * \return   0 on success\n
>>> + *          <0 - Negative POSIX Error code
>>> + *
>>> +*/
>>> +int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
>>> +			       amdgpu_device_handle dev,
>>> +			       uint32_t shared_handle);
>>> +
>>> +/**
>>>   *  Get the ASIC marketing name
>>>   *
>>>   * \param   dev         - \c [in] Device handle. See #amdgpu_device_initialize()
>>> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
>>> index c76a675..a3ff34e 100644
>>> --- a/amdgpu/amdgpu_cs.c
>>> +++ b/amdgpu/amdgpu_cs.c
>>> @@ -518,6 +518,34 @@ int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
>>>  	return r;
>>>  }
>>>  
>>> +int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
>>> +				      amdgpu_semaphore_handle *sem)
>>> +{
>>> +	struct amdgpu_bo_alloc_request req = {0};
>>> +	amdgpu_bo_handle buf_handle;
>>> +	int r;
>>> +
>>> +	if (NULL == sem)
>> Since sem is ** then should we not check that *both* sem && *sem are
>> non-NULL too? Further you can use the canonical form of (!sem)
>>
>>> +		return -EINVAL;
>>> +
>>> +	req.alloc_size = sizeof(struct amdgpu_semaphore);
>>> +	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
>>> +
>>> +	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
>>> +	if (r)
>>> +		return r;
>>> +	r = amdgpu_bo_cpu_map(buf_handle, sem);
>>> +	if (r) {
>>> +		amdgpu_bo_free(buf_handle);
>>> +		return r;
>>> +	}
>>> +	(*sem)->buf_handle = buf_handle;
>>> +	atomic_set(&(*sem)->refcount, 1);
>> Hi Chunming,
>>
>> When/where was 'amdgpu_semaphore_handle' introduced? I am not sure I
>> like pointers being hidden behind typedef's as opaque types this can
>> lead to really really bad things.. I only noticed sem was a ** because
>> of the weird looking dereference then address operator application, then
>> dereference again here, &(*sem)->..
> Hi Edward,
> the semaphore was introduced last year; it is a wrapper around the existing
> dependency mechanism.

Well no, I mean 'amdgpu_semaphore_handle' in particular as I didn't see
that in mainline. Where was it introduced?

> Yeah, this whole sharing semaphore approach has been NAKed by Christian.
> So we can skip this series now, we are going to use sync_file instead.

No worries.

Kind Regards,
Edward.

> 
> Thanks,
> David Zhou
> 
>>
>> Cheers,
>> Edward.
>>
>>> +	(*sem)->version = 2;
>>> +
>>> +	return 0;
>>> +}
>>> +
>>>  int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
>>>  {
>>>  	struct amdgpu_semaphore *gpu_semaphore;
>>> @@ -529,6 +557,7 @@ int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
>>>  	if (NULL == gpu_semaphore)
>>>  		return -ENOMEM;
>>>  
>>> +	gpu_semaphore->version = 1;
>>>  	atomic_set(&gpu_semaphore->refcount, 1);
>>>  	*sem = gpu_semaphore;
>>>  
>>> @@ -608,8 +637,15 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
>>>  	if (NULL == sem)
>>>  		return -EINVAL;
>>>  
>>> -	if (update_references(&sem->refcount, NULL))
>>> -		free(sem);
>>> +	if (update_references(&sem->refcount, NULL)) {
>>> +		if (sem->version == 1)
>>> +			free(sem);
>>> +		else if (sem->version == 2) {
>>> +			amdgpu_bo_handle buf_handle = sem->buf_handle;
>>> +			amdgpu_bo_cpu_unmap(buf_handle);
>>> +			amdgpu_bo_free(buf_handle);
>>> +		}
>>> +	}
>>>  	return 0;
>>>  }
>>>  
>>> @@ -618,4 +654,27 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
>>>  	return amdgpu_cs_unreference_sem(sem);
>>>  }
>>>  
>>> +int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
>>> +			       uint32_t *shared_handle)
>>> +{
>>> +	return amdgpu_bo_export(sem->buf_handle,
>>> +				amdgpu_bo_handle_type_dma_buf_fd,
>>> +				shared_handle);
>>> +
>>> +}
>>> +
>>> +int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
>>> +			       amdgpu_device_handle dev, uint32_t shared_handle)
>>> +{
>>> +	struct amdgpu_bo_import_result output;
>>> +	int r;
>>> +
>>> +	r = amdgpu_bo_import(dev,
>>> +			     amdgpu_bo_handle_type_dma_buf_fd,
>>> +			     shared_handle,
>>> +			     &output);
>>> +	if (r)
>>> +		return r;
>>>  
>>> +	return amdgpu_bo_cpu_map(output.buf_handle, sem);
>>> +}
>>> diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
>>> index ccc85d7..7c422da 100644
>>> --- a/amdgpu/amdgpu_internal.h
>>> +++ b/amdgpu/amdgpu_internal.h
>>> @@ -134,6 +134,8 @@ struct amdgpu_semaphore {
>>>  	atomic_t refcount;
>>>  	struct list_head list;
>>>  	struct drm_amdgpu_fence signal_fence;
>>> +	amdgpu_bo_handle buf_handle;
>>> +	uint32_t version;
>>>  };
>>>  
>>>  /**
>>>
>>
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> 
> 
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> 


[-- Attachment #1.2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 819 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/4] amdgpu: add export/import semaphore apis
       [not found]                 ` <414bf811-0e72-6f79-350f-8995a2b5167c-dczkZgxz+BNUPWh3PAxdjQ@public.gmane.org>
@ 2016-08-22 12:44                   ` Edward O'Callaghan
  0 siblings, 0 replies; 9+ messages in thread
From: Edward O'Callaghan @ 2016-08-22 12:44 UTC (permalink / raw)
  To: zhoucm1, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: David.Mao-5C7GfCeVMHo


[-- Attachment #1.1.1: Type: text/plain, Size: 7803 bytes --]



On 08/22/2016 10:41 PM, Edward O'Callaghan wrote:
> 
> 
> On 08/22/2016 12:42 PM, zhoucm1 wrote:
>>
>>
>> On 2016年08月21日 14:23, Edward O'Callaghan wrote:
>>>
>>> On 08/18/2016 05:55 PM, Chunming Zhou wrote:
>>>> They are used for sharing semaphore across process.
>>>>
>>>> Change-Id: I262adf10913d365bb93368b492e69140af522c64
>>>> Signed-off-by: Chunming Zhou <David1.Zhou-5C7GfCeVMHo@public.gmane.org>
>>>> ---
>>>>  amdgpu/amdgpu.h          | 40 ++++++++++++++++++++++++++++++
>>>>  amdgpu/amdgpu_cs.c       | 63 ++++++++++++++++++++++++++++++++++++++++++++++--
>>>>  amdgpu/amdgpu_internal.h |  2 ++
>>>>  3 files changed, 103 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
>>>> index 693d841..e716855 100644
>>>> --- a/amdgpu/amdgpu.h
>>>> +++ b/amdgpu/amdgpu.h
>>>> @@ -1379,6 +1379,19 @@ int amdgpu_svm_commit(amdgpu_va_handle va_range_handle,
>>>>  int amdgpu_svm_uncommit(amdgpu_va_handle va_range_handle);
>>>>  
>>>>  /**
>>>> + *  create shared semaphore
>>>> + *
>>>> + * \param amdgpu_device_handle
>>>> + * \param   sem	   - \c [out] semaphore handle
>>>> + *
>>>> + * \return   0 on success\n
>>>> + *          <0 - Negative POSIX Error code
>>>> + *
>>>> +*/
>>>> +int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
>>>> +				      amdgpu_semaphore_handle *sem);
>>>> +
>>>> +/**
>>>>   *  create semaphore
>>>>   *
>>>>   * \param   sem	   - \c [out] semaphore handle
>>>> @@ -1439,6 +1452,33 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
>>>>  int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
>>>>  
>>>>  /**
>>>> + *  export semaphore
>>>> + *
>>>> + * \param   sem	    - \c [in] semaphore handle
>>>> + * \param   shared_handle    - \c [out] handle across process
>>>> + *
>>>> + * \return   0 on success\n
>>>> + *          <0 - Negative POSIX Error code
>>>> + *
>>>> +*/
>>>> +int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
>>>> +			       uint32_t *shared_handle);
>>>> +/**
>>>> + *  import semaphore
>>>> + *
>>>> + * \param   sem	    - \c [out] semaphore handle
>>>> + * \param   dev	    - \c [in] device handle
>>>> + * \param   shared_handle    - \c [in] handle across process
>>>> + *
>>>> + * \return   0 on success\n
>>>> + *          <0 - Negative POSIX Error code
>>>> + *
>>>> +*/
>>>> +int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
>>>> +			       amdgpu_device_handle dev,
>>>> +			       uint32_t shared_handle);
>>>> +
>>>> +/**
>>>>   *  Get the ASIC marketing name
>>>>   *
>>>>   * \param   dev         - \c [in] Device handle. See #amdgpu_device_initialize()
>>>> diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
>>>> index c76a675..a3ff34e 100644
>>>> --- a/amdgpu/amdgpu_cs.c
>>>> +++ b/amdgpu/amdgpu_cs.c
>>>> @@ -518,6 +518,34 @@ int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
>>>>  	return r;
>>>>  }
>>>>  
>>>> +int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
>>>> +				      amdgpu_semaphore_handle *sem)
>>>> +{
>>>> +	struct amdgpu_bo_alloc_request req = {0};
>>>> +	amdgpu_bo_handle buf_handle;
>>>> +	int r;
>>>> +
>>>> +	if (NULL == sem)
>>> Since sem is ** then should we not check that *both* sem && *sem are
>>> non-NULL too? Further you can use the canonical form of (!sem)
>>>
>>>> +		return -EINVAL;
>>>> +
>>>> +	req.alloc_size = sizeof(struct amdgpu_semaphore);
>>>> +	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
>>>> +
>>>> +	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
>>>> +	if (r)
>>>> +		return r;
>>>> +	r = amdgpu_bo_cpu_map(buf_handle, sem);
>>>> +	if (r) {
>>>> +		amdgpu_bo_free(buf_handle);
>>>> +		return r;
>>>> +	}
>>>> +	(*sem)->buf_handle = buf_handle;
>>>> +	atomic_set(&(*sem)->refcount, 1);
>>> Hi Chunming,
>>>
>>> When/where was 'amdgpu_semaphore_handle' introduced? I am not sure I
>>> like pointers being hidden behind typedef's as opaque types this can
>>> lead to really really bad things.. I only noticed sem was a ** because
>>> of the weird looking dereference then address operator application, then
>>> dereference again here, &(*sem)->..
>> Hi Edward,
>> the semaphore was introduced last year; it is a wrapper around the existing
>> dependency mechanism.
> 
> Well no, I mean 'amdgpu_semaphore_handle' in particular as I didn't see
> that in mainline. Where was it introduced?

woops, ignore me. 11pm should be in bed :p .... sorry for the noise.

> 
>> Yeah, this whole sharing semaphore approach has been NAKed by Christian.
>> So we can skip this series now, we are going to use sync_file instead.
> 
> No worries.
> 
> Kind Regards,
> Edward.
> 
>>
>> Thanks,
>> David Zhou
>>
>>>
>>> Cheers,
>>> Edward.
>>>
>>>> +	(*sem)->version = 2;
>>>> +
>>>> +	return 0;
>>>> +}
>>>> +
>>>>  int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
>>>>  {
>>>>  	struct amdgpu_semaphore *gpu_semaphore;
>>>> @@ -529,6 +557,7 @@ int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
>>>>  	if (NULL == gpu_semaphore)
>>>>  		return -ENOMEM;
>>>>  
>>>> +	gpu_semaphore->version = 1;
>>>>  	atomic_set(&gpu_semaphore->refcount, 1);
>>>>  	*sem = gpu_semaphore;
>>>>  
>>>> @@ -608,8 +637,15 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
>>>>  	if (NULL == sem)
>>>>  		return -EINVAL;
>>>>  
>>>> -	if (update_references(&sem->refcount, NULL))
>>>> -		free(sem);
>>>> +	if (update_references(&sem->refcount, NULL)) {
>>>> +		if (sem->version == 1)
>>>> +			free(sem);
>>>> +		else if (sem->version == 2) {
>>>> +			amdgpu_bo_handle buf_handle = sem->buf_handle;
>>>> +			amdgpu_bo_cpu_unmap(buf_handle);
>>>> +			amdgpu_bo_free(buf_handle);
>>>> +		}
>>>> +	}
>>>>  	return 0;
>>>>  }
>>>>  
>>>> @@ -618,4 +654,27 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
>>>>  	return amdgpu_cs_unreference_sem(sem);
>>>>  }
>>>>  
>>>> +int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
>>>> +			       uint32_t *shared_handle)
>>>> +{
>>>> +	return amdgpu_bo_export(sem->buf_handle,
>>>> +				amdgpu_bo_handle_type_dma_buf_fd,
>>>> +				shared_handle);
>>>> +
>>>> +}
>>>> +
>>>> +int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
>>>> +			       amdgpu_device_handle dev, uint32_t shared_handle)
>>>> +{
>>>> +	struct amdgpu_bo_import_result output;
>>>> +	int r;
>>>> +
>>>> +	r = amdgpu_bo_import(dev,
>>>> +			     amdgpu_bo_handle_type_dma_buf_fd,
>>>> +			     shared_handle,
>>>> +			     &output);
>>>> +	if (r)
>>>> +		return r;
>>>>  
>>>> +	return amdgpu_bo_cpu_map(output.buf_handle, sem);
>>>> +}
>>>> diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
>>>> index ccc85d7..7c422da 100644
>>>> --- a/amdgpu/amdgpu_internal.h
>>>> +++ b/amdgpu/amdgpu_internal.h
>>>> @@ -134,6 +134,8 @@ struct amdgpu_semaphore {
>>>>  	atomic_t refcount;
>>>>  	struct list_head list;
>>>>  	struct drm_amdgpu_fence signal_fence;
>>>> +	amdgpu_bo_handle buf_handle;
>>>> +	uint32_t version;
>>>>  };
>>>>  
>>>>  /**
>>>>
>>>
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>
>>
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>
> 
> 
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> 


[-- Attachment #1.2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 819 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2016-08-22 12:44 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-08-18  7:55 [PATCH 0/4] share semaphore across process Chunming Zhou
     [not found] ` <1471506959-905-1-git-send-email-David1.Zhou-5C7GfCeVMHo@public.gmane.org>
2016-08-18  7:55   ` [PATCH 1/4] amdgpu: use drm_amdgpu_fence instead of amdgpu_cs_fence in semaphore structure Chunming Zhou
2016-08-18  7:55   ` [PATCH 2/4] amdgpu: add export/import semaphore apis Chunming Zhou
     [not found]     ` <1471506959-905-3-git-send-email-David1.Zhou-5C7GfCeVMHo@public.gmane.org>
2016-08-21  6:23       ` Edward O'Callaghan
     [not found]         ` <cf458d97-d40a-d363-b5c4-2ae45dde9179-dczkZgxz+BNUPWh3PAxdjQ@public.gmane.org>
2016-08-22  2:42           ` zhoucm1
     [not found]             ` <57BA6689.2010700-5C7GfCeVMHo@public.gmane.org>
2016-08-22 12:41               ` Edward O'Callaghan
     [not found]                 ` <414bf811-0e72-6f79-350f-8995a2b5167c-dczkZgxz+BNUPWh3PAxdjQ@public.gmane.org>
2016-08-22 12:44                   ` Edward O'Callaghan
2016-08-18  7:55   ` [PATCH 3/4] amdgpu: add mutex for across process reason Chunming Zhou
2016-08-18  7:55   ` [PATCH 4/4] tests/amdgpu: add semaphore across process test Chunming Zhou

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.