From: Dave Airlie <airlied@gmail.com>
To: dri-devel@lists.freedesktop.org, amd-gfx@lists.freedesktop.org
Subject: [PATCH libdrm] libdrm/amdgpu: add interface for kernel semaphores
Date: Tue, 27 Jun 2017 07:19:22 +1000
Message-ID: <20170626211922.7493-1-airlied@gmail.com>

From: Dave Airlie <airlied@redhat.com>

This adds the corresponding libdrm code to use the new kernel
semaphore interfaces.

This will be used by radv to implement shared semaphores.
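
As an illustration (not part of this patch), the expected lifecycle
of a shared semaphore looks roughly like this; dev, dev2 and the
fd-passing transport are placeholders, and error handling is omitted:

  /* Process A: create a semaphore and export it as an fd. */
  amdgpu_sem_handle sem;
  int fd;

  amdgpu_cs_create_sem(dev, &sem);
  amdgpu_cs_export_sem(dev, sem, &fd);
  /* ...pass fd to process B, e.g. over a unix socket (SCM_RIGHTS)... */

  /* Process B: turn the received fd back into a semaphore handle. */
  amdgpu_sem_handle sem2;

  amdgpu_cs_import_sem(dev2, fd, &sem2);
  /* ...use sem2 in submissions, then release it... */
  amdgpu_cs_destroy_sem(dev2, sem2);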

TODO: Version checks.
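
At submission time, each request can name semaphores to wait on and
to signal through the new amdgpu_cs_submit_sem() entry point. A
minimal sketch, assuming context and ibs_request are set up as for a
regular amdgpu_cs_submit() call, and sem_in/sem_out are handles
obtained from amdgpu_cs_create_sem() or amdgpu_cs_import_sem():

  uint32_t wait_sems[] = { sem_in };
  uint32_t signal_sems[] = { sem_out };
  struct amdgpu_cs_request_sem sem_req = {
          .number_of_wait_sem = 1,
          .wait_sems = wait_sems,
          .number_of_signal_sem = 1,
          .signal_sems = signal_sems,
  };

  /* Waits on sem_in before the IBs execute, signals sem_out after. */
  int r = amdgpu_cs_submit_sem(context, 0, &ibs_request, &sem_req, 1);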

Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 amdgpu/amdgpu.h          |  28 +++++++++
 amdgpu/amdgpu_cs.c       | 161 ++++++++++++++++++++++++++++++++++++++++++++---
 include/drm/amdgpu_drm.h |  28 +++++++++
 3 files changed, 208 insertions(+), 9 deletions(-)

diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 7b26a04..747e248 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -129,6 +129,8 @@ typedef struct amdgpu_va *amdgpu_va_handle;
  */
 typedef struct amdgpu_semaphore *amdgpu_semaphore_handle;
 
+typedef uint32_t amdgpu_sem_handle;
+
 /*--------------------------------------------------------------------------*/
 /* -------------------------- Structures ---------------------------------- */
 /*--------------------------------------------------------------------------*/
@@ -365,6 +367,16 @@ struct amdgpu_cs_request {
 	struct amdgpu_cs_fence_info fence_info;
 };
 
+struct amdgpu_cs_request_sem {
+	/*
+	 * Semaphores to wait on before and signal after this submission.
+	 */
+	uint32_t number_of_wait_sem;
+	uint32_t *wait_sems;
+	uint32_t number_of_signal_sem;
+	uint32_t *signal_sems;
+};
+
 /**
  * Structure which provide information about GPU VM MC Address space
  * alignments requirements
@@ -882,6 +894,12 @@ int amdgpu_cs_submit(amdgpu_context_handle context,
 		     struct amdgpu_cs_request *ibs_request,
 		     uint32_t number_of_requests);
 
+int amdgpu_cs_submit_sem(amdgpu_context_handle context,
+			 uint64_t flags,
+			 struct amdgpu_cs_request *ibs_request,
+			 struct amdgpu_cs_request_sem *ibs_sem,
+			 uint32_t number_of_requests);
+
 /**
  *  Query status of Command Buffer Submission
  *
@@ -1255,4 +1273,14 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
 */
 const char *amdgpu_get_marketing_name(amdgpu_device_handle dev);
 
+int amdgpu_cs_create_sem(amdgpu_device_handle dev,
+			 amdgpu_sem_handle *sem);
+int amdgpu_cs_export_sem(amdgpu_device_handle dev,
+			 amdgpu_sem_handle sem,
+			 int *shared_handle);
+int amdgpu_cs_import_sem(amdgpu_device_handle dev,
+			 int shared_handle,
+			 amdgpu_sem_handle *sem);
+int amdgpu_cs_destroy_sem(amdgpu_device_handle dev,
+			  amdgpu_sem_handle sem);
 #endif /* #ifdef _AMDGPU_H_ */
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index fb5b3a8..7283327 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -170,7 +170,8 @@ int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
  * \sa amdgpu_cs_submit()
 */
 static int amdgpu_cs_submit_one(amdgpu_context_handle context,
-				struct amdgpu_cs_request *ibs_request)
+				struct amdgpu_cs_request *ibs_request,
+				struct amdgpu_cs_request_sem *sem_request)
 {
 	union drm_amdgpu_cs cs;
 	uint64_t *chunk_array;
@@ -178,9 +179,11 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 	struct drm_amdgpu_cs_chunk_data *chunk_data;
 	struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
 	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
+	struct drm_amdgpu_cs_chunk_sem *wait_sem_dependencies = NULL;
+	struct drm_amdgpu_cs_chunk_sem *signal_sem_dependencies = NULL;
 	struct list_head *sem_list;
 	amdgpu_semaphore_handle sem, tmp;
-	uint32_t i, size, sem_count = 0;
+	uint32_t i, j, size, sem_count = 0;
 	bool user_fence;
 	int r = 0;
 
@@ -196,7 +199,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 	}
 	user_fence = (ibs_request->fence_info.handle != NULL);
 
-	size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;
+	size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1 + (sem_request ? 2 : 0);
 
 	chunk_array = alloca(sizeof(uint64_t) * size);
 	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
@@ -308,6 +311,45 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
 	}
 
+	if (sem_request) {
+		if (sem_request->number_of_wait_sem) {
+			wait_sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * sem_request->number_of_wait_sem);
+			if (!wait_sem_dependencies) {
+				r = -ENOMEM;
+				goto error_unlock;
+			}
+			for (j = 0; j < sem_request->number_of_wait_sem; j++) {
+				struct drm_amdgpu_cs_chunk_sem *dep = &wait_sem_dependencies[j];
+				dep->handle = sem_request->wait_sems[j];
+			}
+			i = cs.in.num_chunks++;
+
+			/* wait semaphores chunk */
+			chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+			chunks[i].chunk_id = AMDGPU_CHUNK_ID_SEM_WAIT;
+			chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * sem_request->number_of_wait_sem;
+			chunks[i].chunk_data = (uint64_t)(uintptr_t)wait_sem_dependencies;
+		}
+		if (sem_request->number_of_signal_sem) {
+			signal_sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * sem_request->number_of_signal_sem);
+			if (!signal_sem_dependencies) {
+				r = -ENOMEM;
+				goto error_unlock;
+			}
+			for (j = 0; j < sem_request->number_of_signal_sem; j++) {
+				struct drm_amdgpu_cs_chunk_sem *dep = &signal_sem_dependencies[j];
+				dep->handle = sem_request->signal_sems[j];
+			}
+			i = cs.in.num_chunks++;
+
+			/* signal semaphores chunk */
+			chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+			chunks[i].chunk_id = AMDGPU_CHUNK_ID_SEM_SIGNAL;
+			chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * sem_request->number_of_signal_sem;
+			chunks[i].chunk_data = (uint64_t)(uintptr_t)signal_sem_dependencies;
+		}
+	}
+
 	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
 				&cs, sizeof(cs));
 	if (r)
@@ -319,17 +361,20 @@ error_unlock:
 	pthread_mutex_unlock(&context->sequence_mutex);
 	free(dependencies);
 	free(sem_dependencies);
+	free(wait_sem_dependencies);
+	free(signal_sem_dependencies);
 	return r;
 }
 
-int amdgpu_cs_submit(amdgpu_context_handle context,
-		     uint64_t flags,
-		     struct amdgpu_cs_request *ibs_request,
-		     uint32_t number_of_requests)
+int amdgpu_cs_submit_sem(amdgpu_context_handle context,
+			 uint64_t flags,
+			 struct amdgpu_cs_request *ibs_request,
+			 struct amdgpu_cs_request_sem *ibs_sem,
+			 uint32_t number_of_requests)
 {
 	uint32_t i;
 	int r;
-
+	bool has_sems = ibs_sem != NULL;
 	if (NULL == context)
 		return -EINVAL;
 	if (NULL == ibs_request)
@@ -337,15 +382,28 @@ int amdgpu_cs_submit(amdgpu_context_handle context,
 
 	r = 0;
 	for (i = 0; i < number_of_requests; i++) {
-		r = amdgpu_cs_submit_one(context, ibs_request);
+		r = amdgpu_cs_submit_one(context, ibs_request, has_sems ? ibs_sem : NULL);
 		if (r)
 			break;
 		ibs_request++;
+		if (has_sems)
+			ibs_sem++;
 	}
 
 	return r;
 }
 
+int amdgpu_cs_submit(amdgpu_context_handle context,
+		     uint64_t flags,
+		     struct amdgpu_cs_request *ibs_request,
+		     uint32_t number_of_requests)
+{
+	return amdgpu_cs_submit_sem(context, flags,
+				    ibs_request, NULL,
+				    number_of_requests);
+}
+
+
 /**
  * Calculate absolute timeout.
  *
@@ -542,3 +600,88 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
 {
 	return amdgpu_cs_unreference_sem(sem);
 }
+
+
+int amdgpu_cs_create_sem(amdgpu_device_handle dev,
+			 amdgpu_sem_handle *sem)
+{
+	union drm_amdgpu_sem args;
+	int r;
+
+	if (NULL == dev)
+		return -EINVAL;
+
+	/* Create the semaphore */
+	memset(&args, 0, sizeof(args));
+	args.in.op = AMDGPU_SEM_OP_CREATE_SEM;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_SEM, &args, sizeof(args));
+	if (r)
+		return r;
+
+	*sem = args.out.handle;
+
+	return 0;
+}
+
+int amdgpu_cs_export_sem(amdgpu_device_handle dev,
+			  amdgpu_sem_handle sem,
+			  int *shared_handle)
+{
+	union drm_amdgpu_sem args;
+	int r;
+
+	if (NULL == dev)
+		return -EINVAL;
+
+	/* Export the semaphore as a file descriptor */
+	memset(&args, 0, sizeof(args));
+	args.in.op = AMDGPU_SEM_OP_EXPORT_SEM;
+	args.in.handle = sem;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_SEM, &args, sizeof(args));
+	if (r)
+		return r;
+	*shared_handle = args.out.fd;
+	return 0;
+}
+
+int amdgpu_cs_import_sem(amdgpu_device_handle dev,
+			  int shared_handle,
+			  amdgpu_sem_handle *sem)
+{
+	union drm_amdgpu_sem args;
+	int r;
+
+	if (NULL == dev)
+		return -EINVAL;
+
+	/* Import the file descriptor as a semaphore */
+	memset(&args, 0, sizeof(args));
+	args.in.op = AMDGPU_SEM_OP_IMPORT_SEM;
+	args.in.handle = shared_handle;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_SEM, &args, sizeof(args));
+	if (r)
+		return r;
+	*sem = args.out.handle;
+	return 0;
+}
+
+
+int amdgpu_cs_destroy_sem(amdgpu_device_handle dev,
+			  amdgpu_sem_handle sem)
+{
+	union drm_amdgpu_sem args;
+	int r;
+
+	if (NULL == dev)
+		return -EINVAL;
+
+	/* Destroy the semaphore */
+	memset(&args, 0, sizeof(args));
+	args.in.op = AMDGPU_SEM_OP_DESTROY_SEM;
+	args.in.handle = sem;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_SEM, &args, sizeof(args));
+	if (r)
+		return r;
+
+	return 0;
+}
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index d8f2497..fa0bfe2 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -50,6 +50,7 @@ extern "C" {
 #define DRM_AMDGPU_WAIT_CS		0x09
 #define DRM_AMDGPU_GEM_OP		0x10
 #define DRM_AMDGPU_GEM_USERPTR		0x11
+#define DRM_AMDGPU_SEM			0x13
 
 #define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +64,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_WAIT_CS	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
 #define DRM_IOCTL_AMDGPU_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
 #define DRM_IOCTL_AMDGPU_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+#define DRM_IOCTL_AMDGPU_SEM		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_SEM, union drm_amdgpu_sem)
 
 #define AMDGPU_GEM_DOMAIN_CPU		0x1
 #define AMDGPU_GEM_DOMAIN_GTT		0x2
@@ -303,6 +305,26 @@ union drm_amdgpu_wait_cs {
 	struct drm_amdgpu_wait_cs_out out;
 };
 
+#define AMDGPU_SEM_OP_CREATE_SEM 0
+#define AMDGPU_SEM_OP_IMPORT_SEM 1
+#define AMDGPU_SEM_OP_EXPORT_SEM 2
+#define AMDGPU_SEM_OP_DESTROY_SEM 3
+
+struct drm_amdgpu_sem_in {
+	__u32 op;
+	__u32 handle;
+};
+
+struct drm_amdgpu_sem_out {
+	__u32 fd;
+	__u32 handle;
+};
+
+union drm_amdgpu_sem {
+	struct drm_amdgpu_sem_in in;
+	struct drm_amdgpu_sem_out out;
+};
+
 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
 #define AMDGPU_GEM_OP_SET_PLACEMENT		1
 
@@ -358,6 +380,8 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_IB		0x01
 #define AMDGPU_CHUNK_ID_FENCE		0x02
 #define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03
+#define AMDGPU_CHUNK_ID_SEM_WAIT	0x04
+#define AMDGPU_CHUNK_ID_SEM_SIGNAL	0x05
 
 struct drm_amdgpu_cs_chunk {
 	uint32_t		chunk_id;
@@ -422,6 +446,10 @@ struct drm_amdgpu_cs_chunk_fence {
 	uint32_t offset;
 };
 
+struct drm_amdgpu_cs_chunk_sem {
+	uint32_t handle;
+};
+
 struct drm_amdgpu_cs_chunk_data {
 	union {
 		struct drm_amdgpu_cs_chunk_ib		ib_data;
-- 
2.7.4
