All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH libdrm 1/2] amdgpu: add a function to create amdgpu bo internally (v3)
@ 2018-08-10  4:59 Junwei Zhang
       [not found] ` <1533877151-20165-1-git-send-email-Jerry.Zhang-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 4+ messages in thread
From: Junwei Zhang @ 2018-08-10  4:59 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Junwei Zhang, christian.koenig-5C7GfCeVMHo

a helper function to create and initialize amdgpu bo

v2: update error handling: add label and free bo
v3: update error handling: separate each error label

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
---
 amdgpu/amdgpu_bo.c | 195 ++++++++++++++++++++++++++---------------------------
 1 file changed, 94 insertions(+), 101 deletions(-)

diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index b790e9b..2947715 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -48,11 +48,31 @@ static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
 	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
 }
 
+static int amdgpu_bo_create(amdgpu_device_handle dev,
+			    uint64_t size,
+			    uint32_t handle,
+			    amdgpu_bo_handle *buf_handle)
+{
+	struct amdgpu_bo *bo;
+
+	bo = calloc(1, sizeof(struct amdgpu_bo));
+	if (!bo)
+		return -ENOMEM;
+
+	atomic_set(&bo->refcount, 1);
+	bo->dev = dev;
+	bo->alloc_size = size;
+	bo->handle = handle;
+	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+
+	*buf_handle = bo;
+	return 0;
+}
+
 int amdgpu_bo_alloc(amdgpu_device_handle dev,
 		    struct amdgpu_bo_alloc_request *alloc_buffer,
 		    amdgpu_bo_handle *buf_handle)
 {
-	struct amdgpu_bo *bo;
 	union drm_amdgpu_gem_create args;
 	unsigned heap = alloc_buffer->preferred_heap;
 	int r = 0;
@@ -61,14 +81,6 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
 	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
 		return -EINVAL;
 
-	bo = calloc(1, sizeof(struct amdgpu_bo));
-	if (!bo)
-		return -ENOMEM;
-
-	atomic_set(&bo->refcount, 1);
-	bo->dev = dev;
-	bo->alloc_size = alloc_buffer->alloc_size;
-
 	memset(&args, 0, sizeof(args));
 	args.in.bo_size = alloc_buffer->alloc_size;
 	args.in.alignment = alloc_buffer->phys_alignment;
@@ -80,24 +92,23 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
 	/* Allocate the buffer with the preferred heap. */
 	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
 				&args, sizeof(args));
+	if (r)
+		goto out;
+
+	r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
+			     buf_handle);
 	if (r) {
-		free(bo);
-		return r;
+		amdgpu_close_kms_handle(dev, args.out.handle);
+		goto out;
 	}
 
-	bo->handle = args.out.handle;
-
-	pthread_mutex_lock(&bo->dev->bo_table_mutex);
-	r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
-	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-
-	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
-
+	pthread_mutex_lock(&dev->bo_table_mutex);
+	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
+				*buf_handle);
+	pthread_mutex_unlock(&dev->bo_table_mutex);
 	if (r)
-		amdgpu_bo_free(bo);
-	else
-		*buf_handle = bo;
-
+		amdgpu_bo_free(*buf_handle);
+out:
 	return r;
 }
 
@@ -256,7 +267,9 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 {
 	struct drm_gem_open open_arg = {};
 	struct amdgpu_bo *bo = NULL;
-	int r;
+	uint32_t handle = 0, flink_name = 0;
+	uint64_t alloc_size = 0;
+	int r = 0;
 	int dma_fd;
 	uint64_t dma_buf_size = 0;
 
@@ -266,22 +279,18 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 
 	/* Convert a DMA buf handle to a KMS handle now. */
 	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-		uint32_t handle;
 		off_t size;
 
 		/* Get a KMS handle. */
 		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
-		if (r) {
-			pthread_mutex_unlock(&dev->bo_table_mutex);
-			return r;
-		}
+		if (r)
+			goto unlock;
 
 		/* Query the buffer size. */
 		size = lseek(shared_handle, 0, SEEK_END);
 		if (size == (off_t)-1) {
-			pthread_mutex_unlock(&dev->bo_table_mutex);
-			amdgpu_close_kms_handle(dev, handle);
-			return -errno;
+			r = -errno;
+			goto close_handle;
 		}
 		lseek(shared_handle, 0, SEEK_SET);
 
@@ -302,12 +311,12 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 	case amdgpu_bo_handle_type_kms:
 	case amdgpu_bo_handle_type_kms_noimport:
 		/* Importing a KMS handle in not allowed. */
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		return -EPERM;
+		r = -EPERM;
+		goto unlock;
 
 	default:
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		return -EINVAL;
+		r = -EINVAL;
+		goto unlock;
 	}
 
 	if (bo) {
@@ -320,58 +329,32 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 		return 0;
 	}
 
-	bo = calloc(1, sizeof(struct amdgpu_bo));
-	if (!bo) {
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-			amdgpu_close_kms_handle(dev, shared_handle);
-		}
-		return -ENOMEM;
-	}
-
 	/* Open the handle. */
 	switch (type) {
 	case amdgpu_bo_handle_type_gem_flink_name:
 		open_arg.name = shared_handle;
 		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
-		if (r) {
-			free(bo);
-			pthread_mutex_unlock(&dev->bo_table_mutex);
-			return r;
-		}
+		if (r)
+			goto unlock;
 
-		bo->handle = open_arg.handle;
+		flink_name = shared_handle;
+		handle = open_arg.handle;
+		alloc_size = open_arg.size;
 		if (dev->flink_fd != dev->fd) {
-			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
-			if (r) {
-				free(bo);
-				pthread_mutex_unlock(&dev->bo_table_mutex);
-				return r;
-			}
-			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );
-
+			r = drmPrimeHandleToFD(dev->flink_fd, handle,
+					       DRM_CLOEXEC, &dma_fd);
+			if (r)
+				goto close_handle;
+			r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
 			close(dma_fd);
-
-			if (r) {
-				free(bo);
-				pthread_mutex_unlock(&dev->bo_table_mutex);
-				return r;
-			}
-		}
-		bo->flink_name = shared_handle;
-		bo->alloc_size = open_arg.size;
-		r = handle_table_insert(&dev->bo_flink_names, shared_handle,
-					bo);
-		if (r) {
-			pthread_mutex_unlock(&dev->bo_table_mutex);
-			amdgpu_bo_free(bo);
-			return r;
+			if (r)
+				goto close_handle;
 		}
 		break;
 
 	case amdgpu_bo_handle_type_dma_buf_fd:
-		bo->handle = shared_handle;
-		bo->alloc_size = dma_buf_size;
+		handle = shared_handle;
+		alloc_size = dma_buf_size;
 		break;
 
 	case amdgpu_bo_handle_type_kms:
@@ -380,16 +363,34 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 	}
 
 	/* Initialize it. */
-	atomic_set(&bo->refcount, 1);
-	bo->dev = dev;
-	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+	r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
+	if (r)
+		goto close_handle;
 
-	handle_table_insert(&dev->bo_handles, bo->handle, bo);
-	pthread_mutex_unlock(&dev->bo_table_mutex);
+	r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
+	if (r)
+		goto bo_free;
+	if (flink_name) {
+		r = handle_table_insert(&dev->bo_flink_names, flink_name,
+					bo);
+		if (r)
+			goto remove_handle;
+	}
 
 	output->buf_handle = bo;
 	output->alloc_size = bo->alloc_size;
+	pthread_mutex_unlock(&dev->bo_table_mutex);
 	return 0;
+
+remove_handle:
+	handle_table_remove(&dev->bo_handles, bo->handle);
+bo_free:
+	amdgpu_bo_free(bo);
+close_handle:
+	amdgpu_close_kms_handle(dev, handle);
+unlock:
+	pthread_mutex_unlock(&dev->bo_table_mutex);
+	return r;
 }
 
 int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
@@ -574,7 +575,6 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
 				    amdgpu_bo_handle *buf_handle)
 {
 	int r;
-	struct amdgpu_bo *bo;
 	struct drm_amdgpu_gem_userptr args;
 
 	args.addr = (uintptr_t)cpu;
@@ -584,28 +584,21 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
 	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
 				&args, sizeof(args));
 	if (r)
-		return r;
-
-	bo = calloc(1, sizeof(struct amdgpu_bo));
-	if (!bo)
-		return -ENOMEM;
+		goto out;
 
-	atomic_set(&bo->refcount, 1);
-	bo->dev = dev;
-	bo->alloc_size = size;
-	bo->handle = args.handle;
-
-	pthread_mutex_lock(&bo->dev->bo_table_mutex);
-	r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
-	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-
-	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+	r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
+	if (r) {
+		amdgpu_close_kms_handle(dev, args.handle);
+		goto out;
+	}
 
+	pthread_mutex_lock(&dev->bo_table_mutex);
+	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
+				*buf_handle);
+	pthread_mutex_unlock(&dev->bo_table_mutex);
 	if (r)
-		amdgpu_bo_free(bo);
-	else
-		*buf_handle = bo;
-
+		amdgpu_bo_free(*buf_handle);
+out:
 	return r;
 }
 
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH libdrm 2/2] [RFC] amdgpu: do not free flink bo for flink_fd
       [not found] ` <1533877151-20165-1-git-send-email-Jerry.Zhang-5C7GfCeVMHo@public.gmane.org>
@ 2018-08-10  4:59   ` Junwei Zhang
  2018-08-10  5:55   ` [PATCH libdrm 1/2] amdgpu: add a function to create amdgpu bo internally (v3) Zhang, Jerry
  1 sibling, 0 replies; 4+ messages in thread
From: Junwei Zhang @ 2018-08-10  4:59 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Junwei Zhang, christian.koenig-5C7GfCeVMHo

the flink bo is used to export

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
---
 amdgpu/amdgpu_bo.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index 2947715..0d0c73b 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -219,12 +219,6 @@ static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
 
 	bo->flink_name = flink.name;
 
-	if (bo->dev->flink_fd != bo->dev->fd) {
-		struct drm_gem_close args = {};
-		args.handle = handle;
-		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
-	}
-
 	pthread_mutex_lock(&bo->dev->bo_table_mutex);
 	r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
 	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* RE: [PATCH libdrm 1/2] amdgpu: add a function to create amdgpu bo internally (v3)
       [not found] ` <1533877151-20165-1-git-send-email-Jerry.Zhang-5C7GfCeVMHo@public.gmane.org>
  2018-08-10  4:59   ` [PATCH libdrm 2/2] [RFC] amdgpu: do not free flink bo for flink_fd Junwei Zhang
@ 2018-08-10  5:55   ` Zhang, Jerry
  1 sibling, 0 replies; 4+ messages in thread
From: Zhang, Jerry @ 2018-08-10  5:55 UTC (permalink / raw)
  To: Zhang, Jerry, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Koenig, Christian

Please ignore this one, which was sent from a temporary branch.

Regards,
Jerry

> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Junwei
> Zhang
> Sent: Friday, August 10, 2018 12:59
> To: amd-gfx@lists.freedesktop.org
> Cc: Zhang, Jerry <Jerry.Zhang@amd.com>; Koenig, Christian
> <Christian.Koenig@amd.com>
> Subject: [PATCH libdrm 1/2] amdgpu: add a function to create amdgpu bo
> internally (v3)
> 
> a helper function to create and initialize amdgpu bo
> 
> v2: update error handling: add label and free bo
> v3: update error handling: separate each error label
> 
> Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
> ---
>  amdgpu/amdgpu_bo.c | 195 ++++++++++++++++++++++++++---------------------
> ------
>  1 file changed, 94 insertions(+), 101 deletions(-)
> 
> diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c index
> b790e9b..2947715 100644
> --- a/amdgpu/amdgpu_bo.c
> +++ b/amdgpu/amdgpu_bo.c
> @@ -48,11 +48,31 @@ static void
> amdgpu_close_kms_handle(amdgpu_device_handle dev,
>  	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);  }
> 
> +static int amdgpu_bo_create(amdgpu_device_handle dev,
> +			    uint64_t size,
> +			    uint32_t handle,
> +			    amdgpu_bo_handle *buf_handle)
> +{
> +	struct amdgpu_bo *bo;
> +
> +	bo = calloc(1, sizeof(struct amdgpu_bo));
> +	if (!bo)
> +		return -ENOMEM;
> +
> +	atomic_set(&bo->refcount, 1);
> +	bo->dev = dev;
> +	bo->alloc_size = size;
> +	bo->handle = handle;
> +	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
> +
> +	*buf_handle = bo;
> +	return 0;
> +}
> +
>  int amdgpu_bo_alloc(amdgpu_device_handle dev,
>  		    struct amdgpu_bo_alloc_request *alloc_buffer,
>  		    amdgpu_bo_handle *buf_handle)
>  {
> -	struct amdgpu_bo *bo;
>  	union drm_amdgpu_gem_create args;
>  	unsigned heap = alloc_buffer->preferred_heap;
>  	int r = 0;
> @@ -61,14 +81,6 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
>  	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT |
> AMDGPU_GEM_DOMAIN_VRAM)))
>  		return -EINVAL;
> 
> -	bo = calloc(1, sizeof(struct amdgpu_bo));
> -	if (!bo)
> -		return -ENOMEM;
> -
> -	atomic_set(&bo->refcount, 1);
> -	bo->dev = dev;
> -	bo->alloc_size = alloc_buffer->alloc_size;
> -
>  	memset(&args, 0, sizeof(args));
>  	args.in.bo_size = alloc_buffer->alloc_size;
>  	args.in.alignment = alloc_buffer->phys_alignment; @@ -80,24 +92,23
> @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
>  	/* Allocate the buffer with the preferred heap. */
>  	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
>  				&args, sizeof(args));
> +	if (r)
> +		goto out;
> +
> +	r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
> +			     buf_handle);
>  	if (r) {
> -		free(bo);
> -		return r;
> +		amdgpu_close_kms_handle(dev, args.out.handle);
> +		goto out;
>  	}
> 
> -	bo->handle = args.out.handle;
> -
> -	pthread_mutex_lock(&bo->dev->bo_table_mutex);
> -	r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
> -	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
> -
> -	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
> -
> +	pthread_mutex_lock(&dev->bo_table_mutex);
> +	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
> +				*buf_handle);
> +	pthread_mutex_unlock(&dev->bo_table_mutex);
>  	if (r)
> -		amdgpu_bo_free(bo);
> -	else
> -		*buf_handle = bo;
> -
> +		amdgpu_bo_free(*buf_handle);
> +out:
>  	return r;
>  }
> 
> @@ -256,7 +267,9 @@ int amdgpu_bo_import(amdgpu_device_handle dev,  {
>  	struct drm_gem_open open_arg = {};
>  	struct amdgpu_bo *bo = NULL;
> -	int r;
> +	uint32_t handle = 0, flink_name = 0;
> +	uint64_t alloc_size = 0;
> +	int r = 0;
>  	int dma_fd;
>  	uint64_t dma_buf_size = 0;
> 
> @@ -266,22 +279,18 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
> 
>  	/* Convert a DMA buf handle to a KMS handle now. */
>  	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
> -		uint32_t handle;
>  		off_t size;
> 
>  		/* Get a KMS handle. */
>  		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
> -		if (r) {
> -			pthread_mutex_unlock(&dev->bo_table_mutex);
> -			return r;
> -		}
> +		if (r)
> +			goto unlock;
> 
>  		/* Query the buffer size. */
>  		size = lseek(shared_handle, 0, SEEK_END);
>  		if (size == (off_t)-1) {
> -			pthread_mutex_unlock(&dev->bo_table_mutex);
> -			amdgpu_close_kms_handle(dev, handle);
> -			return -errno;
> +			r = -errno;
> +			goto close_handle;
>  		}
>  		lseek(shared_handle, 0, SEEK_SET);
> 
> @@ -302,12 +311,12 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
>  	case amdgpu_bo_handle_type_kms:
>  	case amdgpu_bo_handle_type_kms_noimport:
>  		/* Importing a KMS handle in not allowed. */
> -		pthread_mutex_unlock(&dev->bo_table_mutex);
> -		return -EPERM;
> +		r = -EPERM;
> +		goto unlock;
> 
>  	default:
> -		pthread_mutex_unlock(&dev->bo_table_mutex);
> -		return -EINVAL;
> +		r = -EINVAL;
> +		goto unlock;
>  	}
> 
>  	if (bo) {
> @@ -320,58 +329,32 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
>  		return 0;
>  	}
> 
> -	bo = calloc(1, sizeof(struct amdgpu_bo));
> -	if (!bo) {
> -		pthread_mutex_unlock(&dev->bo_table_mutex);
> -		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
> -			amdgpu_close_kms_handle(dev, shared_handle);
> -		}
> -		return -ENOMEM;
> -	}
> -
>  	/* Open the handle. */
>  	switch (type) {
>  	case amdgpu_bo_handle_type_gem_flink_name:
>  		open_arg.name = shared_handle;
>  		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
> -		if (r) {
> -			free(bo);
> -			pthread_mutex_unlock(&dev->bo_table_mutex);
> -			return r;
> -		}
> +		if (r)
> +			goto unlock;
> 
> -		bo->handle = open_arg.handle;
> +		flink_name = shared_handle;
> +		handle = open_arg.handle;
> +		alloc_size = open_arg.size;
>  		if (dev->flink_fd != dev->fd) {
> -			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
> DRM_CLOEXEC, &dma_fd);
> -			if (r) {
> -				free(bo);
> -				pthread_mutex_unlock(&dev->bo_table_mutex);
> -				return r;
> -			}
> -			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo-
> >handle );
> -
> +			r = drmPrimeHandleToFD(dev->flink_fd, handle,
> +					       DRM_CLOEXEC, &dma_fd);
> +			if (r)
> +				goto close_handle;
> +			r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
>  			close(dma_fd);
> -
> -			if (r) {
> -				free(bo);
> -				pthread_mutex_unlock(&dev->bo_table_mutex);
> -				return r;
> -			}
> -		}
> -		bo->flink_name = shared_handle;
> -		bo->alloc_size = open_arg.size;
> -		r = handle_table_insert(&dev->bo_flink_names, shared_handle,
> -					bo);
> -		if (r) {
> -			pthread_mutex_unlock(&dev->bo_table_mutex);
> -			amdgpu_bo_free(bo);
> -			return r;
> +			if (r)
> +				goto close_handle;
>  		}
>  		break;
> 
>  	case amdgpu_bo_handle_type_dma_buf_fd:
> -		bo->handle = shared_handle;
> -		bo->alloc_size = dma_buf_size;
> +		handle = shared_handle;
> +		alloc_size = dma_buf_size;
>  		break;
> 
>  	case amdgpu_bo_handle_type_kms:
> @@ -380,16 +363,34 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
>  	}
> 
>  	/* Initialize it. */
> -	atomic_set(&bo->refcount, 1);
> -	bo->dev = dev;
> -	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
> +	r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
> +	if (r)
> +		goto close_handle;
> 
> -	handle_table_insert(&dev->bo_handles, bo->handle, bo);
> -	pthread_mutex_unlock(&dev->bo_table_mutex);
> +	r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
> +	if (r)
> +		goto bo_free;
> +	if (flink_name) {
> +		r = handle_table_insert(&dev->bo_flink_names, flink_name,
> +					bo);
> +		if (r)
> +			goto remove_handle;
> +	}
> 
>  	output->buf_handle = bo;
>  	output->alloc_size = bo->alloc_size;
> +	pthread_mutex_unlock(&dev->bo_table_mutex);
>  	return 0;
> +
> +remove_handle:
> +	handle_table_remove(&dev->bo_handles, bo->handle);
> +bo_free:
> +	amdgpu_bo_free(bo);
> +close_handle:
> +	amdgpu_close_kms_handle(dev, handle);
> +unlock:
> +	pthread_mutex_unlock(&dev->bo_table_mutex);
> +	return r;
>  }
> 
>  int amdgpu_bo_free(amdgpu_bo_handle buf_handle) @@ -574,7 +575,6 @@
> int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
>  				    amdgpu_bo_handle *buf_handle)
>  {
>  	int r;
> -	struct amdgpu_bo *bo;
>  	struct drm_amdgpu_gem_userptr args;
> 
>  	args.addr = (uintptr_t)cpu;
> @@ -584,28 +584,21 @@ int
> amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
>  	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
>  				&args, sizeof(args));
>  	if (r)
> -		return r;
> -
> -	bo = calloc(1, sizeof(struct amdgpu_bo));
> -	if (!bo)
> -		return -ENOMEM;
> +		goto out;
> 
> -	atomic_set(&bo->refcount, 1);
> -	bo->dev = dev;
> -	bo->alloc_size = size;
> -	bo->handle = args.handle;
> -
> -	pthread_mutex_lock(&bo->dev->bo_table_mutex);
> -	r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
> -	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
> -
> -	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
> +	r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
> +	if (r) {
> +		amdgpu_close_kms_handle(dev, args.handle);
> +		goto out;
> +	}
> 
> +	pthread_mutex_lock(&dev->bo_table_mutex);
> +	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
> +				*buf_handle);
> +	pthread_mutex_unlock(&dev->bo_table_mutex);
>  	if (r)
> -		amdgpu_bo_free(bo);
> -	else
> -		*buf_handle = bo;
> -
> +		amdgpu_bo_free(*buf_handle);
> +out:
>  	return r;
>  }
> 
> --
> 1.9.1
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH libdrm 1/2] amdgpu: add a function to create amdgpu bo internally (v3)
@ 2018-08-10  5:05 Junwei Zhang
  0 siblings, 0 replies; 4+ messages in thread
From: Junwei Zhang @ 2018-08-10  5:05 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Junwei Zhang, christian.koenig-5C7GfCeVMHo

a helper function to create and initialize amdgpu bo

v2: update error handling: add label and free bo
v3: update error handling: separate each error label

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
---
 amdgpu/amdgpu_bo.c | 196 ++++++++++++++++++++++++++---------------------------
 1 file changed, 95 insertions(+), 101 deletions(-)

diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index b790e9b..6f0baf1 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -48,11 +48,31 @@ static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
 	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
 }
 
+static int amdgpu_bo_create(amdgpu_device_handle dev,
+			    uint64_t size,
+			    uint32_t handle,
+			    amdgpu_bo_handle *buf_handle)
+{
+	struct amdgpu_bo *bo;
+
+	bo = calloc(1, sizeof(struct amdgpu_bo));
+	if (!bo)
+		return -ENOMEM;
+
+	atomic_set(&bo->refcount, 1);
+	bo->dev = dev;
+	bo->alloc_size = size;
+	bo->handle = handle;
+	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+
+	*buf_handle = bo;
+	return 0;
+}
+
 int amdgpu_bo_alloc(amdgpu_device_handle dev,
 		    struct amdgpu_bo_alloc_request *alloc_buffer,
 		    amdgpu_bo_handle *buf_handle)
 {
-	struct amdgpu_bo *bo;
 	union drm_amdgpu_gem_create args;
 	unsigned heap = alloc_buffer->preferred_heap;
 	int r = 0;
@@ -61,14 +81,6 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
 	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
 		return -EINVAL;
 
-	bo = calloc(1, sizeof(struct amdgpu_bo));
-	if (!bo)
-		return -ENOMEM;
-
-	atomic_set(&bo->refcount, 1);
-	bo->dev = dev;
-	bo->alloc_size = alloc_buffer->alloc_size;
-
 	memset(&args, 0, sizeof(args));
 	args.in.bo_size = alloc_buffer->alloc_size;
 	args.in.alignment = alloc_buffer->phys_alignment;
@@ -80,24 +92,23 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
 	/* Allocate the buffer with the preferred heap. */
 	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
 				&args, sizeof(args));
+	if (r)
+		goto out;
+
+	r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
+			     buf_handle);
 	if (r) {
-		free(bo);
-		return r;
+		amdgpu_close_kms_handle(dev, args.out.handle);
+		goto out;
 	}
 
-	bo->handle = args.out.handle;
-
-	pthread_mutex_lock(&bo->dev->bo_table_mutex);
-	r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
-	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-
-	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
-
+	pthread_mutex_lock(&dev->bo_table_mutex);
+	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
+				*buf_handle);
+	pthread_mutex_unlock(&dev->bo_table_mutex);
 	if (r)
-		amdgpu_bo_free(bo);
-	else
-		*buf_handle = bo;
-
+		amdgpu_bo_free(*buf_handle);
+out:
 	return r;
 }
 
@@ -256,7 +267,9 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 {
 	struct drm_gem_open open_arg = {};
 	struct amdgpu_bo *bo = NULL;
-	int r;
+	uint32_t handle = 0, flink_name = 0;
+	uint64_t alloc_size = 0;
+	int r = 0;
 	int dma_fd;
 	uint64_t dma_buf_size = 0;
 
@@ -266,22 +279,18 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 
 	/* Convert a DMA buf handle to a KMS handle now. */
 	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-		uint32_t handle;
 		off_t size;
 
 		/* Get a KMS handle. */
 		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
-		if (r) {
-			pthread_mutex_unlock(&dev->bo_table_mutex);
-			return r;
-		}
+		if (r)
+			goto unlock;
 
 		/* Query the buffer size. */
 		size = lseek(shared_handle, 0, SEEK_END);
 		if (size == (off_t)-1) {
-			pthread_mutex_unlock(&dev->bo_table_mutex);
-			amdgpu_close_kms_handle(dev, handle);
-			return -errno;
+			r = -errno;
+			goto close_handle;
 		}
 		lseek(shared_handle, 0, SEEK_SET);
 
@@ -302,12 +311,12 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 	case amdgpu_bo_handle_type_kms:
 	case amdgpu_bo_handle_type_kms_noimport:
 		/* Importing a KMS handle in not allowed. */
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		return -EPERM;
+		r = -EPERM;
+		goto unlock;
 
 	default:
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		return -EINVAL;
+		r = -EINVAL;
+		goto unlock;
 	}
 
 	if (bo) {
@@ -320,58 +329,32 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 		return 0;
 	}
 
-	bo = calloc(1, sizeof(struct amdgpu_bo));
-	if (!bo) {
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
-			amdgpu_close_kms_handle(dev, shared_handle);
-		}
-		return -ENOMEM;
-	}
-
 	/* Open the handle. */
 	switch (type) {
 	case amdgpu_bo_handle_type_gem_flink_name:
 		open_arg.name = shared_handle;
 		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
-		if (r) {
-			free(bo);
-			pthread_mutex_unlock(&dev->bo_table_mutex);
-			return r;
-		}
+		if (r)
+			goto unlock;
 
-		bo->handle = open_arg.handle;
+		flink_name = shared_handle;
+		handle = open_arg.handle;
+		alloc_size = open_arg.size;
 		if (dev->flink_fd != dev->fd) {
-			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
-			if (r) {
-				free(bo);
-				pthread_mutex_unlock(&dev->bo_table_mutex);
-				return r;
-			}
-			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );
-
+			r = drmPrimeHandleToFD(dev->flink_fd, handle,
+					       DRM_CLOEXEC, &dma_fd);
+			if (r)
+				goto close_handle;
+			r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
 			close(dma_fd);
-
-			if (r) {
-				free(bo);
-				pthread_mutex_unlock(&dev->bo_table_mutex);
-				return r;
-			}
-		}
-		bo->flink_name = shared_handle;
-		bo->alloc_size = open_arg.size;
-		r = handle_table_insert(&dev->bo_flink_names, shared_handle,
-					bo);
-		if (r) {
-			pthread_mutex_unlock(&dev->bo_table_mutex);
-			amdgpu_bo_free(bo);
-			return r;
+			if (r)
+				goto close_handle;
 		}
 		break;
 
 	case amdgpu_bo_handle_type_dma_buf_fd:
-		bo->handle = shared_handle;
-		bo->alloc_size = dma_buf_size;
+		handle = shared_handle;
+		alloc_size = dma_buf_size;
 		break;
 
 	case amdgpu_bo_handle_type_kms:
@@ -380,16 +363,35 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 	}
 
 	/* Initialize it. */
-	atomic_set(&bo->refcount, 1);
-	bo->dev = dev;
-	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+	r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
+	if (r)
+		goto close_handle;
 
-	handle_table_insert(&dev->bo_handles, bo->handle, bo);
-	pthread_mutex_unlock(&dev->bo_table_mutex);
+	r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
+	if (r)
+		goto bo_free;
+	if (flink_name) {
+		bo->flink_name = flink_name;
+		r = handle_table_insert(&dev->bo_flink_names, flink_name,
+					bo);
+		if (r)
+			goto remove_handle;
+	}
 
 	output->buf_handle = bo;
 	output->alloc_size = bo->alloc_size;
+	pthread_mutex_unlock(&dev->bo_table_mutex);
 	return 0;
+
+remove_handle:
+	handle_table_remove(&dev->bo_handles, bo->handle);
+bo_free:
+	amdgpu_bo_free(bo);
+close_handle:
+	amdgpu_close_kms_handle(dev, handle);
+unlock:
+	pthread_mutex_unlock(&dev->bo_table_mutex);
+	return r;
 }
 
 int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
@@ -574,7 +576,6 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
 				    amdgpu_bo_handle *buf_handle)
 {
 	int r;
-	struct amdgpu_bo *bo;
 	struct drm_amdgpu_gem_userptr args;
 
 	args.addr = (uintptr_t)cpu;
@@ -584,28 +585,21 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
 	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
 				&args, sizeof(args));
 	if (r)
-		return r;
-
-	bo = calloc(1, sizeof(struct amdgpu_bo));
-	if (!bo)
-		return -ENOMEM;
+		goto out;
 
-	atomic_set(&bo->refcount, 1);
-	bo->dev = dev;
-	bo->alloc_size = size;
-	bo->handle = args.handle;
-
-	pthread_mutex_lock(&bo->dev->bo_table_mutex);
-	r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
-	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
-
-	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+	r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
+	if (r) {
+		amdgpu_close_kms_handle(dev, args.handle);
+		goto out;
+	}
 
+	pthread_mutex_lock(&dev->bo_table_mutex);
+	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
+				*buf_handle);
+	pthread_mutex_unlock(&dev->bo_table_mutex);
 	if (r)
-		amdgpu_bo_free(bo);
-	else
-		*buf_handle = bo;
-
+		amdgpu_bo_free(*buf_handle);
+out:
 	return r;
 }
 
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2018-08-10  5:55 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-08-10  4:59 [PATCH libdrm 1/2] amdgpu: add a function to create amdgpu bo internally (v3) Junwei Zhang
     [not found] ` <1533877151-20165-1-git-send-email-Jerry.Zhang-5C7GfCeVMHo@public.gmane.org>
2018-08-10  4:59   ` [PATCH libdrm 2/2] [RFC] amdgpu: do not free flink bo for flink_fd Junwei Zhang
2018-08-10  5:55   ` [PATCH libdrm 1/2] amdgpu: add a function to create amdgpu bo internally (v3) Zhang, Jerry
2018-08-10  5:05 Junwei Zhang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.