* Shareable bufmgr objects v2
@ 2014-09-11 15:36 Lionel Landwerlin
  2014-09-11 15:36 ` [PATCH] intel: make bufmgr_gem shareable from different API Lionel Landwerlin
  0 siblings, 1 reply; 8+ messages in thread
From: Lionel Landwerlin @ 2014-09-11 15:36 UTC
  To: dri-devel

Following Chris' review, here is an updated patch using drmMMListHead.
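
For reference, drmMMListHead is libdrm's intrusive doubly linked list
(from libdrm_lists.h); roughly:

	typedef struct _drmMMListHead {
		struct _drmMMListHead *prev;
		struct _drmMMListHead *next;
	} drmMMListHead;

so each bufmgr can carry its own link and be threaded onto a global
list without any extra allocation.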

I did a quick read of the benchmarks/tests files in igt; as far as I
can see, drm_intel_bufmgr_destroy() is always called before the drm
file descriptor is closed, so this change shouldn't break anything.

Cheers,

-
Lionel


* [PATCH] intel: make bufmgr_gem shareable from different API
  2014-09-11 15:36 Shareable bufmgr objects v2 Lionel Landwerlin
@ 2014-09-11 15:36 ` Lionel Landwerlin
  2014-09-11 15:53   ` Chris Wilson
  0 siblings, 1 reply; 8+ messages in thread
From: Lionel Landwerlin @ 2014-09-11 15:36 UTC
  To: dri-devel

When using Mesa and LibVA in the same process, one would like to be
able to bind buffers from the output of the decoder to a GL texture
through an EGLImage.

LibVA can reuse buffers allocated by GBM through a file descriptor. It
will then wrap them into drm_intel_bo objects with
drm_intel_bo_gem_create_from_prime().

The problem at the moment is that both libraries get a different
drm_intel_bufmgr object when they call drm_intel_bufmgr_gem_init(),
even though they're using the same drm file descriptor. As a result,
instead of manipulating the same buffer object for a given file
descriptor, they get 2 different drm_intel_bo objects and 2 different
refcounts, leading one of the libraries to get errors from the kernel
about an invalid BO when the other is done with a shared buffer.
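
As a sketch of the failure (fd, prime_fd and size below stand in for
real values):

	/* Both libraries create their own bufmgr on the same drm fd. */
	drm_intel_bufmgr *mgr_a = drm_intel_bufmgr_gem_init(fd, 4096); /* e.g. Mesa */
	drm_intel_bufmgr *mgr_b = drm_intel_bufmgr_gem_init(fd, 4096); /* e.g. LibVA */

	/* Importing the same dma-buf through each bufmgr yields two
	 * drm_intel_bo wrappers around the same GEM handle, each with
	 * its own refcount. */
	drm_intel_bo *bo_a = drm_intel_bo_gem_create_from_prime(mgr_a, prime_fd, size);
	drm_intel_bo *bo_b = drm_intel_bo_gem_create_from_prime(mgr_b, prime_fd, size);

	/* Dropping the last reference on one side closes the GEM handle,
	 * leaving bo_b pointing at a BO the kernel no longer knows about. */
	drm_intel_bo_unreference(bo_a);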

This patch modifies drm_intel_bufmgr_gem_init() so that, given a file
descriptor, it will look for an already existing drm_intel_bufmgr
using the same file descriptor and return that object.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 intel/intel_bufmgr_gem.c | 82 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 70 insertions(+), 12 deletions(-)

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 0e1cb0d..ce43bc6 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -94,6 +94,8 @@ struct drm_intel_gem_bo_bucket {
 typedef struct _drm_intel_bufmgr_gem {
 	drm_intel_bufmgr bufmgr;
 
+	atomic_t refcount;
+
 	int fd;
 
 	int max_relocs;
@@ -111,6 +113,8 @@ typedef struct _drm_intel_bufmgr_gem {
 	int num_buckets;
 	time_t time;
 
+	drmMMListHead managers;
+
 	drmMMListHead named;
 	drmMMListHead vma_cache;
 	int vma_count, vma_open, vma_max;
@@ -3186,6 +3190,65 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
 	bo_gem->aub_annotation_count = count;
 }
 
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drmMMListHead bufmgr_list = { NULL, NULL };
+
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find_or_create_for_fd(int fd, int *found)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem;
+
+	assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0);
+
+	if (bufmgr_list.next == NULL) {
+		DRMINITLISTHEAD(&bufmgr_list);
+	} else {
+		DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+			if (bufmgr_gem->fd == fd) {
+				atomic_inc(&bufmgr_gem->refcount);
+				*found = 1;
+				goto exit;
+			}
+		}
+	}
+
+	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+	if (bufmgr_gem == NULL)
+		goto exit;
+
+	bufmgr_gem->fd = fd;
+	atomic_set(&bufmgr_gem->refcount, 1);
+
+	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+	assert(pthread_mutex_init(&bufmgr_gem->lock, NULL) == 0);
+
+	pthread_mutex_lock(&bufmgr_gem->lock);
+
+	*found = 0;
+
+exit:
+	pthread_mutex_unlock(&bufmgr_list_mutex);
+
+	return bufmgr_gem;
+}
+
+static void
+drm_intel_bufmgr_gem_unref (drm_intel_bufmgr *bufmgr)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+	if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+		assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0);
+
+		DRMLISTDEL(&bufmgr_gem->managers);
+
+		pthread_mutex_unlock(&bufmgr_list_mutex);
+
+		drm_intel_bufmgr_gem_destroy(bufmgr);
+	}
+}
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage map buffer objections.
@@ -3201,16 +3264,9 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	int ret, tmp;
 	bool exec2 = false;
 
-	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
-	if (bufmgr_gem == NULL)
-		return NULL;
-
-	bufmgr_gem->fd = fd;
-
-	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
-		free(bufmgr_gem);
-		return NULL;
-	}
+	bufmgr_gem = drm_intel_bufmgr_gem_find_or_create_for_fd(fd, &ret);
+	if (bufmgr_gem && ret)
+		return &bufmgr_gem->bufmgr;
 
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_GET_APERTURE,
@@ -3245,7 +3301,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	else if (IS_GEN8(bufmgr_gem->pci_device))
 		bufmgr_gem->gen = 8;
 	else {
-		free(bufmgr_gem);
+		drm_intel_bufmgr_gem_unref(&bufmgr_gem->bufmgr);
 		return NULL;
 	}
 
@@ -3357,7 +3413,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
 	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
 	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
-	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
 	bufmgr_gem->bufmgr.debug = 0;
 	bufmgr_gem->bufmgr.check_aperture_space =
 	    drm_intel_gem_check_aperture_space;
@@ -3373,5 +3429,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
 	bufmgr_gem->vma_max = -1; /* unlimited by default */
 
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+
 	return &bufmgr_gem->bufmgr;
 }
-- 
2.0.1


* Re: [PATCH] intel: make bufmgr_gem shareable from different API
  2014-09-11 15:36 ` [PATCH] intel: make bufmgr_gem shareable from different API Lionel Landwerlin
@ 2014-09-11 15:53   ` Chris Wilson
  2014-09-11 16:59     ` Lionel Landwerlin
  0 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2014-09-11 15:53 UTC
  To: Lionel Landwerlin; +Cc: dri-devel

On Thu, Sep 11, 2014 at 04:36:20PM +0100, Lionel Landwerlin wrote:
> When using Mesa and LibVA in the same process, one would like to be
> able to bind buffers from the output of the decoder to a GL texture
> through an EGLImage.
> 
> LibVA can reuse buffers allocated by GBM through a file descriptor. It
> will then wrap them into drm_intel_bo objects with
> drm_intel_bo_gem_create_from_prime().
> 
> The problem at the moment is that both libraries get a different
> drm_intel_bufmgr object when they call drm_intel_bufmgr_gem_init(),
> even though they're using the same drm file descriptor. As a result,
> instead of manipulating the same buffer object for a given file
> descriptor, they get 2 different drm_intel_bo objects and 2 different
> refcounts, leading one of the libraries to get errors from the kernel
> about an invalid BO when the other is done with a shared buffer.
> 
> This patch modifies drm_intel_bufmgr_gem_init() so that, given a file
> descriptor, it will look for an already existing drm_intel_bufmgr
> using the same file descriptor and return that object.
> 
> Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> ---
>  intel/intel_bufmgr_gem.c | 82 +++++++++++++++++++++++++++++++++++++++++-------
>  1 file changed, 70 insertions(+), 12 deletions(-)
> 
> diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
> index 0e1cb0d..ce43bc6 100644
> --- a/intel/intel_bufmgr_gem.c
> +++ b/intel/intel_bufmgr_gem.c
> @@ -94,6 +94,8 @@ struct drm_intel_gem_bo_bucket {
>  typedef struct _drm_intel_bufmgr_gem {
>  	drm_intel_bufmgr bufmgr;
>  
> +	atomic_t refcount;
> +
>  	int fd;
>  
>  	int max_relocs;
> @@ -111,6 +113,8 @@ typedef struct _drm_intel_bufmgr_gem {
>  	int num_buckets;
>  	time_t time;
>  
> +	drmMMListHead managers;
> +
>  	drmMMListHead named;
>  	drmMMListHead vma_cache;
>  	int vma_count, vma_open, vma_max;
> @@ -3186,6 +3190,65 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
>  	bo_gem->aub_annotation_count = count;
>  }
>  
> +static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
> +static drmMMListHead bufmgr_list = { NULL, NULL };

We don't have a static initializer? Oh well,
static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };

> +static drm_intel_bufmgr_gem *
> +drm_intel_bufmgr_gem_find_or_create_for_fd(int fd, int *found)
> +{
> +	drm_intel_bufmgr_gem *bufmgr_gem;
> +
> +	assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0);
> +
> +	if (bufmgr_list.next == NULL) {
> +		DRMINITLISTHEAD(&bufmgr_list);

Not needed with the static initializer above.

> +	} else {
> +		DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
> +			if (bufmgr_gem->fd == fd) {
> +				atomic_inc(&bufmgr_gem->refcount);
> +				*found = 1;
> +				goto exit;
> +			}
> +		}
> +	}
> +
> +	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
> +	if (bufmgr_gem == NULL)
> +		goto exit;
> +
> +	bufmgr_gem->fd = fd;
> +	atomic_set(&bufmgr_gem->refcount, 1);
> +
> +	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
> +
> +	assert(pthread_mutex_init(&bufmgr_gem->lock, NULL) == 0);
> +
> +	pthread_mutex_lock(&bufmgr_gem->lock);

There is an issue with dropping the lock here. A second thread may try
to use the uninitialised bufmgr and crash. We need to hold the lock
until we have finished initialising the bufmgr. So this function can
just be reduced to a list search called with the lock held.
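
Something like this (a sketch only: allocation, error paths and the
actual initialisation elided, with the lookup reduced to the plain
list walk from above, minus the locking):

	drm_intel_bufmgr *
	drm_intel_bufmgr_gem_init(int fd, int batch_size)
	{
		drm_intel_bufmgr_gem *bufmgr_gem;

		pthread_mutex_lock(&bufmgr_list_mutex);

		/* Plain list search, called with bufmgr_list_mutex held. */
		bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
		if (bufmgr_gem)
			goto exit;

		/* ... allocate and fully initialise bufmgr_gem here, still
		 * under bufmgr_list_mutex, then publish it ... */

		DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);

	exit:
		pthread_mutex_unlock(&bufmgr_list_mutex);
		return bufmgr_gem ? &bufmgr_gem->bufmgr : NULL;
	}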

> +
> +	*found = 0;
> +
> +exit:
> +	pthread_mutex_unlock(&bufmgr_list_mutex);
> +
> +	return bufmgr_gem;
> +}
> +
> +static void
> +drm_intel_bufmgr_gem_unref (drm_intel_bufmgr *bufmgr)
> +{
> +	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
> +
> +	if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
> +		assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0);

You need to recheck the reference count after grabbing the lock.

> +
> +		DRMLISTDEL(&bufmgr_gem->managers);
> +
> +		pthread_mutex_unlock(&bufmgr_list_mutex);
> +
> +		drm_intel_bufmgr_gem_destroy(bufmgr);
> +	}
> +}

-- 
Chris Wilson, Intel Open Source Technology Centre


* [PATCH] intel: make bufmgr_gem shareable from different API
  2014-09-11 15:53   ` Chris Wilson
@ 2014-09-11 16:59     ` Lionel Landwerlin
  2014-09-12  6:13       ` Chris Wilson
  0 siblings, 1 reply; 8+ messages in thread
From: Lionel Landwerlin @ 2014-09-11 16:59 UTC
  To: dri-devel

When using Mesa and LibVA in the same process, one would like to be
able to bind buffers from the output of the decoder to a GL texture
through an EGLImage.

LibVA can reuse buffers allocated by GBM through a file descriptor. It
will then wrap them into drm_intel_bo objects with
drm_intel_bo_gem_create_from_prime().

The problem at the moment is that both libraries get a different
drm_intel_bufmgr object when they call drm_intel_bufmgr_gem_init(),
even though they're using the same drm file descriptor. As a result,
instead of manipulating the same buffer object for a given file
descriptor, they get 2 different drm_intel_bo objects and 2 different
refcounts, leading one of the libraries to get errors from the kernel
about an invalid BO when the other is done with a shared buffer.

This patch modifies drm_intel_bufmgr_gem_init() so that, given a file
descriptor, it will look for an already existing drm_intel_bufmgr
using the same file descriptor and return that object.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 intel/intel_bufmgr_gem.c | 60 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 55 insertions(+), 5 deletions(-)

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 0e1cb0d..0a2a62b 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -94,6 +94,8 @@ struct drm_intel_gem_bo_bucket {
 typedef struct _drm_intel_bufmgr_gem {
 	drm_intel_bufmgr bufmgr;
 
+	atomic_t refcount;
+
 	int fd;
 
 	int max_relocs;
@@ -111,6 +113,8 @@ typedef struct _drm_intel_bufmgr_gem {
 	int num_buckets;
 	time_t time;
 
+	drmMMListHead managers;
+
 	drmMMListHead named;
 	drmMMListHead vma_cache;
 	int vma_count, vma_open, vma_max;
@@ -3186,6 +3190,40 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
 	bo_gem->aub_annotation_count = count;
 }
 
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
+
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find(int fd)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem;
+
+	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+		if (bufmgr_gem->fd == fd) {
+			atomic_inc(&bufmgr_gem->refcount);
+			return bufmgr_gem;
+		}
+	}
+
+	return NULL;
+}
+
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+	if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+		assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0);
+
+		DRMLISTDEL(&bufmgr_gem->managers);
+
+		pthread_mutex_unlock(&bufmgr_list_mutex);
+
+		drm_intel_bufmgr_gem_destroy(bufmgr);
+	}
+}
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage map buffer objections.
@@ -3201,15 +3239,21 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	int ret, tmp;
 	bool exec2 = false;
 
+	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
+	if (bufmgr_gem)
+		goto exit;
+
 	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
 	if (bufmgr_gem == NULL)
-		return NULL;
+		goto exit;
 
 	bufmgr_gem->fd = fd;
+	atomic_set(&bufmgr_gem->refcount, 1);
 
 	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
 		free(bufmgr_gem);
-		return NULL;
+		bufmgr_gem = NULL;
+		goto exit;
 	}
 
 	ret = drmIoctl(bufmgr_gem->fd,
@@ -3246,7 +3290,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		bufmgr_gem->gen = 8;
 	else {
 		free(bufmgr_gem);
-		return NULL;
+		bufmgr_gem = NULL;
+		goto exit;
 	}
 
 	if (IS_GEN3(bufmgr_gem->pci_device) &&
@@ -3357,7 +3402,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
 	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
 	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
-	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
 	bufmgr_gem->bufmgr.debug = 0;
 	bufmgr_gem->bufmgr.check_aperture_space =
 	    drm_intel_gem_check_aperture_space;
@@ -3373,5 +3418,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
 	bufmgr_gem->vma_max = -1; /* unlimited by default */
 
-	return &bufmgr_gem->bufmgr;
+	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+exit:
+	pthread_mutex_unlock(&bufmgr_list_mutex);
+
+	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
 }
-- 
2.0.1


* Re: [PATCH] intel: make bufmgr_gem shareable from different API
  2014-09-11 16:59     ` Lionel Landwerlin
@ 2014-09-12  6:13       ` Chris Wilson
  2014-09-12 10:27         ` Shareable bufmgr objects v3 Lionel Landwerlin
  0 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2014-09-12  6:13 UTC
  To: Lionel Landwerlin; +Cc: dri-devel

On Thu, Sep 11, 2014 at 05:59:40PM +0100, Lionel Landwerlin wrote:
> When using Mesa and LibVA in the same process, one would like to be
> able to bind buffers from the output of the decoder to a GL texture
> through an EGLImage.
> 
> LibVA can reuse buffers allocated by GBM through a file descriptor. It
> will then wrap them into drm_intel_bo objects with
> drm_intel_bo_gem_create_from_prime().
> 
> The problem at the moment is that both libraries get a different
> drm_intel_bufmgr object when they call drm_intel_bufmgr_gem_init(),
> even though they're using the same drm file descriptor. As a result,
> instead of manipulating the same buffer object for a given file
> descriptor, they get 2 different drm_intel_bo objects and 2 different
> refcounts, leading one of the libraries to get errors from the kernel
> about an invalid BO when the other is done with a shared buffer.
> 
> This patch modifies drm_intel_bufmgr_gem_init() so that, given a file
> descriptor, it will look for an already existing drm_intel_bufmgr
> using the same file descriptor and return that object.

Almost there!

> +static void
> +drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
> +{
> +	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
> +
> +	if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
> +		assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0);

Consider thread A destroying its bufmgr on an fd whilst thread B is
creating its own.

Thread A drops the last reference and so takes the mutex.

But just before it does, thread B leaps in, grabs the mutex, searches
through the list, finds its fd, bumps the refcount and leaves with the
old bufmgr (not before dropping the lock!).

Thread A resumes with the lock and frees the bufmgr now owned by thread
B.

The usual idiom is

static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
        if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
                struct drm_device *dev = obj->dev;

                mutex_lock(&dev->struct_mutex);
                if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
                        drm_gem_object_free(&obj->refcount);
                mutex_unlock(&dev->struct_mutex);
        }
}

-- 
Chris Wilson, Intel Open Source Technology Centre


* Shareable bufmgr objects v3
  2014-09-12  6:13       ` Chris Wilson
@ 2014-09-12 10:27         ` Lionel Landwerlin
  2014-09-12 10:27           ` [PATCH] intel: make bufmgr_gem shareable from different API Lionel Landwerlin
  0 siblings, 1 reply; 8+ messages in thread
From: Lionel Landwerlin @ 2014-09-12 10:27 UTC
  To: dri-devel

Hopefully I got this right this time :)

-
Lionel


* [PATCH] intel: make bufmgr_gem shareable from different API
  2014-09-12 10:27         ` Shareable bufmgr objects v3 Lionel Landwerlin
@ 2014-09-12 10:27           ` Lionel Landwerlin
  2014-09-12 10:41             ` Chris Wilson
  0 siblings, 1 reply; 8+ messages in thread
From: Lionel Landwerlin @ 2014-09-12 10:27 UTC
  To: dri-devel

When using Mesa and LibVA in the same process, one would like to be
able to bind buffers from the output of the decoder to a GL texture
through an EGLImage.

LibVA can reuse buffers allocated by GBM through a file descriptor. It
will then wrap them into drm_intel_bo objects with
drm_intel_bo_gem_create_from_prime().

The problem at the moment is that both libraries get a different
drm_intel_bufmgr object when they call drm_intel_bufmgr_gem_init(),
even though they're using the same drm file descriptor. As a result,
instead of manipulating the same buffer object for a given file
descriptor, they get 2 different drm_intel_bo objects and 2 different
refcounts, leading one of the libraries to get errors from the kernel
about an invalid BO when the other is done with a shared buffer.

This patch modifies drm_intel_bufmgr_gem_init() so that, given a file
descriptor, it will look for an already existing drm_intel_bufmgr
using the same file descriptor and return that object.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 intel/intel_bufmgr_gem.c | 63 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 58 insertions(+), 5 deletions(-)

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 0e1cb0d..73f46a1 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -94,6 +94,8 @@ struct drm_intel_gem_bo_bucket {
 typedef struct _drm_intel_bufmgr_gem {
 	drm_intel_bufmgr bufmgr;
 
+	atomic_t refcount;
+
 	int fd;
 
 	int max_relocs;
@@ -111,6 +113,8 @@ typedef struct _drm_intel_bufmgr_gem {
 	int num_buckets;
 	time_t time;
 
+	drmMMListHead managers;
+
 	drmMMListHead named;
 	drmMMListHead vma_cache;
 	int vma_count, vma_open, vma_max;
@@ -3186,6 +3190,41 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
 	bo_gem->aub_annotation_count = count;
 }
 
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
+
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find(int fd)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem;
+
+	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+		if (bufmgr_gem->fd == fd) {
+			atomic_inc(&bufmgr_gem->refcount);
+			return bufmgr_gem;
+		}
+	}
+
+	return NULL;
+}
+
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+	if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+		assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0);
+
+		if (atomic_read(&bufmgr_gem->refcount)) {
+			DRMLISTDEL(&bufmgr_gem->managers);
+			drm_intel_bufmgr_gem_destroy(bufmgr);
+		}
+
+		pthread_mutex_unlock(&bufmgr_list_mutex);
+	}
+}
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage map buffer objections.
@@ -3201,15 +3240,23 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	int ret, tmp;
 	bool exec2 = false;
 
+	pthread_mutex_lock(&bufmgr_list_mutex);
+
+	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
+	if (bufmgr_gem)
+		goto exit;
+
 	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
 	if (bufmgr_gem == NULL)
-		return NULL;
+		goto exit;
 
 	bufmgr_gem->fd = fd;
+	atomic_set(&bufmgr_gem->refcount, 1);
 
 	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
 		free(bufmgr_gem);
-		return NULL;
+		bufmgr_gem = NULL;
+		goto exit;
 	}
 
 	ret = drmIoctl(bufmgr_gem->fd,
@@ -3246,7 +3293,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		bufmgr_gem->gen = 8;
 	else {
 		free(bufmgr_gem);
-		return NULL;
+		bufmgr_gem = NULL;
+		goto exit;
 	}
 
 	if (IS_GEN3(bufmgr_gem->pci_device) &&
@@ -3357,7 +3405,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
 	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
 	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
-	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
 	bufmgr_gem->bufmgr.debug = 0;
 	bufmgr_gem->bufmgr.check_aperture_space =
 	    drm_intel_gem_check_aperture_space;
@@ -3373,5 +3421,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
 	bufmgr_gem->vma_max = -1; /* unlimited by default */
 
-	return &bufmgr_gem->bufmgr;
+	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+exit:
+	pthread_mutex_unlock(&bufmgr_list_mutex);
+
+	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
 }
-- 
2.0.1


* Re: [PATCH] intel: make bufmgr_gem shareable from different API
  2014-09-12 10:27           ` [PATCH] intel: make bufmgr_gem shareable from different API Lionel Landwerlin
@ 2014-09-12 10:41             ` Chris Wilson
  0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2014-09-12 10:41 UTC
  To: Lionel Landwerlin; +Cc: dri-devel

On Fri, Sep 12, 2014 at 11:27:07AM +0100, Lionel Landwerlin wrote:
> When using Mesa and LibVA in the same process, one would like to be
> able to bind buffers from the output of the decoder to a GL texture
> through an EGLImage.
> 
> LibVA can reuse buffers allocated by GBM through a file descriptor. It
> will then wrap them into drm_intel_bo objects with
> drm_intel_bo_gem_create_from_prime().
> 
> The problem at the moment is that both libraries get a different
> drm_intel_bufmgr object when they call drm_intel_bufmgr_gem_init(),
> even though they're using the same drm file descriptor. As a result,
> instead of manipulating the same buffer object for a given file
> descriptor, they get 2 different drm_intel_bo objects and 2 different
> refcounts, leading one of the libraries to get errors from the kernel
> about an invalid BO when the other is done with a shared buffer.
> 
> This patch modifies drm_intel_bufmgr_gem_init() so that, given a file
> descriptor, it will look for an already existing drm_intel_bufmgr
> using the same file descriptor and return that object.
> 
> Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>

So close.

> +static void
> +drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
> +{
> +	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
> +
> +	if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
> +		assert(pthread_mutex_lock(&bufmgr_list_mutex) == 0);

(Oh, don't use assert for expressions with side-effects)

> +
> +		if (atomic_read(&bufmgr_gem->refcount)) {

As a thought exercise, now consider what happens if thread B grabs a
reference to this bufmgr and frees it all before this thread wakes up.

Double free, kaboom.

This does have to be an atomic_add_unless, which does not yet exist in
libdrm:

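/* Returns non-zero iff *v was already 'unless' (and was therefore left
 * untouched); otherwise 'add' is applied atomically and 0 is returned. */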
static inline int atomic_add_unless(atomic_t *v, int add, int unless)
{
        int c, old;
        c = atomic_read(v);
        while (c != unless && (old = atomic_cmpxchg(v, c, c + add)) != c)
                c = old;
        return c == unless;
}

static void
drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
		pthread_mutex_lock(&bufmgr_list_mutex);
		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
			/* Still zero under the lock: safe to tear down. */
			DRMLISTDEL(&bufmgr_gem->managers);
			drm_intel_bufmgr_gem_destroy(bufmgr);
		}
		pthread_mutex_unlock(&bufmgr_list_mutex);
	}
}
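
That way the fast path only ever drops a reference when at least one
other reference remains, and the final decrement happens under
bufmgr_list_mutex, where a concurrent lookup can no longer revive the
bufmgr mid-teardown.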
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

