* [PATCH libdrm 1/2] intel: Export raw GEM mmap interfaces
@ 2016-09-22 14:05 Chris Wilson
From: Chris Wilson @ 2016-09-22 14:05 UTC
  To: intel-gfx

Export a set of interfaces that give the caller precise control over how a
buffer is mapped (CPU, GTT or write-combining), while still caching the mmaps
between callers.
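
As an illustration (not part of the patch), a minimal sketch of the intended
calling pattern; it assumes an already-initialised bufmgr and an arbitrary
4096-byte scratch buffer, and it leaves any domain synchronisation to the
caller:

#include <string.h>
#include "intel_bufmgr.h"

static void fill_scratch(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
	void *ptr;

	if (bo == NULL)
		return;

	/* Prefer a write-combining map; it may be unavailable (older kernel,
	 * userptr object), in which case fall back to the cached CPU map. */
	ptr = drm_intel_gem_bo_map__wc(bo);
	if (ptr == NULL)
		ptr = drm_intel_gem_bo_map__cpu(bo);

	if (ptr != NULL)
		memset(ptr, 0, 4096);

	/* No unmap call: the mapping is cached and released with the bo. */
	drm_intel_bo_unreference(bo);
}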

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 intel/intel_bufmgr.h     |   4 ++
 intel/intel_bufmgr_gem.c | 154 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 158 insertions(+)

diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
index 96a4d9d..ce4e70d 100644
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
@@ -184,6 +184,10 @@ int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo);
 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
 
+void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo);
+void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo);
+void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo);
+
 int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
 void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start);
 void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index e6c251a..0f212e9 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -211,6 +211,8 @@ struct _drm_intel_bo_gem {
 	void *mem_virtual;
 	/** GTT virtual address for the buffer, saved across map/unmap cycles */
 	void *gtt_virtual;
+	/** WC CPU address for the buffer, saved across map/unmap cycles */
+	void *wc_virtual;
 	/**
 	 * Virtual address of the buffer allocated by user, used for userptr
 	 * objects only.
@@ -1188,6 +1190,11 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
 		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
 		bufmgr_gem->vma_count--;
 	}
+	if (bo_gem->wc_virtual) {
+		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
+		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
+		bufmgr_gem->vma_count--;
+	}
 	if (bo_gem->gtt_virtual) {
 		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
 		bufmgr_gem->vma_count--;
@@ -1213,6 +1220,9 @@ drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
 	if (bo_gem->mem_virtual)
 		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
 
+	if (bo_gem->wc_virtual)
+		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
+
 	if (bo_gem->gtt_virtual)
 		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
 #endif
@@ -1277,6 +1287,11 @@ static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
 			bo_gem->mem_virtual = NULL;
 			bufmgr_gem->vma_count--;
 		}
+		if (bo_gem->wc_virtual) {
+			drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
+			bo_gem->wc_virtual = NULL;
+			bufmgr_gem->vma_count--;
+		}
 		if (bo_gem->gtt_virtual) {
 			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
 			bo_gem->gtt_virtual = NULL;
@@ -1292,6 +1307,8 @@ static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
 	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
 	if (bo_gem->mem_virtual)
 		bufmgr_gem->vma_count++;
+	if (bo_gem->wc_virtual)
+		bufmgr_gem->vma_count++;
 	if (bo_gem->gtt_virtual)
 		bufmgr_gem->vma_count++;
 	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
@@ -1304,6 +1321,8 @@ static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
 	DRMLISTDEL(&bo_gem->vma_list);
 	if (bo_gem->mem_virtual)
 		bufmgr_gem->vma_count--;
+	if (bo_gem->wc_virtual)
+		bufmgr_gem->vma_count--;
 	if (bo_gem->gtt_virtual)
 		bufmgr_gem->vma_count--;
 	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
@@ -3330,6 +3349,141 @@ drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
 	}
 }
 
+void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+	if (bo_gem->gtt_virtual)
+		return bo_gem->gtt_virtual;
+
+	if (bo_gem->is_userptr)
+		return NULL;
+
+	pthread_mutex_lock(&bufmgr_gem->lock);
+	if (bo_gem->gtt_virtual == NULL) {
+		struct drm_i915_gem_mmap_gtt mmap_arg;
+		void *ptr;
+
+		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
+		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+		if (bo_gem->map_count++ == 0)
+			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+		memclear(mmap_arg);
+		mmap_arg.handle = bo_gem->gem_handle;
+
+		/* Get the fake offset back... */
+		ptr = MAP_FAILED;
+		if (drmIoctl(bufmgr_gem->fd,
+			     DRM_IOCTL_I915_GEM_MMAP_GTT,
+			     &mmap_arg) == 0) {
+			/* and mmap it */
+			ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+				       MAP_SHARED, bufmgr_gem->fd,
+				       mmap_arg.offset);
+		}
+		if (ptr == MAP_FAILED) {
+			if (--bo_gem->map_count == 0)
+				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+			ptr = NULL;
+		}
+
+		bo_gem->gtt_virtual = ptr;
+	}
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+
+	return bo_gem->gtt_virtual;
+}
+
+void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+	if (bo_gem->mem_virtual)
+		return bo_gem->mem_virtual;
+
+	if (bo_gem->is_userptr) {
+		/* Return the same user ptr */
+		return bo_gem->user_virtual;
+	}
+
+	pthread_mutex_lock(&bufmgr_gem->lock);
+	if (!bo_gem->mem_virtual) {
+		struct drm_i915_gem_mmap mmap_arg;
+
+		if (bo_gem->map_count++ == 0)
+			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+		DBG("bo_map: %d (%s), map_count=%d\n",
+		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+		memclear(mmap_arg);
+		mmap_arg.handle = bo_gem->gem_handle;
+		mmap_arg.size = bo->size;
+		if (drmIoctl(bufmgr_gem->fd,
+			     DRM_IOCTL_I915_GEM_MMAP,
+			     &mmap_arg)) {
+			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+			    __FILE__, __LINE__, bo_gem->gem_handle,
+			    bo_gem->name, strerror(errno));
+			if (--bo_gem->map_count == 0)
+				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+		} else {
+			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
+			bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
+		}
+	}
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+
+	return bo_gem->mem_virtual;
+}
+
+void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+	if (bo_gem->wc_virtual)
+		return bo_gem->wc_virtual;
+
+	if (bo_gem->is_userptr)
+		return NULL;
+
+	pthread_mutex_lock(&bufmgr_gem->lock);
+	if (!bo_gem->wc_virtual) {
+		struct drm_i915_gem_mmap mmap_arg;
+
+		if (bo_gem->map_count++ == 0)
+			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+		DBG("bo_map: %d (%s), map_count=%d\n",
+		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+		memclear(mmap_arg);
+		mmap_arg.handle = bo_gem->gem_handle;
+		mmap_arg.size = bo->size;
+		mmap_arg.flags = I915_MMAP_WC;
+		if (drmIoctl(bufmgr_gem->fd,
+			     DRM_IOCTL_I915_GEM_MMAP,
+			     &mmap_arg)) {
+			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+			    __FILE__, __LINE__, bo_gem->gem_handle,
+			    bo_gem->name, strerror(errno));
+			if (--bo_gem->map_count == 0)
+				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+		} else {
+			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
+			bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
+		}
+	}
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+
+	return bo_gem->wc_virtual;
+}
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage map buffer objections.
-- 
2.9.3


* [PATCH libdrm 2/2] intel: Migrate handle/name lookups from linear lists to hashtables
From: Chris Wilson @ 2016-09-22 14:05 UTC
  To: intel-gfx

Walking a linear list to find a matching PRIME handle or flinked name does
not scale: the lookup cost grows with every imported or flinked object and
quickly becomes a measurable burden as their number increases. Replace both
walks with hash table lookups, keyed on the GEM handle and the flink name
respectively.
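
For reference (illustration only, not part of the patch), the lookup-or-NULL
behaviour of the drmHashLookupValue() helper added below can be expressed in
terms of the existing drmHashLookup() entry point:

#include <stddef.h>
#include <xf86drm.h>

/* Returns the stored pointer, or NULL when the key is absent. */
static void *lookup_value(void *table, unsigned long key)
{
    void *value;

    if (drmHashLookup(table, key, &value))
        return NULL;    /* non-zero means "not found" (or an invalid table) */

    return value;
}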

References: https://bugs.freedesktop.org/show_bug.cgi?id=94631
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 intel/intel_bufmgr_gem.c | 127 +++++++++++++++++++++++++++--------------------
 xf86drm.h                |   1 +
 xf86drmHash.c            |  11 ++++
 3 files changed, 85 insertions(+), 54 deletions(-)

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 0f212e9..6c9276c 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -39,6 +39,7 @@
 #endif
 
 #include <xf86drm.h>
+#include <xf86drmHash.h>
 #include <xf86atomic.h>
 #include <fcntl.h>
 #include <stdio.h>
@@ -130,7 +131,9 @@ typedef struct _drm_intel_bufmgr_gem {
 
 	drmMMListHead managers;
 
-	drmMMListHead named;
+	HashTable *name_table;
+	HashTable *handle_table;
+
 	drmMMListHead vma_cache;
 	int vma_count, vma_open, vma_max;
 
@@ -175,7 +178,6 @@ struct _drm_intel_bo_gem {
          * List contains both flink named and prime fd'd objects
 	 */
 	unsigned int global_name;
-	drmMMListHead name_list;
 
 	/**
 	 * Index of the buffer within the validation list while preparing a
@@ -808,6 +810,10 @@ retry:
 		if (!bo_gem)
 			return NULL;
 
+		/* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
+		   list (vma_list), so better set the list head here */
+		DRMINITLISTHEAD(&bo_gem->vma_list);
+
 		bo_gem->bo.size = bo_size;
 
 		memclear(create);
@@ -816,23 +822,26 @@ retry:
 		ret = drmIoctl(bufmgr_gem->fd,
 			       DRM_IOCTL_I915_GEM_CREATE,
 			       &create);
-		bo_gem->gem_handle = create.handle;
-		bo_gem->bo.handle = bo_gem->gem_handle;
 		if (ret != 0) {
 			free(bo_gem);
 			return NULL;
 		}
+
+		bo_gem->gem_handle = create.handle;
+		bo_gem->bo.handle = bo_gem->gem_handle;
 		bo_gem->bo.bufmgr = bufmgr;
 		bo_gem->bo.align = alignment;
 
+		if (drmHashInsert(bufmgr_gem->handle_table,
+				  bo_gem->gem_handle, bo_gem)) {
+			drm_intel_gem_bo_free(&bo_gem->bo);
+			return NULL;
+		}
+
 		bo_gem->tiling_mode = I915_TILING_NONE;
 		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
 		bo_gem->stride = 0;
 
-		/* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
-		   list (vma_list), so better set the list head here */
-		DRMINITLISTHEAD(&bo_gem->name_list);
-		DRMINITLISTHEAD(&bo_gem->vma_list);
 		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
 							 tiling_mode,
 							 stride)) {
@@ -956,6 +965,8 @@ drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
 	if (!bo_gem)
 		return NULL;
 
+	DRMINITLISTHEAD(&bo_gem->vma_list);
+
 	bo_gem->bo.size = size;
 
 	memclear(userptr);
@@ -985,8 +996,11 @@ drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
 	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
 	bo_gem->stride       = 0;
 
-	DRMINITLISTHEAD(&bo_gem->name_list);
-	DRMINITLISTHEAD(&bo_gem->vma_list);
+	if (drmHashInsert(bufmgr_gem->handle_table,
+			  bo_gem->gem_handle, bo_gem)) {
+		drm_intel_gem_bo_free(&bo_gem->bo);
+		return NULL;
+	}
 
 	bo_gem->name = name;
 	atomic_set(&bo_gem->refcount, 1);
@@ -1087,7 +1101,6 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 	int ret;
 	struct drm_gem_open open_arg;
 	struct drm_i915_gem_get_tiling get_tiling;
-	drmMMListHead *list;
 
 	/* At the moment most applications only have a few named bo.
 	 * For instance, in a DRI client only the render buffers passed
@@ -1096,15 +1109,11 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 	 * provides a sufficiently fast match.
 	 */
 	pthread_mutex_lock(&bufmgr_gem->lock);
-	for (list = bufmgr_gem->named.next;
-	     list != &bufmgr_gem->named;
-	     list = list->next) {
-		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
-		if (bo_gem->global_name == handle) {
-			drm_intel_gem_bo_reference(&bo_gem->bo);
-			pthread_mutex_unlock(&bufmgr_gem->lock);
-			return &bo_gem->bo;
-		}
+	bo_gem = drmHashLookupValue(bufmgr_gem->name_table, handle);
+	if (bo_gem) {
+		drm_intel_gem_bo_reference(&bo_gem->bo);
+		pthread_mutex_unlock(&bufmgr_gem->lock);
+		return &bo_gem->bo;
 	}
 
 	memclear(open_arg);
@@ -1122,15 +1131,11 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
          * object from the kernel before by looking through the list
          * again for a matching gem_handle
          */
-	for (list = bufmgr_gem->named.next;
-	     list != &bufmgr_gem->named;
-	     list = list->next) {
-		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
-		if (bo_gem->gem_handle == open_arg.handle) {
-			drm_intel_gem_bo_reference(&bo_gem->bo);
-			pthread_mutex_unlock(&bufmgr_gem->lock);
-			return &bo_gem->bo;
-		}
+	bo_gem = drmHashLookupValue(bufmgr_gem->handle_table, open_arg.handle);
+	if (bo_gem) {
+		drm_intel_gem_bo_reference(&bo_gem->bo);
+		pthread_mutex_unlock(&bufmgr_gem->lock);
+		return &bo_gem->bo;
 	}
 
 	bo_gem = calloc(1, sizeof(*bo_gem));
@@ -1139,6 +1144,8 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 		return NULL;
 	}
 
+	DRMINITLISTHEAD(&bo_gem->vma_list);
+
 	bo_gem->bo.size = open_arg.size;
 	bo_gem->bo.offset = 0;
 	bo_gem->bo.offset64 = 0;
@@ -1153,14 +1160,23 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 	bo_gem->reusable = false;
 	bo_gem->use_48b_address_range = false;
 
+	if (drmHashInsert(bufmgr_gem->handle_table,
+			  bo_gem->gem_handle, bo_gem) ||
+	    drmHashInsert(bufmgr_gem->name_table,
+			  bo_gem->global_name, bo_gem)) {
+		pthread_mutex_unlock(&bufmgr_gem->lock);
+		drm_intel_gem_bo_unreference(&bo_gem->bo);
+		return NULL;
+	}
+
 	memclear(get_tiling);
 	get_tiling.handle = bo_gem->gem_handle;
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_GET_TILING,
 		       &get_tiling);
 	if (ret != 0) {
-		drm_intel_gem_bo_unreference(&bo_gem->bo);
 		pthread_mutex_unlock(&bufmgr_gem->lock);
+		drm_intel_gem_bo_unreference(&bo_gem->bo);
 		return NULL;
 	}
 	bo_gem->tiling_mode = get_tiling.tiling_mode;
@@ -1168,8 +1184,6 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 	/* XXX stride is unknown */
 	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
 
-	DRMINITLISTHEAD(&bo_gem->vma_list);
-	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
 	pthread_mutex_unlock(&bufmgr_gem->lock);
 	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
 
@@ -1200,6 +1214,8 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
 		bufmgr_gem->vma_count--;
 	}
 
+	drmHashDelete(bufmgr_gem->handle_table, bo_gem->gem_handle);
+
 	/* Close this object */
 	memclear(close);
 	close.handle = bo_gem->gem_handle;
@@ -1377,7 +1393,8 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
 	}
 
-	DRMLISTDEL(&bo_gem->name_list);
+	if (bo_gem->global_name)
+		drmHashDelete(bufmgr_gem->name_table, bo_gem->global_name);
 
 	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
 	/* Put the buffer into our internal cache for reuse if we can. */
@@ -2610,7 +2627,6 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 	uint32_t handle;
 	drm_intel_bo_gem *bo_gem;
 	struct drm_i915_gem_get_tiling get_tiling;
-	drmMMListHead *list;
 
 	pthread_mutex_lock(&bufmgr_gem->lock);
 	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
@@ -2625,15 +2641,11 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 	 * for named buffers, we must not create two bo's pointing at the same
 	 * kernel object
 	 */
-	for (list = bufmgr_gem->named.next;
-	     list != &bufmgr_gem->named;
-	     list = list->next) {
-		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
-		if (bo_gem->gem_handle == handle) {
-			drm_intel_gem_bo_reference(&bo_gem->bo);
-			pthread_mutex_unlock(&bufmgr_gem->lock);
-			return &bo_gem->bo;
-		}
+	bo_gem = drmHashLookupValue(bufmgr_gem->handle_table, handle);
+	if (bo_gem) {
+		drm_intel_gem_bo_reference(&bo_gem->bo);
+		pthread_mutex_unlock(&bufmgr_gem->lock);
+		return &bo_gem->bo;
 	}
 
 	bo_gem = calloc(1, sizeof(*bo_gem));
@@ -2641,6 +2653,9 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 		pthread_mutex_unlock(&bufmgr_gem->lock);
 		return NULL;
 	}
+
+	DRMINITLISTHEAD(&bo_gem->vma_list);
+
 	/* Determine size of bo.  The fd-to-handle ioctl really should
 	 * return the size, but it doesn't.  If we have kernel 3.12 or
 	 * later, we can lseek on the prime fd to get the size.  Older
@@ -2656,6 +2671,12 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 	bo_gem->bo.bufmgr = bufmgr;
 
 	bo_gem->gem_handle = handle;
+	if (drmHashInsert(bufmgr_gem->handle_table,
+			  bo_gem->gem_handle, bo_gem)) {
+		drm_intel_gem_bo_free(&bo_gem->bo);
+		pthread_mutex_unlock(&bufmgr_gem->lock);
+		return NULL;
+	}
 
 	atomic_set(&bo_gem->refcount, 1);
 
@@ -2667,8 +2688,6 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 	bo_gem->reusable = false;
 	bo_gem->use_48b_address_range = false;
 
-	DRMINITLISTHEAD(&bo_gem->vma_list);
-	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
 	pthread_mutex_unlock(&bufmgr_gem->lock);
 
 	memclear(get_tiling);
@@ -2695,11 +2714,6 @@ drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
-	pthread_mutex_lock(&bufmgr_gem->lock);
-        if (DRMLISTEMPTY(&bo_gem->name_list))
-                DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
-	pthread_mutex_unlock(&bufmgr_gem->lock);
-
 	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
 			       DRM_CLOEXEC, prime_fd) != 0)
 		return -errno;
@@ -2725,16 +2739,20 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
 		pthread_mutex_lock(&bufmgr_gem->lock);
 
 		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
-		if (ret != 0) {
+		if (ret) {
 			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return -errno;
 		}
 
 		bo_gem->global_name = flink.name;
 		bo_gem->reusable = false;
 
-                if (DRMLISTEMPTY(&bo_gem->name_list))
-                        DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+		if (drmHashInsert(bufmgr_gem->name_table,
+				  bo_gem->global_name, bo_gem)) {
+			pthread_mutex_unlock(&bufmgr_gem->lock);
+			return -ENOMEM;
+		}
+
 		pthread_mutex_unlock(&bufmgr_gem->lock);
 	}
 
@@ -3691,7 +3709,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	    drm_intel_gem_get_pipe_from_crtc_id;
 	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
 
-	DRMINITLISTHEAD(&bufmgr_gem->named);
+	bufmgr_gem->name_table = drmHashCreate();
+	bufmgr_gem->handle_table = drmHashCreate();
 	init_cache_buckets(bufmgr_gem);
 
 	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
diff --git a/xf86drm.h b/xf86drm.h
index 481d882..df1885e 100644
--- a/xf86drm.h
+++ b/xf86drm.h
@@ -695,6 +695,7 @@ extern void          drmFree(void *pt);
 extern void *drmHashCreate(void);
 extern int  drmHashDestroy(void *t);
 extern int  drmHashLookup(void *t, unsigned long key, void **value);
+extern void *drmHashLookupValue(void *t, unsigned long key);
 extern int  drmHashInsert(void *t, unsigned long key, void *value);
 extern int  drmHashDelete(void *t, unsigned long key);
 extern int  drmHashFirst(void *t, unsigned long *key, void **value);
diff --git a/xf86drmHash.c b/xf86drmHash.c
index f287e61..f689240 100644
--- a/xf86drmHash.c
+++ b/xf86drmHash.c
@@ -185,6 +185,17 @@ int drmHashLookup(void *t, unsigned long key, void **value)
     return 0;			/* Found */
 }
 
+void *drmHashLookupValue(void *t, unsigned long key)
+{
+    HashBucketPtr bucket;
+
+    bucket = HashFind(t, key, NULL);
+    if (!bucket)
+        return NULL;
+
+    return bucket->value;
+}
+
 int drmHashInsert(void *t, unsigned long key, void *value)
 {
     HashTablePtr  table = (HashTablePtr)t;
-- 
2.9.3

