From: Ben Widawsky <benjamin.widawsky@intel.com>
To: Intel GFX <intel-gfx@lists.freedesktop.org>
Cc: Ben Widawsky <ben@bwidawsk.net>, David Herrmann <dh.herrmann@gmail.com>
Subject: [PATCH 46/48] drm: Optionally create mm blocks from top-to-bottom
Date: Fri,  6 Dec 2013 14:11:31 -0800
Message-ID: <1386367941-7131-46-git-send-email-benjamin.widawsky@intel.com>
In-Reply-To: <1386367941-7131-1-git-send-email-benjamin.widawsky@intel.com>

From: Chris Wilson <chris@chris-wilson.co.uk>

Clients like i915 need to segregate cache domains within the GTT, which
can lead to small amounts of fragmentation. By allocating the uncached
buffers from the bottom and the cacheable buffers from the top, we can
reduce the amount of wasted space and also optimize allocation of the
mappable portion of the GTT to only those buffers that require CPU
access through the GTT.
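
For example (an illustrative sketch only, not part of this patch;
"mm", "node", "size", "align", "color" and "vm_total" are
placeholders), a client could then pull the two classes of buffers
from opposite ends of the same range using the convenience macros
added below:

	/* Uncached buffers: scan the hole list forwards and place the
	 * node at the bottom of the first suitable hole. */
	ret = drm_mm_insert_node_in_range_generic(mm, node, size, align,
						  color, 0, vm_total,
						  DRM_MM_BOTTOMUP);

	/* Cacheable buffers: scan the hole list backwards
	 * (DRM_MM_SEARCH_BELOW) and place the node at the top of the
	 * hole that is found (DRM_MM_CREATE_TOP). */
	ret = drm_mm_insert_node_in_range_generic(mm, node, size, align,
						  color, 0, vm_total,
						  DRM_MM_TOPDOWN);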

v2 by Ben:
Update callers in i915_gem_object_bind_to_gtt()
Turn search flags and allocation flags into separate enums
Make checkpatch happy where logical/easy

v3 by Ben:
Rebased on top of the many drm_mm changes since the original patches
Remove ATOMIC from allocator flags (Chris)
Reverse order of TOPDOWN and BOTTOMUP
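
To illustrate the new placement arithmetic (numbers invented for the
example): given a hole spanning [0x1000, 0x9000) and a request of size
0x2300 with alignment 0x1000, top-down allocation first sets
adj_start = adj_end - size = 0x6d00, then rounds the start *down* to
the alignment (tmp = 0xd00, adj_start -= tmp), placing the node at
[0x6000, 0x8300). Bottom-up would instead start at the already-aligned
0x1000 and place it at [0x1000, 0x3300).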

Cc: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/drm_mm.c            | 56 +++++++++++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_gem.c     |  3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c |  2 +-
 include/drm/drm_mm.h                | 29 ++++++++++++++++---
 4 files changed, 69 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index af93cc5..4f5e4f6 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -65,7 +65,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
 				 unsigned long size, unsigned alignment,
-				 unsigned long color)
+				 unsigned long color,
+				 enum drm_mm_allocator_flags flags)
 {
 	struct drm_mm *mm = hole_node->mm;
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -78,12 +79,22 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	if (mm->color_adjust)
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
+	if (flags & DRM_MM_CREATE_TOP)
+		adj_start = adj_end - size;
+
 	if (alignment) {
 		unsigned tmp = adj_start % alignment;
-		if (tmp)
-			adj_start += alignment - tmp;
+		if (tmp) {
+			if (flags & DRM_MM_CREATE_TOP)
+				adj_start -= tmp;
+			else
+				adj_start += alignment - tmp;
+		}
 	}
 
+	BUG_ON(adj_start < hole_start);
+	BUG_ON(adj_end > hole_end);
+
 	if (adj_start == hole_start) {
 		hole_node->hole_follows = 0;
 		list_del(&hole_node->hole_stack);
@@ -155,16 +166,17 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 			       unsigned long size, unsigned alignment,
 			       unsigned long color,
-			       enum drm_mm_search_flags flags)
+			       enum drm_mm_search_flags sflags,
+			       enum drm_mm_allocator_flags aflags)
 {
 	struct drm_mm_node *hole_node;
 
 	hole_node = drm_mm_search_free_generic(mm, size, alignment,
-					       color, flags);
+					       color, sflags);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper(hole_node, node, size, alignment, color);
+	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_generic);
@@ -173,7 +185,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 				       struct drm_mm_node *node,
 				       unsigned long size, unsigned alignment,
 				       unsigned long color,
-				       unsigned long start, unsigned long end)
+				       unsigned long start, unsigned long end,
+				       enum drm_mm_allocator_flags flags)
 {
 	struct drm_mm *mm = hole_node->mm;
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -188,13 +201,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	if (adj_end > end)
 		adj_end = end;
 
+	if (flags & DRM_MM_CREATE_TOP)
+		adj_start = adj_end - size;
+
 	if (mm->color_adjust)
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
 	if (alignment) {
 		unsigned tmp = adj_start % alignment;
-		if (tmp)
-			adj_start += alignment - tmp;
+		if (tmp) {
+			if (flags & DRM_MM_CREATE_TOP)
+				adj_start -= tmp;
+			else
+				adj_start += alignment - tmp;
+		}
 	}
 
 	if (adj_start == hole_start) {
@@ -211,6 +231,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	BUG_ON(node->start < start);
+	BUG_ON(node->start < adj_start);
 	BUG_ON(node->start + node->size > adj_end);
 	BUG_ON(node->start + node->size > end);
 
@@ -227,21 +249,23 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
  * restricted allocations. The preallocated memory node must be cleared.
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-					unsigned long size, unsigned alignment, unsigned long color,
+					unsigned long size, unsigned alignment,
+					unsigned long color,
 					unsigned long start, unsigned long end,
-					enum drm_mm_search_flags flags)
+					enum drm_mm_search_flags sflags,
+					enum drm_mm_allocator_flags aflags)
 {
 	struct drm_mm_node *hole_node;
 
 	hole_node = drm_mm_search_free_in_range_generic(mm,
 							size, alignment, color,
-							start, end, flags);
+							start, end, sflags);
 	if (!hole_node)
 		return -ENOSPC;
 
 	drm_mm_insert_helper_range(hole_node, node,
 				   size, alignment, color,
-				   start, end);
+				   start, end, aflags);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
@@ -315,7 +339,8 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+			       flags & DRM_MM_SEARCH_BELOW) {
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
 			if (adj_end <= adj_start)
@@ -356,7 +381,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
 	best = NULL;
 	best_size = ~0UL;
 
-	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+			       flags & DRM_MM_SEARCH_BELOW) {
 		if (adj_start < start)
 			adj_start = start;
 		if (adj_end > end)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a03c262..1360f89 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3280,7 +3280,8 @@ search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
 						  obj->cache_level, 1, gtt_max,
-						  DRM_MM_SEARCH_DEFAULT);
+						  DRM_MM_SEARCH_DEFAULT,
+						  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
 					       obj->cache_level,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 998f9a0..37832e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -861,7 +861,7 @@ alloc:
 						  &ppgtt->node, GEN6_PD_SIZE,
 						  GEN6_PD_ALIGN, 0,
 						  0, dev_priv->gtt.base.total,
-						  DRM_MM_SEARCH_DEFAULT);
+						  DRM_MM_BOTTOMUP);
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index cba6786..36c1d8f 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -47,8 +47,17 @@
 enum drm_mm_search_flags {
 	DRM_MM_SEARCH_DEFAULT =		0,
 	DRM_MM_SEARCH_BEST =		1 << 0,
+	DRM_MM_SEARCH_BELOW =		1 << 1,
 };
 
+enum drm_mm_allocator_flags {
+	DRM_MM_CREATE_DEFAULT =		0,
+	DRM_MM_CREATE_TOP =		1 << 0,
+};
+
+#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
+#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
+
 struct drm_mm_node {
 	struct list_head node_list;
 	struct list_head hole_stack;
@@ -133,6 +142,14 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 	     1 : 0; \
 	     entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
 
+#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
+	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+	     &entry->hole_stack != &(mm)->hole_stack ? \
+	     hole_start = drm_mm_hole_node_start(entry), \
+	     hole_end = drm_mm_hole_node_end(entry), \
+	     1 : 0; \
+	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
 /*
  * Basic range manager support (drm_mm.c)
  */
@@ -143,14 +160,16 @@ extern int drm_mm_insert_node_generic(struct drm_mm *mm,
 				      unsigned long size,
 				      unsigned alignment,
 				      unsigned long color,
-				      enum drm_mm_search_flags flags);
+				      enum drm_mm_search_flags sflags,
+				      enum drm_mm_allocator_flags aflags);
 static inline int drm_mm_insert_node(struct drm_mm *mm,
 				     struct drm_mm_node *node,
 				     unsigned long size,
 				     unsigned alignment,
 				     enum drm_mm_search_flags flags)
 {
-	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
+	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
+					  DRM_MM_CREATE_DEFAULT);
 }
 
 extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
@@ -160,7 +179,8 @@ extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
 				       unsigned long color,
 				       unsigned long start,
 				       unsigned long end,
-				       enum drm_mm_search_flags flags);
+				       enum drm_mm_search_flags sflags,
+				       enum drm_mm_allocator_flags aflags);
 static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 					      struct drm_mm_node *node,
 					      unsigned long size,
@@ -170,7 +190,8 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 					      enum drm_mm_search_flags flags)
 {
 	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
-						   0, start, end, flags);
+						   0, start, end, flags,
+						   DRM_MM_CREATE_DEFAULT);
 }
 
 extern void drm_mm_remove_node(struct drm_mm_node *node);
-- 
1.8.4.2
