From: Ben Widawsky <benjamin.widawsky@intel.com>
To: Intel GFX <intel-gfx@lists.freedesktop.org>
Cc: Ben Widawsky <ben@bwidawsk.net>,
	Ben Widawsky <benjamin.widawsky@intel.com>
Subject: [PATCH 56/68] drm/i915/bdw: Abstract PDP usage
Date: Thu, 21 Aug 2014 20:12:19 -0700
Message-ID: <1408677155-1840-57-git-send-email-benjamin.widawsky@intel.com>
In-Reply-To: <1408677155-1840-1-git-send-email-benjamin.widawsky@intel.com>

Up until now, ppgtt->pdp has always been the root of our page tables.
With legacy 32b addressing it effectively acts as a single PDP holding
4 PDPEs.

In preparation for 4 level page tables, we need to stop using
ppgtt->pdp directly unless we know it's what we want. The future
structure will use ppgtt->pml4 for the top level, and the pdp will be
just one of the entries pointed to by a pml4e.

This patch addresses some carelessness that crept in during
development with respect to assumptions made about the root page
tables.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 160 ++++++++++++++++++++----------------
 1 file changed, 88 insertions(+), 72 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8e15842..7cc6cf9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -491,6 +491,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
 	unsigned pdpe = gen8_pdpe_index(start);
 	unsigned pde = gen8_pde_index(start);
@@ -502,7 +503,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
-		struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
+		struct i915_pagedir *pd = pdp->pagedirs[pdpe];
 		struct i915_pagetab *pt = pd->page_tables[pde];
 		struct page *page_table = pt->page;
 
@@ -536,6 +537,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	gen8_gtt_pte_t *pt_vaddr;
 	unsigned pdpe = gen8_pdpe_index(start);
 	unsigned pde = gen8_pde_index(start);
@@ -546,7 +548,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (pt_vaddr == NULL) {
-			struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
+			struct i915_pagedir *pd = pdp->pagedirs[pdpe];
 			struct i915_pagetab *pt = pd->page_tables[pde];
 			struct page *page_table = pt->page;
 			pt_vaddr = kmap_atomic(page_table);
@@ -604,24 +606,26 @@ static void gen8_map_pagetable_range(struct i915_pagedir *pd,
 	kunmap_atomic(pagedir);
 }
 
-static void __gen8_teardown_va_range(struct i915_address_space *vm,
-				     uint64_t start, uint64_t length,
-				     bool dead)
+static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
+					struct i915_pagedirpo *pdp,
+					uint64_t start, uint64_t length,
+					bool dead)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		        container_of(vm, struct i915_hw_ppgtt, base);
 	struct drm_device *dev = vm->dev;
 	struct i915_pagedir *pd;
 	struct i915_pagetab *pt;
 	uint64_t temp;
 	uint32_t pdpe, pde;
 
-	if (!ppgtt->pdp.pagedirs) {
+	BUG_ON(!pdp);
+	if (!pdp->pagedirs) {
+		WARN(!bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev)),
+		     "Page directory leak detected\n");
 		/* If pagedirs are already free, there is nothing to do.*/
 		return;
 	}
 
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		uint64_t pd_len = gen8_clamp_pd(start, length);
 		uint64_t pd_start = start;
 
@@ -629,19 +633,19 @@ static void __gen8_teardown_va_range(struct i915_address_space *vm,
 		 * down, and up.
 		 */
 		if (!pd) {
-			WARN(test_bit(pdpe, ppgtt->pdp.used_pdpes),
+			WARN(test_bit(pdpe, pdp->used_pdpes),
 			     "PDPE %d is not allocated, but is reserved (%p)\n",
 			     pdpe, vm);
 			continue;
 		} else {
 			if (dead && pd->zombie) {
-				WARN_ON(test_bit(pdpe, ppgtt->pdp.used_pdpes));
+				WARN_ON(test_bit(pdpe, pdp->used_pdpes));
 				free_pd_single(pd, vm->dev);
-				ppgtt->pdp.pagedirs[pdpe] = NULL;
+				pdp->pagedirs[pdpe] = NULL;
 				continue;
 			}
 
-			WARN(!test_bit(pdpe, ppgtt->pdp.used_pdpes),
+			WARN(!test_bit(pdpe, pdp->used_pdpes),
 			     "PDPE %d not reserved, but is allocated (%p)",
 			     pdpe, vm);
 		}
@@ -683,7 +687,7 @@ static void __gen8_teardown_va_range(struct i915_address_space *vm,
 		gen8_ppgtt_clear_range(vm, pd_start, pd_len, true);
 
 		if (bitmap_empty(pd->used_pdes, I915_PDES_PER_PD)) {
-			WARN_ON(!test_and_clear_bit(pdpe, ppgtt->pdp.used_pdpes));
+			WARN_ON(!test_and_clear_bit(pdpe, pdp->used_pdpes));
 			if (!dead) {
 				/* We've unmapped a possibly live context. Make
 				 * note of it so we can clean it up later. */
@@ -691,20 +695,32 @@ static void __gen8_teardown_va_range(struct i915_address_space *vm,
 				continue;
 			}
 			free_pd_single(pd, dev);
-			ppgtt->pdp.pagedirs[pdpe] = NULL;
+			pdp->pagedirs[pdpe] = NULL;
 		}
 	}
 
-	if (bitmap_empty(ppgtt->pdp.used_pdpes, I915_PDPES_PER_PDP(dev))) {
-		/* TODO: When pagetables are fully dynamic:
-		free_pdp_single(&ppgtt->pdp, dev); */
-	}
+	if (dead && bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev)))
+		free_pdp_single(pdp, dev);
+}
+
+static void gen8_teardown_va_range_4lvl(struct i915_address_space *vm,
+					struct i915_pml4 *pml4,
+					uint64_t start, uint64_t length,
+					bool dead)
+{
+	BUG();
 }
 
 static void gen8_teardown_va_range(struct i915_address_space *vm,
 				   uint64_t start, uint64_t length)
 {
-	__gen8_teardown_va_range(vm, start, length, false);
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+
+	if (!HAS_48B_PPGTT(vm->dev))
+		gen8_teardown_va_range_3lvl(vm, &ppgtt->pdp, start, length, false);
+	else
+		gen8_teardown_va_range_4lvl(vm, &ppgtt->pml4, start, length, false);
 }
 
 static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
@@ -712,12 +728,10 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 	trace_i915_va_teardown(&ppgtt->base,
 			       ppgtt->base.start, ppgtt->base.total,
 			       VM_TO_TRACE_NAME(&ppgtt->base));
-	__gen8_teardown_va_range(&ppgtt->base,
-				 ppgtt->base.start, ppgtt->base.total,
-				 true);
-	WARN_ON(!bitmap_empty(ppgtt->pdp.used_pdpes,
-			      I915_PDPES_PER_PDP(ppgtt->base.dev)));
-	free_pdp_single(&ppgtt->pdp, ppgtt->base.dev);
+	gen8_teardown_va_range_3lvl(&ppgtt->base, &ppgtt->pdp,
+				    ppgtt->base.start, ppgtt->base.total,
+				    true);
+	BUG_ON(ppgtt->pdp.pagedirs); /* FIXME: 48b */
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -733,7 +747,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 
 /**
  * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
- * @ppgtt:	Master ppgtt structure.
+ * @vm:		Master vm structure.
  * @pd:		Page directory for this address range.
  * @start:	Starting virtual address to begin allocations.
  * @length	Size of the allocations.
@@ -749,12 +763,13 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
 				     struct i915_pagedir *pd,
 				     uint64_t start,
 				     uint64_t length,
 				     unsigned long *new_pts)
 {
+	struct drm_device *dev = vm->dev;
 	struct i915_pagetab *pt;
 	uint64_t temp;
 	uint32_t pde;
@@ -772,7 +787,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
 			continue;
 		}
 
-		pt = alloc_pt_single(ppgtt->base.dev);
+		pt = alloc_pt_single(dev);
 		if (IS_ERR(pt))
 			goto unwind_out;
 
@@ -784,14 +799,14 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	for_each_set_bit(pde, new_pts, I915_PDES_PER_PD)
-		free_pt_single(pd->page_tables[pde], ppgtt->base.dev);
+		free_pt_single(pd->page_tables[pde], dev);
 
 	return -ENOMEM;
 }
 
 /**
  * gen8_ppgtt_alloc_pagedirs() - Allocate page directories for VA range.
- * @ppgtt:	Master ppgtt structure.
+ * @vm:		Master vm structure.
  * @pdp:	Page directory pointer for this address range.
  * @start:	Starting virtual address to begin allocations.
  * @length	Size of the allocations.
@@ -812,17 +827,17 @@ unwind_out:
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_pagedirs(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_pagedirs(struct i915_address_space *vm,
 				     struct i915_pagedirpo *pdp,
 				     uint64_t start,
 				     uint64_t length,
 				     unsigned long *new_pds)
 {
-	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_device *dev = vm->dev;
 	struct i915_pagedir *pd;
 	uint64_t temp;
 	uint32_t pdpe;
-	size_t pdpes =  I915_PDPES_PER_PDP(ppgtt->base.dev);
+	size_t pdpes =  I915_PDPES_PER_PDP(vm->dev);
 
 	BUG_ON(!bitmap_empty(new_pds, pdpes));
 
@@ -833,7 +848,7 @@ static int gen8_ppgtt_alloc_pagedirs(struct i915_hw_ppgtt *ppgtt,
 		if (pd)
 			continue;
 
-		pd = alloc_pd_single(ppgtt->base.dev);
+		pd = alloc_pd_single(dev);
 		if (IS_ERR(pd))
 			goto unwind_out;
 
@@ -845,7 +860,7 @@ static int gen8_ppgtt_alloc_pagedirs(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	for_each_set_bit(pdpe, new_pds, pdpes)
-		free_pd_single(pdp->pagedirs[pdpe], ppgtt->base.dev);
+		free_pd_single(pdp->pagedirs[pdpe], dev);
 
 	return -ENOMEM;
 }
@@ -899,12 +914,11 @@ err_out:
 	return -ENOMEM;
 }
 
-static int gen8_alloc_va_range(struct i915_address_space *vm,
-			       uint64_t start,
-			       uint64_t length)
+static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
+				    struct i915_pagedirpo *pdp,
+				    uint64_t start,
+				    uint64_t length)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
 	unsigned long *new_page_dirs, **new_page_tables;
 	struct drm_device *dev = vm->dev;
 	struct i915_pagedir *pd;
@@ -934,18 +948,15 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 		return ret;
 
 	/* Do the allocations first so we can easily bail out */
-	ret = gen8_ppgtt_alloc_pagedirs(ppgtt, &ppgtt->pdp, start, length,
-					new_page_dirs);
+	ret = gen8_ppgtt_alloc_pagedirs(vm, pdp, start, length, new_page_dirs);
 	if (ret) {
 		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
 		return ret;
 	}
 
-	/* For every page directory referenced, allocate page tables */
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		bitmap_zero(new_page_tables[pdpe], I915_PDES_PER_PD);
-		ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
-						new_page_tables[pdpe]);
+		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, new_page_tables[pdpe]);
 		if (ret)
 			goto err_out;
 	}
@@ -953,10 +964,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 	start = orig_start;
 	length = orig_length;
 
-	/* Allocations have completed successfully, so set the bitmaps, and do
-	 * the mappings. */
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-		gen8_ppgtt_pde_t *const pagedir = kmap_atomic(pd->page);
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		struct i915_pagetab *pt;
 		uint64_t pd_len = gen8_clamp_pd(start, length);
 		uint64_t pd_start = start;
@@ -978,25 +986,12 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 
 			/* Our pde is now pointing to the pagetable, pt */
 			set_bit(pde, pd->used_pdes);
-
-			/* Map the PDE to the page table */
-			__gen8_do_map_pt(pagedir + pde, pt, vm->dev);
-
-			/* NB: We haven't yet mapped ptes to pages. At this
-			 * point we're still relying on insert_entries() */
-
-			/* No longer possible this page table is a zombie */
 			pt->zombie = 0;
 		}
 
-		if (!HAS_LLC(vm->dev))
-			drm_clflush_virt_range(pagedir, PAGE_SIZE);
-
-		kunmap_atomic(pagedir);
-
-		set_bit(pdpe, ppgtt->pdp.used_pdpes);
-		/* This pd is officially not a zombie either */
-		ppgtt->pdp.pagedirs[pdpe]->zombie = 0;
+		set_bit(pdpe, pdp->used_pdpes);
+		gen8_map_pagetable_range(pd, start, length, dev);
+		pd->zombie = 0;
 	}
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
@@ -1005,16 +1000,36 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 err_out:
 	while (pdpe--) {
 		for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES_PER_PD)
-			free_pt_single(pd->page_tables[temp], vm->dev);
+			free_pt_single(pd->page_tables[temp], dev);
 	}
 
 	for_each_set_bit(pdpe, new_page_dirs, pdpes)
-		free_pd_single(ppgtt->pdp.pagedirs[pdpe], vm->dev);
+		free_pd_single(pdp->pagedirs[pdpe], dev);
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
 	return ret;
 }
 
+static int __noreturn gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
+					       struct i915_pml4 *pml4,
+					       uint64_t start,
+					       uint64_t length)
+{
+	BUG();
+}
+
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+			       uint64_t start, uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+
+	if (!HAS_48B_PPGTT(vm->dev))
+		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+	else
+		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+}
+
 static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
 {
 	free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
@@ -1060,12 +1075,13 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	struct i915_pagedir *pd;
 	uint64_t temp, start = 0, size = dev_priv->gtt.base.total;
 	uint32_t pdpe;
 	int ret;
 
-	ret = gen8_ppgtt_init_common(ppgtt, dev_priv->gtt.base.total);
+	ret = gen8_ppgtt_init_common(ppgtt, size);
 	if (ret)
 		return ret;
 
@@ -1078,8 +1094,8 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 		return ret;
 	}
 
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, size, temp, pdpe)
-		gen8_map_pagetable_range(pd, start, size, ppgtt->base.dev);
+	gen8_for_each_pdpe(pd, pdp, start, size, temp, pdpe)
+		gen8_map_pagetable_range(pd, start, size, dev);
 
 	ppgtt->base.allocate_va_range = NULL;
 	ppgtt->base.teardown_va_range = NULL;
-- 
2.0.4

Thread overview: 85+ messages
2014-08-22  3:11 [PATCH 00/68] Broadwell 48b addressing and prelocations (no relocs) Ben Widawsky
2014-08-22  3:11 ` [PATCH 01/68] drm/i915: Split up do_switch Ben Widawsky
2014-08-22  3:11 ` [PATCH 02/68] drm/i915: Extract l3 remapping out of ctx switch Ben Widawsky
2014-08-22  3:11 ` [PATCH 03/68] drm/i915/ppgtt: Load address space after mi_set_context Ben Widawsky
2014-08-22  3:11 ` [PATCH 04/68] drm/i915: Fix another another use-after-free in do_switch Ben Widawsky
2014-08-22  3:11 ` [PATCH 05/68] drm/i915/ctx: Return earlier on failure Ben Widawsky
2014-08-22  3:11 ` [PATCH 06/68] drm/i915/error: vma error capture prettyify Ben Widawsky
2014-08-22  3:11 ` [PATCH 07/68] drm/i915/error: Do a better job of disambiguating VMAs Ben Widawsky
2014-08-22  3:11 ` [PATCH 08/68] drm/i915/error: Capture vmas instead of BOs Ben Widawsky
2014-08-22  3:11 ` [PATCH 09/68] drm/i915: Add some extra guards in evict_vm Ben Widawsky
2014-08-22  3:11 ` [PATCH 10/68] drm/i915: Make an uninterruptible evict Ben Widawsky
2014-08-22  3:11 ` [PATCH 11/68] drm/i915: More correct (slower) ppgtt cleanup Ben Widawsky
2014-08-22  3:11 ` [PATCH 12/68] drm/i915: Defer PPGTT cleanup Ben Widawsky
2014-08-22  3:11 ` [PATCH 13/68] drm/i915/bdw: Enable full PPGTT Ben Widawsky
2014-08-22  3:11 ` [PATCH 14/68] drm/i915: Get the error state over the wire (HACKish) Ben Widawsky
2014-08-22  3:11 ` [PATCH 15/68] drm/i915/gen8: Invalidate TLBs before PDP reload Ben Widawsky
2014-08-22  3:11 ` [PATCH 16/68] drm/i915: Remove false assertion in ppgtt_release Ben Widawsky
2014-08-22  3:11 ` [PATCH 17/68] Revert "drm/i915/bdw: Use timeout mode for RC6 on bdw" Ben Widawsky
2014-10-31 19:45   ` Rodrigo Vivi
2014-10-31 21:10     ` Rodrigo Vivi
2014-08-22  3:11 ` [PATCH 18/68] drm/i915/trace: Fix offsets for 64b Ben Widawsky
2014-08-22  3:11 ` [PATCH 19/68] drm/i915: Wrap VMA binding Ben Widawsky
2014-08-22  3:11 ` [PATCH 20/68] drm/i915: Make pin global flags explicit Ben Widawsky
2014-08-22  3:11 ` [PATCH 21/68] drm/i915: Split out aliasing binds Ben Widawsky
2014-08-22  3:11 ` [PATCH 22/68] drm/i915: fix gtt_total_entries() Ben Widawsky
2014-08-22  3:11 ` [PATCH 23/68] drm/i915: Rename to GEN8_LEGACY_PDPES Ben Widawsky
2014-08-22  3:11 ` [PATCH 24/68] drm/i915: Split out verbose PPGTT dumping Ben Widawsky
2014-08-22  3:11 ` [PATCH 25/68] drm/i915: s/pd/pdpe, s/pt/pde Ben Widawsky
2014-08-22  3:11 ` [PATCH 26/68] drm/i915: rename map/unmap to dma_map/unmap Ben Widawsky
2014-08-22  3:11 ` [PATCH 27/68] drm/i915: Setup less PPGTT on failed pagedir Ben Widawsky
2014-08-22  3:11 ` [PATCH 28/68] drm/i915: clean up PPGTT init error path Ben Widawsky
2014-08-22  3:11 ` [PATCH 29/68] drm/i915: Un-hardcode number of page directories Ben Widawsky
2014-08-22  3:11 ` [PATCH 30/68] drm/i915: Make gen6_write_pdes gen6_map_page_tables Ben Widawsky
2014-08-22  3:11 ` [PATCH 31/68] drm/i915: Range clearing is PPGTT agnostic Ben Widawsky
2014-08-22  3:11 ` [PATCH 32/68] drm/i915: Page table helpers, and define renames Ben Widawsky
2014-08-22  3:11 ` [PATCH 33/68] drm/i915: construct page table abstractions Ben Widawsky
2014-08-22  3:11 ` [PATCH 34/68] drm/i915: Complete page table structures Ben Widawsky
2014-08-22  3:11 ` [PATCH 35/68] drm/i915: Create page table allocators Ben Widawsky
2014-08-22  3:11 ` [PATCH 36/68] drm/i915: Generalize GEN6 mapping Ben Widawsky
2014-08-22  3:12 ` [PATCH 37/68] drm/i915: Clean up pagetable DMA map & unmap Ben Widawsky
2014-08-22  3:12 ` [PATCH 38/68] drm/i915: Always dma map page table allocations Ben Widawsky
2014-08-22  3:12 ` [PATCH 39/68] drm/i915: Consolidate dma mappings Ben Widawsky
2014-08-22  3:12 ` [PATCH 40/68] drm/i915: Always dma map page directory allocations Ben Widawsky
2014-08-22  3:12 ` [PATCH 41/68] drm/i915: Track GEN6 page table usage Ben Widawsky
2014-08-22  3:12 ` [PATCH 42/68] drm/i915: Extract context switch skip logic Ben Widawsky
2014-08-22  3:12 ` [PATCH 43/68] drm/i915: Track page table reload need Ben Widawsky
2014-08-22  3:12 ` [PATCH 44/68] drm/i915: Initialize all contexts Ben Widawsky
2014-08-22  3:12 ` [PATCH 45/68] drm/i915: Finish gen6/7 dynamic page table allocation Ben Widawsky
2014-08-22  3:12 ` [PATCH 46/68] drm/i915/bdw: Use dynamic allocation idioms on free Ben Widawsky
2014-08-22  3:12 ` [PATCH 47/68] drm/i915/bdw: pagedirs rework allocation Ben Widawsky
2014-08-22  3:12 ` [PATCH 48/68] drm/i915/bdw: pagetable allocation rework Ben Widawsky
2014-08-22  3:12 ` [PATCH 49/68] drm/i915/bdw: Make the pdp switch a bit less hacky Ben Widawsky
2014-08-22  3:12 ` [PATCH 50/68] drm/i915: num_pd_pages/num_pd_entries isn't useful Ben Widawsky
2014-08-22  3:12 ` [PATCH 51/68] drm/i915: Extract PPGTT param from pagedir alloc Ben Widawsky
2014-08-22  3:12 ` [PATCH 52/68] drm/i915/bdw: Split out mappings Ben Widawsky
2014-08-22  3:12 ` [PATCH 53/68] drm/i915/bdw: begin bitmap tracking Ben Widawsky
2014-08-22  3:12 ` [PATCH 54/68] drm/i915/bdw: Dynamic page table allocations Ben Widawsky
2014-08-22  3:12 ` [PATCH 55/68] drm/i915/bdw: Make pdp allocation more dynamic Ben Widawsky
2014-08-22  3:12 ` Ben Widawsky [this message]
2014-08-22  3:12 ` [PATCH 57/68] drm/i915/bdw: Add dynamic page trace events Ben Widawsky
2014-08-22  3:12 ` [PATCH 58/68] drm/i915/bdw: Add ppgtt info for dynamic pages Ben Widawsky
2014-08-22  3:12 ` [PATCH 59/68] drm/i915/bdw: implement alloc/teardown for 4lvl Ben Widawsky
2014-08-22  3:12 ` [PATCH 60/68] drm/i915/bdw: Add 4 level switching infrastructure Ben Widawsky
2014-08-22  3:12 ` [PATCH 61/68] drm/i915/bdw: Generalize PTE writing for GEN8 PPGTT Ben Widawsky
2014-08-22  3:12 ` [PATCH 62/68] drm/i915: Plumb sg_iter through va allocation ->maps Ben Widawsky
2014-08-22  3:12 ` [PATCH 63/68] drm/i915: Introduce map and unmap for VMAs Ben Widawsky
2014-08-22  3:12 ` [PATCH 64/68] drm/i915: Depend exclusively on map and unmap_vma Ben Widawsky
2014-08-22  3:12 ` [PATCH 65/68] drm/i915: Expand error state's address width to 64b Ben Widawsky
2014-08-22  3:12 ` [PATCH 66/68] drm/i915/bdw: Flip the 48b switch Ben Widawsky
2014-08-22  3:12 ` [PATCH 67/68] drm/i915: Provide a soft_pin hook Ben Widawsky
2014-08-22  3:12 ` [PATCH 68/68] XXX: drm/i915: Unexplained workarounds Ben Widawsky
2014-08-22  3:12 ` [PATCH 1/2] intel: Split out bo allocation Ben Widawsky
2014-08-22  3:12 ` [PATCH 2/2] intel: Add prelocation support Ben Widawsky
2014-08-22  3:12 ` [PATCH] i965: First step toward prelocation Ben Widawsky
2014-08-22 12:15   ` [Mesa-dev] " Alex Deucher
2014-08-22 17:14     ` Ben Widawsky
2014-08-22  3:12 ` [PATCH] no_reloc: test case Ben Widawsky
2014-08-22  6:30 ` [Intel-gfx] [PATCH 00/68] Broadwell 48b addressing and prelocations (no relocs) Chris Wilson
2014-08-22  6:59   ` Kenneth Graunke
2014-08-22  7:03     ` Chris Wilson
2014-08-22 13:30       ` Daniel Vetter
2014-08-22 13:38         ` [Intel-gfx] " Chris Wilson
2014-08-22 20:29           ` Daniel Vetter
2014-08-22 20:38           ` [Intel-gfx] " Daniel Vetter
2014-08-25 22:42             ` Jesse Barnes
