From: Michel Thierry <michel.thierry@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 02/12] drm/i915/bdw: Abstract PDP usage
Date: Fri, 20 Feb 2015 17:45:56 +0000
Message-ID: <1424454366-19006-3-git-send-email-michel.thierry@intel.com>
In-Reply-To: <1424454366-19006-1-git-send-email-michel.thierry@intel.com>

From: Ben Widawsky <benjamin.widawsky@intel.com>

Up until now, ppgtt->pdp has always been the root of our page tables.
Legacy 32b addressing acted as if there were a single PDP with 4 PDPEs.

In preparation for 4-level page tables, we need to stop using ppgtt->pdp
directly unless we know it is what we want. The future structure will use
ppgtt->pml4 for the top level, with the pdp being just one of the entries
pointed to by a pml4e. The 4-level paths introduced here are stubbed out
with BUG() until they are implemented.
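
To illustrate the direction, a rough sketch of how the top level could
look once 4-level tables land (illustrative only; i915_pml4 and the
union layout are assumptions about the follow-up patches, not final code):

	/* Illustrative sketch, not the final layout.  With 32b (3-level)
	 * PPGTT the root of the page tables is the single pdp; with 48b
	 * (4-level) PPGTT the root becomes a pml4 whose entries (pml4es)
	 * each point to one pdp.
	 */
	struct i915_hw_ppgtt {
		struct i915_address_space base;
		union {
			struct i915_page_directory_pointer_entry pdp;	/* legacy 32b */
			struct i915_pml4 pml4;				/* 48b, future */
		};
	};

In the meantime, functions that used to hardcode &ppgtt->pdp either take
the pdp as a parameter or derive it locally, so the future 4-level code
can pass in a pdp hanging off a pml4e instead.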

v2: Updated after dynamic page allocation changes.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2)
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 123 ++++++++++++++++++++----------------
 1 file changed, 70 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 489f8db..d3ad517 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -560,6 +560,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_page_directory_pointer_entry *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
 	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
@@ -575,10 +576,10 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 		struct i915_page_table_entry *pt;
 		struct page *page_table;
 
-		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+		if (WARN_ON(!pdp->page_directory[pdpe]))
 			continue;
 
-		pd = ppgtt->pdp.page_directory[pdpe];
+		pd = pdp->page_directory[pdpe];
 
 		if (WARN_ON(!pd->page_tables[pde]))
 			continue;
@@ -620,6 +621,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_page_directory_pointer_entry *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	gen8_gtt_pte_t *pt_vaddr;
 	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
@@ -630,7 +632,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (pt_vaddr == NULL) {
-			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
+			struct i915_page_directory_entry *pd = pdp->page_directory[pdpe];
 			struct i915_page_table_entry *pt = pd->page_tables[pde];
 			struct page *page_table = pt->page;
 
@@ -708,16 +710,17 @@ static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct d
 static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 {
 	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
+	struct i915_page_directory_pointer_entry *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	int i, j;
 
-	for_each_set_bit(i, ppgtt->pdp.used_pdpes,
+	for_each_set_bit(i, pdp->used_pdpes,
 			I915_PDPES_PER_PDP(ppgtt->base.dev)) {
 		struct i915_page_directory_entry *pd;
 
-		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+		if (WARN_ON(!pdp->page_directory[i]))
 			continue;
 
-		pd = ppgtt->pdp.page_directory[i];
+		pd = pdp->page_directory[i];
 		if (!pd->daddr)
 			pci_unmap_page(hwdev, pd->daddr, PAGE_SIZE,
 					PCI_DMA_BIDIRECTIONAL);
@@ -743,15 +746,21 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
 
-	for_each_set_bit(i, ppgtt->pdp.used_pdpes,
-				I915_PDPES_PER_PDP(ppgtt->base.dev)) {
-		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
-			continue;
+	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+		for_each_set_bit(i, ppgtt->pdp.used_pdpes,
+				 I915_PDPES_PER_PDP(ppgtt->base.dev)) {
+			if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+				continue;
 
-		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
-		unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+			gen8_free_page_tables(ppgtt->pdp.page_directory[i],
+					      ppgtt->base.dev);
+			unmap_and_free_pd(ppgtt->pdp.page_directory[i],
+					  ppgtt->base.dev);
+		}
+		unmap_and_free_pdp(&ppgtt->pdp, ppgtt->base.dev);
+	} else {
+		BUG(); /* to be implemented later */
 	}
-	unmap_and_free_pdp(&ppgtt->pdp, ppgtt->base.dev);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -765,7 +774,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 
 /**
  * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
- * @ppgtt:	Master ppgtt structure.
+ * @vm:		Master vm structure.
  * @pd:		Page directory for this address range.
  * @start:	Starting virtual address to begin allocations.
  * @length	Size of the allocations.
@@ -781,12 +790,13 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
 				     struct i915_page_directory_entry *pd,
 				     uint64_t start,
 				     uint64_t length,
 				     unsigned long *new_pts)
 {
+	struct drm_device *dev = vm->dev;
 	struct i915_page_table_entry *pt;
 	uint64_t temp;
 	uint32_t pde;
@@ -799,7 +809,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
 			continue;
 		}
 
-		pt = alloc_pt_single(ppgtt->base.dev);
+		pt = alloc_pt_single(dev);
 		if (IS_ERR(pt))
 			goto unwind_out;
 
@@ -811,14 +821,14 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	for_each_set_bit(pde, new_pts, GEN8_PDES_PER_PAGE)
-		unmap_and_free_pt(pd->page_tables[pde], ppgtt->base.dev);
+		unmap_and_free_pt(pd->page_tables[pde], dev);
 
 	return -ENOMEM;
 }
 
 /**
  * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
- * @ppgtt:	Master ppgtt structure.
+ * @vm:		Master vm structure.
  * @pdp:	Page directory pointer for this address range.
  * @start:	Starting virtual address to begin allocations.
  * @length	Size of the allocations.
@@ -839,16 +849,17 @@ unwind_out:
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
 				     struct i915_page_directory_pointer_entry *pdp,
 				     uint64_t start,
 				     uint64_t length,
 				     unsigned long *new_pds)
 {
+	struct drm_device *dev = vm->dev;
 	struct i915_page_directory_entry *pd;
 	uint64_t temp;
 	uint32_t pdpe;
-	size_t pdpes =  I915_PDPES_PER_PDP(ppgtt->base.dev);
+	size_t pdpes =  I915_PDPES_PER_PDP(vm->dev);
 
 	BUG_ON(!bitmap_empty(new_pds, pdpes));
 
@@ -859,7 +870,7 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
 		if (pd)
 			continue;
 
-		pd = alloc_pd_single(ppgtt->base.dev);
+		pd = alloc_pd_single(dev);
 		if (IS_ERR(pd))
 			goto unwind_out;
 
@@ -871,7 +882,7 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	for_each_set_bit(pdpe, new_pds, pdpes)
-		unmap_and_free_pd(pdp->page_directory[pdpe], ppgtt->base.dev);
+		unmap_and_free_pd(pdp->page_directory[pdpe], dev);
 
 	return -ENOMEM;
 }
@@ -926,13 +937,13 @@ err_out:
 	return -ENOMEM;
 }
 
-static int gen8_alloc_va_range(struct i915_address_space *vm,
-			       uint64_t start,
-			       uint64_t length)
+static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
+				    struct i915_page_directory_pointer_entry *pdp,
+				    uint64_t start,
+				    uint64_t length)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
 	unsigned long *new_page_dirs, **new_page_tables;
+	struct drm_device *dev = vm->dev;
 	struct i915_page_directory_entry *pd;
 	const uint64_t orig_start = start;
 	const uint64_t orig_length = length;
@@ -961,17 +972,15 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 		return ret;
 
 	/* Do the allocations first so we can easily bail out */
-	ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length,
-					new_page_dirs);
+	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length, new_page_dirs);
 	if (ret) {
 		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
 		return ret;
 	}
 
-	/* For every page directory referenced, allocate page tables */
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		bitmap_zero(new_page_tables[pdpe], GEN8_PDES_PER_PAGE);
-		ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
+		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
 						new_page_tables[pdpe]);
 		if (ret)
 			goto err_out;
@@ -980,10 +989,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 	start = orig_start;
 	length = orig_length;
 
-	/* Allocations have completed successfully, so set the bitmaps, and do
-	 * the mappings. */
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-		gen8_ppgtt_pde_t *const page_directory = kmap_atomic(pd->page);
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		struct i915_page_table_entry *pt;
 		uint64_t pd_len = gen8_clamp_pd(start, length);
 		uint64_t pd_start = start;
@@ -1005,20 +1011,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 
 			/* Our pde is now pointing to the pagetable, pt */
 			set_bit(pde, pd->used_pdes);
-
-			/* Map the PDE to the page table */
-			__gen8_do_map_pt(page_directory + pde, pt, vm->dev);
-
-			/* NB: We haven't yet mapped ptes to pages. At this
-			 * point we're still relying on insert_entries() */
 		}
 
-		if (!HAS_LLC(vm->dev))
-			drm_clflush_virt_range(page_directory, PAGE_SIZE);
-
-		kunmap_atomic(page_directory);
-
-		set_bit(pdpe, ppgtt->pdp.used_pdpes);
+		set_bit(pdpe, pdp->used_pdpes);
+		gen8_map_pagetable_range(pd, start, length, dev);
 	}
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
@@ -1027,16 +1023,36 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 err_out:
 	while (pdpe--) {
 		for_each_set_bit(temp, new_page_tables[pdpe], GEN8_PDES_PER_PAGE)
-			unmap_and_free_pt(pd->page_tables[temp], vm->dev);
+			unmap_and_free_pt(pd->page_tables[temp], dev);
 	}
 
 	for_each_set_bit(pdpe, new_page_dirs, pdpes)
-		unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
+		unmap_and_free_pd(pdp->page_directory[pdpe], dev);
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
 	return ret;
 }
 
+static int __noreturn gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
+					       struct i915_pml4 *pml4,
+					       uint64_t start,
+					       uint64_t length)
+{
+	BUG(); /* to be implemented later */
+}
+
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+			       uint64_t start, uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+
+	if (!USES_FULL_48BIT_PPGTT(vm->dev))
+		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+	else
+		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+}
+
 static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
 {
 	unmap_and_free_pt(ppgtt->scratch_pd, ppgtt->base.dev);
@@ -1079,12 +1095,13 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_page_directory_pointer_entry *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	struct i915_page_directory_entry *pd;
 	uint64_t temp, start = 0, size = dev_priv->gtt.base.total;
 	uint32_t pdpe;
 	int ret;
 
-	ret = gen8_ppgtt_init_common(ppgtt, dev_priv->gtt.base.total);
+	ret = gen8_ppgtt_init_common(ppgtt, size);
 	if (ret)
 		return ret;
 
@@ -1097,8 +1114,8 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 		return ret;
 	}
 
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, size, temp, pdpe)
-		gen8_map_pagetable_range(pd, start, size, ppgtt->base.dev);
+	gen8_for_each_pdpe(pd, pdp, start, size, temp, pdpe)
+		gen8_map_pagetable_range(pd, start, size, dev);
 
 	ppgtt->base.allocate_va_range = NULL;
 	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
-- 
2.1.1
