From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Cc: miku@iki.fi
Subject: [PATCH 08/21] drm/i915/gtt: Introduce struct i915_page_dma
Date: Fri, 22 May 2015 20:05:01 +0300
Message-ID: <1432314314-23530-9-git-send-email-mika.kuoppala@intel.com>
In-Reply-To: <1432314314-23530-1-git-send-email-mika.kuoppala@intel.com>

All of our paging structures have a struct page and a dma address
for that page.

Add a struct for the page/dma address pair and use it to make the
setup and teardown of the different paging structures identical.

For legacy gens, also include the page directory offset in the
struct, and rename it to make clear that it is an offset into the
ggtt.
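
Concretely, the new struct and the shared setup/teardown helpers look
as follows (condensed from the diff below; the WARN_ON check in the
cleanup path is omitted here):

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;	/* dma mapping of the page */
		uint32_t ggtt_offset;	/* pd offset into the ggtt, legacy gens */
	};
};

static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
	p->page = alloc_page(GFP_KERNEL);
	if (!p->page)
		return -ENOMEM;

	p->daddr = dma_map_page(&dev->pdev->dev, p->page, 0, 4096,
				PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}

	return 0;
}

static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

Page tables and page directories then embed this struct as their
'base' member, so alloc_pt()/alloc_pd() and the corresponding free
paths all go through the same two helpers.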

Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c |   2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c | 120 ++++++++++++++----------------------
 drivers/gpu/drm/i915/i915_gem_gtt.h |  21 ++++---
 3 files changed, 60 insertions(+), 83 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index c7a840b..22770aa 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2245,7 +2245,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		seq_puts(m, "aliasing PPGTT:\n");
-		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
+		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
 
 		ppgtt->debug_dump(ppgtt, m);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 18989f7..1e1a7a1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -301,52 +301,39 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-#define i915_dma_unmap_single(px, dev) \
-	__i915_dma_unmap_single((px)->daddr, dev)
-
-static void __i915_dma_unmap_single(dma_addr_t daddr,
-				    struct drm_device *dev)
+static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
 	struct device *device = &dev->pdev->dev;
 
-	dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
-}
-
-/**
- * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
- * @px:	Page table/dir/etc to get a DMA map for
- * @dev:	drm device
- *
- * Page table allocations are unified across all gens. They always require a
- * single 4k allocation, as well as a DMA mapping. If we keep the structs
- * symmetric here, the simple macro covers us for every page table type.
- *
- * Return: 0 if success.
- */
-#define i915_dma_map_single(px, dev) \
-	i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+	p->page = alloc_page(GFP_KERNEL);
+	if (!p->page)
+		return -ENOMEM;
 
-static int i915_dma_map_page_single(struct page *page,
-				    struct drm_device *dev,
-				    dma_addr_t *daddr)
-{
-	struct device *device = &dev->pdev->dev;
+	p->daddr = dma_map_page(device,
+				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
 
-	*daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(device, *daddr))
-		return -ENOMEM;
+	if (dma_mapping_error(device, p->daddr)) {
+		__free_page(p->page);
+		return -EINVAL;
+	}
 
 	return 0;
 }
 
-static void unmap_and_free_pt(struct i915_page_table *pt,
-			       struct drm_device *dev)
+static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
-	if (WARN_ON(!pt->page))
+	if (WARN_ON(!p->page))
 		return;
 
-	i915_dma_unmap_single(pt, dev);
-	__free_page(pt->page);
+	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	__free_page(p->page);
+	memset(p, 0, sizeof(*p));
+}
+
+static void unmap_and_free_pt(struct i915_page_table *pt,
+			       struct drm_device *dev)
+{
+	cleanup_page_dma(dev, &pt->base);
 	kfree(pt->used_ptes);
 	kfree(pt);
 }
@@ -357,7 +344,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
 	gen8_pte_t *pt_vaddr, scratch_pte;
 	int i;
 
-	pt_vaddr = kmap_atomic(pt->page);
+	pt_vaddr = kmap_atomic(pt->base.page);
 	scratch_pte = gen8_pte_encode(vm->scratch.addr,
 				      I915_CACHE_LLC, true);
 
@@ -386,19 +373,13 @@ static struct i915_page_table *alloc_pt(struct drm_device *dev)
 	if (!pt->used_ptes)
 		goto fail_bitmap;
 
-	pt->page = alloc_page(GFP_KERNEL);
-	if (!pt->page)
-		goto fail_page;
-
-	ret = i915_dma_map_single(pt, dev);
+	ret = setup_page_dma(dev, &pt->base);
 	if (ret)
-		goto fail_dma;
+		goto fail_page_m;
 
 	return pt;
 
-fail_dma:
-	__free_page(pt->page);
-fail_page:
+fail_page_m:
 	kfree(pt->used_ptes);
 fail_bitmap:
 	kfree(pt);
@@ -409,9 +390,8 @@ fail_bitmap:
 static void unmap_and_free_pd(struct i915_page_directory *pd,
 			      struct drm_device *dev)
 {
-	if (pd->page) {
-		i915_dma_unmap_single(pd, dev);
-		__free_page(pd->page);
+	if (pd->base.page) {
+		cleanup_page_dma(dev, &pd->base);
 		kfree(pd->used_pdes);
 		kfree(pd);
 	}
@@ -431,18 +411,12 @@ static struct i915_page_directory *alloc_pd(struct drm_device *dev)
 	if (!pd->used_pdes)
 		goto free_pd;
 
-	pd->page = alloc_page(GFP_KERNEL);
-	if (!pd->page)
-		goto free_bitmap;
-
-	ret = i915_dma_map_single(pd, dev);
+	ret = setup_page_dma(dev, &pd->base);
 	if (ret)
-		goto free_page;
+		goto free_bitmap;
 
 	return pd;
 
-free_page:
-	__free_page(pd->page);
 free_bitmap:
 	kfree(pd->used_pdes);
 free_pd:
@@ -523,10 +497,10 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 
 		pt = pd->page_table[pde];
 
-		if (WARN_ON(!pt->page))
+		if (WARN_ON(!pt->base.page))
 			continue;
 
-		page_table = pt->page;
+		page_table = pt->base.page;
 
 		last_pte = pte + num_entries;
 		if (last_pte > GEN8_PTES)
@@ -573,7 +547,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 		if (pt_vaddr == NULL) {
 			struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
 			struct i915_page_table *pt = pd->page_table[pde];
-			struct page *page_table = pt->page;
+			struct page *page_table = pt->base.page;
 
 			pt_vaddr = kmap_atomic(page_table);
 		}
@@ -605,7 +579,7 @@ static void __gen8_do_map_pt(gen8_pde_t * const pde,
 			     struct drm_device *dev)
 {
 	gen8_pde_t entry =
-		gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
+		gen8_pde_encode(dev, pt->base.daddr, I915_CACHE_LLC);
 	*pde = entry;
 }
 
@@ -618,7 +592,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
 	struct i915_page_table *pt;
 	int i;
 
-	page_directory = kmap_atomic(pd->page);
+	page_directory = kmap_atomic(pd->base.page);
 	pt = ppgtt->scratch_pt;
 	for (i = 0; i < I915_PDES; i++)
 		/* Map the PDE to the page table */
@@ -633,7 +607,7 @@ static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_dev
 {
 	int i;
 
-	if (!pd->page)
+	if (!pd->base.page)
 		return;
 
 	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
@@ -883,7 +857,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 	/* Allocations have completed successfully, so set the bitmaps, and do
 	 * the mappings. */
 	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-		gen8_pde_t *const page_directory = kmap_atomic(pd->page);
+		gen8_pde_t *const page_directory = kmap_atomic(pd->base.page);
 		struct i915_page_table *pt;
 		uint64_t pd_len = gen8_clamp_pd(start, length);
 		uint64_t pd_start = start;
@@ -1037,7 +1011,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
 		u32 expected;
 		gen6_pte_t *pt_vaddr;
-		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
+		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->base.daddr;
 		pd_entry = readl(ppgtt->pd_addr + pde);
 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
@@ -1048,7 +1022,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 				   expected);
 		seq_printf(m, "\tPDE: %x\n", pd_entry);
 
-		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->base.page);
 		for (pte = 0; pte < GEN6_PTES; pte+=4) {
 			unsigned long va =
 				(pde * PAGE_SIZE * GEN6_PTES) +
@@ -1083,7 +1057,7 @@ static void gen6_write_pde(struct i915_page_directory *pd,
 		container_of(pd, struct i915_hw_ppgtt, pd);
 	u32 pd_entry;
 
-	pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+	pd_entry = GEN6_PDE_ADDR_ENCODE(pt->base.daddr);
 	pd_entry |= GEN6_PDE_VALID;
 
 	writel(pd_entry, ppgtt->pd_addr + pde);
@@ -1108,9 +1082,9 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-	BUG_ON(ppgtt->pd.pd_offset & 0x3f);
+	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
 
-	return (ppgtt->pd.pd_offset / 64) << 16;
+	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -1273,7 +1247,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 		if (last_pte > GEN6_PTES)
 			last_pte = GEN6_PTES;
 
-		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->base.page);
 
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
@@ -1302,7 +1276,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->base.page);
 
 		pt_vaddr[act_pte] =
 			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -1330,7 +1304,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
 	scratch_pte = vm->pte_encode(vm->scratch.addr,
 			I915_CACHE_LLC, true, 0);
 
-	pt_vaddr = kmap_atomic(pt->page);
+	pt_vaddr = kmap_atomic(pt->base.page);
 
 	for (i = 0; i < GEN6_PTES; i++)
 		pt_vaddr[i] = scratch_pte;
@@ -1546,11 +1520,11 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
-	ppgtt->pd.pd_offset =
+	ppgtt->pd.base.ggtt_offset =
 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
 
 	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
 
 	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
 
@@ -1561,7 +1535,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 			 ppgtt->node.start / PAGE_SIZE);
 
 	DRM_DEBUG("Adding PPGTT at offset %x\n",
-		  ppgtt->pd.pd_offset << 10);
+		  ppgtt->pd.base.ggtt_offset << 10);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index da67542..666decc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -205,19 +205,22 @@ struct i915_vma {
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
-struct i915_page_table {
+struct i915_page_dma {
 	struct page *page;
-	dma_addr_t daddr;
+	union {
+		dma_addr_t daddr;
+		uint32_t ggtt_offset;
+	};
+};
+
+struct i915_page_table {
+	struct i915_page_dma base;
 
 	unsigned long *used_ptes;
 };
 
 struct i915_page_directory {
-	struct page *page; /* NULL for GEN6-GEN7 */
-	union {
-		uint32_t pd_offset;
-		dma_addr_t daddr;
-	};
+	struct i915_page_dma base;
 
 	unsigned long *used_pdes;
 	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
@@ -472,8 +475,8 @@ static inline dma_addr_t
 i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
 {
 	return test_bit(n, ppgtt->pdp.used_pdpes) ?
-		ppgtt->pdp.page_directory[n]->daddr :
-		ppgtt->scratch_pd->daddr;
+		ppgtt->pdp.page_directory[n]->base.daddr :
+		ppgtt->scratch_pd->base.daddr;
 }
 
 int i915_gem_gtt_init(struct drm_device *dev);
-- 
1.9.1

