From: Ben Widawsky
Subject: [PATCH 20/26] drm/i915: Always dma map page directory allocations
Date: Mon, 17 Mar 2014 22:48:52 -0700
Message-ID: <1395121738-29126-21-git-send-email-benjamin.widawsky@intel.com>
In-Reply-To: <1395121738-29126-1-git-send-email-benjamin.widawsky@intel.com>
References: <1395121738-29126-1-git-send-email-benjamin.widawsky@intel.com>
To: Intel GFX
List-Id: intel-gfx@lists.freedesktop.org

Similar to an earlier patch in the series, we can always map and unmap
page directories when we do their allocation and teardown. Page directory
pages only exist on gen8+, so this should only affect behavior on those
platforms.

Signed-off-by: Ben Widawsky
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 79 +++++++++----------------------------------
 1 file changed, 19 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index abef33dd..ad2f2c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -283,21 +283,23 @@ err_out:
 	return ret;
 }
 
-static void __free_pd_single(struct i915_pagedir *pd)
+static void __free_pd_single(struct i915_pagedir *pd, struct drm_device *dev)
 {
+	i915_dma_unmap_single(pd, dev);
 	__free_page(pd->page);
 	kfree(pd);
 }
 
-#define free_pd_single(pd) do { \
+#define free_pd_single(pd, dev) do { \
 	if ((pd)->page) { \
-		__free_pd_single(pd); \
+		__free_pd_single(pd, dev); \
 	} \
 } while (0)
 
-static struct i915_pagedir *alloc_pd_single(void)
+static struct i915_pagedir *alloc_pd_single(struct drm_device *dev)
 {
 	struct i915_pagedir *pd;
+	int ret;
 
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
@@ -309,6 +311,13 @@ static struct i915_pagedir *alloc_pd_single(void)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	ret = i915_dma_map_px_single(pd, dev);
+	if (ret) {
+		__free_page(pd->page);
+		kfree(pd);
+		return ERR_PTR(ret);
+	}
+
 	return pd;
 }
 
@@ -466,30 +475,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 		gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
-		free_pd_single(ppgtt->pdp.pagedir[i]);
-	}
-}
-
-static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	int i, j;
-
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		/* TODO: In the future we'll support sparse mappings, so this
-		 * will have to change.
-		 */
-		if (!ppgtt->pdp.pagedir[i]->daddr)
-			continue;
-
-		i915_dma_unmap_single(ppgtt->pdp.pagedir[i], dev);
-
-		for (j = 0; j < I915_PDES_PER_PD; j++) {
-			struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
-			struct i915_pagetab *pt = pd->page_tables[j];
-			dma_addr_t addr = pt->daddr;
-			if (addr)
-				i915_dma_unmap_single(pt, dev);
-		}
+		free_pd_single(ppgtt->pdp.pagedir[i],
+			       ppgtt->base.dev);
 	}
 }
 
@@ -501,7 +487,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	list_del(&vm->global_link);
 	drm_mm_takedown(&vm->mm);
 
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
 	gen8_ppgtt_free(ppgtt);
 }
 
@@ -531,7 +516,7 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 	int i;
 
 	for (i = 0; i < max_pdp; i++) {
-		ppgtt->pdp.pagedir[i] = alloc_pd_single();
+		ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
 		if (IS_ERR(ppgtt->pdp.pagedir[i]))
 			goto unwind_out;
 	}
@@ -543,7 +528,8 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	while (i--)
-		free_pd_single(ppgtt->pdp.pagedir[i]);
+		free_pd_single(ppgtt->pdp.pagedir[i],
+			       ppgtt->base.dev);
 
 	return -ENOMEM;
 }
@@ -571,19 +557,6 @@ err_out:
 	return ret;
 }
 
-static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
-					     const int pdpe)
-{
-	int ret;
-
-	ret = i915_dma_map_px_single(ppgtt->pdp.pagedir[pdpe],
-				     ppgtt->base.dev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 /**
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -609,16 +582,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 		return ret;
 
 	/*
-	 * 2. Create DMA mappings for the page directories and page tables.
-	 */
-	for (i = 0; i < max_pdp; i++) {
-		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
-		if (ret)
-			goto bail;
-	}
-
-	/*
-	 * 3. Map all the page directory entires to point to the page tables
+	 * 2. Map all the page directory entires to point to the page tables
 	 * we've allocated.
 	 *
 	 * For now, the PPGTT helper functions all require that the PDEs are
@@ -652,11 +616,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 			 ppgtt->num_pd_entries,
 			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
 	return 0;
-
-bail:
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
-	gen8_ppgtt_free(ppgtt);
-	return ret;
 }
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -1034,7 +993,7 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 	for (i = 0; i < ppgtt->num_pd_entries; i++)
 		free_pt_single(ppgtt->pd.page_tables[i], ppgtt->base.dev);
 
-	free_pd_single(&ppgtt->pd);
+	free_pd_single(&ppgtt->pd, ppgtt->base.dev);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
-- 
1.9.0
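
Not part of the patch: for anyone skimming the series, a minimal sketch of the
page directory lifecycle this change leaves behind. It only uses names that
appear in the diff above; the wrapper name example_alloc_and_free_pd and its
caller-supplied dev are made up for illustration.

	static int example_alloc_and_free_pd(struct drm_device *dev)
	{
		struct i915_pagedir *pd;

		/* Allocation now pairs the backing page with its DMA mapping:
		 * kzalloc, alloc_page and i915_dma_map_px_single all happen
		 * inside alloc_pd_single(), so we either get back a mapped
		 * directory or an ERR_PTR.
		 */
		pd = alloc_pd_single(dev);
		if (IS_ERR(pd))
			return PTR_ERR(pd);

		/* ... point PDP/PDE entries at the directory via pd->daddr ... */

		/* Teardown pairs the unmap with the free: free_pd_single()
		 * does i915_dma_unmap_single, __free_page and kfree.
		 */
		free_pd_single(pd, dev);
		return 0;
	}

Because the mapping now lives and dies with the allocation, the separate
gen8_ppgtt_dma_unmap_pages() pass and the "bail" unwind path in
gen8_ppgtt_init() become unnecessary, which is what the hunks above remove.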