From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: "Michał Winiarski" <michal.winiarski@intel.com>,
	intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 3/3] drm/i915: Remove unused "valid" parameter from pte_encode
Date: Wed, 05 Oct 2016 11:49:12 +0300
Message-ID: <87twcr2nav.fsf@gaia.fi.intel.com>
In-Reply-To: <1475589267-12440-3-git-send-email-michal.winiarski@intel.com>

Michał Winiarski <michal.winiarski@intel.com> writes:

> We're no longer using any invalid PTEs - everything that's not used
> should be pointing to scratch.
>

I was about to suggest doing this before patch 2/3, to make that one simpler.

For historical context: https://patchwork.kernel.org/patch/9135411/

I would amend the commit message a little. 'We're no longer using'
suggests that we only recently changed, by earlier commits, how we map
into the invalid PTEs.

I think it has always been the case that we map unused space to valid == scratch.
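
(To make the valid == scratch point concrete, here is a rough sketch of
what the gen8 helper boils down to after this patch - abridged from the
diff below, with the caching-level details elided:

        static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
                                          enum i915_cache_level level)
        {
                /* Every PTE we write is a present, valid entry... */
                gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;

                pte |= addr;
                /* ...caching bits derived from 'level' are elided here... */
                return pte;
        }

        /* ...and clearing a range just points it at the scratch page: */
        scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
)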

Let's try to distill the reasoning from Chris Wilson's reply
for future readers, something like:

'We never used any invalid PTEs; those were put in place only for the
possibility of doing GPU faults. However, our batchbuffers are not
restricted in length, so everything needs to point to something, and
thus anything out of bounds points to scratch. Remove the valid flag
as it is always true.'
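
(For reference, the mechanical effect at the call sites - as the hunks
below show - is that every caller, which was already passing true,
simply loses the trailing argument, roughly:

        -       ggtt->base.clear_range(&ggtt->base, node.start, node.size, true);
        +       ggtt->base.clear_range(&ggtt->base, node.start, node.size);
)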

With that message conveyed, perhaps with better wording/English, you can
add my
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>

-Mika

> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Michel Thierry <michel.thierry@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem.c            |   6 +-
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   3 +-
>  drivers/gpu/drm/i915/i915_gem_gtt.c        | 154 +++++++++++------------------
>  drivers/gpu/drm/i915/i915_gem_gtt.h        |   5 +-
>  4 files changed, 65 insertions(+), 103 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 1418c1c..b344340 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -919,8 +919,7 @@ out_unpin:
>  	if (node.allocated) {
>  		wmb();
>  		ggtt->base.clear_range(&ggtt->base,
> -				       node.start, node.size,
> -				       true);
> +				       node.start, node.size);
>  		i915_gem_object_unpin_pages(obj);
>  		remove_mappable_node(&node);
>  	} else {
> @@ -1228,8 +1227,7 @@ out_unpin:
>  	if (node.allocated) {
>  		wmb();
>  		ggtt->base.clear_range(&ggtt->base,
> -				       node.start, node.size,
> -				       true);
> +				       node.start, node.size);
>  		i915_gem_object_unpin_pages(obj);
>  		remove_mappable_node(&node);
>  	} else {
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 33c8522..54b091b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -370,8 +370,7 @@ static void reloc_cache_fini(struct reloc_cache *cache)
>  
>  			ggtt->base.clear_range(&ggtt->base,
>  					       cache->node.start,
> -					       cache->node.size,
> -					       true);
> +					       cache->node.size);
>  			drm_mm_remove_node(&cache->node);
>  		} else {
>  			i915_vma_unpin((struct i915_vma *)cache->node.mm);
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 281e349..fa78bb1 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -191,15 +191,13 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
>  {
>  	vma->vm->clear_range(vma->vm,
>  			     vma->node.start,
> -			     vma->size,
> -			     true);
> +			     vma->size);
>  }
>  
>  static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
> -				  enum i915_cache_level level,
> -				  bool valid)
> +				  enum i915_cache_level level)
>  {
> -	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
> +	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
>  	pte |= addr;
>  
>  	switch (level) {
> @@ -218,10 +216,9 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
>  }
>  
>  static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
> -				  const enum i915_cache_level level,
> -				  bool valid)
> +				  const enum i915_cache_level level)
>  {
> -	gen8_pde_t pde = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
> +	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
>  	pde |= addr;
>  	if (level != I915_CACHE_NONE)
>  		pde |= PPAT_CACHED_PDE_INDEX;
> @@ -235,9 +232,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
>  
>  static gen6_pte_t snb_pte_encode(dma_addr_t addr,
>  				 enum i915_cache_level level,
> -				 bool valid, u32 unused)
> +				 u32 unused)
>  {
> -	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
> +	gen6_pte_t pte = GEN6_PTE_VALID;
>  	pte |= GEN6_PTE_ADDR_ENCODE(addr);
>  
>  	switch (level) {
> @@ -257,9 +254,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
>  
>  static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
>  				 enum i915_cache_level level,
> -				 bool valid, u32 unused)
> +				 u32 unused)
>  {
> -	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
> +	gen6_pte_t pte = GEN6_PTE_VALID;
>  	pte |= GEN6_PTE_ADDR_ENCODE(addr);
>  
>  	switch (level) {
> @@ -281,9 +278,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
>  
>  static gen6_pte_t byt_pte_encode(dma_addr_t addr,
>  				 enum i915_cache_level level,
> -				 bool valid, u32 flags)
> +				 u32 flags)
>  {
> -	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
> +	gen6_pte_t pte = GEN6_PTE_VALID;
>  	pte |= GEN6_PTE_ADDR_ENCODE(addr);
>  
>  	if (!(flags & PTE_READ_ONLY))
> @@ -297,9 +294,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
>  
>  static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
>  				 enum i915_cache_level level,
> -				 bool valid, u32 unused)
> +				 u32 unused)
>  {
> -	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
> +	gen6_pte_t pte = GEN6_PTE_VALID;
>  	pte |= HSW_PTE_ADDR_ENCODE(addr);
>  
>  	if (level != I915_CACHE_NONE)
> @@ -310,9 +307,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
>  
>  static gen6_pte_t iris_pte_encode(dma_addr_t addr,
>  				  enum i915_cache_level level,
> -				  bool valid, u32 unused)
> +				  u32 unused)
>  {
> -	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
> +	gen6_pte_t pte = GEN6_PTE_VALID;
>  	pte |= HSW_PTE_ADDR_ENCODE(addr);
>  
>  	switch (level) {
> @@ -473,7 +470,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
>  	gen8_pte_t scratch_pte;
>  
>  	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
> -				      I915_CACHE_LLC, true);
> +				      I915_CACHE_LLC);
>  
>  	fill_px(vm->dev, pt, scratch_pte);
>  }
> @@ -486,7 +483,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
>  	WARN_ON(vm->scratch_page.daddr == 0);
>  
>  	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
> -				     I915_CACHE_LLC, true, 0);
> +				     I915_CACHE_LLC, 0);
>  
>  	fill32_px(vm->dev, pt, scratch_pte);
>  }
> @@ -533,8 +530,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
>  {
>  	gen8_pde_t scratch_pde;
>  
> -	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC,
> -				      true);
> +	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
>  
>  	fill_px(vm->dev, pd, scratch_pde);
>  }
> @@ -615,8 +611,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
>  {
>  	gen8_ppgtt_pdpe_t scratch_pdpe;
>  
> -	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC,
> -					true);
> +	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
>  
>  	fill_px(vm->dev, pdp, scratch_pdpe);
>  }
> @@ -627,7 +622,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
>  	gen8_ppgtt_pml4e_t scratch_pml4e;
>  
>  	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
> -					  I915_CACHE_LLC, true);
> +					  I915_CACHE_LLC);
>  
>  	fill_px(vm->dev, pml4, scratch_pml4e);
>  }
> @@ -644,8 +639,7 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
>  		return;
>  
>  	page_directorypo = kmap_px(pdp);
> -	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC,
> -						   true);
> +	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
>  	kunmap_px(ppgtt, page_directorypo);
>  }
>  
> @@ -658,7 +652,7 @@ gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
>  	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
>  
>  	WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
> -	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC, true);
> +	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
>  	kunmap_px(ppgtt, pagemap);
>  }
>  
> @@ -713,8 +707,7 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
>  				struct i915_page_table *pt,
>  				uint64_t start,
> -				uint64_t length,
> -				bool use_scratch)
> +				uint64_t length)
>  {
>  	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
>  
> @@ -725,7 +718,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
>  
>  	gen8_pte_t *pt_vaddr;
>  	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
> -						 I915_CACHE_LLC, use_scratch);
> +						 I915_CACHE_LLC);
>  
>  	if (WARN_ON(!px_page(pt)))
>  		return false;
> @@ -750,8 +743,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
>  static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
>  				struct i915_page_directory *pd,
>  				uint64_t start,
> -				uint64_t length,
> -				bool use_scratch)
> +				uint64_t length)
>  {
>  	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
>  
> @@ -760,15 +752,14 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
>  
>  	gen8_pde_t *pde_vaddr;
>  	gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
> -						 I915_CACHE_LLC, use_scratch);
> +						 I915_CACHE_LLC);
>  	bool reduce;
>  
>  	gen8_for_each_pde(pt, pd, start, length, pde) {
>  		if (WARN_ON(!pd->page_table[pde]))
>  			break;
>  
> -		reduce = gen8_ppgtt_clear_pt(vm, pt, start, length,
> -					     use_scratch);
> +		reduce = gen8_ppgtt_clear_pt(vm, pt, start, length);
>  
>  		if (reduce) {
>  			__clear_bit(pde, pd->used_pdes);
> @@ -789,8 +780,7 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
>  static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
>  				 struct i915_page_directory_pointer *pdp,
>  				 uint64_t start,
> -				 uint64_t length,
> -				 bool use_scratch)
> +				 uint64_t length)
>  {
>  	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
>  
> @@ -799,16 +789,14 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
>  
>  	gen8_ppgtt_pdpe_t *pdpe_vaddr;
>  	gen8_ppgtt_pdpe_t scratch_pdpe =
> -		gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC,
> -				 use_scratch);
> +		gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
>  	bool reduce;
>  
>  	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
>  		if (WARN_ON(!pdp->page_directory[pdpe]))
>  			break;
>  
> -		reduce = gen8_ppgtt_clear_pd(vm, pd, start, length,
> -					     use_scratch);
> +		reduce = gen8_ppgtt_clear_pd(vm, pd, start, length);
>  
>  		if (reduce) {
>  			__clear_bit(pdpe, pdp->used_pdpes);
> @@ -829,8 +817,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
>  static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
>  				  struct i915_pml4 *pml4,
>  				  uint64_t start,
> -				  uint64_t length,
> -				  bool use_scratch)
> +				  uint64_t length)
>  {
>  	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
>  
> @@ -839,16 +826,14 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
>  
>  	gen8_ppgtt_pml4e_t *pml4e_vaddr;
>  	gen8_ppgtt_pml4e_t scratch_pml4e =
> -		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC,
> -				  use_scratch);
> +		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
>  	bool reduce;
>  
>  	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
>  		if (WARN_ON(!pml4->pdps[pml4e]))
>  			break;
>  
> -		reduce = gen8_ppgtt_clear_pdp(vm, pdp, start, length,
> -					      use_scratch);
> +		reduce = gen8_ppgtt_clear_pdp(vm, pdp, start, length);
>  
>  		if (reduce) {
>  			__clear_bit(pml4e, pml4->used_pml4es);
> @@ -860,18 +845,14 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
>  }
>  
>  static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
> -				   uint64_t start,
> -				   uint64_t length,
> -				   bool use_scratch)
> +				   uint64_t start, uint64_t length)
>  {
>  	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
>  
>  	if (!USES_FULL_48BIT_PPGTT(vm->dev))
> -		gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length,
> -				     use_scratch);
> +		gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
>  	else
> -		gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length,
> -				      use_scratch);
> +		gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
>  }
>  
>  static void
> @@ -898,7 +879,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
>  
>  		pt_vaddr[pte] =
>  			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
> -					cache_level, true);
> +					cache_level);
>  		if (++pte == GEN8_PTES) {
>  			kunmap_px(ppgtt, pt_vaddr);
>  			pt_vaddr = NULL;
> @@ -1382,8 +1363,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
>  
>  			/* Map the PDE to the page table */
>  			page_directory[pde] = gen8_pde_encode(px_dma(pt),
> -							      I915_CACHE_LLC,
> -							      true);
> +							      I915_CACHE_LLC);
>  			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
>  							gen8_pte_index(start),
>  							gen8_pte_count(start, length),
> @@ -1542,7 +1522,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
>  	uint64_t start = ppgtt->base.start;
>  	uint64_t length = ppgtt->base.total;
>  	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
> -						 I915_CACHE_LLC, true);
> +						 I915_CACHE_LLC);
>  
>  	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
>  		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
> @@ -1659,7 +1639,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
>  	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
>  
>  	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
> -				     I915_CACHE_LLC, true, 0);
> +				     I915_CACHE_LLC, 0);
>  
>  	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
>  		u32 expected;
> @@ -1872,8 +1852,7 @@ static void gen6_ppgtt_enable(struct drm_device *dev)
>  /* PPGTT support for Sandybdrige/Gen6 and later */
>  static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
>  				   uint64_t start,
> -				   uint64_t length,
> -				   bool use_scratch)
> +				   uint64_t length)
>  {
>  	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
>  	gen6_pte_t *pt_vaddr, scratch_pte;
> @@ -1884,7 +1863,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
>  	unsigned last_pte, i;
>  
>  	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
> -				     I915_CACHE_LLC, true, 0);
> +				     I915_CACHE_LLC, 0);
>  
>  	while (num_entries) {
>  		last_pte = first_pte + num_entries;
> @@ -1922,7 +1901,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
>  			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
>  
>  		pt_vaddr[act_pte] =
> -			vm->pte_encode(addr, cache_level, true, flags);
> +			vm->pte_encode(addr, cache_level, flags);
>  
>  		if (++act_pte == GEN6_PTES) {
>  			kunmap_px(ppgtt, pt_vaddr);
> @@ -2376,8 +2355,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
>  
>  	i915_check_and_clear_faults(dev_priv);
>  
> -	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
> -			     true);
> +	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
>  
>  	i915_ggtt_flush(dev_priv);
>  }
> @@ -2411,7 +2389,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
>  
>  	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
>  
> -	gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
> +	gen8_set_pte(pte, gen8_pte_encode(addr, level));
>  
>  	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
>  	POSTING_READ(GFX_FLSH_CNTL_GEN6);
> @@ -2438,7 +2416,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
>  	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
>  
>  	for_each_sgt_dma(addr, sgt_iter, st) {
> -		gtt_entry = gen8_pte_encode(addr, level, true);
> +		gtt_entry = gen8_pte_encode(addr, level);
>  		gen8_set_pte(&gtt_entries[i++], gtt_entry);
>  	}
>  
> @@ -2502,7 +2480,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
>  
>  	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
>  
> -	iowrite32(vm->pte_encode(addr, level, true, flags), pte);
> +	iowrite32(vm->pte_encode(addr, level, flags), pte);
>  
>  	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
>  	POSTING_READ(GFX_FLSH_CNTL_GEN6);
> @@ -2535,7 +2513,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
>  	gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
>  
>  	for_each_sgt_dma(addr, sgt_iter, st) {
> -		gtt_entry = vm->pte_encode(addr, level, true, flags);
> +		gtt_entry = vm->pte_encode(addr, level, flags);
>  		iowrite32(gtt_entry, &gtt_entries[i++]);
>  	}
>  
> @@ -2559,16 +2537,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
>  }
>  
>  static void nop_clear_range(struct i915_address_space *vm,
> -			    uint64_t start,
> -			    uint64_t length,
> -			    bool use_scratch)
> +			    uint64_t start, uint64_t length)
>  {
>  }
>  
>  static void gen8_ggtt_clear_range(struct i915_address_space *vm,
> -				  uint64_t start,
> -				  uint64_t length,
> -				  bool use_scratch)
> +				  uint64_t start, uint64_t length)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(vm->dev);
>  	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
> @@ -2588,8 +2562,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
>  		num_entries = max_entries;
>  
>  	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
> -				      I915_CACHE_LLC,
> -				      use_scratch);
> +				      I915_CACHE_LLC);
>  	for (i = 0; i < num_entries; i++)
>  		gen8_set_pte(&gtt_base[i], scratch_pte);
>  	readl(gtt_base);
> @@ -2599,8 +2572,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
>  
>  static void gen6_ggtt_clear_range(struct i915_address_space *vm,
>  				  uint64_t start,
> -				  uint64_t length,
> -				  bool use_scratch)
> +				  uint64_t length)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(vm->dev);
>  	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
> @@ -2620,7 +2592,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
>  		num_entries = max_entries;
>  
>  	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
> -				     I915_CACHE_LLC, use_scratch, 0);
> +				     I915_CACHE_LLC, 0);
>  
>  	for (i = 0; i < num_entries; i++)
>  		iowrite32(scratch_pte, &gtt_base[i]);
> @@ -2667,8 +2639,7 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
>  
>  static void i915_ggtt_clear_range(struct i915_address_space *vm,
>  				  uint64_t start,
> -				  uint64_t length,
> -				  bool unused)
> +				  uint64_t length)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(vm->dev);
>  	unsigned first_entry = start >> PAGE_SHIFT;
> @@ -2752,13 +2723,11 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
>  
>  	if (vma->flags & I915_VMA_GLOBAL_BIND)
>  		vma->vm->clear_range(vma->vm,
> -				     vma->node.start, size,
> -				     true);
> +				     vma->node.start, size);
>  
>  	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
>  		appgtt->base.clear_range(&appgtt->base,
> -					 vma->node.start, size,
> -					 true);
> +					 vma->node.start, size);
>  }
>  
>  void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
> @@ -2819,13 +2788,12 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
>  		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
>  			      hole_start, hole_end);
>  		ggtt->base.clear_range(&ggtt->base, hole_start,
> -				     hole_end - hole_start, true);
> +				       hole_end - hole_start);
>  	}
>  
>  	/* And finally clear the reserved guard page */
>  	ggtt->base.clear_range(&ggtt->base,
> -			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
> -			       true);
> +			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
>  
>  	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
>  		struct i915_hw_ppgtt *ppgtt;
> @@ -2851,8 +2819,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
>  
>  		ppgtt->base.clear_range(&ppgtt->base,
>  					ppgtt->base.start,
> -					ppgtt->base.total,
> -					true);
> +					ppgtt->base.total);
>  
>  		dev_priv->mm.aliasing_ppgtt = ppgtt;
>  		WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
> @@ -3327,8 +3294,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
>  	i915_check_and_clear_faults(dev_priv);
>  
>  	/* First fill our portion of the GTT with scratch pages */
> -	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
> -			       true);
> +	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
>  
>  	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index ec78be2..931d712 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -395,7 +395,7 @@ struct i915_address_space {
>  	/* FIXME: Need a more generic return type */
>  	gen6_pte_t (*pte_encode)(dma_addr_t addr,
>  				 enum i915_cache_level level,
> -				 bool valid, u32 flags); /* Create a valid PTE */
> +				 u32 flags); /* Create a valid PTE */
>  	/* flags for pte_encode */
>  #define PTE_READ_ONLY	(1<<0)
>  	int (*allocate_va_range)(struct i915_address_space *vm,
> @@ -403,8 +403,7 @@ struct i915_address_space {
>  				 uint64_t length);
>  	void (*clear_range)(struct i915_address_space *vm,
>  			    uint64_t start,
> -			    uint64_t length,
> -			    bool use_scratch);
> +			    uint64_t length);
>  	void (*insert_page)(struct i915_address_space *vm,
>  			    dma_addr_t addr,
>  			    uint64_t offset,
> -- 
> 2.7.4
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
