From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 17/41] drm/i915: Pass around sg_table to get_pages/put_pages backend
Date: Mon, 17 Oct 2016 11:55:54 +0100
Message-ID: <d25efe94-e3e8-afcb-2bfa-2eed5b828101@linux.intel.com>
In-Reply-To: <20161014121833.439-18-chris@chris-wilson.co.uk>


On 14/10/2016 13:18, Chris Wilson wrote:
> The plan is to move obj->pages out from under the struct_mutex into its
> own per-object lock. We need to prune any assumption of the struct_mutex
> from the get_pages/put_pages backends, and to make it easier we pass
> around the sg_table to operate on rather than indirectly via the obj.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_drv.h          |  36 +++++--
>   drivers/gpu/drm/i915/i915_gem.c          | 172 +++++++++++++++----------------
>   drivers/gpu/drm/i915/i915_gem_dmabuf.c   |  20 ++--
>   drivers/gpu/drm/i915/i915_gem_fence.c    |  17 +--
>   drivers/gpu/drm/i915/i915_gem_gtt.c      |  19 ++--
>   drivers/gpu/drm/i915/i915_gem_gtt.h      |   6 +-
>   drivers/gpu/drm/i915/i915_gem_internal.c |  23 ++---
>   drivers/gpu/drm/i915/i915_gem_shrinker.c |  11 +-
>   drivers/gpu/drm/i915/i915_gem_stolen.c   |  43 ++++----
>   drivers/gpu/drm/i915/i915_gem_userptr.c  |  88 ++++++++--------
>   10 files changed, 227 insertions(+), 208 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 95ad35773b2a..ebadb5e55476 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2172,8 +2172,8 @@ struct drm_i915_gem_object_ops {
>   	 * being released or under memory pressure (where we attempt to
>   	 * reap pages for the shrinker).
>   	 */
> -	int (*get_pages)(struct drm_i915_gem_object *);
> -	void (*put_pages)(struct drm_i915_gem_object *);
> +	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
> +	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
>   
>   	int (*dmabuf_export)(struct drm_i915_gem_object *);
>   	void (*release)(struct drm_i915_gem_object *);
> @@ -2308,8 +2308,6 @@ struct drm_i915_gem_object {
>   	struct i915_gem_userptr {
>   		uintptr_t ptr;
>   		unsigned read_only :1;
> -		unsigned workers :4;
> -#define I915_GEM_USERPTR_MAX_WORKERS 15

This is being discussed in another thread.

>   
>   		struct i915_mm_struct *mm;
>   		struct i915_mmu_object *mmu_object;
> @@ -2371,6 +2369,19 @@ __deprecated
>   extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
>   
>   static inline bool
> +i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
> +{
> +	return atomic_read(&obj->base.refcount.refcount) == 0;
> +}
> +
> +#if IS_ENABLED(CONFIG_LOCKDEP)
> +#define lockdep_assert_held_unless(lock, cond) \
> +	GEM_BUG_ON(debug_locks && !lockdep_is_held(lock) && !(cond))
> +#else
> +#define lockdep_assert_held_unless(lock, cond)
> +#endif
> +
> +static inline bool
>   i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
>   {
>   	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
> @@ -3196,6 +3207,8 @@ dma_addr_t
>   i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
>   				unsigned long n);
>   
> +void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
> +				 struct sg_table *pages);
>   int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
>   
>   static inline int __must_check
> @@ -3212,7 +3225,8 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
>   static inline void
>   __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
>   {
> -	lockdep_assert_held(&obj->base.dev->struct_mutex);
> +	lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
> +				   i915_gem_object_is_dead(obj));
>   	GEM_BUG_ON(!obj->mm.pages);
>   
>   	obj->mm.pages_pin_count++;
> @@ -3227,7 +3241,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
>   static inline void
>   __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
>   {
> -	lockdep_assert_held(&obj->base.dev->struct_mutex);
> +	lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
> +				   i915_gem_object_is_dead(obj));
>   	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
>   	GEM_BUG_ON(!obj->mm.pages);
>   
> @@ -3239,7 +3254,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
>   	__i915_gem_object_unpin_pages(obj);
>   }
>   
> -int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
> +void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
> +void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
>   
>   enum i915_map_type {
>   	I915_MAP_WB = 0,
> @@ -3462,8 +3478,10 @@ i915_vma_unpin_fence(struct i915_vma *vma)
>   void i915_gem_restore_fences(struct drm_device *dev);
>   
>   void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
> -void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
> -void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
> +void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
> +				       struct sg_table *pages);
> +void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
> +					 struct sg_table *pages);
>   
>   /* i915_gem_context.c */
>   int __must_check i915_gem_context_init(struct drm_device *dev);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index ae320a35931c..4faef4bd135b 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -169,7 +169,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
>   	return 0;
>   }
>   
> -static int
> +static struct sg_table *
>   i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>   {
>   	struct address_space *mapping = obj->base.filp->f_mapping;
> @@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>   	int i;
>   
>   	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
> -		return -EINVAL;
> +		return ERR_PTR(-EINVAL);
>   
>   	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
>   		struct page *page;
> @@ -187,7 +187,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>   
>   		page = shmem_read_mapping_page(mapping, i);
>   		if (IS_ERR(page))
> -			return PTR_ERR(page);
> +			return ERR_CAST(page);
>   
>   		src = kmap_atomic(page);
>   		memcpy(vaddr, src, PAGE_SIZE);
> @@ -202,11 +202,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>   
>   	st = kmalloc(sizeof(*st), GFP_KERNEL);
>   	if (st == NULL)
> -		return -ENOMEM;
> +		return ERR_PTR(-ENOMEM);
>   
>   	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
>   		kfree(st);
> -		return -ENOMEM;
> +		return ERR_PTR(-ENOMEM);
>   	}
>   
>   	sg = st->sgl;
> @@ -216,28 +216,30 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>   	sg_dma_address(sg) = obj->phys_handle->busaddr;
>   	sg_dma_len(sg) = obj->base.size;
>   
> -	obj->mm.pages = st;
> -	return 0;
> +	return st;
>   }
>   
>   static void
> -i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
> +__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
>   {
> -	int ret;
> -
>   	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
>   
> -	ret = i915_gem_object_set_to_cpu_domain(obj, true);
> -	if (WARN_ON(ret)) {
> -		/* In the event of a disaster, abandon all caches and
> -		 * hope for the best.
> -		 */
> -		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
> -	}
> -
>   	if (obj->mm.madv == I915_MADV_DONTNEED)
>   		obj->mm.dirty = false;
>   
> +	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
> +		i915_gem_clflush_object(obj, false);
> +
> +	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
> +	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
> +}
> +
> +static void
> +i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
> +			       struct sg_table *pages)
> +{
> +	__i915_gem_object_release_shmem(obj);
> +
>   	if (obj->mm.dirty) {
>   		struct address_space *mapping = obj->base.filp->f_mapping;
>   		char *vaddr = obj->phys_handle->vaddr;
> @@ -265,8 +267,8 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
>   		obj->mm.dirty = false;
>   	}
>   
> -	sg_free_table(obj->mm.pages);
> -	kfree(obj->mm.pages);
> +	sg_free_table(pages);
> +	kfree(pages);
>   }
>   
>   static void
> @@ -517,9 +519,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
>   	if (ret)
>   		return ret;
>   
> -	ret = __i915_gem_object_put_pages(obj);
> -	if (ret)
> -		return ret;
> +	__i915_gem_object_put_pages(obj);
> +	if (obj->mm.pages)
> +		return -EBUSY;
>   
>   	/* create a new object */
>   	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
> @@ -535,7 +537,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
>   static int
>   i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
>   		     struct drm_i915_gem_pwrite *args,
> -		     struct drm_file *file_priv)
> +		     struct drm_file *file)
>   {
>   	struct drm_device *dev = obj->base.dev;
>   	void *vaddr = obj->phys_handle->vaddr + args->offset;
> @@ -551,7 +553,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
>   				   I915_WAIT_LOCKED |
>   				   I915_WAIT_ALL,
>   				   MAX_SCHEDULE_TIMEOUT,
> -				   to_rps_client(file_priv));
> +				   to_rps_client(file));
>   	if (ret)
>   		return ret;
>   
> @@ -2266,8 +2268,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
>   }
>   
>   /* Try to discard unwanted pages */
> -static void
> -i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
> +void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
>   {
>   	struct address_space *mapping;
>   
> @@ -2286,32 +2287,20 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
>   }
>   
>   static void
> -i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
> +i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
> +			      struct sg_table *pages)
>   {
>   	struct sgt_iter sgt_iter;
>   	struct page *page;
> -	int ret;
>   
> -	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
> -
> -	ret = i915_gem_object_set_to_cpu_domain(obj, true);
> -	if (WARN_ON(ret)) {
> -		/* In the event of a disaster, abandon all caches and
> -		 * hope for the best.
> -		 */
> -		i915_gem_clflush_object(obj, true);
> -		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
> -	}
> +	__i915_gem_object_release_shmem(obj);

Waiting for the object to become inactive is now gone. I did not spot 
where that behaviour changed - did it change with this patch?
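
To be clear, this is the implicit wait I am referring to (a rough 
sketch of the two paths, assuming i915_gem_object_set_to_cpu_domain() 
still waits for rendering as it does in the current tree):

	/* Old put_pages path: the domain change waited for the GPU. */
	ret = i915_gem_object_set_to_cpu_domain(obj, true);
		-> i915_gem_object_wait_rendering(obj, false)
		-> clflush and domain bookkeeping

	/* New path: __i915_gem_object_release_shmem() only does the
	 * cache flush and the domain bookkeeping - there is no wait.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		i915_gem_clflush_object(obj, false);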

>   
> -	i915_gem_gtt_finish_object(obj);
> +	i915_gem_gtt_finish_pages(obj, pages);
>   
>   	if (i915_gem_object_needs_bit17_swizzle(obj))
> -		i915_gem_object_save_bit_17_swizzle(obj);
> -
> -	if (obj->mm.madv == I915_MADV_DONTNEED)
> -		obj->mm.dirty = false;
> +		i915_gem_object_save_bit_17_swizzle(obj, pages);
>   
> -	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
> +	for_each_sgt_page(page, sgt_iter, pages) {
>   		if (obj->mm.dirty)
>   			set_page_dirty(page);
>   
> @@ -2322,8 +2311,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
>   	}
>   	obj->mm.dirty = false;
>   
> -	sg_free_table(obj->mm.pages);
> -	kfree(obj->mm.pages);
> +	sg_free_table(pages);
> +	kfree(pages);
>   }
>   
>   static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
> @@ -2335,24 +2324,22 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
>   		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
>   }
>   
> -int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
> +void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>   {
> -	const struct drm_i915_gem_object_ops *ops = obj->ops;
> +	struct sg_table *pages;
>   
>   	lockdep_assert_held(&obj->base.dev->struct_mutex);
>   
> -	if (!obj->mm.pages)
> -		return 0;
> -
>   	if (i915_gem_object_has_pinned_pages(obj))
> -		return -EBUSY;
> +		return;
>   
>   	GEM_BUG_ON(obj->bind_count);
>   
>   	/* ->put_pages might need to allocate memory for the bit17 swizzle
>   	 * array, hence protect them from being reaped by removing them from gtt
>   	 * lists early. */
> -	list_del(&obj->global_list);
> +	pages = fetch_and_zero(&obj->mm.pages);
> +	GEM_BUG_ON(!pages);
>   
>   	if (obj->mm.mapping) {
>   		void *ptr;
> @@ -2368,12 +2355,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>   
>   	__i915_gem_object_reset_page_iter(obj);
>   
> -	ops->put_pages(obj);
> -	obj->mm.pages = NULL;
> -
> -	i915_gem_object_invalidate(obj);
> -
> -	return 0;
> +	obj->ops->put_pages(obj, pages);
>   }
>   
>   static unsigned long swiotlb_max_size(void)
> @@ -2385,7 +2367,7 @@ static unsigned long swiotlb_max_size(void)
>   #endif
>   }
>   
> -static int
> +static struct sg_table *
>   i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>   {
>   	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> @@ -2404,8 +2386,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>   	 * wasn't in the GTT, there shouldn't be any way it could have been in
>   	 * a GPU cache
>   	 */
> -	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
> -	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
> +	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
> +	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
>   
>   	max_segment = swiotlb_max_size();
>   	if (!max_segment)
> @@ -2413,12 +2395,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>   
>   	st = kmalloc(sizeof(*st), GFP_KERNEL);
>   	if (st == NULL)
> -		return -ENOMEM;
> +		return ERR_PTR(-ENOMEM);
>   
>   	page_count = obj->base.size / PAGE_SIZE;
>   	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
>   		kfree(st);
> -		return -ENOMEM;
> +		return ERR_PTR(-ENOMEM);
>   	}
>   
>   	/* Get the list of pages out of our struct file.  They'll be pinned
> @@ -2469,20 +2451,19 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>   	}
>   	if (sg) /* loop terminated early; short sg table */
>   		sg_mark_end(sg);
> -	obj->mm.pages = st;
>   
> -	ret = i915_gem_gtt_prepare_object(obj);
> +	ret = i915_gem_gtt_prepare_pages(obj, st);
>   	if (ret)
>   		goto err_pages;
>   
>   	if (i915_gem_object_needs_bit17_swizzle(obj))
> -		i915_gem_object_do_bit_17_swizzle(obj);
> +		i915_gem_object_do_bit_17_swizzle(obj, st);
>   
>   	if (i915_gem_object_is_tiled(obj) &&
>   	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
>   		__i915_gem_object_pin_pages(obj);
>   
> -	return 0;
> +	return st;
>   
>   err_pages:
>   	sg_mark_end(sg);
> @@ -2502,7 +2483,35 @@ err_pages:
>   	if (ret == -ENOSPC)
>   		ret = -ENOMEM;
>   
> -	return ret;
> +	return ERR_PTR(ret);
> +}
> +
> +void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
> +				 struct sg_table *pages)
> +{
> +	lockdep_assert_held(&obj->base.dev->struct_mutex);
> +
> +	obj->mm.get_page.sg_pos = pages->sgl;
> +	obj->mm.get_page.sg_idx = 0;
> +
> +	obj->mm.pages = pages;
> +}
> +
> +static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
> +{
> +	struct sg_table *pages;
> +
> +	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
> +		DRM_DEBUG("Attempting to obtain a purgeable object\n");
> +		return -EFAULT;
> +	}
> +
> +	pages = obj->ops->get_pages(obj);
> +	if (unlikely(IS_ERR(pages)))
> +		return PTR_ERR(pages);
> +
> +	__i915_gem_object_set_pages(obj, pages);
> +	return 0;
>   }
>   
>   /* Ensure that the associated pages are gathered from the backing storage
> @@ -2514,33 +2523,18 @@ err_pages:
>    */
>   int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> -	const struct drm_i915_gem_object_ops *ops = obj->ops;
> -	int ret;
> +	int err;
>   
>   	lockdep_assert_held(&obj->base.dev->struct_mutex);
>   
>   	if (obj->mm.pages)
>   		return 0;
>   
> -	if (obj->mm.madv != I915_MADV_WILLNEED) {
> -		DRM_DEBUG("Attempting to obtain a purgeable object\n");
> -		__i915_gem_object_unpin_pages(obj);
> -		return -EFAULT;
> -	}
> -
> -	ret = ops->get_pages(obj);
> -	if (ret) {
> +	err = ____i915_gem_object_get_pages(obj);
> +	if (err)
>   		__i915_gem_object_unpin_pages(obj);
> -		return ret;
> -	}
>   
> -	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

Objects will no longer appear on the unbound list when they have backing 
store?
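
The line I mean is the removed:

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

with no replacement that I can see in this patch.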

> -
> -	obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
> -	obj->mm.get_page.sg_idx = 0;
> -
> -	return 0;
> +	return err;
>   }
>   
>   /* The 'mapping' part of i915_gem_object_pin_map() below */
> diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> index 10441dc72e73..2abd524aba14 100644
> --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> @@ -289,22 +289,18 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
>   	return dma_buf;
>   }
>   
> -static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
> +static struct sg_table *
> +i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
>   {
> -	struct sg_table *sg;
> -
> -	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
> -	if (IS_ERR(sg))
> -		return PTR_ERR(sg);
> -
> -	obj->mm.pages = sg;
> -	return 0;
> +	return dma_buf_map_attachment(obj->base.import_attach,
> +				      DMA_BIDIRECTIONAL);
>   }
>   
> -static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
> +static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
> +					     struct sg_table *pages)
>   {
> -	dma_buf_unmap_attachment(obj->base.import_attach,
> -				 obj->mm.pages, DMA_BIDIRECTIONAL);
> +	dma_buf_unmap_attachment(obj->base.import_attach, pages,
> +				 DMA_BIDIRECTIONAL);
>   }
>   
>   static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
> index 5aadab59f071..cd59dbc6588c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
> @@ -644,6 +644,7 @@ i915_gem_swizzle_page(struct page *page)
>   /**
>    * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
>    * @obj: i915 GEM buffer object
> + * @pages: the scattergather list of physical pages
>    *
>    * This function fixes up the swizzling in case any page frame number for this
>    * object has changed in bit 17 since that state has been saved with
> @@ -654,7 +655,8 @@ i915_gem_swizzle_page(struct page *page)
>    * by swapping them out and back in again).
>    */
>   void
> -i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
> +i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
> +				  struct sg_table *pages)
>   {
>   	struct sgt_iter sgt_iter;
>   	struct page *page;
> @@ -664,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
>   		return;
>   
>   	i = 0;
> -	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
> +	for_each_sgt_page(page, sgt_iter, pages) {
>   		char new_bit_17 = page_to_phys(page) >> 17;
> -		if ((new_bit_17 & 0x1) !=
> -		    (test_bit(i, obj->bit_17) != 0)) {
> +		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
>   			i915_gem_swizzle_page(page);
>   			set_page_dirty(page);
>   		}
> @@ -678,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
>   /**
>    * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
>    * @obj: i915 GEM buffer object
> + * @pages: the scattergather list of physical pages
>    *
>    * This function saves the bit 17 of each page frame number so that swizzling
>    * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
>    * be called before the backing storage can be unpinned.
>    */
>   void
> -i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
> +i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
> +				    struct sg_table *pages)
>   {
> +	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
>   	struct sgt_iter sgt_iter;
>   	struct page *page;
> -	int page_count = obj->base.size >> PAGE_SHIFT;
>   	int i;
>   
>   	if (obj->bit_17 == NULL) {
> @@ -703,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
>   
>   	i = 0;
>   
> -	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
> +	for_each_sgt_page(page, sgt_iter, pages) {
>   		if (page_to_phys(page) & (1 << 17))
>   			__set_bit(i, obj->bit_17);
>   		else
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 794ccc4cffaa..1008209ca797 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2370,14 +2370,15 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
>   	i915_ggtt_flush(dev_priv);
>   }
>   
> -int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
> +int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
> +			       struct sg_table *pages)
>   {
> -	if (!dma_map_sg(&obj->base.dev->pdev->dev,
> -			obj->mm.pages->sgl, obj->mm.pages->nents,
> -			PCI_DMA_BIDIRECTIONAL))
> -		return -ENOSPC;
> +	if (dma_map_sg(&obj->base.dev->pdev->dev,
> +		       pages->sgl, pages->nents,
> +		       PCI_DMA_BIDIRECTIONAL))
> +		return 0;
>   
> -	return 0;
> +	return -ENOSPC;
>   }
>   
>   static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
> @@ -2696,7 +2697,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
>   					 vma->node.start, size);
>   }
>   
> -void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
> +void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
> +			       struct sg_table *pages)
>   {
>   	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>   	struct device *kdev = &dev_priv->drm.pdev->dev;
> @@ -2710,8 +2712,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
>   		}
>   	}
>   
> -	dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents,
> -		     PCI_DMA_BIDIRECTIONAL);
> +	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
>   }
>   
>   static void i915_gtt_color_adjust(struct drm_mm_node *node,
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index c241d8143255..dbe6a6cec20d 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -628,8 +628,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
>   void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
>   void i915_gem_restore_gtt_mappings(struct drm_device *dev);
>   
> -int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
> -void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
> +int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
> +					    struct sg_table *pages);
> +void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
> +			       struct sg_table *pages);
>   
>   /* Flags used by pin/bind&friends. */
>   #define PIN_NONBLOCK		BIT(0)
> diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
> index c9393729607f..76b3cf346f3d 100644
> --- a/drivers/gpu/drm/i915/i915_gem_internal.c
> +++ b/drivers/gpu/drm/i915/i915_gem_internal.c
> @@ -39,7 +39,8 @@ static void internal_free_pages(struct sg_table *st)
>   	kfree(st);
>   }
>   
> -static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
> +static struct sg_table *
> +i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
>   {
>   	struct drm_i915_private *i915 = to_i915(obj->base.dev);
>   	unsigned int npages = obj->base.size / PAGE_SIZE;
> @@ -50,11 +51,11 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
>   
>   	st = kmalloc(sizeof(*st), GFP_KERNEL);
>   	if (!st)
> -		return -ENOMEM;
> +		return ERR_PTR(-ENOMEM);
>   
>   	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
>   		kfree(st);
> -		return -ENOMEM;
> +		return ERR_PTR(-ENOMEM);
>   	}
>   
>   	sg = st->sgl;
> @@ -96,12 +97,9 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
>   
>   		sg = __sg_next(sg);
>   	} while (1);
> -	obj->mm.pages = st;
>   
> -	if (i915_gem_gtt_prepare_object(obj)) {
> -		obj->mm.pages = NULL;
> +	if (i915_gem_gtt_prepare_pages(obj, st))
>   		goto err;
> -	}
>   
>   	/* Mark the pages as dontneed whilst they are still pinned. As soon
>   	 * as they are unpinned they are allowed to be reaped by the shrinker,
> @@ -109,18 +107,19 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
>   	 * object are only valid whilst active and pinned.
>   	 */
>   	obj->mm.madv = I915_MADV_DONTNEED;
> -	return 0;
> +	return st;
>   
>   err:
>   	sg_mark_end(sg);
>   	internal_free_pages(st);
> -	return -ENOMEM;
> +	return ERR_PTR(-ENOMEM);
>   }
>   
> -static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj)
> +static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
> +					       struct sg_table *pages)
>   {
> -	i915_gem_gtt_finish_object(obj);
> -	internal_free_pages(obj->mm.pages);
> +	i915_gem_gtt_finish_pages(obj, pages);
> +	internal_free_pages(pages);
>   
>   	obj->mm.dirty = false;
>   	obj->mm.madv = I915_MADV_WILLNEED;
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index 124f69a80162..f95061faeae6 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -91,6 +91,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
>   	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
>   }
>   
> +static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
> +{
> +	if (i915_gem_object_unbind(obj) == 0)
> +		__i915_gem_object_put_pages(obj);
> +	return !obj->mm.pages;
> +}

Can we have some comments with this function?
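
Something along these lines, perhaps (my guess at the intended 
semantics, going by the callers):

	/*
	 * Try to release the backing storage with no regard for who
	 * may still be using the object (hence "unsafe"): unbind all
	 * VMA first, then drop the pages unless something else still
	 * holds a pin. Returns true if the pages were released.
	 */
	static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
	{
		if (i915_gem_object_unbind(obj) == 0)
			__i915_gem_object_put_pages(obj);
		return !obj->mm.pages;
	}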

> +
>   /**
>    * i915_gem_shrink - Shrink buffer object caches
>    * @dev_priv: i915 device
> @@ -192,9 +199,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
>   
>   			i915_gem_object_get(obj);
>   
> -			/* For the unbound phase, this should be a no-op! */
> -			i915_gem_object_unbind(obj);
> -			if (__i915_gem_object_put_pages(obj) == 0)
> +			if (unsafe_drop_pages(obj))
>   				count += obj->base.size >> PAGE_SHIFT;
>   
>   			i915_gem_object_put(obj);
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index 0acbdcbd8f3a..1a63ffa4d189 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -546,17 +546,20 @@ i915_pages_create_for_stolen(struct drm_device *dev,
>   	return st;
>   }
>   
> -static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
> +static struct sg_table *
> +i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
>   {
> -	BUG();
> -	return -EINVAL;
> +	return i915_pages_create_for_stolen(obj->base.dev,
> +					    obj->stolen->start,
> +					    obj->stolen->size);
>   }
>   
> -static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
> +static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
> +					     struct sg_table *pages)
>   {
>   	/* Should only be called during free */
> -	sg_free_table(obj->mm.pages);
> -	kfree(obj->mm.pages);
> +	sg_free_table(pages);
> +	kfree(pages);
>   }
>   
>   static void
> @@ -591,21 +594,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
>   	drm_gem_private_object_init(dev, &obj->base, stolen->size);
>   	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
>   
> -	obj->mm.pages = i915_pages_create_for_stolen(dev,
> -						     stolen->start,
> -						     stolen->size);
> -	if (!obj->mm.pages)
> -		goto cleanup;
> -
> -	obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
> -	obj->mm.get_page.sg_idx = 0;
> -
> -	__i915_gem_object_pin_pages(obj);
>   	obj->stolen = stolen;
> -
>   	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
>   	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
>   
> +	if (i915_gem_object_pin_pages(obj))
> +		goto cleanup;
> +
>   	return obj;
>   
>   cleanup:
> @@ -700,10 +695,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
>   	if (gtt_offset == I915_GTT_OFFSET_NONE)
>   		return obj;
>   
> +	ret = i915_gem_object_pin_pages(obj);
> +	if (ret)
> +		goto err;
> +
>   	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
>   	if (IS_ERR(vma)) {
>   		ret = PTR_ERR(vma);
> -		goto err;
> +		goto err_pages;
>   	}
>   
>   	/* To simplify the initialisation sequence between KMS and GTT,
> @@ -717,20 +716,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
>   	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
>   	if (ret) {
>   		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
> -		goto err;
> +		goto err_pages;
>   	}
>   
>   	vma->pages = obj->mm.pages;
>   	vma->flags |= I915_VMA_GLOBAL_BIND;
>   	__i915_vma_set_map_and_fenceable(vma);
>   	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
> +	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
>   	obj->bind_count++;
>   
> -	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> -	__i915_gem_object_pin_pages(obj);
> -
>   	return obj;
>   
> +err_pages:
> +	i915_gem_object_unpin_pages(obj);
>   err:
>   	i915_gem_object_put(obj);
>   	return NULL;
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index 0fdd1d6723d1..e97c7b589439 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -73,11 +73,14 @@ static void cancel_userptr(struct work_struct *work)
>   	/* Cancel any active worker and force us to re-evaluate gup */
>   	obj->userptr.work = NULL;
>   
> -	if (obj->mm.pages) {
> -		/* We are inside a kthread context and can't be interrupted */
> -		WARN_ON(i915_gem_object_unbind(obj));
> -		WARN_ON(__i915_gem_object_put_pages(obj));
> -	}
> +	/* We are inside a kthread context and can't be interrupted */
> +	if (i915_gem_object_unbind(obj) == 0)
> +		__i915_gem_object_put_pages(obj);
> +	WARN_ONCE(obj->mm.pages,
> +		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
> +		  obj->bind_count,
> +		  obj->mm.pages_pin_count,
> +		  obj->pin_display);
>   
>   	i915_gem_object_put(obj);
>   	mutex_unlock(&dev->struct_mutex);
> @@ -426,24 +429,25 @@ err:
>   	return ret;
>   }
>   
> -static int
> +static struct sg_table *
>   __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
>   			     struct page **pvec, int num_pages)
>   {
> +	struct sg_table *pages;
>   	int ret;
>   
> -	ret = st_set_pages(&obj->mm.pages, pvec, num_pages);
> +	ret = st_set_pages(&pages, pvec, num_pages);
>   	if (ret)
> -		return ret;
> +		return ERR_PTR(ret);
>   
> -	ret = i915_gem_gtt_prepare_object(obj);
> +	ret = i915_gem_gtt_prepare_pages(obj, pages);
>   	if (ret) {
> -		sg_free_table(obj->mm.pages);
> -		kfree(obj->mm.pages);
> -		obj->mm.pages = NULL;
> +		sg_free_table(pages);
> +		kfree(pages);
> +		return ERR_PTR(ret);
>   	}
>   
> -	return ret;
> +	return pages;
>   }
>   
>   static int
> @@ -521,20 +525,20 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>   
>   	mutex_lock(&dev->struct_mutex);
>   	if (obj->userptr.work == &work->work) {
> +		struct sg_table *pages = ERR_PTR(ret);
> +
>   		if (pinned == npages) {
> -			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
> -			if (ret == 0) {
> -				list_add_tail(&obj->global_list,
> -					      &to_i915(dev)->mm.unbound_list);
> -				obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
> -				obj->mm.get_page.sg_idx = 0;
> +			pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
> +			if (!IS_ERR(pages)) {
> +				__i915_gem_object_set_pages(obj, pages);
>   				pinned = 0;
> +				pages = NULL;
>   			}
>   		}
> -		obj->userptr.work = ERR_PTR(ret);
> +
> +		obj->userptr.work = ERR_CAST(pages);
>   	}
>   
> -	obj->userptr.workers--;
>   	i915_gem_object_put(obj);
>   	mutex_unlock(&dev->struct_mutex);
>   
> @@ -545,7 +549,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>   	kfree(work);
>   }
>   
> -static int
> +static struct sg_table *
>   __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
>   				      bool *active)
>   {
> @@ -570,15 +574,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
>   	 * that error back to this function through
>   	 * obj->userptr.work = ERR_PTR.
>   	 */
> -	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
> -		return -EAGAIN;
> -
>   	work = kmalloc(sizeof(*work), GFP_KERNEL);
>   	if (work == NULL)
> -		return -ENOMEM;
> +		return ERR_PTR(-ENOMEM);
>   
>   	obj->userptr.work = &work->work;
> -	obj->userptr.workers++;
>   
>   	work->obj = i915_gem_object_get(obj);
>   
> @@ -589,14 +589,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
>   	schedule_work(&work->work);
>   
>   	*active = true;
> -	return -EAGAIN;
> +	return ERR_PTR(-EAGAIN);
>   }
>   
> -static int
> +static struct sg_table *
>   i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
>   {
>   	const int num_pages = obj->base.size >> PAGE_SHIFT;
>   	struct page **pvec;
> +	struct sg_table *pages;
>   	int pinned, ret;
>   	bool active;
>   
> @@ -620,15 +621,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
>   	if (obj->userptr.work) {
>   		/* active flag should still be held for the pending work */
>   		if (IS_ERR(obj->userptr.work))
> -			return PTR_ERR(obj->userptr.work);
> +			return ERR_CAST(obj->userptr.work);
>   		else
> -			return -EAGAIN;
> +			return ERR_PTR(-EAGAIN);
>   	}
>   
>   	/* Let the mmu-notifier know that we have begun and need cancellation */
>   	ret = __i915_gem_userptr_set_active(obj, true);
>   	if (ret)
> -		return ret;
> +		return ERR_PTR(ret);
>   
>   	pvec = NULL;
>   	pinned = 0;
> @@ -637,7 +638,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
>   				      GFP_TEMPORARY);
>   		if (pvec == NULL) {
>   			__i915_gem_userptr_set_active(obj, false);
> -			return -ENOMEM;
> +			return ERR_PTR(-ENOMEM);
>   		}
>   
>   		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
> @@ -646,21 +647,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
>   
>   	active = false;
>   	if (pinned < 0)
> -		ret = pinned, pinned = 0;
> +		pages = ERR_PTR(pinned), pinned = 0;
>   	else if (pinned < num_pages)
> -		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
> +		pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
>   	else
> -		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
> -	if (ret) {
> +		pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
> +	if (IS_ERR(pages)) {
>   		__i915_gem_userptr_set_active(obj, active);
>   		release_pages(pvec, pinned, 0);
>   	}
>   	drm_free_large(pvec);
> -	return ret;
> +	return pages;
>   }
>   
>   static void
> -i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
> +i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
> +			   struct sg_table *pages)
>   {
>   	struct sgt_iter sgt_iter;
>   	struct page *page;
> @@ -671,9 +673,9 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
>   	if (obj->mm.madv != I915_MADV_WILLNEED)
>   		obj->mm.dirty = false;
>   
> -	i915_gem_gtt_finish_object(obj);
> +	i915_gem_gtt_finish_pages(obj, pages);
>   
> -	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
> +	for_each_sgt_page(page, sgt_iter, pages) {
>   		if (obj->mm.dirty)
>   			set_page_dirty(page);
>   
> @@ -682,8 +684,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
>   	}
>   	obj->mm.dirty = false;
>   
> -	sg_free_table(obj->mm.pages);
> -	kfree(obj->mm.pages);
> +	sg_free_table(pages);
> +	kfree(pages);
>   }
>   
>   static void

I did not spot any issues with the code itself, but I think it could 
have been split into smaller patches - for example, adding the sg_table 
function parameter in one patch and then following up with all the 
other changes. As it is, I found the patch very difficult to review, 
and even once my questions are resolved my confidence level for giving 
an r-b will be low. :I

Regards,

Tvrtko

