* Start planning for handling async pages, binding, everything
@ 2017-02-27 13:31 Chris Wilson
  2017-02-27 13:31 ` [PATCH 1/3] drm/i915: Start splitting out i915_gem_object routines Chris Wilson
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Chris Wilson @ 2017-02-27 13:31 UTC (permalink / raw)
  To: intel-gfx

Currently we have a heavyweight EAGAIN loop round-tripping to userspace
to handle asynchronous loading. However, we now have fences to handle
asynchronous execution. Fun times ahead. For starters, let's move the
current asynchronous get_pages onto a more secure footing.
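
For context, a minimal sketch of the sort of retry loop meant here,
assuming the execbuf path (fd and execbuf stand for a hypothetical
open device node and a filled-in struct drm_i915_gem_execbuffer2):

	/* Userspace side of the EAGAIN round trip: while the kernel is
	 * still populating backing pages, the ioctl fails with EAGAIN
	 * and userspace retries until the object is ready.
	 */
	int err;

	do {
		err = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	} while (err == -1 && (errno == EAGAIN || errno == EINTR));
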
-Chris


* [PATCH 1/3] drm/i915: Start splitting out i915_gem_object routines
  2017-02-27 13:31 Start planning for handling async pages, binding, everything Chris Wilson
@ 2017-02-27 13:31 ` Chris Wilson
  2017-02-27 13:31 ` [PATCH 2/3] drm/i915: Exercise backing storage of mock gem objects Chris Wilson
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Chris Wilson @ 2017-02-27 13:31 UTC (permalink / raw)
  To: intel-gfx

To begin with, move the obj->mm related operations to i915_gem_object.c,
in preparation for the async get_pages rework later in this series.
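
For reference, a sketch of how callers use the API being consolidated
(example_touch_first_page is a made-up helper; the i915_gem_object_*
calls are the ones declared in i915_gem_object.h below):

	static int example_touch_first_page(struct drm_i915_gem_object *obj)
	{
		struct page *page;
		int err;

		err = i915_gem_object_pin_pages(obj); /* get + pin the backing store */
		if (err)
			return err;

		page = i915_gem_object_get_page(obj, 0); /* amortised O(1) lookup */
		/* ... access the page ... */

		i915_gem_object_unpin_pages(obj); /* drop the pin again */
		return 0;
	}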

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile                      |   1 +
 drivers/gpu/drm/i915/i915_drv.h                    | 107 ------
 drivers/gpu/drm/i915/i915_gem.c                    | 385 +------------------
 drivers/gpu/drm/i915/i915_gem_object.c             | 412 +++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_object.h             | 112 +++++-
 .../selftests/{i915_gem_object.c => i915_gem.c}    |   4 +-
 .../gpu/drm/i915/selftests/i915_live_selftests.h   |   2 +-
 .../gpu/drm/i915/selftests/i915_mock_selftests.h   |   2 +-
 8 files changed, 529 insertions(+), 496 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_gem_object.c
 rename drivers/gpu/drm/i915/selftests/{i915_gem_object.c => i915_gem.c} (99%)

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d1d8ec49791c..1dbbcd3e23b1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_gtt.o \
 	  i915_gem_internal.o \
 	  i915_gem.o \
+	  i915_gem_object.o \
 	  i915_gem_render_state.o \
 	  i915_gem_request.o \
 	  i915_gem_shrinker.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3d76f9d16cc2..b88c6c58a860 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3285,113 +3285,6 @@ static inline int __sg_page_count(const struct scatterlist *sg)
 	return sg->length >> PAGE_SHIFT;
 }
 
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
-		       unsigned int n, unsigned int *offset);
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj,
-			 unsigned int n);
-
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-			       unsigned int n);
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
-				unsigned long n);
-
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages);
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-
-static inline int __must_check
-i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
-	might_lock(&obj->mm.lock);
-
-	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
-		return 0;
-
-	return __i915_gem_object_get_pages(obj);
-}
-
-static inline void
-__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
-	GEM_BUG_ON(!obj->mm.pages);
-
-	atomic_inc(&obj->mm.pages_pin_count);
-}
-
-static inline bool
-i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
-{
-	return atomic_read(&obj->mm.pages_pin_count);
-}
-
-static inline void
-__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-	GEM_BUG_ON(!obj->mm.pages);
-
-	atomic_dec(&obj->mm.pages_pin_count);
-}
-
-static inline void
-i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
-	__i915_gem_object_unpin_pages(obj);
-}
-
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
-	I915_MM_NORMAL = 0,
-	I915_MM_SHRINKER
-};
-
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				 enum i915_mm_subclass subclass);
-void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
-
-enum i915_map_type {
-	I915_MAP_WB = 0,
-	I915_MAP_WC,
-};
-
-/**
- * i915_gem_object_pin_map - return a contiguous mapping of the entire object
- * @obj: the object to map into kernel address space
- * @type: the type of mapping, used to select pgprot_t
- *
- * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
- * pages and then returns a contiguous mapping of the backing storage into
- * the kernel address space. Based on the @type of mapping, the PTE will be
- * set to either WriteBack or WriteCombine (via pgprot_t).
- *
- * The caller is responsible for calling i915_gem_object_unpin_map() when the
- * mapping is no longer required.
- *
- * Returns the pointer through which to access the mapped object, or an
- * ERR_PTR() on error.
- */
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
-					   enum i915_map_type type);
-
-/**
- * i915_gem_object_unpin_map - releases an earlier mapping
- * @obj: the object to unmap
- *
- * After pinning the object and mapping its pages, once you are finished
- * with your access, call i915_gem_object_unpin_map() to release the pin
- * upon the mapping. Once the pin count reaches zero, that mapping may be
- * removed.
- */
-static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_unpin_pages(obj);
-}
-
 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 				    unsigned int *needs_clflush);
 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aeb46af196c2..0316215221f8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2190,57 +2190,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 	kfree(pages);
 }
 
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
-{
-	struct radix_tree_iter iter;
-	void **slot;
-
-	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
-		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
-}
-
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				 enum i915_mm_subclass subclass)
-{
-	struct sg_table *pages;
-
-	if (i915_gem_object_has_pinned_pages(obj))
-		return;
-
-	GEM_BUG_ON(obj->bind_count);
-	if (!READ_ONCE(obj->mm.pages))
-		return;
-
-	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock_nested(&obj->mm.lock, subclass);
-	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
-		goto unlock;
-
-	/* ->put_pages might need to allocate memory for the bit17 swizzle
-	 * array, hence protect them from being reaped by removing them from gtt
-	 * lists early. */
-	pages = fetch_and_zero(&obj->mm.pages);
-	GEM_BUG_ON(!pages);
-
-	if (obj->mm.mapping) {
-		void *ptr;
-
-		ptr = ptr_mask_bits(obj->mm.mapping);
-		if (is_vmalloc_addr(ptr))
-			vunmap(ptr);
-		else
-			kunmap(kmap_to_page(ptr));
-
-		obj->mm.mapping = NULL;
-	}
-
-	__i915_gem_object_reset_page_iter(obj);
-
-	obj->ops->put_pages(obj, pages);
-unlock:
-	mutex_unlock(&obj->mm.lock);
-}
-
 static bool i915_sg_trim(struct sg_table *orig_st)
 {
 	struct sg_table new_st;
@@ -2407,184 +2356,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	return ERR_PTR(ret);
 }
 
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages)
-{
-	lockdep_assert_held(&obj->mm.lock);
-
-	obj->mm.get_page.sg_pos = pages->sgl;
-	obj->mm.get_page.sg_idx = 0;
-
-	obj->mm.pages = pages;
-
-	if (i915_gem_object_is_tiled(obj) &&
-	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-		GEM_BUG_ON(obj->mm.quirked);
-		__i915_gem_object_pin_pages(obj);
-		obj->mm.quirked = true;
-	}
-}
-
-static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
-	struct sg_table *pages;
-
-	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
-	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
-		DRM_DEBUG("Attempting to obtain a purgeable object\n");
-		return -EFAULT;
-	}
-
-	pages = obj->ops->get_pages(obj);
-	if (unlikely(IS_ERR(pages)))
-		return PTR_ERR(pages);
-
-	__i915_gem_object_set_pages(obj, pages);
-	return 0;
-}
-
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_pin_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_unpin_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
-	int err;
-
-	err = mutex_lock_interruptible(&obj->mm.lock);
-	if (err)
-		return err;
-
-	if (unlikely(!obj->mm.pages)) {
-		err = ____i915_gem_object_get_pages(obj);
-		if (err)
-			goto unlock;
-
-		smp_mb__before_atomic();
-	}
-	atomic_inc(&obj->mm.pages_pin_count);
-
-unlock:
-	mutex_unlock(&obj->mm.lock);
-	return err;
-}
-
-/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
-				 enum i915_map_type type)
-{
-	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
-	struct sg_table *sgt = obj->mm.pages;
-	struct sgt_iter sgt_iter;
-	struct page *page;
-	struct page *stack_pages[32];
-	struct page **pages = stack_pages;
-	unsigned long i = 0;
-	pgprot_t pgprot;
-	void *addr;
-
-	/* A single page can always be kmapped */
-	if (n_pages == 1 && type == I915_MAP_WB)
-		return kmap(sg_page(sgt->sgl));
-
-	if (n_pages > ARRAY_SIZE(stack_pages)) {
-		/* Too big for stack -- allocate temporary array instead */
-		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
-		if (!pages)
-			return NULL;
-	}
-
-	for_each_sgt_page(page, sgt_iter, sgt)
-		pages[i++] = page;
-
-	/* Check that we have the expected number of pages */
-	GEM_BUG_ON(i != n_pages);
-
-	switch (type) {
-	case I915_MAP_WB:
-		pgprot = PAGE_KERNEL;
-		break;
-	case I915_MAP_WC:
-		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
-		break;
-	}
-	addr = vmap(pages, n_pages, 0, pgprot);
-
-	if (pages != stack_pages)
-		drm_free_large(pages);
-
-	return addr;
-}
-
-/* get, pin, and map the pages of the object into kernel space */
-void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
-			      enum i915_map_type type)
-{
-	enum i915_map_type has_type;
-	bool pinned;
-	void *ptr;
-	int ret;
-
-	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
-
-	ret = mutex_lock_interruptible(&obj->mm.lock);
-	if (ret)
-		return ERR_PTR(ret);
-
-	pinned = true;
-	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(!obj->mm.pages)) {
-			ret = ____i915_gem_object_get_pages(obj);
-			if (ret)
-				goto err_unlock;
-
-			smp_mb__before_atomic();
-		}
-		atomic_inc(&obj->mm.pages_pin_count);
-		pinned = false;
-	}
-	GEM_BUG_ON(!obj->mm.pages);
-
-	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
-	if (ptr && has_type != type) {
-		if (pinned) {
-			ret = -EBUSY;
-			goto err_unpin;
-		}
-
-		if (is_vmalloc_addr(ptr))
-			vunmap(ptr);
-		else
-			kunmap(kmap_to_page(ptr));
-
-		ptr = obj->mm.mapping = NULL;
-	}
-
-	if (!ptr) {
-		ptr = i915_gem_object_map(obj, type);
-		if (!ptr) {
-			ret = -ENOMEM;
-			goto err_unpin;
-		}
-
-		obj->mm.mapping = ptr_pack_bits(ptr, type);
-	}
-
-out_unlock:
-	mutex_unlock(&obj->mm.lock);
-	return ptr;
-
-err_unpin:
-	atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
-	ptr = ERR_PTR(ret);
-	goto out_unlock;
-}
-
 static bool ban_context(const struct i915_gem_context *ctx)
 {
 	return (i915_gem_context_is_bannable(ctx) &&
@@ -5019,163 +4790,9 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
 	return ERR_PTR(ret);
 }
 
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
-		       unsigned int n,
-		       unsigned int *offset)
-{
-	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
-	struct scatterlist *sg;
-	unsigned int idx, count;
-
-	might_sleep();
-	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
-	/* As we iterate forward through the sg, we record each entry in a
-	 * radixtree for quick repeated (backwards) lookups. If we have seen
-	 * this index previously, we will have an entry for it.
-	 *
-	 * Initial lookup is O(N), but this is amortized to O(1) for
-	 * sequential page access (where each new request is consecutive
-	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
-	 * i.e. O(1) with a large constant!
-	 */
-	if (n < READ_ONCE(iter->sg_idx))
-		goto lookup;
-
-	mutex_lock(&iter->lock);
-
-	/* We prefer to reuse the last sg so that repeated lookup of this
-	 * (or the subsequent) sg are fast - comparing against the last
-	 * sg is faster than going through the radixtree.
-	 */
-
-	sg = iter->sg_pos;
-	idx = iter->sg_idx;
-	count = __sg_page_count(sg);
-
-	while (idx + count <= n) {
-		unsigned long exception, i;
-		int ret;
-
-		/* If we cannot allocate and insert this entry, or the
-		 * individual pages from this range, cancel updating the
-		 * sg_idx so that on this lookup we are forced to linearly
-		 * scan onwards, but on future lookups we will try the
-		 * insertion again (in which case we need to be careful of
-		 * the error return reporting that we have already inserted
-		 * this index).
-		 */
-		ret = radix_tree_insert(&iter->radix, idx, sg);
-		if (ret && ret != -EEXIST)
-			goto scan;
-
-		exception =
-			RADIX_TREE_EXCEPTIONAL_ENTRY |
-			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
-		for (i = 1; i < count; i++) {
-			ret = radix_tree_insert(&iter->radix, idx + i,
-						(void *)exception);
-			if (ret && ret != -EEXIST)
-				goto scan;
-		}
-
-		idx += count;
-		sg = ____sg_next(sg);
-		count = __sg_page_count(sg);
-	}
-
-scan:
-	iter->sg_pos = sg;
-	iter->sg_idx = idx;
-
-	mutex_unlock(&iter->lock);
-
-	if (unlikely(n < idx)) /* insertion completed by another thread */
-		goto lookup;
-
-	/* In case we failed to insert the entry into the radixtree, we need
-	 * to look beyond the current sg.
-	 */
-	while (idx + count <= n) {
-		idx += count;
-		sg = ____sg_next(sg);
-		count = __sg_page_count(sg);
-	}
-
-	*offset = n - idx;
-	return sg;
-
-lookup:
-	rcu_read_lock();
-
-	sg = radix_tree_lookup(&iter->radix, n);
-	GEM_BUG_ON(!sg);
-
-	/* If this index is in the middle of multi-page sg entry,
-	 * the radixtree will contain an exceptional entry that points
-	 * to the start of that range. We will return the pointer to
-	 * the base page and the offset of this page within the
-	 * sg entry's range.
-	 */
-	*offset = 0;
-	if (unlikely(radix_tree_exception(sg))) {
-		unsigned long base =
-			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
-
-		sg = radix_tree_lookup(&iter->radix, base);
-		GEM_BUG_ON(!sg);
-
-		*offset = n - base;
-	}
-
-	rcu_read_unlock();
-
-	return sg;
-}
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
-{
-	struct scatterlist *sg;
-	unsigned int offset;
-
-	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
-
-	sg = i915_gem_object_get_sg(obj, n, &offset);
-	return nth_page(sg_page(sg), offset);
-}
-
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-			       unsigned int n)
-{
-	struct page *page;
-
-	page = i915_gem_object_get_page(obj, n);
-	if (!obj->mm.dirty)
-		set_page_dirty(page);
-
-	return page;
-}
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
-				unsigned long n)
-{
-	struct scatterlist *sg;
-	unsigned int offset;
-
-	sg = i915_gem_object_get_sg(obj, n, &offset);
-	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/scatterlist.c"
 #include "selftests/mock_gem_device.c"
-#include "selftests/huge_gem_object.c"
-#include "selftests/i915_gem_object.c"
+#include "selftests/i915_gem.c"
 #include "selftests/i915_gem_coherency.c"
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
new file mode 100644
index 000000000000..f222980cee34
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+				 struct sg_table *pages)
+{
+	lockdep_assert_held(&obj->mm.lock);
+
+	obj->mm.get_page.sg_pos = pages->sgl;
+	obj->mm.get_page.sg_idx = 0;
+
+	obj->mm.pages = pages;
+
+	if (i915_gem_object_is_tiled(obj) &&
+	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+		GEM_BUG_ON(obj->mm.quirked);
+		__i915_gem_object_pin_pages(obj);
+		obj->mm.quirked = true;
+	}
+}
+
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	struct sg_table *pages;
+
+	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+		DRM_DEBUG("Attempting to obtain a purgeable object\n");
+		return -EFAULT;
+	}
+
+	pages = obj->ops->get_pages(obj);
+	if (unlikely(IS_ERR(pages)))
+		return PTR_ERR(pages);
+
+	__i915_gem_object_set_pages(obj, pages);
+	return 0;
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	err = mutex_lock_interruptible(&obj->mm.lock);
+	if (err)
+		return err;
+
+	if (unlikely(!obj->mm.pages)) {
+		err = ____i915_gem_object_get_pages(obj);
+		if (err)
+			goto unlock;
+
+		smp_mb__before_atomic();
+	}
+	atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+	mutex_unlock(&obj->mm.lock);
+	return err;
+}
+
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+{
+	struct radix_tree_iter iter;
+	void **slot;
+
+	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+}
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				 enum i915_mm_subclass subclass)
+{
+	struct sg_table *pages;
+
+	if (i915_gem_object_has_pinned_pages(obj))
+		return;
+
+	GEM_BUG_ON(obj->bind_count);
+	if (!READ_ONCE(obj->mm.pages))
+		return;
+
+	/* May be called by shrinker from within get_pages() (on another bo) */
+	mutex_lock_nested(&obj->mm.lock, subclass);
+	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+		goto unlock;
+
+	/* ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early. */
+	pages = fetch_and_zero(&obj->mm.pages);
+	GEM_BUG_ON(!pages);
+
+	if (obj->mm.mapping) {
+		void *ptr;
+
+		ptr = ptr_mask_bits(obj->mm.mapping);
+		if (is_vmalloc_addr(ptr))
+			vunmap(ptr);
+		else
+			kunmap(kmap_to_page(ptr));
+
+		obj->mm.mapping = NULL;
+	}
+
+	__i915_gem_object_reset_page_iter(obj);
+
+	obj->ops->put_pages(obj, pages);
+unlock:
+	mutex_unlock(&obj->mm.lock);
+}
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+		       unsigned int n,
+		       unsigned int *offset)
+{
+	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+	struct scatterlist *sg;
+	unsigned int idx, count;
+
+	might_sleep();
+	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+	/* As we iterate forward through the sg, we record each entry in a
+	 * radixtree for quick repeated (backwards) lookups. If we have seen
+	 * this index previously, we will have an entry for it.
+	 *
+	 * Initial lookup is O(N), but this is amortized to O(1) for
+	 * sequential page access (where each new request is consecutive
+	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+	 * i.e. O(1) with a large constant!
+	 */
+	if (n < READ_ONCE(iter->sg_idx))
+		goto lookup;
+
+	mutex_lock(&iter->lock);
+
+	/* We prefer to reuse the last sg so that repeated lookup of this
+	 * (or the subsequent) sg are fast - comparing against the last
+	 * sg is faster than going through the radixtree.
+	 */
+
+	sg = iter->sg_pos;
+	idx = iter->sg_idx;
+	count = __sg_page_count(sg);
+
+	while (idx + count <= n) {
+		unsigned long exception, i;
+		int ret;
+
+		/* If we cannot allocate and insert this entry, or the
+		 * individual pages from this range, cancel updating the
+		 * sg_idx so that on this lookup we are forced to linearly
+		 * scan onwards, but on future lookups we will try the
+		 * insertion again (in which case we need to be careful of
+		 * the error return reporting that we have already inserted
+		 * this index).
+		 */
+		ret = radix_tree_insert(&iter->radix, idx, sg);
+		if (ret && ret != -EEXIST)
+			goto scan;
+
+		exception =
+			RADIX_TREE_EXCEPTIONAL_ENTRY |
+			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
+		for (i = 1; i < count; i++) {
+			ret = radix_tree_insert(&iter->radix, idx + i,
+						(void *)exception);
+			if (ret && ret != -EEXIST)
+				goto scan;
+		}
+
+		idx += count;
+		sg = ____sg_next(sg);
+		count = __sg_page_count(sg);
+	}
+
+scan:
+	iter->sg_pos = sg;
+	iter->sg_idx = idx;
+
+	mutex_unlock(&iter->lock);
+
+	if (unlikely(n < idx)) /* insertion completed by another thread */
+		goto lookup;
+
+	/* In case we failed to insert the entry into the radixtree, we need
+	 * to look beyond the current sg.
+	 */
+	while (idx + count <= n) {
+		idx += count;
+		sg = ____sg_next(sg);
+		count = __sg_page_count(sg);
+	}
+
+	*offset = n - idx;
+	return sg;
+
+lookup:
+	rcu_read_lock();
+
+	sg = radix_tree_lookup(&iter->radix, n);
+	GEM_BUG_ON(!sg);
+
+	/* If this index is in the middle of multi-page sg entry,
+	 * the radixtree will contain an exceptional entry that points
+	 * to the start of that range. We will return the pointer to
+	 * the base page and the offset of this page within the
+	 * sg entry's range.
+	 */
+	*offset = 0;
+	if (unlikely(radix_tree_exception(sg))) {
+		unsigned long base =
+			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+
+		sg = radix_tree_lookup(&iter->radix, base);
+		GEM_BUG_ON(!sg);
+
+		*offset = n - base;
+	}
+
+	rcu_read_unlock();
+
+	return sg;
+}
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+	struct scatterlist *sg;
+	unsigned int offset;
+
+	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+	sg = i915_gem_object_get_sg(obj, n, &offset);
+	return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+			       unsigned int n)
+{
+	struct page *page;
+
+	page = i915_gem_object_get_page(obj, n);
+	if (!obj->mm.dirty)
+		set_page_dirty(page);
+
+	return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+				unsigned long n)
+{
+	struct scatterlist *sg;
+	unsigned int offset;
+
+	sg = i915_gem_object_get_sg(obj, n, &offset);
+	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
+
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+				 enum i915_map_type type)
+{
+	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+	struct sg_table *sgt = obj->mm.pages;
+	struct sgt_iter sgt_iter;
+	struct page *page;
+	struct page *stack_pages[32];
+	struct page **pages = stack_pages;
+	unsigned long i = 0;
+	pgprot_t pgprot;
+	void *addr;
+
+	/* A single page can always be kmapped */
+	if (n_pages == 1 && type == I915_MAP_WB)
+		return kmap(sg_page(sgt->sgl));
+
+	if (n_pages > ARRAY_SIZE(stack_pages)) {
+		/* Too big for stack -- allocate temporary array instead */
+		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+		if (!pages)
+			return NULL;
+	}
+
+	for_each_sgt_page(page, sgt_iter, sgt)
+		pages[i++] = page;
+
+	/* Check that we have the expected number of pages */
+	GEM_BUG_ON(i != n_pages);
+
+	switch (type) {
+	case I915_MAP_WB:
+		pgprot = PAGE_KERNEL;
+		break;
+	case I915_MAP_WC:
+		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
+		break;
+	}
+	addr = vmap(pages, n_pages, 0, pgprot);
+
+	if (pages != stack_pages)
+		drm_free_large(pages);
+
+	return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+			      enum i915_map_type type)
+{
+	enum i915_map_type has_type;
+	bool pinned;
+	void *ptr;
+	int ret;
+
+	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+	ret = mutex_lock_interruptible(&obj->mm.lock);
+	if (ret)
+		return ERR_PTR(ret);
+
+	pinned = true;
+	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+		if (unlikely(!obj->mm.pages)) {
+			ret = ____i915_gem_object_get_pages(obj);
+			if (ret)
+				goto err_unlock;
+
+			smp_mb__before_atomic();
+		}
+		atomic_inc(&obj->mm.pages_pin_count);
+		pinned = false;
+	}
+	GEM_BUG_ON(!obj->mm.pages);
+
+	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
+	if (ptr && has_type != type) {
+		if (pinned) {
+			ret = -EBUSY;
+			goto err_unpin;
+		}
+
+		if (is_vmalloc_addr(ptr))
+			vunmap(ptr);
+		else
+			kunmap(kmap_to_page(ptr));
+
+		ptr = obj->mm.mapping = NULL;
+	}
+
+	if (!ptr) {
+		ptr = i915_gem_object_map(obj, type);
+		if (!ptr) {
+			ret = -ENOMEM;
+			goto err_unpin;
+		}
+
+		obj->mm.mapping = ptr_pack_bits(ptr, type);
+	}
+
+out_unlock:
+	mutex_unlock(&obj->mm.lock);
+	return ptr;
+
+err_unpin:
+	atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+	ptr = ERR_PTR(ret);
+	goto out_unlock;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/huge_gem_object.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index c9c9a6cf8bb1..1b0bd6576785 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -33,8 +33,12 @@
 
 #include <drm/i915_drm.h>
 
+#include "i915_gem_request.h"
+
 #include "i915_selftest.h"
 
+struct drm_i915_gem_object;
+
 struct drm_i915_gem_object_ops {
 	unsigned int flags;
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
@@ -364,5 +368,111 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
 
-#endif
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+		       unsigned int n, unsigned int *offset);
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+			 unsigned int n);
+
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+			       unsigned int n);
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+				unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+				 struct sg_table *pages);
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	might_lock(&obj->mm.lock);
+
+	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+		return 0;
+
+	return __i915_gem_object_get_pages(obj);
+}
+
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	GEM_BUG_ON(!obj->mm.pages);
+
+	atomic_inc(&obj->mm.pages_pin_count);
+}
+
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+	return atomic_read(&obj->mm.pages_pin_count);
+}
+
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	GEM_BUG_ON(!obj->mm.pages);
 
+	atomic_dec(&obj->mm.pages_pin_count);
+}
+
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	__i915_gem_object_unpin_pages(obj);
+}
+
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+	I915_MM_NORMAL = 0,
+	I915_MM_SHRINKER
+};
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				 enum i915_mm_subclass subclass);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
+
+enum i915_map_type {
+	I915_MAP_WB = 0,
+	I915_MAP_WC,
+};
+
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj: the object to map into kernel address space
+ * @type: the type of mapping, used to select pgprot_t
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space. Based on the @type of mapping, the PTE will be
+ * set to either WriteBack or WriteCombine (via pgprot_t).
+ *
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
+ *
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+					   enum i915_map_type type);
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj: the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * upon the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_pages(obj);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
similarity index 99%
rename from drivers/gpu/drm/i915/selftests/i915_gem_object.c
rename to drivers/gpu/drm/i915/selftests/i915_gem.c
index 67d82bf1407f..13f05263a574 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -569,7 +569,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	goto out;
 }
 
-int i915_gem_object_mock_selftests(void)
+int i915_gem_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_gem_object),
@@ -588,7 +588,7 @@ int i915_gem_object_mock_selftests(void)
 	return err;
 }
 
-int i915_gem_object_live_selftests(struct drm_i915_private *i915)
+int i915_gem_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_gem_huge),
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index aa680c69e5a8..c7194e6f6af9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -11,7 +11,7 @@
 selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
 selftest(uncore, intel_uncore_live_selftests)
 selftest(requests, i915_gem_request_live_selftests)
-selftest(objects, i915_gem_object_live_selftests)
+selftest(gem, i915_gem_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
 selftest(coherency, i915_gem_coherency_live_selftests)
 selftest(gtt, i915_gem_gtt_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index be9a9ebf5692..3c862db41c2c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -13,7 +13,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
 selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
 selftest(requests, i915_gem_request_mock_selftests)
-selftest(objects, i915_gem_object_mock_selftests)
+selftest(gem, i915_gem_mock_selftests)
 selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
 selftest(vma, i915_vma_mock_selftests)
 selftest(evict, i915_gem_evict_mock_selftests)
-- 
2.11.0


* [PATCH 2/3] drm/i915: Exercise backing storage of mock gem objects
  2017-02-27 13:31 Start planning for handling async pages, binding, everything Chris Wilson
  2017-02-27 13:31 ` [PATCH 1/3] drm/i915: Start splitting out i915_gem_object routines Chris Wilson
@ 2017-02-27 13:31 ` Chris Wilson
  2017-02-27 13:31 ` [PATCH 3/3] drm/i915: Prepare for async get_pages Chris Wilson
  2017-02-27 15:22 ` ✗ Fi.CI.BAT: warning for series starting with [1/3] drm/i915: Start splitting out i915_gem_object routines Patchwork
  3 siblings, 0 replies; 5+ messages in thread
From: Chris Wilson @ 2017-02-27 13:31 UTC (permalink / raw)
  To: intel-gfx

Check that we can retrieve the right page for a random index, and that
we can map the whole object.
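
In a nutshell, the property the get_page subtest checks (a condensed
sketch of the loop in igt_gem_object_get_page below): the fake backing
store maps page index n to pfn n + PFN_BIAS, so a lookup in any order
must return exactly that page.

	for (i = 0; i < npages; i++) {
		unsigned int idx = order[i]; /* forward, backward or random */
		struct page *page = i915_gem_object_get_page(obj, idx);

		if (page_to_pfn(page) != idx + PFN_BIAS)
			return -EINVAL; /* lookup returned the wrong page */
	}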

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_object.c             |   1 +
 drivers/gpu/drm/i915/selftests/i915_gem_object.c   | 405 +++++++++++++++++++++
 .../gpu/drm/i915/selftests/i915_mock_selftests.h   |   1 +
 3 files changed, 407 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_object.c

diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
index f222980cee34..30a704ea7e3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -409,4 +409,5 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/huge_gem_object.c"
+#include "selftests/i915_gem_object.c"
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
new file mode 100644
index 000000000000..1328332150f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#include "mock_gem_device.h"
+
+#define PFN_BIAS 0x1000
+
+static void fake_free_pages(struct drm_i915_gem_object *obj,
+			    struct sg_table *pages)
+{
+	sg_free_table(pages);
+	kfree(pages);
+}
+
+static struct sg_table *
+fake_get_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+	struct sg_table *pages;
+	struct scatterlist *sg;
+	struct rnd_state prng;
+	unsigned long pfn, rem;
+
+	prandom_seed_state(&prng, obj->scratch);
+
+	pages = kmalloc(sizeof(*pages), GFP);
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+
+	rem = obj->base.size >> PAGE_SHIFT;
+	if (sg_alloc_table(pages, obj->base.size >> PAGE_SHIFT, GFP)) {
+		kfree(pages);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pfn = PFN_BIAS;
+	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
+		unsigned int len = 1 + prandom_u32_state(&prng) % rem;
+
+		sg_set_page(sg, pfn_to_page(pfn), len * PAGE_SIZE, 0);
+
+		pfn += len;
+		rem -= len;
+		if (!rem) {
+			sg_mark_end(sg);
+			break;
+		}
+	}
+	GEM_BUG_ON(rem);
+
+	obj->mm.madv = I915_MADV_DONTNEED;
+	return pages;
+#undef GFP
+}
+
+static void fake_put_pages(struct drm_i915_gem_object *obj,
+			   struct sg_table *pages)
+{
+	fake_free_pages(obj, pages);
+	obj->mm.dirty = false;
+	obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+	.get_pages = fake_get_pages,
+	.put_pages = fake_put_pages,
+};
+
+static struct drm_i915_gem_object *
+fake_object(struct drm_i915_private *i915, u64 size, u32 seed)
+{
+	struct drm_i915_gem_object *obj;
+
+	GEM_BUG_ON(!size);
+	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+	if (overflows_type(size, obj->base.size))
+		return ERR_PTR(-E2BIG);
+
+	obj = i915_gem_object_alloc(i915);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	drm_gem_private_object_init(&i915->drm, &obj->base, size);
+	i915_gem_object_init(obj, &fake_ops);
+
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->cache_level = I915_CACHE_NONE;
+
+	obj->scratch = seed;
+
+	return obj;
+}
+
+static unsigned int *order_forward(unsigned int count, struct rnd_state *prng)
+{
+	unsigned int *order;
+	unsigned int i;
+
+	order = kmalloc(sizeof(*order) * count, GFP_TEMPORARY);
+	if (!order)
+		return NULL;
+
+	for (i = 0; i < count; i++)
+		order[i] = i;
+
+	return order;
+}
+
+static unsigned int *order_backward(unsigned int count, struct rnd_state *prng)
+{
+	unsigned int *order;
+	unsigned int i;
+
+	order = kmalloc(sizeof(*order) * count, GFP_TEMPORARY);
+	if (!order)
+		return NULL;
+
+	for (i = 0; i < count; i++)
+		order[i] = count - i - 1;
+
+	return order;
+}
+
+static unsigned int *order_random(unsigned int count, struct rnd_state *prng)
+{
+	return i915_random_order(count, prng);
+}
+
+static int igt_gem_object_get_page(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	const struct phase {
+		const char *name;
+		unsigned int *(*order)(unsigned int, struct rnd_state *);
+		unsigned int flags;
+	} phases[] = {
+		{ "forward", order_forward },
+		{ "backward", order_backward },
+		{ "random", order_random },
+		{}
+	}, *p;
+	I915_RND_STATE(prng);
+
+	for (p = phases; p->name; p++) {
+		const unsigned int npages = 1024;
+		struct drm_i915_gem_object *obj;
+		unsigned int *order;
+		unsigned int i;
+		int err;
+
+		order = p->order(npages, &prng);
+		if (!order)
+			return -ENOMEM;
+
+		obj = fake_object(i915,
+				  npages * PAGE_SIZE,
+				  prandom_u32_state(&prng));
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err_free;
+		}
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			goto err_put;
+
+		for (i = 0; i < npages; i++) {
+			unsigned int idx = order[i];
+			struct page *page;
+
+			page = i915_gem_object_get_page(obj, idx);
+			if (page_to_pfn(page) != idx + PFN_BIAS) {
+				pr_err("object->page[%d:%d] lookup failed, direction %s, found pfn %lu, expected %u\n",
+				       i, idx, p->name, page_to_pfn(page), idx + PFN_BIAS);
+				err = -EINVAL;
+				goto err_unpin;
+			}
+		}
+
+err_unpin:
+		i915_gem_object_unpin_pages(obj);
+err_put:
+		i915_gem_object_put(obj);
+err_free:
+		kfree(order);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+#define FAULT BIT(0)
+
+static void map_free_pages(struct sg_table *st)
+{
+	struct scatterlist *sg;
+
+	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+		if (sg_page(sg))
+			__free_pages(sg_page(sg), get_order(sg->length));
+	}
+
+	sg_free_table(st);
+	kfree(st);
+}
+
+static struct sg_table *
+map_get_pages(struct drm_i915_gem_object *obj)
+{
+	struct sg_table *pages;
+	struct scatterlist *sg;
+	unsigned int order;
+
+	if (obj->scratch & FAULT)
+		return ERR_PTR(-EFAULT);
+
+	pages = kmalloc(sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+
+	if (sg_alloc_table(pages, MAX_ORDER, GFP_KERNEL)) {
+		kfree(pages);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg = pages->sgl;
+	for (order = 0; order < MAX_ORDER; order++) {
+		struct page *page;
+		unsigned int *vaddr;
+		unsigned int n;
+
+		page = alloc_pages(GFP_KERNEL, order);
+		if (!page) {
+			sg_set_page(sg, NULL, 0, 0);
+			sg_mark_end(sg);
+			map_free_pages(pages);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		vaddr = kmap(page);
+		for (n = 0; n < 1 << order; n++)
+			vaddr[n * PAGE_SIZE / sizeof(*vaddr)] = order;
+		kunmap(page);
+
+		sg_set_page(sg, page, PAGE_SIZE << order, 0);
+		sg = sg_next(sg);
+	}
+	GEM_BUG_ON(sg);
+
+	obj->mm.madv = I915_MADV_DONTNEED;
+	return pages;
+}
+
+static void map_put_pages(struct drm_i915_gem_object *obj,
+			  struct sg_table *pages)
+{
+	map_free_pages(pages);
+	obj->mm.dirty = false;
+	obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops map_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+	.get_pages = map_get_pages,
+	.put_pages = map_put_pages,
+};
+
+static struct drm_i915_gem_object *
+map_object(struct drm_i915_private *i915,
+	   unsigned int flags)
+{
+	struct drm_i915_gem_object *obj;
+	unsigned int n, size;
+
+	obj = i915_gem_object_alloc(i915);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	size = 0;
+	for (n = 0; n < MAX_ORDER; n++) /* lazy! */
+		size += PAGE_SIZE << n;
+
+	drm_gem_private_object_init(&i915->drm, &obj->base, size);
+	i915_gem_object_init(obj, &map_ops);
+
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->cache_level = I915_CACHE_NONE;
+
+	obj->scratch = flags;
+
+	return obj;
+}
+
+static int igt_gem_object_pin_map(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	const struct {
+		const char *name;
+		unsigned int flags;
+	} phases[] = {
+		{ "sync" },
+		{ "sync-fault", FAULT },
+		{ "sync-after-fault" },
+		{},
+	}, *p;
+	unsigned int *vaddr;
+	unsigned int order;
+	int err = 0;
+
+	for (p = phases; p->name; p++) {
+		obj = map_object(i915, p->flags);
+		if (IS_ERR(obj))
+			return PTR_ERR(obj);
+
+		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+
+		if (p->flags & FAULT) {
+			if (vaddr != ERR_PTR(-EFAULT)) {
+				pr_err("Expected fault injection!\n");
+				err = -EINVAL;
+			}
+			goto err;
+		}
+
+		if (IS_ERR(vaddr)) {
+			err = PTR_ERR(vaddr);
+			goto err;
+		}
+
+		for (order = 0; order < MAX_ORDER; order++) {
+			unsigned int n;
+
+			for (n = 0; n < 1 << order; n++) {
+				if (vaddr[n * PAGE_SIZE / sizeof(*vaddr)] != order) {
+					pr_err("invalid mapping at order %d, page %d: found %d\n",
+							order, n, vaddr[n * PAGE_SIZE / sizeof(*vaddr)]);
+					err = -EINVAL;
+					goto err_unmap;
+				}
+			}
+
+			vaddr += (PAGE_SIZE << order) / sizeof(*vaddr);
+		}
+
+err_unmap:
+		i915_gem_object_unpin_map(obj);
+err:
+		i915_gem_object_put(obj);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int i915_gem_object_mock_selftests(void)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_gem_object_get_page),
+		SUBTEST(igt_gem_object_pin_map),
+	};
+	struct drm_i915_private *i915;
+	int err;
+
+	i915 = mock_gem_device();
+	if (!i915)
+		return -ENOMEM;
+
+	err = i915_subtests(tests, i915);
+
+	drm_dev_unref(&i915->drm);
+	return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 3c862db41c2c..5cae61db5b56 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -14,6 +14,7 @@ selftest(uncore, intel_uncore_mock_selftests)
 selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
 selftest(requests, i915_gem_request_mock_selftests)
 selftest(gem, i915_gem_mock_selftests)
+selftest(object, i915_gem_object_mock_selftests)
 selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
 selftest(vma, i915_vma_mock_selftests)
 selftest(evict, i915_gem_evict_mock_selftests)
-- 
2.11.0


* [PATCH 3/3] drm/i915: Prepare for async get_pages
  2017-02-27 13:31 Start planning for handling async pages, binding, everything Chris Wilson
  2017-02-27 13:31 ` [PATCH 1/3] drm/i915: Start splitting out i915_gem_object routines Chris Wilson
  2017-02-27 13:31 ` [PATCH 2/3] drm/i915: Exercise backing storage of mock gem objects Chris Wilson
@ 2017-02-27 13:31 ` Chris Wilson
  2017-02-27 15:22 ` ✗ Fi.CI.BAT: warning for series starting with [1/3] drm/i915: Start splitting out i915_gem_object routines Patchwork
  3 siblings, 0 replies; 5+ messages in thread
From: Chris Wilson @ 2017-02-27 13:31 UTC (permalink / raw)
  To: intel-gfx

In the next patch, we will allow for obj->mm.__pages to be populated
asynchronously. This means that simply acquiring a pages_pin_count is no
longer sufficient to be sure the pages are there; we need to acquire the
pin (to prevent the pages from disappearing again) and then wait for the
completion.
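
In other words, callers that need the sg_table itself now follow a
two-step pattern (a sketch assembled from the helpers used in this
patch):

	err = i915_gem_object_pin_pages(obj); /* may kick off async get_pages */
	if (err)
		return err;

	err = i915_gem_object_wait_for_pages(obj); /* wait for the completion */
	if (err) {
		i915_gem_object_unpin_pages(obj);
		return err;
	}

	pages = i915_gem_object_pages(obj); /* now safe to inspect */
	if (IS_ERR(pages)) { /* get_pages itself failed */
		i915_gem_object_unpin_pages(obj);
		return PTR_ERR(pages);
	}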

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c                  | 26 +++++++---
 drivers/gpu/drm/i915/i915_gem_clflush.c          |  4 +-
 drivers/gpu/drm/i915/i915_gem_dmabuf.c           | 14 ++++--
 drivers/gpu/drm/i915/i915_gem_gtt.c              | 23 +++++++--
 drivers/gpu/drm/i915/i915_gem_object.c           | 61 ++++++++++++++++--------
 drivers/gpu/drm/i915/i915_gem_object.h           | 47 ++++++++++++++----
 drivers/gpu/drm/i915/i915_gem_render_state.c     |  2 +-
 drivers/gpu/drm/i915/i915_gem_shrinker.c         | 14 +++---
 drivers/gpu/drm/i915/i915_gem_stolen.c           |  2 +-
 drivers/gpu/drm/i915/i915_gem_tiling.c           |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c          | 48 +++++--------------
 drivers/gpu/drm/i915/i915_vma.c                  |  4 +-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c    |  4 +-
 drivers/gpu/drm/i915/selftests/i915_gem_object.c | 45 +++++++++++++++--
 drivers/gpu/drm/i915/selftests/i915_vma.c        |  6 +--
 drivers/gpu/drm/i915/selftests/mock_gtt.c        |  2 +-
 16 files changed, 203 insertions(+), 101 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0316215221f8..ce3d83f924bf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -596,7 +596,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 		return ret;
 
 	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-	if (obj->mm.pages)
+	if (i915_gem_object_has_pages(obj))
 		return -EBUSY;
 
 	GEM_BUG_ON(obj->ops != &i915_gem_object_ops);
@@ -2144,7 +2144,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 	struct address_space *mapping;
 
 	lockdep_assert_held(&obj->mm.lock);
-	GEM_BUG_ON(obj->mm.pages);
+	GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 	switch (obj->mm.madv) {
 	case I915_MADV_DONTNEED:
@@ -3867,7 +3867,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;
 
-	if (obj->mm.pages &&
+	if (i915_gem_object_has_pages(obj) &&
 	    i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -3886,7 +3886,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		obj->mm.madv = args->madv;
 
 	/* if the object is no longer attached, discard its backing storage */
-	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+	if (obj->mm.madv == I915_MADV_DONTNEED &&
+	    !i915_gem_object_has_pages(obj))
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -3928,6 +3929,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
 	mutex_init(&obj->mm.get_page.lock);
+	init_completion(&obj->mm.complete);
+	complete_all(&obj->mm.complete);
 
 	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
@@ -4071,7 +4074,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
 			atomic_set(&obj->mm.pages_pin_count, 0);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		GEM_BUG_ON(obj->mm.pages);
+		GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 		if (obj->base.import_attach)
 			drm_prime_gem_destroy(&obj->base, NULL);
@@ -4772,7 +4775,16 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
 	if (ret)
 		goto fail;
 
-	sg = obj->mm.pages;
+	ret = i915_gem_object_wait_for_pages(obj);
+	if (ret)
+		goto fail_unpin;
+
+	sg = i915_gem_object_pages(obj);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_unpin;
+	}
+
 	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
 	obj->mm.dirty = true; /* Backing store is now out of date */
 	i915_gem_object_unpin_pages(obj);
@@ -4785,6 +4797,8 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
 
 	return obj;
 
+fail_unpin:
+	i915_gem_object_unpin_pages(obj);
 fail:
 	i915_gem_object_put(obj);
 	return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index d925fb582ba7..d68817231e98 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -71,7 +71,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
 
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
-	drm_clflush_sg(obj->mm.pages);
+	drm_clflush_sg(i915_gem_object_pages(obj));
 	obj->cache_dirty = false;
 
 	intel_fb_obj_flush(obj, ORIGIN_CPU);
@@ -176,7 +176,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 		reservation_object_unlock(obj->resv);
 
 		i915_sw_fence_commit(&clflush->wait);
-	} else if (obj->mm.pages) {
+	} else if (i915_gem_object_has_pinned_pages(obj)) {
 		__i915_do_clflush(obj);
 	} else {
 		GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 74edd187d0aa..5eb8a81887b6 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -40,7 +40,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 					     enum dma_data_direction dir)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-	struct sg_table *st;
+	struct sg_table *st, *pages;
 	struct scatterlist *src, *dst;
 	int ret, i;
 
@@ -48,6 +48,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	if (ret)
 		goto err;
 
+	ret = i915_gem_object_wait_for_pages(obj);
+	if (ret)
+		goto err_unpin_pages;
+
 	/* Copy sg so that we make an independent mapping */
 	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (st == NULL) {
@@ -55,13 +59,15 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 		goto err_unpin_pages;
 	}
 
-	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+	pages = i915_gem_object_pages(obj);
+
+	ret = sg_alloc_table(st, pages->nents, GFP_KERNEL);
 	if (ret)
 		goto err_free;
 
-	src = obj->mm.pages->sgl;
+	src = pages->sgl;
 	dst = st->sgl;
-	for (i = 0; i < obj->mm.pages->nents; i++) {
+	for (i = 0; i < pages->nents; i++) {
 		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index db335d3ba3ee..0454ade960e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -190,21 +190,30 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
 			  u32 unused)
 {
+	struct sg_table *pages;
 	u32 pte_flags;
 	int ret;
 
+	ret = i915_gem_object_wait_for_pages(vma->obj);
+	if (ret)
+		return ret;
+
+	pages = i915_gem_object_pages(vma->obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
 	ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size);
 	if (ret)
 		return ret;
 
-	vma->pages = vma->obj->mm.pages;
+	vma->pages = pages;
 
 	/* Currently applicable only to VLV */
 	pte_flags = 0;
 	if (vma->obj->gt_ro)
 		pte_flags |= PTE_READ_ONLY;
 
-	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
+	vma->vm->insert_entries(vma->vm, pages, vma->node.start,
 				cache_level, pte_flags);
 
 	return 0;
@@ -2046,7 +2055,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 		 * try again - if there are no more pages to remove from
 		 * the DMA remapper, i915_gem_shrink will return 0.
 		 */
-		GEM_BUG_ON(obj->mm.pages == pages);
+		GEM_BUG_ON(obj->mm.__pages == pages);
 	} while (i915_gem_shrink(to_i915(obj->base.dev),
 				 obj->base.size >> PAGE_SHIFT,
 				 I915_SHRINK_BOUND |
@@ -2315,7 +2324,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 err_pages:
 	if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
-		if (vma->pages != vma->obj->mm.pages) {
+		if (vma->pages != vma->obj->mm.__pages) {
 			GEM_BUG_ON(!vma->pages);
 			sg_free_table(vma->pages);
 			kfree(vma->pages);
@@ -3170,9 +3179,13 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 	 */
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
 
+	ret = i915_gem_object_wait_for_pages(vma->obj);
+	if (ret)
+		return ret;
+
 	switch (vma->ggtt_view.type) {
 	case I915_GGTT_VIEW_NORMAL:
-		vma->pages = vma->obj->mm.pages;
+		vma->pages = i915_gem_object_pages(vma->obj);
 		return 0;
 
 	case I915_GGTT_VIEW_ROTATED:
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
index 30a704ea7e3b..9a07530ca2b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -25,22 +25,34 @@
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages)
+int __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+				struct sg_table *pages)
 {
-	lockdep_assert_held(&obj->mm.lock);
+	int err = 0;
+
+	/* Serialized by obj->mm.lock + obj->mm.complete */
+	GEM_BUG_ON(completion_done(&obj->mm.complete));
+	GEM_BUG_ON(!pages);
+
+	obj->mm.__pages = pages;
+	if (unlikely(IS_ERR(pages))) {
+		err = PTR_ERR(pages);
+		goto out;
+	}
 
 	obj->mm.get_page.sg_pos = pages->sgl;
 	obj->mm.get_page.sg_idx = 0;
 
-	obj->mm.pages = pages;
-
 	if (i915_gem_object_is_tiled(obj) &&
 	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		GEM_BUG_ON(obj->mm.quirked);
 		__i915_gem_object_pin_pages(obj);
 		obj->mm.quirked = true;
 	}
+
+out:
+	complete_all(&obj->mm.complete);
+	return err;
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
@@ -54,12 +66,15 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 		return -EFAULT;
 	}
 
+	if (!completion_done(&obj->mm.complete))
+		return 0;
+
+	reinit_completion(&obj->mm.complete);
 	pages = obj->ops->get_pages(obj);
-	if (unlikely(IS_ERR(pages)))
-		return PTR_ERR(pages);
+	if (!pages) /* async completion */
+		return 0;
 
-	__i915_gem_object_set_pages(obj, pages);
-	return 0;
+	return __i915_gem_object_set_pages(obj, pages);
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -77,12 +92,10 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (unlikely(!obj->mm.pages)) {
+	if (unlikely(!obj->mm.__pages)) {
 		err = ____i915_gem_object_get_pages(obj);
 		if (err)
 			goto unlock;
-
-		smp_mb__before_atomic();
 	}
 	atomic_inc(&obj->mm.pages_pin_count);
 
@@ -109,7 +122,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 		return;
 
 	GEM_BUG_ON(obj->bind_count);
-	if (!READ_ONCE(obj->mm.pages))
+
+	wait_for_completion(&obj->mm.complete);
+	if (!i915_gem_object_has_pages(obj))
 		return;
 
 	/* May be called by shrinker from within get_pages() (on another bo) */
@@ -120,7 +135,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	/* ->put_pages might need to allocate memory for the bit17 swizzle
 	 * array, hence protect them from being reaped by removing them from gtt
 	 * lists early. */
-	pages = fetch_and_zero(&obj->mm.pages);
+	pages = fetch_and_zero(&obj->mm.__pages);
 	GEM_BUG_ON(!pages);
 
 	if (obj->mm.mapping) {
@@ -136,8 +151,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	}
 
 	__i915_gem_object_reset_page_iter(obj);
+	if (!IS_ERR(pages))
+		obj->ops->put_pages(obj, pages);
 
-	obj->ops->put_pages(obj, pages);
 unlock:
 	mutex_unlock(&obj->mm.lock);
 }
@@ -154,6 +170,8 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 	might_sleep();
 	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+	GEM_BUG_ON(IS_ERR(obj->mm.__pages));
 
 	/* As we iterate forward through the sg, we record each entry in a
 	 * radixtree for quick repeated (backwards) lookups. If we have seen
@@ -296,11 +314,11 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
 }
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 				 enum i915_map_type type)
 {
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
-	struct sg_table *sgt = obj->mm.pages;
+	const struct sg_table *sgt = i915_gem_object_pages(obj);
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	struct page *stack_pages[32];
@@ -359,17 +377,14 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 	pinned = true;
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(!obj->mm.pages)) {
+		if (unlikely(!obj->mm.__pages)) {
 			ret = ____i915_gem_object_get_pages(obj);
 			if (ret)
 				goto err_unlock;
-
-			smp_mb__before_atomic();
 		}
 		atomic_inc(&obj->mm.pages_pin_count);
 		pinned = false;
 	}
-	GEM_BUG_ON(!obj->mm.pages);
 
 	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
 	if (ptr && has_type != type) {
@@ -387,6 +402,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	}
 
 	if (!ptr) {
+		ret = i915_gem_object_wait_for_pages(obj);
+		if (ret)
+			goto err_unpin;
+
 		ptr = i915_gem_object_map(obj, type);
 		if (!ptr) {
 			ret = -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 1b0bd6576785..813d35b060be 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -25,6 +25,7 @@
 #ifndef __I915_GEM_OBJECT_H__
 #define __I915_GEM_OBJECT_H__
 
+#include <linux/completion.h>
 #include <linux/reservation.h>
 
 #include <drm/drm_vma_manager.h>
@@ -124,8 +125,9 @@ struct drm_i915_gem_object {
 	struct {
 		struct mutex lock; /* protects the pages and their use */
 		atomic_t pages_pin_count;
+		struct completion complete;
 
-		struct sg_table *pages;
+		struct sg_table *__pages;
 		void *mapping;
 
 		struct i915_gem_object_page_iter {
@@ -179,7 +181,6 @@ struct drm_i915_gem_object {
 
 			struct i915_mm_struct *mm;
 			struct i915_mmu_object *mmu_object;
-			struct work_struct *work;
 		} userptr;
 
 		unsigned long scratch;
@@ -384,8 +385,8 @@ dma_addr_t
 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
 				unsigned long n);
 
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages);
+int __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+				struct sg_table *pages);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
@@ -402,22 +403,52 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	GEM_BUG_ON(!obj->mm.pages);
-
 	atomic_inc(&obj->mm.pages_pin_count);
 }
 
 static inline bool
-i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+i915_gem_object_has_pinned_pages(const struct drm_i915_gem_object *obj)
 {
 	return atomic_read(&obj->mm.pages_pin_count);
 }
 
+static inline bool
+i915_gem_object_has_pages(const struct drm_i915_gem_object *obj)
+{
+	return READ_ONCE(obj->mm.__pages);
+}
+
+static inline int
+i915_gem_object_wait_for_pages(struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	err = wait_for_completion_interruptible(&obj->mm.complete);
+	if (err)
+		return err;
+
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+	if (IS_ERR(obj->mm.__pages))
+		return PTR_ERR(obj->mm.__pages);
+
+	return 0;
+}
+
+static inline struct sg_table *
+i915_gem_object_pages(struct drm_i915_gem_object *obj)
+{
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	GEM_BUG_ON(!READ_ONCE(obj->mm.complete.done));
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+
+	return obj->mm.__pages;
+}
+
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-	GEM_BUG_ON(!obj->mm.pages);
 
 	atomic_dec(&obj->mm.pages_pin_count);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index b42c81b42487..0096ca8b9c24 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Recreate the page after shrinking */
-	if (!so->vma->obj->mm.pages)
+	if (!i915_gem_object_has_pages(so->vma->obj))
 		so->batch_offset = -1;
 
 	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 3ba838f27292..7e3bb48e043e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -72,9 +72,11 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-	if (!obj->mm.pages)
+	if (!i915_gem_object_has_pages(obj))
 		return false;
 
+	GEM_BUG_ON(!completion_done(&obj->mm.complete));
+
 	/* Consider only shrinkable objects. */
 	if (!i915_gem_object_is_shrinkable(obj))
 		return false;
@@ -104,7 +106,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
 	if (i915_gem_object_unbind(obj) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
-	return !READ_ONCE(obj->mm.pages);
+	return !i915_gem_object_has_pages(obj);
 }
 
 /**
@@ -193,7 +195,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 						       typeof(*obj),
 						       global_link))) {
 			list_move_tail(&obj->global_link, &still_in_list);
-			if (!obj->mm.pages) {
+			if (!i915_gem_object_has_pages(obj)) {
 				list_del_init(&obj->global_link);
 				continue;
 			}
@@ -218,7 +220,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 				/* May arrive from get_pages on another bo */
 				mutex_lock_nested(&obj->mm.lock,
 						  I915_MM_SHRINKER);
-				if (!obj->mm.pages) {
+				if (!i915_gem_object_has_pages(obj)) {
 					__i915_gem_object_invalidate(obj);
 					list_del_init(&obj->global_link);
 					count += obj->base.size >> PAGE_SHIFT;
@@ -392,7 +394,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 */
 	unbound = bound = unevictable = 0;
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
-		if (!obj->mm.pages)
+		if (!i915_gem_object_has_pages(obj))
 			continue;
 
 		if (!can_release_pages(obj))
@@ -401,7 +403,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 			unbound += obj->base.size >> PAGE_SHIFT;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-		if (!obj->mm.pages)
+		if (!i915_gem_object_has_pages(obj))
 			continue;
 
 		if (!can_release_pages(obj))
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f3abdc27c5dd..eb552c7e76e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -713,7 +713,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	vma->pages = obj->mm.pages;
+	vma->pages = i915_gem_object_pages(obj);
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 5128dac5ba3f..c1d669e32f41 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -263,7 +263,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	 * due to the change in swizzling.
 	 */
 	mutex_lock(&obj->mm.lock);
-	if (obj->mm.pages &&
+	if (i915_gem_object_has_pages(obj) &&
 	    obj->mm.madv == I915_MADV_WILLNEED &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (tiling == I915_TILING_NONE) {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 120186122c82..609120b5ae5c 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -68,22 +68,20 @@ static void cancel_userptr(struct work_struct *work)
 	struct drm_device *dev = obj->base.dev;
 
 	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
+	wait_for_completion(&obj->mm.complete);
 
 	mutex_lock(&dev->struct_mutex);
-	/* Cancel any active worker and force us to re-evaluate gup */
-	obj->userptr.work = NULL;
-
 	/* We are inside a kthread context and can't be interrupted */
 	if (i915_gem_object_unbind(obj) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-	WARN_ONCE(obj->mm.pages,
+	WARN_ONCE(i915_gem_object_has_pages(obj),
 		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
 		  obj->bind_count,
 		  atomic_read(&obj->mm.pages_pin_count),
 		  obj->pin_display);
+	mutex_unlock(&dev->struct_mutex);
 
 	i915_gem_object_put(obj);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void add_object(struct i915_mmu_object *mo)
@@ -476,10 +474,8 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
 	 */
 	if (!value)
 		del_object(obj->userptr.mmu_object);
-	else if (!work_pending(&obj->userptr.mmu_object->work))
-		add_object(obj->userptr.mmu_object);
 	else
-		ret = -EAGAIN;
+		add_object(obj->userptr.mmu_object);
 	spin_unlock(&obj->userptr.mmu_object->mn->lock);
 #endif
 
@@ -492,6 +488,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	struct get_pages_work *work = container_of(_work, typeof(*work), work);
 	struct drm_i915_gem_object *obj = work->obj;
 	const int npages = obj->base.size >> PAGE_SHIFT;
+	struct sg_table *pages;
 	struct page **pvec;
 	int pinned, ret;
 
@@ -526,22 +523,14 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		}
 	}
 
-	mutex_lock(&obj->mm.lock);
-	if (obj->userptr.work == &work->work) {
-		struct sg_table *pages = ERR_PTR(ret);
-
-		if (pinned == npages) {
-			pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
-			if (!IS_ERR(pages)) {
-				__i915_gem_object_set_pages(obj, pages);
-				pinned = 0;
-				pages = NULL;
-			}
-		}
-
-		obj->userptr.work = ERR_CAST(pages);
+	pages = ERR_PTR(ret);
+	if (pinned == npages)
+		pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+	__i915_gem_object_set_pages(obj, pages);
+	if (!IS_ERR(pages)) {
+		pinned = 0;
+		pages = NULL;
 	}
-	mutex_unlock(&obj->mm.lock);
 
 	release_pages(pvec, pinned, 0);
 	drm_free_large(pvec);
@@ -580,8 +569,6 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 	if (work == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	obj->userptr.work = &work->work;
-
 	work->obj = i915_gem_object_get(obj);
 
 	work->task = current;
@@ -591,7 +578,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
 
 	*active = true;
-	return ERR_PTR(-EAGAIN);
+	return NULL;
 }
 
 static struct sg_table *
@@ -620,14 +607,6 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	 * egregious cases from causing harm.
 	 */
 
-	if (obj->userptr.work) {
-		/* active flag should still be held for the pending work */
-		if (IS_ERR(obj->userptr.work))
-			return ERR_CAST(obj->userptr.work);
-		else
-			return ERR_PTR(-EAGAIN);
-	}
-
 	/* Let the mmu-notifier know that we have begun and need cancellation */
 	ret = __i915_gem_userptr_set_active(obj, true);
 	if (ret)
@@ -669,7 +648,6 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	struct sgt_iter sgt_iter;
 	struct page *page;
 
-	BUG_ON(obj->userptr.work != NULL);
 	__i915_gem_userptr_set_active(obj, false);
 
 	if (obj->mm.madv != I915_MADV_WILLNEED)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9c1ef8d67ba7..cc29015837d5 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -691,6 +691,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+	GEM_BUG_ON(!completion_done(&obj->mm.complete));
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* release the fence reg _after_ flushing */
@@ -711,7 +713,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-	if (vma->pages != obj->mm.pages) {
+	if (vma->pages != obj->mm.__pages) {
 		GEM_BUG_ON(!vma->pages);
 		sg_free_table(vma->pages);
 		kfree(vma->pages);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 0f3fa34377c6..dbcbae93f496 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -250,7 +250,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
 				break;
 
-			vm->insert_entries(vm, obj->mm.pages, addr,
+			vm->insert_entries(vm, obj->mm.__pages, addr,
 					   I915_CACHE_NONE, 0);
 		}
 		count = n;
@@ -1089,7 +1089,7 @@ static void track_vma_bind(struct i915_vma *vma)
 	obj->bind_count++; /* track for eviction later */
 	__i915_gem_object_pin_pages(obj);
 
-	vma->pages = obj->mm.pages;
+	vma->pages = i915_gem_object_pages(obj);
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 1328332150f6..5902433d4e14 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -233,7 +233,7 @@ static void map_free_pages(struct sg_table *st)
 }
 
 static struct sg_table *
-map_get_pages(struct drm_i915_gem_object *obj)
+__map_get_pages(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
 	struct scatterlist *sg;
@@ -281,6 +281,39 @@ map_get_pages(struct drm_i915_gem_object *obj)
 #undef GFP
 }
 
+struct map_work {
+	struct delayed_work work;
+	struct drm_i915_gem_object *obj;
+};
+
+static void map_get_pages_work(struct work_struct *work)
+{
+	struct map_work *data = container_of(work, typeof(*data), work.work);
+
+	__i915_gem_object_set_pages(data->obj, __map_get_pages(data->obj));
+	kfree(data);
+}
+
+static struct sg_table *
+map_get_pages(struct drm_i915_gem_object *obj)
+{
+	unsigned long delay = obj->scratch >> 8;
+	struct map_work *data;
+
+	if (!delay)
+		return __map_get_pages(obj);
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_DELAYED_WORK(&data->work, map_get_pages_work);
+	data->obj = obj;
+
+	schedule_delayed_work(&data->work, delay);
+	return NULL;
+}
+
 static void map_put_pages(struct drm_i915_gem_object *obj,
 			  struct sg_table *pages)
 {
@@ -297,6 +330,7 @@ static const struct drm_i915_gem_object_ops map_ops = {
 
 static struct drm_i915_gem_object *
 map_object(struct drm_i915_private *i915,
+	   unsigned long delay,
 	   unsigned int flags)
 {
 	struct drm_i915_gem_object *obj;
@@ -317,7 +351,7 @@ map_object(struct drm_i915_private *i915,
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = I915_CACHE_NONE;
 
-	obj->scratch = flags;
+	obj->scratch = delay << 8 | flags;
 
 	return obj;
 }
@@ -328,10 +362,13 @@ static int igt_gem_object_pin_map(void *arg)
 	struct drm_i915_gem_object *obj;
 	const struct {
 		const char *name;
+		unsigned long delay;
 		unsigned int flags;
 	} phases[] = {
 		{ "sync" },
-		{ "sync-fault", FAULT },
+		{ "sync-fault", 0, FAULT },
+		{ "async", msecs_to_jiffies(10) },
+		{ "async-fault", msecs_to_jiffies(10), FAULT },
 		{ "sync-after-fault" },
 		{},
 	}, *p;
@@ -340,7 +377,7 @@ static int igt_gem_object_pin_map(void *arg)
 	int err = 0;
 
 	for (p = phases; p->name; p++) {
-		obj = map_object(i915, p->flags);
+		obj = map_object(i915, p->delay, p->flags);
 		if (IS_ERR(obj))
 			return PTR_ERR(obj);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fb9072d5877f..16e954b20a3e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -484,7 +484,7 @@ static int igt_vma_rotate(void *arg)
 						goto out_object;
 					}
 
-					if (vma->pages == obj->mm.pages) {
+					if (vma->pages == obj->mm.__pages) {
 						pr_err("VMA using unrotated object pages!\n");
 						err = -EINVAL;
 						goto out_object;
@@ -576,7 +576,7 @@ static bool assert_pin(struct i915_vma *vma,
 			ok = false;
 		}
 
-		if (vma->pages == vma->obj->mm.pages) {
+		if (vma->pages == vma->obj->mm.__pages) {
 			pr_err("(%s) VMA using original object pages!\n",
 			       name);
 			ok = false;
@@ -588,7 +588,7 @@ static bool assert_pin(struct i915_vma *vma,
 			ok = false;
 		}
 
-		if (vma->pages != vma->obj->mm.pages) {
+		if (vma->pages != vma->obj->mm.__pages) {
 			pr_err("VMA not using object pages!\n");
 			ok = false;
 		}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a61309c7cb3e..b321cd5dfa3f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -44,7 +44,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
 			   u32 flags)
 {
 	GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
-	vma->pages = vma->obj->mm.pages;
+	vma->pages = i915_gem_object_pages(vma->obj);
 	vma->flags |= I915_VMA_LOCAL_BIND;
 	return 0;
 }
-- 
2.11.0
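
For readers following the diff, a minimal sketch of the caller contract the
new helpers imply. The helper names (i915_gem_object_pin_pages,
i915_gem_object_wait_for_pages, i915_gem_object_pages,
i915_gem_object_unpin_pages) come from the patch above; the surrounding
function is illustrative only and not part of the series:

	/* Illustrative sketch, not part of the patch. */
	static int example_use_pages(struct drm_i915_gem_object *obj)
	{
		struct sg_table *pages;
		int err;

		/* Pin first; ->get_pages() may now complete asynchronously. */
		err = i915_gem_object_pin_pages(obj);
		if (err)
			return err;

		/* Sleep (interruptibly) until the worker publishes the result. */
		err = i915_gem_object_wait_for_pages(obj);
		if (err)
			goto out_unpin;

		/* Safe to dereference only after the completion has fired. */
		pages = i915_gem_object_pages(obj);
		/* ... walk pages->sgl ... */

	out_unpin:
		i915_gem_object_unpin_pages(obj);
		return err;
	}

On the producer side, ->get_pages() returning NULL signals that a worker will
later call __i915_gem_object_set_pages(), which stores either the sg_table or
an ERR_PTR and fires complete_all() -- the pattern the map_get_pages_work()
selftest above exercises.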

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* ✗ Fi.CI.BAT: warning for series starting with [1/3] drm/i915: Start splitting out i915_gem_object routines
  2017-02-27 13:31 Start planning for handling async pages, binding, everything Chris Wilson
                   ` (2 preceding siblings ...)
  2017-02-27 13:31 ` [PATCH 3/3] drm/i915: Prepare for async get_pages Chris Wilson
@ 2017-02-27 15:22 ` Patchwork
  3 siblings, 0 replies; 5+ messages in thread
From: Patchwork @ 2017-02-27 15:22 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/3] drm/i915: Start splitting out i915_gem_object routines
URL   : https://patchwork.freedesktop.org/series/20312/
State : warning

== Summary ==

Series 20312v1 Series without cover letter
https://patchwork.freedesktop.org/api/1.0/series/20312/revisions/1/mbox/

Test kms_force_connector_basic:
        Subgroup force-connector-state:
                pass       -> SKIP       (fi-ivb-3520m)
        Subgroup force-edid:
                pass       -> SKIP       (fi-ivb-3520m)
        Subgroup force-load-detect:
                pass       -> SKIP       (fi-ivb-3520m)
        Subgroup prune-stale-modes:
                pass       -> SKIP       (fi-ivb-3520m)

fi-bdw-5557u     total:278  pass:267  dwarn:0   dfail:0   fail:0   skip:11 
fi-bsw-n3050     total:278  pass:239  dwarn:0   dfail:0   fail:0   skip:39 
fi-bxt-j4205     total:278  pass:259  dwarn:0   dfail:0   fail:0   skip:19 
fi-bxt-t5700     total:108  pass:95   dwarn:0   dfail:0   fail:0   skip:12 
fi-byt-j1900     total:278  pass:251  dwarn:0   dfail:0   fail:0   skip:27 
fi-byt-n2820     total:278  pass:247  dwarn:0   dfail:0   fail:0   skip:31 
fi-hsw-4770      total:278  pass:262  dwarn:0   dfail:0   fail:0   skip:16 
fi-hsw-4770r     total:278  pass:262  dwarn:0   dfail:0   fail:0   skip:16 
fi-ilk-650       total:278  pass:228  dwarn:0   dfail:0   fail:0   skip:50 
fi-ivb-3520m     total:278  pass:256  dwarn:0   dfail:0   fail:0   skip:22 
fi-ivb-3770      total:278  pass:260  dwarn:0   dfail:0   fail:0   skip:18 
fi-kbl-7500u     total:278  pass:260  dwarn:0   dfail:0   fail:0   skip:18 
fi-skl-6260u     total:278  pass:268  dwarn:0   dfail:0   fail:0   skip:10 
fi-skl-6700hq    total:278  pass:261  dwarn:0   dfail:0   fail:0   skip:17 
fi-skl-6700k     total:278  pass:256  dwarn:4   dfail:0   fail:0   skip:18 
fi-skl-6770hq    total:278  pass:268  dwarn:0   dfail:0   fail:0   skip:10 
fi-snb-2520m     total:278  pass:250  dwarn:0   dfail:0   fail:0   skip:28 
fi-snb-2600      total:278  pass:249  dwarn:0   dfail:0   fail:0   skip:29 

f76e5eca8c2a46cbd0203d32842bca6ce0ec16ef drm-tip: 2017y-02m-27d-13h-20m-25s UTC integration manifest
2f0647e drm/i915: Exercise backing storage of mock gem objects
b8842c2 drm/i915: Start splitting out i915_gem_object routines

== Logs ==

For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_3983/

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread

Thread overview: 5+ messages
2017-02-27 13:31 Start planning for handling async pages, binding, everything Chris Wilson
2017-02-27 13:31 ` [PATCH 1/3] drm/i915: Start splitting out i915_gem_object routines Chris Wilson
2017-02-27 13:31 ` [PATCH 2/3] drm/i915: Exercise backing storage of mock gem objects Chris Wilson
2017-02-27 13:31 ` [PATCH 3/3] drm/i915: Prepare for async get_pages Chris Wilson
2017-02-27 15:22 ` ✗ Fi.CI.BAT: warning for series starting with [1/3] drm/i915: Start splitting out i915_gem_object routines Patchwork
