From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [CI 08/39] drm/i915: Extract i915_gem_obj_prepare_shmem_write()
Date: Thu, 18 Aug 2016 17:16:47 +0100
Message-ID: <20160818161718.27187-8-chris@chris-wilson.co.uk>
In-Reply-To: <20160818161718.27187-1-chris@chris-wilson.co.uk>

This is a companion to i915_gem_obj_prepare_shmem_read() that prepares
the backing storage for direct writes. It first serialises with the GPU,
pins the backing storage and then indicates what clflushes are required in
order for the writes to be coherent.

Whilst here, fix support for ancient CPUs without clflush, for which we
cannot do the GTT+clflush tricks.

v2: Add i915_gem_obj_finish_shmem_access() for symmetry
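
As a rough illustration of the intended calling pattern (a hedged
sketch rather than code taken from the patch; vaddr, src and len are
hypothetical, and drm_clflush_virt_range() is the existing DRM cache
helper):

	unsigned int needs_clflush;
	int ret;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	if (ret)
		return ret;

	/* needs_clflush is a mask of CLFLUSH_BEFORE | CLFLUSH_AFTER */
	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(vaddr, len);	/* drop stale lines first */

	memcpy(vaddr, src, len);

	if (needs_clflush & CLFLUSH_AFTER)
		drm_clflush_virt_range(vaddr, len);	/* make the writes coherent */

	i915_gem_obj_finish_shmem_access(obj);	/* unpin the backing pages */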

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_cmd_parser.c |   4 +-
 drivers/gpu/drm/i915/i915_drv.h        |  17 +++-
 drivers/gpu/drm/i915/i915_gem.c        | 146 +++++++++++++++++++--------------
 3 files changed, 102 insertions(+), 65 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 1db829c8b912..e586e15e172f 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -973,7 +973,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 		       u32 batch_start_offset,
 		       u32 batch_len)
 {
-	int needs_clflush = 0;
+	unsigned int needs_clflush;
 	void *src_base, *src;
 	void *dst = NULL;
 	int ret;
@@ -1020,7 +1020,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 unmap_src:
 	vunmap(src_base);
 unpin_src:
-	i915_gem_object_unpin_pages(src_obj);
+	i915_gem_obj_finish_shmem_access(src_obj);
 
 	return ret ? ERR_PTR(ret) : dst;
 }
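
To put the hunk above in context, the surrounding copy_batch() flow is
roughly the following (a paraphrased sketch, not the exact lines of
i915_cmd_parser.c):

	unsigned int needs_clflush;

	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
	if (ret)
		return ERR_PTR(ret);

	/* ... vmap the source pages to obtain src_base/src ... */

	if (needs_clflush)
		drm_clflush_virt_range(src, batch_len);	/* read coherent data */

	/* ... copy and validate the batch into dst ... */

	i915_gem_obj_finish_shmem_access(src_obj);	/* replaces the bare unpin */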
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5b778ceba82e..91861a08787c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3098,9 +3098,6 @@ int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
-				    int *needs_clflush);
-
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __sg_page_count(struct scatterlist *sg)
@@ -3201,6 +3198,20 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 }
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    unsigned int *needs_clflush);
+int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
+				     unsigned int *needs_clflush);
+#define CLFLUSH_BEFORE 0x1
+#define CLFLUSH_AFTER 0x2
+#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
+
+static inline void
+i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_pages(obj);
+}
+
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_request *to);
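
One detail worth noting before the i915_gem.c hunks below: the flag
values are chosen so that a boolean test can be shifted straight into
place. A minimal illustration (mask, want_before and want_after are
hypothetical names):

	unsigned int mask = 0;

	mask |= want_before;		/* 0 or CLFLUSH_BEFORE (0x1) */
	mask |= want_after << 1;	/* 0 or CLFLUSH_AFTER  (0x2) */

This is exactly how prepare_shmem_write() builds needs_clflush below:
cpu_write_needs_clflush(obj) << 1 sets CLFLUSH_AFTER, while the cache
coherency test for partially written cachelines sets CLFLUSH_BEFORE.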
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a609522221ed..f27c340bb8ee 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -609,35 +609,95 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
  * flush the object from the CPU cache.
  */
 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
-				    int *needs_clflush)
+				    unsigned int *needs_clflush)
 {
 	int ret;
 
 	*needs_clflush = 0;
 
-	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
-		return -EINVAL;
+	if (!i915_gem_object_has_struct_page(obj))
+		return -ENODEV;
 
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
 
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-		/* If we're not in the cpu read domain, set ourself into the gtt
-		 * read domain and manually flush cachelines (if required). This
-		 * optimizes for the case when the gpu will dirty the data
-		 * anyway again before the next pread happens. */
+	/* If we're not in the cpu read domain, set ourself into the gtt
+	 * read domain and manually flush cachelines (if required). This
+	 * optimizes for the case when the gpu will dirty the data
+	 * anyway again before the next pread happens.
+	 */
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
 		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
 							obj->cache_level);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		ret = i915_gem_object_set_to_cpu_domain(obj, false);
+		if (ret) {
+			i915_gem_object_unpin_pages(obj);
+			return ret;
+		}
+		*needs_clflush = 0;
 	}
 
+	return 0;
+}
+
+int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
+				     unsigned int *needs_clflush)
+{
+	int ret;
+
+	*needs_clflush = 0;
+	if (!i915_gem_object_has_struct_page(obj))
+		return -ENODEV;
+
+	ret = i915_gem_object_wait_rendering(obj, false);
+	if (ret)
+		return ret;
+
+	/* If we're not in the cpu write domain, set ourself into the
+	 * gtt write domain and manually flush cachelines (as required).
+	 * This optimizes for the case when the gpu will use the data
+	 * right away and we therefore have to clflush anyway.
+	 */
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;
+
+	/* Same trick applies to invalidate partially written cachelines read
+	 * before writing.
+	 */
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
+							 obj->cache_level);
+
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
 		return ret;
 
 	i915_gem_object_pin_pages(obj);
 
-	return ret;
+	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		ret = i915_gem_object_set_to_cpu_domain(obj, true);
+		if (ret) {
+			i915_gem_object_unpin_pages(obj);
+			return ret;
+		}
+		*needs_clflush = 0;
+	}
+
+	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
+		obj->cache_dirty = true;
+
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+	obj->dirty = 1;
+	return 0;
 }
 
 /* Per-page copy function for the shmem pread fastpath.
@@ -872,19 +932,14 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int needs_clflush = 0;
 	struct sg_page_iter sg_iter;
 
-	if (!i915_gem_object_has_struct_page(obj))
-		return -ENODEV;
-
-	user_data = u64_to_user_ptr(args->data_ptr);
-	remain = args->size;
-
-	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
 	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = args->offset;
+	remain = args->size;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
@@ -940,7 +995,7 @@ next_page:
 	}
 
 out:
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_obj_finish_shmem_access(obj);
 
 	return ret;
 }
@@ -1248,42 +1303,17 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int hit_slowpath = 0;
-	int needs_clflush_after = 0;
-	int needs_clflush_before = 0;
+	unsigned int needs_clflush;
 	struct sg_page_iter sg_iter;
 
-	user_data = u64_to_user_ptr(args->data_ptr);
-	remain = args->size;
-
-	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-	ret = i915_gem_object_wait_rendering(obj, false);
+	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
-		/* If we're not in the cpu write domain, set ourself into the gtt
-		 * write domain and manually flush cachelines (if required). This
-		 * optimizes for the case when the gpu will use the data
-		 * right away and we therefore have to clflush anyway. */
-		needs_clflush_after = cpu_write_needs_clflush(obj);
-	}
-	/* Same trick applies to invalidate partially written cachelines read
-	 * before writing. */
-	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
-		needs_clflush_before =
-			!cpu_cache_is_coherent(dev, obj->cache_level);
-
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		return ret;
-
-	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-
-	i915_gem_object_pin_pages(obj);
-
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = args->offset;
-	obj->dirty = 1;
+	remain = args->size;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
@@ -1307,7 +1337,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		/* If we don't overwrite a cacheline completely we need to be
 		 * careful to have up-to-date data by first clflushing. Don't
 		 * overcomplicate things and flush the entire patch. */
-		partial_cacheline_write = needs_clflush_before &&
+		partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
 			((shmem_page_offset | page_length)
 				& (boot_cpu_data.x86_clflush_size - 1));
 
@@ -1317,7 +1347,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
 					user_data, page_do_bit17_swizzling,
 					partial_cacheline_write,
-					needs_clflush_after);
+					needs_clflush & CLFLUSH_AFTER);
 		if (ret == 0)
 			goto next_page;
 
@@ -1326,7 +1356,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
 					user_data, page_do_bit17_swizzling,
 					partial_cacheline_write,
-					needs_clflush_after);
+					needs_clflush & CLFLUSH_AFTER);
 
 		mutex_lock(&dev->struct_mutex);
 
@@ -1340,7 +1370,7 @@ next_page:
 	}
 
 out:
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_obj_finish_shmem_access(obj);
 
 	if (hit_slowpath) {
 		/*
@@ -1348,17 +1378,15 @@ out:
 		 * cachelines in-line while writing and the object moved
 		 * out of the cpu write domain while we've dropped the lock.
 		 */
-		if (!needs_clflush_after &&
+		if (!(needs_clflush & CLFLUSH_AFTER) &&
 		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			if (i915_gem_clflush_object(obj, obj->pin_display))
-				needs_clflush_after = true;
+				needs_clflush |= CLFLUSH_AFTER;
 		}
 	}
 
-	if (needs_clflush_after)
+	if (needs_clflush & CLFLUSH_AFTER)
 		i915_gem_chipset_flush(to_i915(dev));
-	else
-		obj->cache_dirty = true;
 
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 	return ret;
@@ -1437,10 +1465,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (ret == -EFAULT || ret == -ENOSPC) {
 		if (obj->phys_handle)
 			ret = i915_gem_phys_pwrite(obj, args, file);
-		else if (i915_gem_object_has_struct_page(obj))
-			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
 		else
-			ret = -ENODEV;
+			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
 	}
 
 	i915_gem_object_put(obj);
-- 
2.9.3
