From: ankitprasad.r.sharma@intel.com
To: intel-gfx@lists.freedesktop.org
Cc: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>,
	akash.goel@intel.com, shashidhar.hiremath@intel.com
Subject: [PATCH 08/10] drm/i915: Support for pread/pwrite from/to non shmem backed objects
Date: Mon, 29 Feb 2016 13:09:37 +0530	[thread overview]
Message-ID: <1456731579-25584-9-git-send-email-ankitprasad.r.sharma@intel.com> (raw)
In-Reply-To: <1456731579-25584-1-git-send-email-ankitprasad.r.sharma@intel.com>

From: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>

This patch extends the pread/pwrite functionality to objects not backed
by shmem. The access is made through the GTT interface, covering objects
backed by stolen memory as well as other non-shmem backed objects.
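
As a rough illustration of the userspace-visible effect (a sketch, not part
of the patch itself): the existing pread ioctl can now be issued against an
object with no shmem backing, for example one created from stolen memory by
patch 05/10 of this series. The struct and ioctl below are the existing i915
uAPI; read_back is a hypothetical helper and the stolen-backed handle is
assumed to come from the earlier patches in the series.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int read_back(int drm_fd, uint32_t handle, void *buf, uint64_t len)
{
	struct drm_i915_gem_pread pread;

	memset(&pread, 0, sizeof(pread));
	pread.handle = handle;		/* e.g. a stolen-backed object */
	pread.offset = 0;		/* byte offset into the object */
	pread.size = len;		/* number of bytes to copy */
	pread.data_ptr = (uintptr_t)buf;

	/* Previously this returned -EINVAL for objects without a backing
	 * filp; with this patch the copy is serviced through the GTT. */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
}

pwrite through DRM_IOCTL_I915_GEM_PWRITE is exercised the same way.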

v2: Drop locks around slow_user_access, prefault the pages before
access (Chris)

v3: Rebased to the latest drm-intel-nightly (Ankit)

v4: Moved page base & offset calculations outside the copy loop, corrected
data types for the size and offset variables, corrected if-else brace
formatting (Tvrtko/kerneldocs)

v5: Enabled pread/pwrite for all non-shmem backed objects, without tiling
restrictions (Ankit)

v6: Using pwrite_fast for non-shmem backed objects as well (Chris)

v7: Updated commit message, renamed i915_gem_gtt_read to i915_gem_gtt_copy,
added pwrite slow path for non-shmem backed objects (Chris/Tvrtko)

v8: Updated the v7 commit message, unlocked the mutex around the pwrite slow
path for non-shmem backed objects (Tvrtko)

v9: Corrected check during pread_ioctl, to avoid shmem_pread being
called for non-shmem backed objects (Tvrtko)

v10: Moved the write_domain check to needs_clflush and tiling mode check
to pwrite_fast (Chris)

v11: Use pwrite_fast fallback for all objects (shmem and non-shmem backed),
call fast_user_write regardless of a page fault in the previous iteration

v12: Use page-by-page copy for slow user access too (Chris)

v13: Handled EFAULT, avoided use of WARN_ON, put_fence only if the whole
object is pinned (Chris)

v14: Corrected datatypes/initializations (Tvrtko)

Testcase: igt/gem_stolen, igt/gem_pread, igt/gem_pwrite

Signed-off-by: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 221 ++++++++++++++++++++++++++++++++++------
 1 file changed, 189 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df0a19f..8cbe426 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -55,6 +55,9 @@ static bool cpu_cache_is_coherent(struct drm_device *dev,
 
 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return false;
+
 	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
 		return true;
 
@@ -646,6 +649,141 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 	return ret ? - EFAULT : 0;
 }
 
+static inline unsigned long
+slow_user_access(struct io_mapping *mapping,
+		 uint64_t page_base, int page_offset,
+		 char __user *user_data,
+		 unsigned long length, bool pwrite)
+{
+	void __iomem *ioaddr;
+	void *vaddr;
+	uint64_t unwritten;
+
+	ioaddr = io_mapping_map_wc(mapping, page_base);
+	/* We can use the cpu mem copy function because this is X86. */
+	vaddr = (void __force *)ioaddr + page_offset;
+	if (pwrite)
+		unwritten = __copy_from_user(vaddr, user_data, length);
+	else
+		unwritten = __copy_to_user(user_data, vaddr, length);
+
+	io_mapping_unmap(ioaddr);
+	return unwritten;
+}
+
+static int
+i915_gem_gtt_pread(struct drm_device *dev,
+		   struct drm_i915_gem_object *obj, uint64_t size,
+		   uint64_t data_offset, uint64_t data_ptr)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mm_node node;
+	char __user *user_data;
+	uint64_t remain;
+	uint64_t offset;
+	int ret;
+
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+	if (ret) {
+		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+		if (ret)
+			goto out;
+
+		ret = i915_gem_object_get_pages(obj);
+		if (ret) {
+			remove_mappable_node(&node);
+			goto out;
+		}
+
+		i915_gem_object_pin_pages(obj);
+	} else {
+		node.start = i915_gem_obj_ggtt_offset(obj);
+		node.allocated = false;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			goto out_unpin;
+	}
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (ret)
+		goto out_unpin;
+
+	user_data = to_user_ptr(data_ptr);
+	remain = size;
+	offset = data_offset;
+
+	mutex_unlock(&dev->struct_mutex);
+	if (likely(!i915.prefault_disable)) {
+		ret = fault_in_multipages_writeable(user_data, remain);
+		if (ret) {
+			mutex_lock(&dev->struct_mutex);
+			goto out_unpin;
+		}
+	}
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		u32 page_base = node.start;
+		unsigned page_offset = offset_in_page(offset);
+		unsigned page_length = PAGE_SIZE - page_offset;
+		page_length = remain < page_length ? remain : page_length;
+		if (node.allocated) {
+			wmb();
+			dev_priv->gtt.base.insert_page(&dev_priv->gtt.base,
+						       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+						       node.start,
+						       I915_CACHE_NONE, 0);
+			wmb();
+		} else {
+			page_base += offset & PAGE_MASK;
+		}
+		/* This is a slow read/write as it tries to read from
+		 * and write to user memory which may result into page
+		 * faults, and so we cannot perform this under struct_mutex.
+		 */
+		if (slow_user_access(dev_priv->gtt.mappable, page_base,
+				     page_offset, user_data,
+				     page_length, false)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+		/* The user has modified the object whilst we tried
+		 * reading from it, and we now have no idea what domain
+		 * the pages should be in. As we have just been touching
+		 * them directly, flush everything back to the GTT
+		 * domain.
+		 */
+		ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	}
+
+out_unpin:
+	if (node.allocated) {
+		wmb();
+		dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+					       node.start, node.size,
+					       true);
+		i915_gem_object_unpin_pages(obj);
+		remove_mappable_node(&node);
+	} else {
+		i915_gem_object_ggtt_unpin(obj);
+	}
+out:
+	return ret;
+}
+
 static int
 i915_gem_shmem_pread(struct drm_device *dev,
 		     struct drm_i915_gem_object *obj,
@@ -661,6 +799,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int needs_clflush = 0;
 	struct sg_page_iter sg_iter;
 
+	if (!obj->base.filp)
+		return -ENODEV;
+
 	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 
@@ -769,18 +910,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* prime objects have no backing filp to GEM pread/pwrite
-	 * pages from.
-	 */
-	if (!obj->base.filp) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	trace_i915_gem_object_pread(obj, args->offset, args->size);
 
 	ret = i915_gem_shmem_pread(dev, obj, args, file);
 
+	/* pread for non shmem backed objects */
+	if (ret == -EFAULT || ret == -ENODEV)
+		ret = i915_gem_gtt_pread(dev, obj, args->size,
+					args->offset, args->data_ptr);
+
 out:
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -821,10 +959,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 			 struct drm_i915_gem_pwrite *args,
 			 struct drm_file *file)
 {
+	struct drm_device *dev = obj->base.dev;
 	struct drm_mm_node node;
 	uint64_t remain, offset;
 	char __user *user_data;
 	int ret;
+	bool hit_slow_path = false;
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		return -EFAULT;
 
 	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
 	if (ret) {
@@ -842,16 +985,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 	} else {
 		node.start = i915_gem_obj_ggtt_offset(obj);
 		node.allocated = false;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			goto out_unpin;
 	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto out_unpin;
 
-	ret = i915_gem_object_put_fence(obj);
-	if (ret)
-		goto out_unpin;
-
 	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
 	obj->dirty = true;
 
@@ -870,24 +1012,36 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		unsigned page_length = PAGE_SIZE - page_offset;
 		page_length = remain < page_length ? remain : page_length;
 		if (node.allocated) {
-			wmb();
+			wmb(); /* flush the write before we modify the GGTT */
 			i915->gtt.base.insert_page(&i915->gtt.base,
 						   i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
 						   node.start,
 						   I915_CACHE_NONE,
 						   0);
-			wmb();
+			wmb(); /* flush modifications to the GGTT (insert_page) */
 		} else {
 			page_base += offset & PAGE_MASK;
 		}
 		/* If we get a fault while copying data, then (presumably) our
 		 * source page isn't available.  Return the error and we'll
 		 * retry in the slow path.
+		 * If the object is non-shmem backed, we retry again with the
+		 * path that handles page fault.
 		 */
 		if (fast_user_write(i915->gtt.mappable, page_base,
 				    page_offset, user_data, page_length)) {
-			ret = -EFAULT;
-			goto out_flush;
+			hit_slow_path = true;
+			mutex_unlock(&dev->struct_mutex);
+			if (slow_user_access(i915->gtt.mappable,
+					     page_base,
+					     page_offset, user_data,
+					     page_length, true)) {
+				ret = -EFAULT;
+				mutex_lock(&dev->struct_mutex);
+				goto out_flush;
+			}
+
+			mutex_lock(&dev->struct_mutex);
 		}
 
 		remain -= page_length;
@@ -896,6 +1050,19 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 	}
 
 out_flush:
+	if (hit_slow_path) {
+		if (ret == 0 &&
+		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+			/* The user has modified the object whilst we tried
+			 * reading from it, and we now have no idea what domain
+			 * the pages should be in. As we have just been touching
+			 * them directly, flush everything back to the GTT
+			 * domain.
+			 */
+			ret = i915_gem_object_set_to_gtt_domain(obj, false);
+		}
+	}
+
 	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
 	if (node.allocated) {
@@ -1152,14 +1319,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* prime objects have no backing filp to GEM pread/pwrite
-	 * pages from.
-	 */
-	if (!obj->base.filp) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
 	ret = -EFAULT;
@@ -1169,20 +1328,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->tiling_mode == I915_TILING_NONE &&
-	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
-	    cpu_write_needs_clflush(obj)) {
+	if (!obj->base.filp || cpu_write_needs_clflush(obj)) {
 		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
 		/* Note that the gtt paths might fail with non-page-backed user
 		 * pointers (e.g. gtt mappings when moving data between
 		 * textures). Fallback to the shmem path in that case. */
 	}
 
-	if (ret == -EFAULT || ret == -ENOSPC) {
+	if (ret == -EFAULT) {
 		if (obj->phys_handle)
 			ret = i915_gem_phys_pwrite(obj, args, file);
-		else
+		else if (obj->base.filp)
 			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+		else
+			ret = -ENODEV;
 	}
 
 out:
@@ -3976,9 +4135,7 @@ out:
 	 * object is now coherent at its new cache level (with respect
 	 * to the access domain).
 	 */
-	if (obj->cache_dirty &&
-	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
-	    cpu_write_needs_clflush(obj)) {
+	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
 		if (i915_gem_clflush_object(obj, true))
 			i915_gem_chipset_flush(obj->base.dev);
 	}
-- 
1.9.1
