From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 10/16] drm/i915: Handle stolen objects for pread
Date: Thu, 15 Nov 2012 11:32:25 +0000
Message-ID: <1352979151-9934-11-git-send-email-chris@chris-wilson.co.uk>
In-Reply-To: <1352979151-9934-1-git-send-email-chris@chris-wilson.co.uk>

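Objects backed by stolen memory carry no shmem backing pages, so the
pread ioctl cannot service them by walking the object's sg table.
Instead, read directly through the kernel's mapping of the stolen
region, at dev_priv->mm.stolen_base + obj->stolen->start + offset.

To let both paths share the copy helpers, shmem_pread_fast() and
shmem_pread_slow() now take a kernel virtual address instead of a
struct page, and the kmap_atomic()/kmap() calls move out into the
shmem per-page loop in i915_gem_shmem_pread().
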
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |  175 ++++++++++++++++++++++++++-------------
 1 file changed, 116 insertions(+), 59 deletions(-)
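
Not part of the patch: a minimal userspace sketch of how this path is
exercised, assuming an open DRM fd and a GEM handle that names a
stolen-backed object (e.g. one created via the create_stolen patches
later in this series).  Needs <string.h>, <stdint.h> and libdrm's
<xf86drm.h> and <i915_drm.h>.

	/* Read len bytes from the start of the object into dst via the
	 * pread ioctl; returns 0 on success, -1 with errno set otherwise. */
	static int read_back(int fd, uint32_t handle, void *dst, uint64_t len)
	{
		struct drm_i915_gem_pread pread;

		memset(&pread, 0, sizeof(pread));
		pread.handle = handle;
		pread.offset = 0;
		pread.size = len;
		pread.data_ptr = (uintptr_t)dst;

		return drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
	}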

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9e66e29..db87ce4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -323,24 +323,21 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
 static int
-shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+shmem_pread_fast(char *vaddr, int shmem_page_offset, int page_length,
 		 char __user *user_data,
 		 bool page_do_bit17_swizzling, bool needs_clflush)
 {
-	char *vaddr;
 	int ret;
 
 	if (unlikely(page_do_bit17_swizzling))
 		return -EINVAL;
 
-	vaddr = kmap_atomic(page);
 	if (needs_clflush)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
 	ret = __copy_to_user_inatomic(user_data,
 				      vaddr + shmem_page_offset,
 				      page_length);
-	kunmap_atomic(vaddr);
 
 	return ret ? -EFAULT : 0;
 }
@@ -370,14 +367,12 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
 /* Only difference to the fast-path function is that this can handle bit17
  * and uses non-atomic copy and kmap functions. */
 static int
-shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pread_slow(char *vaddr, int shmem_page_offset, int page_length,
 		 char __user *user_data,
 		 bool page_do_bit17_swizzling, bool needs_clflush)
 {
-	char *vaddr;
 	int ret;
 
-	vaddr = kmap(page);
 	if (needs_clflush)
 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
 					     page_length,
@@ -391,7 +386,6 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 		ret = __copy_to_user(user_data,
 				     vaddr + shmem_page_offset,
 				     page_length);
-	kunmap(page);
 
 	return ret ? - EFAULT : 0;
 }
@@ -402,6 +396,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		     struct drm_i915_gem_pread *args,
 		     struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	char __user *user_data;
 	ssize_t remain;
 	loff_t offset;
@@ -432,76 +427,138 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		}
 	}
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		return ret;
+	offset = args->offset;
 
-	i915_gem_object_pin_pages(obj);
+	if (obj->stolen) {
+		char *vaddr;
 
-	offset = args->offset;
+		vaddr = (char *)dev_priv->mm.stolen_base;
+		vaddr += obj->stolen->start + (offset & PAGE_MASK);
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
+		shmem_page_offset = offset_in_page(offset);
+		while (remain > 0) {
+			/* Operation in this page
+			 *
+			 * shmem_page_offset = offset within page in shmem file
+			 * page_length = bytes to copy for this page
+			 */
+			page_length = remain;
+			if ((shmem_page_offset + page_length) > PAGE_SIZE)
+				page_length = PAGE_SIZE - shmem_page_offset;
 
-		if (i < offset >> PAGE_SHIFT)
-			continue;
+			page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+				((uintptr_t)vaddr & (1 << 17)) != 0;
 
-		if (remain <= 0)
-			break;
+			ret = shmem_pread_fast(vaddr, shmem_page_offset, page_length,
+					       user_data, page_do_bit17_swizzling,
+					       needs_clflush);
+			if (ret == 0)
+				goto next_stolen;
 
-		/* Operation in this page
-		 *
-		 * shmem_page_offset = offset within page in shmem file
-		 * page_length = bytes to copy for this page
-		 */
-		shmem_page_offset = offset_in_page(offset);
-		page_length = remain;
-		if ((shmem_page_offset + page_length) > PAGE_SIZE)
-			page_length = PAGE_SIZE - shmem_page_offset;
+			hit_slowpath = 1;
+			mutex_unlock(&dev->struct_mutex);
 
-		page = sg_page(sg);
-		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
-			(page_to_phys(page) & (1 << 17)) != 0;
+			if (!prefaulted) {
+				ret = fault_in_multipages_writeable(user_data, remain);
+				/* Userspace is tricking us, but we've already clobbered
+				 * its pages with the prefault and promised to write the
+				 * data up to the first fault. Hence ignore any errors
+				 * and just continue. */
+				(void)ret;
+				prefaulted = 1;
+			}
 
-		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
-				       user_data, page_do_bit17_swizzling,
-				       needs_clflush);
-		if (ret == 0)
-			goto next_page;
+			ret = shmem_pread_slow(vaddr, shmem_page_offset, page_length,
+					       user_data, page_do_bit17_swizzling,
+					       needs_clflush);
 
-		hit_slowpath = 1;
-		mutex_unlock(&dev->struct_mutex);
+			mutex_lock(&dev->struct_mutex);
+			if (ret)
+				goto out;
 
-		if (!prefaulted) {
-			ret = fault_in_multipages_writeable(user_data, remain);
-			/* Userspace is tricking us, but we've already clobbered
-			 * its pages with the prefault and promised to write the
-			 * data up to the first fault. Hence ignore any errors
-			 * and just continue. */
-			(void)ret;
-			prefaulted = 1;
+next_stolen:
+			remain -= page_length;
+			user_data += page_length;
+			vaddr += page_length;
+			shmem_page_offset = 0;
 		}
+	} else {
+		ret = i915_gem_object_get_pages(obj);
+		if (ret)
+			return ret;
 
-		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
-				       user_data, page_do_bit17_swizzling,
-				       needs_clflush);
+		i915_gem_object_pin_pages(obj);
 
-		mutex_lock(&dev->struct_mutex);
+		for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
+			char *vaddr;
+			struct page *page;
 
-next_page:
-		mark_page_accessed(page);
+			if (i < offset >> PAGE_SHIFT)
+				continue;
 
-		if (ret)
-			goto out;
+			if (remain <= 0)
+				break;
 
-		remain -= page_length;
-		user_data += page_length;
-		offset += page_length;
+			/* Operation in this page
+			 *
+			 * shmem_page_offset = offset within page in shmem file
+			 * page_length = bytes to copy for this page
+			 */
+			shmem_page_offset = offset_in_page(offset);
+			page_length = remain;
+			if ((shmem_page_offset + page_length) > PAGE_SIZE)
+				page_length = PAGE_SIZE - shmem_page_offset;
+
+			page = sg_page(sg);
+			mark_page_accessed(page);
+
+			page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+				(page_to_phys(page) & (1 << 17)) != 0;
+
+			vaddr = kmap_atomic(page);
+			ret = shmem_pread_fast(vaddr, shmem_page_offset, page_length,
+					       user_data, page_do_bit17_swizzling,
+					       needs_clflush);
+			kunmap_atomic(vaddr);
+
+			if (ret == 0)
+				goto next_page;
+
+			hit_slowpath = 1;
+			mutex_unlock(&dev->struct_mutex);
+
+			if (!prefaulted) {
+				ret = fault_in_multipages_writeable(user_data, remain);
+				/* Userspace is tricking us, but we've already clobbered
+				 * its pages with the prefault and promised to write the
+				 * data up to the first fault. Hence ignore any errors
+				 * and just continue. */
+				(void)ret;
+				prefaulted = 1;
+			}
+
+			vaddr = kmap(page);
+			ret = shmem_pread_slow(vaddr, shmem_page_offset, page_length,
+					       user_data, page_do_bit17_swizzling,
+					       needs_clflush);
+			kunmap(page);
+
+			mutex_lock(&dev->struct_mutex);
+
+			if (ret)
+				goto out_unpin;
+
+next_page:
+			remain -= page_length;
+			user_data += page_length;
+			offset += page_length;
+		}
+out_unpin:
+		i915_gem_object_unpin_pages(obj);
 	}
 
-out:
-	i915_gem_object_unpin_pages(obj);
 
+out:
 	if (hit_slowpath) {
 		/* Fixup: Kill any reinstated backing storage pages */
 		if (obj->madv == __I915_MADV_PURGED)
-- 
1.7.10.4

Thread overview: 26+ messages
2012-11-15 11:32 Hopefully the last round of stolen work Chris Wilson
2012-11-15 11:32 ` [PATCH 01/16] drm: Introduce drm_mm_create_block() Chris Wilson
2012-11-15 11:32 ` [PATCH 02/16] drm: Introduce an iterator over holes in the drm_mm range manager Chris Wilson
2012-11-15 11:32 ` [PATCH 03/16] drm/i915: Fix detection of base of stolen memory Chris Wilson
2012-11-15 11:32 ` [PATCH 04/16] drm/i915: Avoid clearing preallocated regions from the GTT Chris Wilson
2012-11-15 11:32 ` [PATCH 05/16] drm/i915: Delay allocation of stolen space for FBC Chris Wilson
2012-11-15 11:32 ` [PATCH 06/16] drm/i915: Allow objects to be created with no backing pages, but stolen space Chris Wilson
2012-11-15 11:32 ` [PATCH 07/16] drm/i915: Differentiate between prime and stolen objects Chris Wilson
2012-11-15 11:32 ` [PATCH 08/16] drm/i915: Support readback of stolen objects upon error Chris Wilson
2012-11-15 11:32 ` [PATCH 09/16] drm/i915: Handle stolen objects in pwrite Chris Wilson
2012-11-15 11:32 ` Chris Wilson [this message]
2012-11-30 22:33   ` [PATCH 10/16] drm/i915: Handle stolen objects for pread Daniel Vetter
2012-11-30 23:46     ` Chris Wilson
2012-12-01  0:03       ` Daniel Vetter
2012-12-01  0:14         ` Chris Wilson
2012-11-15 11:32 ` [PATCH 11/16] drm/i915: Introduce i915_gem_object_create_stolen() Chris Wilson
2012-11-15 11:32 ` [PATCH 12/16] drm/i915: Allocate fbcon from stolen memory Chris Wilson
2012-11-15 11:32 ` [PATCH 13/16] drm/i915: Allocate ringbuffers " Chris Wilson
2012-11-15 11:32 ` [PATCH 14/16] drm/i915: Allocate overlay registers " Chris Wilson
2012-11-15 11:32 ` [PATCH 15/16] drm/i915: Use a slab for object allocation Chris Wilson
2012-11-30 22:45   ` Daniel Vetter
2012-11-15 11:32 ` [PATCH 16/16] drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl Chris Wilson
2012-11-30 23:06   ` Daniel Vetter
2012-11-30 23:57     ` Chris Wilson
2012-12-01 11:25     ` Chris Wilson
2012-12-01 11:35       ` Daniel Vetter
