From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 1/2] drm/i915: Use non-atomic kmap for slow copy paths
Date: Thu, 27 May 2010 14:15:34 +0100
Message-ID: <1274966135-31667-1-git-send-email-chris@chris-wilson.co.uk>

As the slow copy paths for shmem-based pread and pwrite have no
requirement to be atomic or to avoid sleeping, we can use kmap()
rather than kmap_atomic(), simplifying the code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
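A quick illustration for reviewers (kept below the ---, so not part of
the commit message): a minimal sketch of the mapping pattern before and
after the change. The example_copy_* helpers are hypothetical and not
taken from i915_gem.c; the real code also handles page offsets and
partial lengths.

/* Illustrative sketch only; not part of the patch. */
#include <linux/highmem.h>
#include <linux/string.h>

/* Before: kmap_atomic() pins a per-CPU mapping slot and forbids
 * sleeping between map and unmap, so the copy had to stay atomic. */
static void example_copy_atomic(struct page *dst, struct page *src,
				size_t len)
{
	char *d = kmap_atomic(dst, KM_USER0);
	char *s = kmap_atomic(src, KM_USER1);

	memcpy(d, s, len);

	kunmap_atomic(s, KM_USER1);
	kunmap_atomic(d, KM_USER0);
}

/* After: kmap() may sleep, which is fine on these already-slow paths,
 * and kunmap() takes the page rather than the virtual address. */
static void example_copy(struct page *dst, struct page *src, size_t len)
{
	char *d = kmap(dst);
	char *s = kmap(src);

	memcpy(d, s, len);

	kunmap(src);
	kunmap(dst);
}

Dropping the atomic constraint is also what lets the error handling go
away in the callers below: kmap() cannot fail, so the copy helpers can
return void and the ret checks disappear.
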
 drivers/gpu/drm/i915/i915_gem.c |   82 ++++++++++++++------------------------
 1 files changed, 30 insertions(+), 52 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6694b44..6769fab 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -167,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
 		obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
-static inline int
+static inline void
 slow_shmem_copy(struct page *dst_page,
 		int dst_offset,
 		struct page *src_page,
@@ -176,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
 {
 	char *dst_vaddr, *src_vaddr;
 
-	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
-	if (dst_vaddr == NULL)
-		return -ENOMEM;
-
-	src_vaddr = kmap_atomic(src_page, KM_USER1);
-	if (src_vaddr == NULL) {
-		kunmap_atomic(dst_vaddr, KM_USER0);
-		return -ENOMEM;
-	}
+	dst_vaddr = kmap(dst_page);
+	src_vaddr = kmap(src_page);
 
 	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 
-	kunmap_atomic(src_vaddr, KM_USER1);
-	kunmap_atomic(dst_vaddr, KM_USER0);
-
-	return 0;
+	kunmap(src_page);
+	kunmap(dst_page);
 }
 
-static inline int
+static inline void
 slow_shmem_bit17_copy(struct page *gpu_page,
 		      int gpu_offset,
 		      struct page *cpu_page,
@@ -214,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
 					       cpu_page, cpu_offset, length);
 	}
 
-	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
-	if (gpu_vaddr == NULL)
-		return -ENOMEM;
-
-	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
-	if (cpu_vaddr == NULL) {
-		kunmap_atomic(gpu_vaddr, KM_USER0);
-		return -ENOMEM;
-	}
+	gpu_vaddr = kmap(gpu_page);
+	cpu_vaddr = kmap(cpu_page);
 
 	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
 	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -246,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
 		length -= this_length;
 	}
 
-	kunmap_atomic(cpu_vaddr, KM_USER1);
-	kunmap_atomic(gpu_vaddr, KM_USER0);
-
-	return 0;
+	kunmap(cpu_page);
+	kunmap(gpu_page);
 }
 
 /**
@@ -425,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 			page_length = PAGE_SIZE - data_page_offset;
 
 		if (do_bit17_swizzling) {
-			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-						    shmem_page_offset,
-						    user_pages[data_page_index],
-						    data_page_offset,
-						    page_length,
-						    1);
-		} else {
-			ret = slow_shmem_copy(user_pages[data_page_index],
-					      data_page_offset,
-					      obj_priv->pages[shmem_page_index],
+			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
 					      shmem_page_offset,
-					      page_length);
+					      user_pages[data_page_index],
+					      data_page_offset,
+					      page_length,
+					      1);
+		} else {
+			slow_shmem_copy(user_pages[data_page_index],
+					data_page_offset,
+					obj_priv->pages[shmem_page_index],
+					shmem_page_offset,
+					page_length);
 		}
-		if (ret)
-			goto fail_put_pages;
 
 		remain -= page_length;
 		data_ptr += page_length;
@@ -900,21 +880,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 			page_length = PAGE_SIZE - data_page_offset;
 
 		if (do_bit17_swizzling) {
-			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-						    shmem_page_offset,
-						    user_pages[data_page_index],
-						    data_page_offset,
-						    page_length,
-						    0);
-		} else {
-			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
 					      shmem_page_offset,
 					      user_pages[data_page_index],
 					      data_page_offset,
-					      page_length);
+					      page_length,
+					      0);
+		} else {
+			slow_shmem_copy(obj_priv->pages[shmem_page_index],
+					shmem_page_offset,
+					user_pages[data_page_index],
+					data_page_offset,
+					page_length);
 		}
-		if (ret)
-			goto fail_put_pages;
 
 		remain -= page_length;
 		data_ptr += page_length;
-- 
1.7.1

Thread overview: 5+ messages
2010-05-27 13:15 Chris Wilson [this message]
2010-05-27 13:15 ` [PATCH 2/2] drm/i915: Fix up address spaces in slow_kernel_write() Chris Wilson
2010-05-28  9:28   ` Daniel Vetter
2010-05-28  9:36     ` Chris Wilson
2010-05-28 18:06 ` [PATCH 1/2] drm/i915: Use non-atomic kmap for slow copy paths Eric Anholt
