From: Chris Wilson
Subject: [PATCH 10/16] drm/i915: Handle stolen objects for pread
Date: Thu, 15 Nov 2012 11:32:25 +0000
Message-ID: <1352979151-9934-11-git-send-email-chris@chris-wilson.co.uk>
References: <1352979151-9934-1-git-send-email-chris@chris-wilson.co.uk>
In-Reply-To: <1352979151-9934-1-git-send-email-chris@chris-wilson.co.uk>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: intel-gfx@lists.freedesktop.org
List-Id: intel-gfx@lists.freedesktop.org

Signed-off-by: Chris Wilson
---
 drivers/gpu/drm/i915/i915_gem.c | 175 ++++++++++++++++++++++++++-------------
 1 file changed, 116 insertions(+), 59 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9e66e29..db87ce4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -323,24 +323,21 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
 static int
-shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+shmem_pread_fast(char *vaddr, int shmem_page_offset, int page_length,
 		 char __user *user_data,
 		 bool page_do_bit17_swizzling, bool needs_clflush)
 {
-	char *vaddr;
 	int ret;
 
 	if (unlikely(page_do_bit17_swizzling))
 		return -EINVAL;
 
-	vaddr = kmap_atomic(page);
 	if (needs_clflush)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
 	ret = __copy_to_user_inatomic(user_data,
 				      vaddr + shmem_page_offset,
 				      page_length);
-	kunmap_atomic(vaddr);
 
 	return ret ? -EFAULT : 0;
 }
@@ -370,14 +367,12 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
 /* Only difference to the fast-path function is that this can handle bit17
  * and uses non-atomic copy and kmap functions. */
 static int
-shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pread_slow(char *vaddr, int shmem_page_offset, int page_length,
 		 char __user *user_data,
 		 bool page_do_bit17_swizzling, bool needs_clflush)
 {
-	char *vaddr;
 	int ret;
 
-	vaddr = kmap(page);
 	if (needs_clflush)
 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
 					     page_length,
@@ -391,7 +386,6 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 	ret = __copy_to_user(user_data,
 			     vaddr + shmem_page_offset,
 			     page_length);
-	kunmap(page);
 
 	return ret ? -EFAULT : 0;
 }
@@ -402,6 +396,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		     struct drm_i915_gem_pread *args,
 		     struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	char __user *user_data;
 	ssize_t remain;
 	loff_t offset;
@@ -432,76 +427,138 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		}
 	}
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		return ret;
+	offset = args->offset;
 
-	i915_gem_object_pin_pages(obj);
+	if (obj->stolen) {
+		char *vaddr;
 
-	offset = args->offset;
+		vaddr = (char *)dev_priv->mm.stolen_base;
+		vaddr += obj->stolen->start + (offset & PAGE_MASK);
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
+		shmem_page_offset = offset_in_page(offset);
+		while (remain > 0) {
+			/* Operation in this page
+			 *
+			 * shmem_page_offset = offset within page in shmem file
+			 * page_length = bytes to copy for this page
+			 */
+			page_length = remain;
+			if ((shmem_page_offset + page_length) > PAGE_SIZE)
+				page_length = PAGE_SIZE - shmem_page_offset;
 
-		if (i < offset >> PAGE_SHIFT)
-			continue;
+			page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+				((uintptr_t)vaddr & (1 << 17)) != 0;
 
-		if (remain <= 0)
-			break;
+			ret = shmem_pread_fast(vaddr, shmem_page_offset, page_length,
+					       user_data, page_do_bit17_swizzling,
+					       needs_clflush);
+			if (ret == 0)
+				goto next_stolen;
 
-		/* Operation in this page
-		 *
-		 * shmem_page_offset = offset within page in shmem file
-		 * page_length = bytes to copy for this page
-		 */
-		shmem_page_offset = offset_in_page(offset);
-		page_length = remain;
-		if ((shmem_page_offset + page_length) > PAGE_SIZE)
-			page_length = PAGE_SIZE - shmem_page_offset;
+			hit_slowpath = 1;
+			mutex_unlock(&dev->struct_mutex);
 
-		page = sg_page(sg);
-		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
-			(page_to_phys(page) & (1 << 17)) != 0;
+			if (!prefaulted) {
+				ret = fault_in_multipages_writeable(user_data, remain);
+				/* Userspace is tricking us, but we've already clobbered
+				 * its pages with the prefault and promised to write the
+				 * data up to the first fault. Hence ignore any errors
+				 * and just continue. */
+				(void)ret;
+				prefaulted = 1;
+			}
 
-		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
-				       user_data, page_do_bit17_swizzling,
-				       needs_clflush);
-		if (ret == 0)
-			goto next_page;
+			ret = shmem_pread_slow(vaddr, shmem_page_offset, page_length,
+					       user_data, page_do_bit17_swizzling,
+					       needs_clflush);
 
-		hit_slowpath = 1;
-		mutex_unlock(&dev->struct_mutex);
+			mutex_lock(&dev->struct_mutex);
+			if (ret)
+				goto out;
 
-		if (!prefaulted) {
-			ret = fault_in_multipages_writeable(user_data, remain);
-			/* Userspace is tricking us, but we've already clobbered
-			 * its pages with the prefault and promised to write the
-			 * data up to the first fault. Hence ignore any errors
-			 * and just continue. */
-			(void)ret;
-			prefaulted = 1;
+next_stolen:
+			remain -= page_length;
+			user_data += page_length;
+			vaddr += page_length;
+			shmem_page_offset = 0;
 		}
+	} else {
+		ret = i915_gem_object_get_pages(obj);
+		if (ret)
+			return ret;
 
-		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
-				       user_data, page_do_bit17_swizzling,
-				       needs_clflush);
+		i915_gem_object_pin_pages(obj);
 
-		mutex_lock(&dev->struct_mutex);
+		for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
+			char *vaddr;
+			struct page *page;
 
-next_page:
-		mark_page_accessed(page);
+			if (i < offset >> PAGE_SHIFT)
+				continue;
 
-		if (ret)
-			goto out;
+			if (remain <= 0)
+				break;
 
-		remain -= page_length;
-		user_data += page_length;
-		offset += page_length;
+			/* Operation in this page
+			 *
+			 * shmem_page_offset = offset within page in shmem file
+			 * page_length = bytes to copy for this page
+			 */
+			shmem_page_offset = offset_in_page(offset);
+			page_length = remain;
+			if ((shmem_page_offset + page_length) > PAGE_SIZE)
+				page_length = PAGE_SIZE - shmem_page_offset;
+
+			page = sg_page(sg);
+			mark_page_accessed(page);
+
+			page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+				(page_to_phys(page) & (1 << 17)) != 0;
+
+			vaddr = kmap_atomic(page);
+			ret = shmem_pread_fast(vaddr, shmem_page_offset, page_length,
+					       user_data, page_do_bit17_swizzling,
+					       needs_clflush);
+			kunmap_atomic(vaddr);
+
+			if (ret == 0)
+				goto next_page;
+
+			hit_slowpath = 1;
+			mutex_unlock(&dev->struct_mutex);
+
+			if (!prefaulted) {
+				ret = fault_in_multipages_writeable(user_data, remain);
+				/* Userspace is tricking us, but we've already clobbered
+				 * its pages with the prefault and promised to write the
+				 * data up to the first fault. Hence ignore any errors
+				 * and just continue. */
+				(void)ret;
+				prefaulted = 1;
+			}
+
+			vaddr = kmap(page);
+			ret = shmem_pread_slow(vaddr, shmem_page_offset, page_length,
+					       user_data, page_do_bit17_swizzling,
+					       needs_clflush);
+			kunmap(page);
+
+			mutex_lock(&dev->struct_mutex);
+
+			if (ret)
+				goto out_unpin;
+
+next_page:
+			remain -= page_length;
+			user_data += page_length;
+			offset += page_length;
+		}
+out_unpin:
+		i915_gem_object_unpin_pages(obj);
 	}
 
-out:
-	i915_gem_object_unpin_pages(obj);
 
+out:
 	if (hit_slowpath) {
 		/* Fixup: Kill any reinstated backing storage pages */
 		if (obj->madv == __I915_MADV_PURGED)
-- 
1.7.10.4
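
For reference, a minimal user-space sketch of the path this patch serves: a
pread of a GEM object through the existing DRM_IOCTL_I915_GEM_PREAD ioctl,
which ends up in i915_gem_shmem_pread() above and, with this patch, also works
for stolen-backed objects. This is illustrative only and not part of the patch;
the DRM fd and GEM handle are assumed to have been obtained elsewhere, and the
helper name read_gem_object() is made up for the example.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Copy 'size' bytes starting at 'offset' out of the GEM object 'handle'
 * into 'dst' via the i915 pread ioctl. Returns 0 on success, -1 on error
 * with errno set by ioctl(). */
static int read_gem_object(int fd, uint32_t handle, uint64_t offset,
			   void *dst, uint64_t size)
{
	struct drm_i915_gem_pread pread;

	memset(&pread, 0, sizeof(pread));
	pread.handle = handle;				/* GEM object handle */
	pread.offset = offset;				/* byte offset into the object */
	pread.size = size;				/* number of bytes to read */
	pread.data_ptr = (uint64_t)(uintptr_t)dst;	/* user buffer to fill */

	return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
}

Real code would normally go through drmIoctl() from libdrm instead of a bare
ioctl() so that the call is restarted on EINTR, but the request and argument
struct are the same.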