From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org Subject: [PATCH v9 69/70] drm/i915: Pass ww ctx to i915_gem_object_pin_pages Date: Tue, 23 Mar 2021 16:50:58 +0100 [thread overview] Message-ID: <20210323155059.628690-70-maarten.lankhorst@linux.intel.com> (raw) In-Reply-To: <20210323155059.628690-1-maarten.lankhorst@linux.intel.com> This is the final part of passing ww ctx to the get_pages() callbacks. Now we no longer have to implicitly get ww ctx by using get_ww_ctx. Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> --- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 4 +-- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 35 +++++++++++++------ .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 19 ++++++---- drivers/gpu/drm/i915/gem/i915_gem_object.h | 11 +++--- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 14 ++++---- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4 +-- drivers/gpu/drm/i915/gt/intel_gtt.c | 4 +-- drivers/gpu/drm/i915/i915_gem.c | 6 ++-- drivers/gpu/drm/i915/i915_vma.c | 7 ++-- 13 files changed, 70 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 56b63731af60..31f9685557f7 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1155,7 +1155,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, if (!ret && phys_cursor) ret = i915_gem_object_attach_phys(obj, alignment); if (!ret) - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, &ww); if (ret) goto err; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index e4c24558eaa8..109f5c8b802a 
100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -55,7 +55,7 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj) if (!clflush) return NULL; - if (__i915_gem_object_get_pages(obj) < 0) { + if (__i915_gem_object_get_pages(obj, NULL) < 0) { kfree(clflush); return NULL; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 1b3998c066a7..35ac62d4dea2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -130,7 +130,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire retry: err = i915_gem_object_lock(obj, &ww); if (!err) - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, &ww); if (!err) { i915_gem_object_set_to_cpu_domain(obj, write); i915_gem_object_unpin_pages(obj); @@ -154,7 +154,7 @@ static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direct retry: err = i915_gem_object_lock(obj, &ww); if (!err) - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, &ww); if (!err) { i915_gem_object_set_to_gtt_domain(obj, false); i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index a5b3a21faf9c..27dde2b9597e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -430,6 +430,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; u32 read_domains = args->read_domains; u32 write_domain = args->write_domain; + struct i915_gem_ww_ctx ww; int err; /* Only handle setting domains to types used by the CPU. 
*/ @@ -456,7 +457,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, * userptr validity */ err = i915_gem_object_userptr_validate(obj); - goto out_wait; + if (err) + goto out; + + err = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_PRIORITY | + (write_domain ? I915_WAIT_ALL : 0), + MAX_SCHEDULE_TIMEOUT); + goto out; } /* @@ -470,9 +479,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, goto out; } - err = i915_gem_object_lock_interruptible(obj, NULL); + i915_gem_ww_ctx_init(&ww, true); +retry: + err = i915_gem_object_lock_interruptible(obj, &ww); if (err) - goto out; + goto out_ww; /* * Flush and acquire obj->pages so that we are coherent through @@ -483,9 +494,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, * continue to assume that the obj remained out of the CPU cached * domain. */ - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, &ww); if (err) - goto out_unlock; + goto out_ww; /* * Already in the desired write domain? Nothing for us to do! 
@@ -510,9 +521,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, out_unpin: i915_gem_object_unpin_pages(obj); -out_unlock: - i915_gem_object_unlock(obj); -out_wait: if (!err) { err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | @@ -523,6 +531,13 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); } +out_ww: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); out: i915_gem_object_put(obj); return err; @@ -545,7 +560,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, assert_object_held(obj); - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, ww); if (ret) return ret; @@ -590,7 +605,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, assert_object_held(obj); - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, ww); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 73dd2a7673f5..b3678c0f218b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2504,7 +2504,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, pw->batch_map = i915_gem_object_pin_map(batch, &eb->ww, I915_MAP_WC); if (IS_ERR(pw->batch_map)) { - err = i915_gem_object_pin_pages(batch); + err = i915_gem_object_pin_pages(batch, &eb->ww); if (err) goto err_unmap_shadow; pw->batch_map = NULL; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index edac8ee3be9a..8690bf434407 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -239,6 +239,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) struct i915_mmap_offset *mmo = area->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; resource_size_t 
iomap; + struct i915_gem_ww_ctx ww; int err; /* Sanity check that we allow writing into this object */ @@ -246,10 +247,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) area->vm_flags & VM_WRITE)) return VM_FAULT_SIGBUS; - if (i915_gem_object_lock_interruptible(obj, NULL)) - return VM_FAULT_NOPAGE; - - err = i915_gem_object_pin_pages(obj); + i915_gem_ww_ctx_init(&ww, true); +retry: + err = i915_gem_object_lock(obj, &ww); + if (!err) + err = i915_gem_object_pin_pages(obj, &ww); if (err) goto out; @@ -272,7 +274,12 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) i915_gem_object_unpin_pages(obj); out: - i915_gem_object_unlock(obj); + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); return i915_error_to_vmf_fault(err); } @@ -313,7 +320,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) goto err_rpm; } - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, &ww); if (ret) goto err_rpm; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 9bd9b47dcc8d..64819b4e592a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -378,18 +378,21 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, unsigned int sg_page_sizes); -int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj); -int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); +int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww); +int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww); static inline int __must_check -i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) +i915_gem_object_pin_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww) { assert_object_held(obj); if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) return 0; - 
return __i915_gem_object_get_pages(obj); + return __i915_gem_object_get_pages(obj, ww); } int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 232832398457..94cc33ea483d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -87,7 +87,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, } } -int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) +int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww) { struct drm_i915_private *i915 = to_i915(obj->base.dev); int err; @@ -100,7 +101,7 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) return -EFAULT; } - err = obj->ops->get_pages(obj, NULL); + err = obj->ops->get_pages(obj, ww); GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj)); return err; @@ -113,7 +114,8 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) * either as a result of memory pressure (reaping pages under the shrinker) * or as the object is itself released. 
*/ -int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) +int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww) { int err; @@ -124,7 +126,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) if (unlikely(!i915_gem_object_has_pages(obj))) { GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); - err = ____i915_gem_object_get_pages(obj); + err = ____i915_gem_object_get_pages(obj, ww); if (err) return err; @@ -144,7 +146,7 @@ int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj) retry: err = i915_gem_object_lock(obj, &ww); if (!err) - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, &ww); if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&ww); @@ -362,7 +364,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, if (unlikely(!i915_gem_object_has_pages(obj))) { GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); - err = ____i915_gem_object_get_pages(obj); + err = ____i915_gem_object_get_pages(obj, ww); if (err) return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 5b732b0fe5ce..48b2258091c3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -641,7 +641,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, if (WARN_ON(!i915_gem_object_trylock(obj))) return -EBUSY; - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, NULL); if (!err) i915_gem_object_init_memory_region(obj, mem); i915_gem_object_unlock(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 693d0dbe9ed2..71c928c789b3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -261,7 +261,7 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool 
i915_gem_userptr_put_pages(obj, pages); if (get_pages) - err = ____i915_gem_object_get_pages(obj); + err = ____i915_gem_object_get_pages(obj, NULL); return err; } @@ -390,7 +390,7 @@ int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) * it doesn't matter if we collide with the mmu notifier, * and -EAGAIN handling is not required. */ - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, NULL); if (!err) i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 994e4ea28903..38c1ba203071 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -30,7 +30,7 @@ int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) int err; i915_gem_object_lock(obj, NULL); - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, NULL); i915_gem_object_unlock(obj); if (err) return err; @@ -43,7 +43,7 @@ int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object { int err; - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, NULL); if (err) return err; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 960e94997c9b..d23a417295f8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -212,7 +212,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, if (ret) return ret; - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, NULL); if (ret) goto err_unlock; @@ -311,7 +311,7 @@ static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj, vma = NULL; } - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, &ww); if (ret) { if (drm_mm_node_allocated(node)) { ggtt->vm.clear_range(&ggtt->vm, node->start, node->size); @@ -640,7 +640,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, if (ret) return ret; - ret = 
i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, NULL); if (ret) goto err_unlock; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index c5b9f30ac0a3..03291c032814 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -785,7 +785,8 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) return pinned; } -static int vma_get_pages(struct i915_vma *vma) +static int vma_get_pages(struct i915_vma *vma, + struct i915_gem_ww_ctx *ww) { int err = 0; @@ -798,7 +799,7 @@ static int vma_get_pages(struct i915_vma *vma) if (!atomic_read(&vma->pages_count)) { if (vma->obj) { - err = i915_gem_object_pin_pages(vma->obj); + err = i915_gem_object_pin_pages(vma->obj, ww); if (err) goto unlock; } @@ -876,7 +877,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) return 0; - err = vma_get_pages(vma); + err = vma_get_pages(vma, ww); if (err) return err; -- 2.31.0 _______________________________________________ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
WARNING: multiple messages have this Message-ID (diff)
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org Subject: [Intel-gfx] [PATCH v9 69/70] drm/i915: Pass ww ctx to i915_gem_object_pin_pages Date: Tue, 23 Mar 2021 16:50:58 +0100 [thread overview] Message-ID: <20210323155059.628690-70-maarten.lankhorst@linux.intel.com> (raw) In-Reply-To: <20210323155059.628690-1-maarten.lankhorst@linux.intel.com> This is the final part of passing ww ctx to the get_pages() callbacks. Now we no longer have to implicitly get ww ctx by using get_ww_ctx. Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> --- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 4 +-- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 35 +++++++++++++------ .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 19 ++++++---- drivers/gpu/drm/i915/gem/i915_gem_object.h | 11 +++--- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 14 ++++---- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4 +-- drivers/gpu/drm/i915/gt/intel_gtt.c | 4 +-- drivers/gpu/drm/i915/i915_gem.c | 6 ++-- drivers/gpu/drm/i915/i915_vma.c | 7 ++-- 13 files changed, 70 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 56b63731af60..31f9685557f7 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1155,7 +1155,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, if (!ret && phys_cursor) ret = i915_gem_object_attach_phys(obj, alignment); if (!ret) - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, &ww); if (ret) goto err; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index 
e4c24558eaa8..109f5c8b802a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -55,7 +55,7 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj) if (!clflush) return NULL; - if (__i915_gem_object_get_pages(obj) < 0) { + if (__i915_gem_object_get_pages(obj, NULL) < 0) { kfree(clflush); return NULL; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 1b3998c066a7..35ac62d4dea2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -130,7 +130,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire retry: err = i915_gem_object_lock(obj, &ww); if (!err) - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, &ww); if (!err) { i915_gem_object_set_to_cpu_domain(obj, write); i915_gem_object_unpin_pages(obj); @@ -154,7 +154,7 @@ static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direct retry: err = i915_gem_object_lock(obj, &ww); if (!err) - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, &ww); if (!err) { i915_gem_object_set_to_gtt_domain(obj, false); i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index a5b3a21faf9c..27dde2b9597e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -430,6 +430,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; u32 read_domains = args->read_domains; u32 write_domain = args->write_domain; + struct i915_gem_ww_ctx ww; int err; /* Only handle setting domains to types used by the CPU. 
*/ @@ -456,7 +457,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, * userptr validity */ err = i915_gem_object_userptr_validate(obj); - goto out_wait; + if (err) + goto out; + + err = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_PRIORITY | + (write_domain ? I915_WAIT_ALL : 0), + MAX_SCHEDULE_TIMEOUT); + goto out; } /* @@ -470,9 +479,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, goto out; } - err = i915_gem_object_lock_interruptible(obj, NULL); + i915_gem_ww_ctx_init(&ww, true); +retry: + err = i915_gem_object_lock_interruptible(obj, &ww); if (err) - goto out; + goto out_ww; /* * Flush and acquire obj->pages so that we are coherent through @@ -483,9 +494,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, * continue to assume that the obj remained out of the CPU cached * domain. */ - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, &ww); if (err) - goto out_unlock; + goto out_ww; /* * Already in the desired write domain? Nothing for us to do! 
@@ -510,9 +521,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, out_unpin: i915_gem_object_unpin_pages(obj); -out_unlock: - i915_gem_object_unlock(obj); -out_wait: if (!err) { err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | @@ -523,6 +531,13 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); } +out_ww: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); out: i915_gem_object_put(obj); return err; @@ -545,7 +560,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, assert_object_held(obj); - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, ww); if (ret) return ret; @@ -590,7 +605,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, assert_object_held(obj); - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, ww); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 73dd2a7673f5..b3678c0f218b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2504,7 +2504,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, pw->batch_map = i915_gem_object_pin_map(batch, &eb->ww, I915_MAP_WC); if (IS_ERR(pw->batch_map)) { - err = i915_gem_object_pin_pages(batch); + err = i915_gem_object_pin_pages(batch, &eb->ww); if (err) goto err_unmap_shadow; pw->batch_map = NULL; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index edac8ee3be9a..8690bf434407 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -239,6 +239,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) struct i915_mmap_offset *mmo = area->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; resource_size_t 
iomap; + struct i915_gem_ww_ctx ww; int err; /* Sanity check that we allow writing into this object */ @@ -246,10 +247,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) area->vm_flags & VM_WRITE)) return VM_FAULT_SIGBUS; - if (i915_gem_object_lock_interruptible(obj, NULL)) - return VM_FAULT_NOPAGE; - - err = i915_gem_object_pin_pages(obj); + i915_gem_ww_ctx_init(&ww, true); +retry: + err = i915_gem_object_lock(obj, &ww); + if (!err) + err = i915_gem_object_pin_pages(obj, &ww); if (err) goto out; @@ -272,7 +274,12 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) i915_gem_object_unpin_pages(obj); out: - i915_gem_object_unlock(obj); + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); return i915_error_to_vmf_fault(err); } @@ -313,7 +320,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) goto err_rpm; } - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, &ww); if (ret) goto err_rpm; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 9bd9b47dcc8d..64819b4e592a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -378,18 +378,21 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, unsigned int sg_page_sizes); -int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj); -int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); +int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww); +int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww); static inline int __must_check -i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) +i915_gem_object_pin_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww) { assert_object_held(obj); if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) return 0; - 
return __i915_gem_object_get_pages(obj); + return __i915_gem_object_get_pages(obj, ww); } int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 232832398457..94cc33ea483d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -87,7 +87,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, } } -int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) +int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww) { struct drm_i915_private *i915 = to_i915(obj->base.dev); int err; @@ -100,7 +101,7 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) return -EFAULT; } - err = obj->ops->get_pages(obj, NULL); + err = obj->ops->get_pages(obj, ww); GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj)); return err; @@ -113,7 +114,8 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) * either as a result of memory pressure (reaping pages under the shrinker) * or as the object is itself released. 
*/ -int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) +int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww) { int err; @@ -124,7 +126,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) if (unlikely(!i915_gem_object_has_pages(obj))) { GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); - err = ____i915_gem_object_get_pages(obj); + err = ____i915_gem_object_get_pages(obj, ww); if (err) return err; @@ -144,7 +146,7 @@ int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj) retry: err = i915_gem_object_lock(obj, &ww); if (!err) - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, &ww); if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&ww); @@ -362,7 +364,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, if (unlikely(!i915_gem_object_has_pages(obj))) { GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); - err = ____i915_gem_object_get_pages(obj); + err = ____i915_gem_object_get_pages(obj, ww); if (err) return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 5b732b0fe5ce..48b2258091c3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -641,7 +641,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, if (WARN_ON(!i915_gem_object_trylock(obj))) return -EBUSY; - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, NULL); if (!err) i915_gem_object_init_memory_region(obj, mem); i915_gem_object_unlock(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 693d0dbe9ed2..71c928c789b3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -261,7 +261,7 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool 
i915_gem_userptr_put_pages(obj, pages); if (get_pages) - err = ____i915_gem_object_get_pages(obj); + err = ____i915_gem_object_get_pages(obj, NULL); return err; } @@ -390,7 +390,7 @@ int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) * it doesn't matter if we collide with the mmu notifier, * and -EAGAIN handling is not required. */ - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, NULL); if (!err) i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 994e4ea28903..38c1ba203071 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -30,7 +30,7 @@ int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) int err; i915_gem_object_lock(obj, NULL); - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, NULL); i915_gem_object_unlock(obj); if (err) return err; @@ -43,7 +43,7 @@ int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object { int err; - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_pin_pages(obj, NULL); if (err) return err; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 960e94997c9b..d23a417295f8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -212,7 +212,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, if (ret) return ret; - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, NULL); if (ret) goto err_unlock; @@ -311,7 +311,7 @@ static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj, vma = NULL; } - ret = i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, &ww); if (ret) { if (drm_mm_node_allocated(node)) { ggtt->vm.clear_range(&ggtt->vm, node->start, node->size); @@ -640,7 +640,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, if (ret) return ret; - ret = 
i915_gem_object_pin_pages(obj); + ret = i915_gem_object_pin_pages(obj, NULL); if (ret) goto err_unlock; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index c5b9f30ac0a3..03291c032814 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -785,7 +785,8 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) return pinned; } -static int vma_get_pages(struct i915_vma *vma) +static int vma_get_pages(struct i915_vma *vma, + struct i915_gem_ww_ctx *ww) { int err = 0; @@ -798,7 +799,7 @@ static int vma_get_pages(struct i915_vma *vma) if (!atomic_read(&vma->pages_count)) { if (vma->obj) { - err = i915_gem_object_pin_pages(vma->obj); + err = i915_gem_object_pin_pages(vma->obj, ww); if (err) goto unlock; } @@ -876,7 +877,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) return 0; - err = vma_get_pages(vma); + err = vma_get_pages(vma, ww); if (err) return err; -- 2.31.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2021-03-23 15:58 UTC|newest] Thread overview: 207+ messages / expand[flat|nested] mbox.gz Atom feed top 2021-03-23 15:49 [PATCH v9 00/70] drm/i915: Remove obj->mm.lock! Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 01/70] drm/i915: Do not share hwsp across contexts any more, v8 Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 02/70] drm/i915: Pin timeline map after first timeline pin, v4 Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 03/70] drm/i915: Move cmd parser pinning to execbuffer Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 04/70] drm/i915: Add missing -EDEADLK handling to execbuf pinning, v2 Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 05/70] drm/i915: Ensure we hold the object mutex in pin correctly Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 06/70] drm/i915: Add gem object locking to madvise Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 07/70] drm/i915: Move HAS_STRUCT_PAGE to obj->flags Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 08/70] drm/i915: Rework struct phys attachment handling Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 09/70] drm/i915: Convert i915_gem_object_attach_phys() to ww locking, v2 Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:49 ` [PATCH v9 10/70] drm/i915: make lockdep slightly happier about execbuf Maarten Lankhorst 2021-03-23 15:49 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 11/70] drm/i915: Disable userptr pread/pwrite support Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] 
" Maarten Lankhorst 2021-03-24 13:57 ` Jason Ekstrand 2021-03-24 13:57 ` Jason Ekstrand 2021-03-23 15:50 ` [PATCH v9 12/70] drm/i915: No longer allow exporting userptr through dma-buf Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 13/70] drm/i915: Reject more ioctls for userptr, v2 Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 14/70] drm/i915: Reject UNSYNCHRONIZED " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 15/70] drm/i915: Make compilation of userptr code depend on MMU_NOTIFIER Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 16/70] drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7 Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 11:28 ` Daniel Vetter 2021-03-24 11:28 ` Daniel Vetter 2021-03-24 11:34 ` Thomas Hellström (Intel) 2021-03-24 11:34 ` Thomas Hellström (Intel) 2021-03-25 9:23 ` [PATCH] drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v8 Maarten Lankhorst 2021-03-25 9:23 ` [Intel-gfx] " Maarten Lankhorst 2021-03-25 9:55 ` Thomas Hellström (Intel) 2021-03-25 9:55 ` [Intel-gfx] " Thomas Hellström (Intel) 2021-03-25 10:27 ` Daniel Vetter 2021-03-25 10:27 ` [Intel-gfx] " Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 17/70] drm/i915: Flatten obj->mm.lock Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 11:13 ` Daniel Vetter 2021-03-24 11:13 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 18/70] drm/i915: Populate logical context during first pin Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 19/70] drm/i915: Make ring submission compatible with obj->mm.lock removal, v2 Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 20/70] drm/i915: Handle ww locking in 
init_status_page Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 21/70] drm/i915: Rework clflush to work correctly without obj->mm.lock Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 22/70] drm/i915: Pass ww ctx to intel_pin_to_display_plane Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 23/70] drm/i915: Add object locking to vm_fault_cpu Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 24/70] drm/i915: Move pinning to inside engine_wa_list_verify() Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 25/70] drm/i915: Take reservation lock around i915_vma_pin Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 12:35 ` Daniel Vetter 2021-03-24 12:35 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 26/70] drm/i915: Make lrc_init_wa_ctx compatible with ww locking, v3 Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 27/70] drm/i915: Make __engine_unpark() compatible with ww locking Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 12:37 ` Daniel Vetter 2021-03-24 12:37 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 28/70] drm/i915: Take obj lock around set_domain ioctl Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 14:12 ` Daniel Vetter 2021-03-24 14:12 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 29/70] drm/i915: Defer pin calls in buffer pool until first use by caller Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 30/70] drm/i915: Fix pread/pwrite to work with new locking rules Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 14:45 ` Daniel Vetter 2021-03-24 14:45 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 
31/70] drm/i915: Fix workarounds selftest, part 1 Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 16:16 ` Daniel Vetter 2021-03-24 16:16 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 32/70] drm/i915: Prepare for obj->mm.lock removal, v2 Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 16:18 ` Matthew Auld 2021-03-23 16:18 ` Matthew Auld 2021-03-23 20:25 ` Thomas Hellström 2021-03-23 20:25 ` Thomas Hellström 2021-03-23 15:50 ` [PATCH v9 33/70] drm/i915: Add igt_spinner_pin() to allow for ww locking around spinner Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 34/70] drm/i915: Add ww locking around vm_access() Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 16:21 ` Daniel Vetter 2021-03-24 16:21 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 35/70] drm/i915: Increase ww locking for perf Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 36/70] drm/i915: Lock ww in ucode objects correctly Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 37/70] drm/i915: Add ww locking to dma-buf ops, v2 Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 38/70] drm/i915: Add missing ww lock in intel_dsb_prepare Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 39/70] drm/i915: Fix ww locking in shmem_create_from_object Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 40/70] drm/i915: Use a single page table lock for each gtt Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 41/70] drm/i915/selftests: Prepare huge_pages testcases for obj->mm.lock removal Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 42/70] 
drm/i915/selftests: Prepare client blit " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 43/70] drm/i915/selftests: Prepare coherency tests " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 44/70] drm/i915/selftests: Prepare context " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 16:40 ` Daniel Vetter 2021-03-24 16:40 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 45/70] drm/i915/selftests: Prepare dma-buf " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 46/70] drm/i915/selftests: Prepare execbuf " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 47/70] drm/i915/selftests: Prepare mman testcases " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 48/70] drm/i915/selftests: Prepare object tests " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 49/70] drm/i915/selftests: Prepare object blit " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 50/70] drm/i915/selftests: Prepare igt_gem_utils " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 51/70] drm/i915/selftests: Prepare context selftest " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 52/70] drm/i915/selftests: Prepare hangcheck " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 53/70] drm/i915/selftests: Prepare execlists and lrc selftests " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 54/70] drm/i915/selftests: Prepare mocs tests " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 55/70] 
drm/i915/selftests: Prepare ring submission " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 56/70] drm/i915/selftests: Prepare timeline tests " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 57/70] drm/i915/selftests: Prepare i915_request " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 58/70] drm/i915/selftests: Prepare memory region " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 59/70] drm/i915/selftests: Prepare cs engine " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 60/70] drm/i915/selftests: Prepare gtt " Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 61/70] drm/i915: Finally remove obj->mm.lock Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 62/70] drm/i915: Keep userpointer bindings if seqcount is unchanged, v2 Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 63/70] drm/i915: Move gt_revoke() slightly Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 17:00 ` Daniel Vetter 2021-03-24 17:00 ` Daniel Vetter 2021-03-24 17:15 ` Ville Syrjälä 2021-03-24 17:15 ` Ville Syrjälä 2021-03-24 17:16 ` Daniel Vetter 2021-03-24 17:16 ` Daniel Vetter 2021-03-24 17:58 ` Ville Syrjälä 2021-03-24 17:58 ` Ville Syrjälä 2021-03-23 15:50 ` [PATCH v9 64/70] drm/i915: Add missing -EDEADLK path in execbuffer ggtt pinning Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 17:05 ` Daniel Vetter 2021-03-24 17:05 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 65/70] drm/i915: Fix pin_map in scheduler selftests Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 17:14 ` Daniel Vetter 2021-03-24 17:14 ` 
Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 66/70] drm/i915: Add ww parameter to get_pages() callback Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 17:20 ` Daniel Vetter 2021-03-24 17:20 ` Daniel Vetter 2021-03-23 15:50 ` [PATCH v9 67/70] drm/i915: Add ww context to prepare_(read/write) Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 68/70] drm/i915: Pass ww ctx to pin_map Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 17:30 ` Matthew Auld 2021-03-23 17:30 ` Matthew Auld 2021-03-24 9:31 ` Maarten Lankhorst 2021-03-24 9:31 ` Maarten Lankhorst 2021-03-24 10:11 ` Daniel Vetter 2021-03-24 10:11 ` Daniel Vetter 2021-03-24 11:54 ` [PATCH] drm/i915: Pass ww ctx to pin_map, v2 Maarten Lankhorst 2021-03-24 11:54 ` [Intel-gfx] " Maarten Lankhorst 2021-03-23 15:50 ` Maarten Lankhorst [this message] 2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 69/70] drm/i915: Pass ww ctx to i915_gem_object_pin_pages Maarten Lankhorst 2021-03-23 15:50 ` [PATCH v9 70/70] drm/i915: Remove asynchronous vma binding Maarten Lankhorst 2021-03-23 15:50 ` [Intel-gfx] " Maarten Lankhorst 2021-03-24 17:19 ` Daniel Vetter 2021-03-24 17:19 ` Daniel Vetter 2021-03-23 16:07 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Remove obj->mm.lock! (rev18) Patchwork 2021-03-23 16:09 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork 2021-03-23 16:12 ` [Intel-gfx] ✗ Fi.CI.DOCS: " Patchwork 2021-03-23 16:35 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork 2021-03-24 12:44 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Remove obj->mm.lock! (rev19) Patchwork 2021-03-24 12:46 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork 2021-03-24 12:49 ` [Intel-gfx] ✗ Fi.CI.DOCS: " Patchwork 2021-03-24 13:13 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork 2021-03-25 21:28 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for drm/i915: Remove obj->mm.lock! (rev20) Patchwork
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20210323155059.628690-70-maarten.lankhorst@linux.intel.com \ --to=maarten.lankhorst@linux.intel.com \ --cc=dri-devel@lists.freedesktop.org \ --cc=intel-gfx@lists.freedesktop.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.