All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/7] dma-buf: fix inconsistent debug print
@ 2021-06-02 11:17 Christian König
  2021-06-02 11:17 ` [PATCH 2/7] dma-buf: add SPDX header and fix style in dma-resv.c Christian König
                   ` (6 more replies)
  0 siblings, 7 replies; 26+ messages in thread
From: Christian König @ 2021-06-02 11:17 UTC (permalink / raw)
  To: daniel, jason, dri-devel

The code tries to acquire the rcu protected fence list, but then ignores
individual fences which have been modified while holding the rcu.

Stop that madness and just note cleanly that the list was concurrently modified.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index eadd1eaa2fb5..d3b4e370dbc1 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1383,22 +1383,17 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 				buf_obj->name ?: "");
 
 		robj = buf_obj->resv;
-		while (true) {
-			seq = read_seqcount_begin(&robj->seq);
-			rcu_read_lock();
-			fobj = rcu_dereference(robj->fence);
-			shared_count = fobj ? fobj->shared_count : 0;
-			fence = rcu_dereference(robj->fence_excl);
-			if (!read_seqcount_retry(&robj->seq, seq))
-				break;
-			rcu_read_unlock();
-		}
-
+		seq = read_seqcount_begin(&robj->seq);
+		rcu_read_lock();
+		fence = rcu_dereference(robj->fence_excl);
 		if (fence)
 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
 				   fence->ops->get_driver_name(fence),
 				   fence->ops->get_timeline_name(fence),
 				   dma_fence_is_signaled(fence) ? "" : "un");
+
+		fobj = rcu_dereference(robj->fence);
+		shared_count = fobj ? fobj->shared_count : 0;
 		for (i = 0; i < shared_count; i++) {
 			fence = rcu_dereference(fobj->shared[i]);
 			if (!dma_fence_get_rcu(fence))
@@ -1410,6 +1405,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 			dma_fence_put(fence);
 		}
 		rcu_read_unlock();
+		if (read_seqcount_retry(&robj->seq, seq))
+			seq_printf(s, "\tFences concurrently modified\n");
 
 		seq_puts(s, "\tAttached Devices:\n");
 		attach_count = 0;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 2/7] dma-buf: add SPDX header and fix style in dma-resv.c
  2021-06-02 11:17 [PATCH 1/7] dma-buf: fix inconsistent debug print Christian König
@ 2021-06-02 11:17 ` Christian König
  2021-06-02 12:34   ` Daniel Vetter
  2021-06-02 11:17 ` [PATCH 3/7] dma-buf: cleanup dma-resv shared fence debugging a bit Christian König
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 26+ messages in thread
From: Christian König @ 2021-06-02 11:17 UTC (permalink / raw)
  To: daniel, jason, dri-devel

No functional change.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 128 +++++++++++++++++++------------------
 1 file changed, 65 insertions(+), 63 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 6ddbeb5dfbf6..87f5d82d992a 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
 /*
  * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
  *
@@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 	kfree_rcu(list, rcu);
 }
 
-#if IS_ENABLED(CONFIG_LOCKDEP)
-static int __init dma_resv_lockdep(void)
-{
-	struct mm_struct *mm = mm_alloc();
-	struct ww_acquire_ctx ctx;
-	struct dma_resv obj;
-	struct address_space mapping;
-	int ret;
-
-	if (!mm)
-		return -ENOMEM;
-
-	dma_resv_init(&obj);
-	address_space_init_once(&mapping);
-
-	mmap_read_lock(mm);
-	ww_acquire_init(&ctx, &reservation_ww_class);
-	ret = dma_resv_lock(&obj, &ctx);
-	if (ret == -EDEADLK)
-		dma_resv_lock_slow(&obj, &ctx);
-	fs_reclaim_acquire(GFP_KERNEL);
-	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
-	i_mmap_lock_write(&mapping);
-	i_mmap_unlock_write(&mapping);
-#ifdef CONFIG_MMU_NOTIFIER
-	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
-	__dma_fence_might_wait();
-	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
-#else
-	__dma_fence_might_wait();
-#endif
-	fs_reclaim_release(GFP_KERNEL);
-	ww_mutex_unlock(&obj.lock);
-	ww_acquire_fini(&ctx);
-	mmap_read_unlock(mm);
-	
-	mmput(mm);
-
-	return 0;
-}
-subsys_initcall(dma_resv_lockdep);
-#endif
-
 /**
  * dma_resv_init - initialize a reservation object
  * @obj: the reservation object
@@ -196,9 +154,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 	if (old && old->shared_max) {
 		if ((old->shared_count + num_fences) <= old->shared_max)
 			return 0;
-		else
-			max = max(old->shared_count + num_fences,
-				  old->shared_max * 2);
+		max = max(old->shared_count + num_fences, old->shared_max * 2);
 	} else {
 		max = max(4ul, roundup_pow_of_two(num_fences));
 	}
@@ -337,17 +293,17 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
 /**
-* dma_resv_copy_fences - Copy all fences from src to dst.
-* @dst: the destination reservation object
-* @src: the source reservation object
-*
-* Copy all fences from src to dst. dst-lock must be held.
-*/
+ * dma_resv_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. dst-lock must be held.
+ */
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
 	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
-	unsigned i;
+	unsigned int i;
 
 	dma_resv_assert_held(dst);
 
@@ -356,7 +312,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 retry:
 	if (src_list) {
-		unsigned shared_count = src_list->shared_count;
+		unsigned int shared_count = src_list->shared_count;
 
 		rcu_read_unlock();
 
@@ -373,6 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 		dst_list->shared_count = 0;
 		for (i = 0; i < src_list->shared_count; ++i) {
+			struct dma_fence __rcu **dst;
 			struct dma_fence *fence;
 
 			fence = rcu_dereference(src_list->shared[i]);
@@ -391,7 +348,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 				continue;
 			}
 
-			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+			dst = &dst_list->shared[dst_list->shared_count++];
+			rcu_assign_pointer(*dst, fence);
 		}
 	} else {
 		dst_list = NULL;
@@ -431,7 +389,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  */
 int dma_resv_get_fences_rcu(struct dma_resv *obj,
 			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
+			    unsigned int *pshared_count,
 			    struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
@@ -533,9 +491,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 			       bool wait_all, bool intr,
 			       unsigned long timeout)
 {
-	struct dma_fence *fence;
-	unsigned seq, shared_count;
 	long ret = timeout ? timeout : 1;
+	unsigned int seq, shared_count;
+	struct dma_fence *fence;
 	int i;
 
 retry:
@@ -565,8 +523,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 			shared_count = fobj->shared_count;
 
 		for (i = 0; !fence && i < shared_count; ++i) {
-			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *lfence;
 
+			lfence = rcu_dereference(fobj->shared[i]);
 			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 				     &lfence->flags))
 				continue;
@@ -633,7 +592,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  */
 bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
-	unsigned seq, shared_count;
+	unsigned int seq, shared_count;
 	int ret;
 
 	rcu_read_lock();
@@ -643,16 +602,16 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	seq = read_seqcount_begin(&obj->seq);
 
 	if (test_all) {
-		unsigned i;
-
 		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		unsigned int i;
 
 		if (fobj)
 			shared_count = fobj->shared_count;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence;
 
+			fence = rcu_dereference(fobj->shared[i]);
 			ret = dma_resv_test_signaled_single(fence);
 			if (ret < 0)
 				goto retry;
@@ -681,3 +640,46 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+	struct mm_struct *mm = mm_alloc();
+	struct ww_acquire_ctx ctx;
+	struct dma_resv obj;
+	struct address_space mapping;
+	int ret;
+
+	if (!mm)
+		return -ENOMEM;
+
+	dma_resv_init(&obj);
+	address_space_init_once(&mapping);
+
+	mmap_read_lock(mm);
+	ww_acquire_init(&ctx, &reservation_ww_class);
+	ret = dma_resv_lock(&obj, &ctx);
+	if (ret == -EDEADLK)
+		dma_resv_lock_slow(&obj, &ctx);
+	fs_reclaim_acquire(GFP_KERNEL);
+	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+	i_mmap_lock_write(&mapping);
+	i_mmap_unlock_write(&mapping);
+#ifdef CONFIG_MMU_NOTIFIER
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+	__dma_fence_might_wait();
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+#else
+	__dma_fence_might_wait();
+#endif
+	fs_reclaim_release(GFP_KERNEL);
+	ww_mutex_unlock(&obj.lock);
+	ww_acquire_fini(&ctx);
+	mmap_read_unlock(mm);
+
+	mmput(mm);
+
+	return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
-- 
2.25.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 3/7] dma-buf: cleanup dma-resv shared fence debugging a bit
  2021-06-02 11:17 [PATCH 1/7] dma-buf: fix inconsistent debug print Christian König
  2021-06-02 11:17 ` [PATCH 2/7] dma-buf: add SPDX header and fix style in dma-resv.c Christian König
@ 2021-06-02 11:17 ` Christian König
  2021-06-02 12:41   ` Daniel Vetter
  2021-06-02 11:17 ` [PATCH 4/7] dma-buf: rename and cleanup dma_resv_get_excl Christian König
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 26+ messages in thread
From: Christian König @ 2021-06-02 11:17 UTC (permalink / raw)
  To: daniel, jason, dri-devel

Make that a function instead of inline.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 18 ++++++++++++++++++
 include/linux/dma-resv.h   | 15 +++++++--------
 2 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 87f5d82d992a..6c6195315e9f 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -208,6 +208,24 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 }
 EXPORT_SYMBOL(dma_resv_reserve_shared);
 
+#ifdef CONFIG_DEBUG_MUTEXES
+/**
+ * dma_resv_reset_shared_max - reset shared fences for debugging
+ * @obj: the dma_resv object to reset
+ *
+ * Reset the shared_max to test if drivers do correct slot allocation.
+ */
+void dma_resv_reset_shared_max(struct dma_resv *obj)
+{
+	/* Test shared fence slot reservation */
+	if (rcu_access_pointer(obj->fence)) {
+		struct dma_resv_list *fence = dma_resv_get_list(obj);
+
+		fence->shared_max = fence->shared_count;
+	}
+}
+#endif
+
 /**
  * dma_resv_add_shared_fence - Add a fence to a shared slot
  * @obj: the reservation object
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index d44a77e8a7e3..f32a3d176513 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -92,6 +92,12 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
 					 dma_resv_held(obj));
 }
 
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_shared_max(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
+#endif
+
 /**
  * dma_resv_lock - lock the reservation object
  * @obj: the reservation object
@@ -215,14 +221,7 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
  */
 static inline void dma_resv_unlock(struct dma_resv *obj)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
-	/* Test shared fence slot reservation */
-	if (rcu_access_pointer(obj->fence)) {
-		struct dma_resv_list *fence = dma_resv_get_list(obj);
-
-		fence->shared_max = fence->shared_count;
-	}
-#endif
+	dma_resv_reset_shared_max(obj);
 	ww_mutex_unlock(&obj->lock);
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 4/7] dma-buf: rename and cleanup dma_resv_get_excl
  2021-06-02 11:17 [PATCH 1/7] dma-buf: fix inconsistent debug print Christian König
  2021-06-02 11:17 ` [PATCH 2/7] dma-buf: add SPDX header and fix style in dma-resv.c Christian König
  2021-06-02 11:17 ` [PATCH 3/7] dma-buf: cleanup dma-resv shared fence debugging a bit Christian König
@ 2021-06-02 11:17 ` Christian König
  2021-06-02 12:43   ` Daniel Vetter
  2021-06-02 12:46   ` Daniel Vetter
  2021-06-02 11:17 ` [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list Christian König
                   ` (3 subsequent siblings)
  6 siblings, 2 replies; 26+ messages in thread
From: Christian König @ 2021-06-02 11:17 UTC (permalink / raw)
  To: daniel, jason, dri-devel

When the comment needs to state explicitly that this
doesn't get a reference to the object then the function
is named rather badly.

Rename the function and use rcu_dereference_check(), this
way it can be used from both rcu as well as lock protected
critical sections.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c                |  4 ++--
 drivers/dma-buf/dma-resv.c               | 10 +++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem.c    |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c |  3 +--
 drivers/gpu/drm/msm/msm_gem.c            |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo.c     |  2 +-
 drivers/gpu/drm/nouveau/nouveau_fence.c  |  2 +-
 drivers/gpu/drm/radeon/radeon_display.c  |  2 +-
 drivers/gpu/drm/radeon/radeon_sync.c     |  2 +-
 drivers/gpu/drm/radeon/radeon_uvd.c      |  2 +-
 drivers/gpu/drm/ttm/ttm_bo.c             |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  2 +-
 include/linux/dma-resv.h                 | 13 +++++--------
 15 files changed, 25 insertions(+), 29 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index d3b4e370dbc1..4d0ddc712f1e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -234,7 +234,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		shared_count = fobj->shared_count;
 	else
 		shared_count = 0;
-	fence_excl = rcu_dereference(resv->fence_excl);
+	fence_excl = dma_resv_exclusive(resv);
 	if (read_seqcount_retry(&resv->seq, seq)) {
 		rcu_read_unlock();
 		goto retry;
@@ -1385,7 +1385,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 		robj = buf_obj->resv;
 		seq = read_seqcount_begin(&robj->seq);
 		rcu_read_lock();
-		fence = rcu_dereference(robj->fence_excl);
+		fence = dma_resv_exclusive(robj);
 		if (fence)
 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
 				   fence->ops->get_driver_name(fence),
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 6c6195315e9f..81b032b43457 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -281,7 +281,7 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
  */
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct dma_fence *old_fence = dma_resv_get_excl(obj);
+	struct dma_fence *old_fence = dma_resv_exclusive(obj);
 	struct dma_resv_list *old;
 	u32 i = 0;
 
@@ -377,7 +377,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	rcu_read_unlock();
 
 	src_list = dma_resv_get_list(dst);
-	old = dma_resv_get_excl(dst);
+	old = dma_resv_exclusive(dst);
 
 	write_seqcount_begin(&dst->seq);
 	/* write_seqcount_begin provides the necessary memory barrier */
@@ -425,7 +425,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 		rcu_read_lock();
 		seq = read_seqcount_begin(&obj->seq);
 
-		fence_excl = rcu_dereference(obj->fence_excl);
+		fence_excl = dma_resv_exclusive(obj);
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
@@ -520,7 +520,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	rcu_read_lock();
 	i = -1;
 
-	fence = rcu_dereference(obj->fence_excl);
+	fence = dma_resv_exclusive(obj);
 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		if (!dma_fence_get_rcu(fence))
 			goto unlock_retry;
@@ -642,7 +642,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	}
 
 	if (!shared_count) {
-		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+		struct dma_fence *fence_excl = dma_resv_exclusive(obj);
 
 		if (fence_excl) {
 			ret = dma_resv_test_signaled_single(fence_excl);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 73c76a3e2b12..cd5146fa6fb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	if (!amdgpu_vm_ready(vm))
 		goto out_unlock;
 
-	fence = dma_resv_get_excl(bo->tbo.base.resv);
+	fence = dma_resv_exclusive(bo->tbo.base.resv);
 	if (fence) {
 		amdgpu_bo_fence(bo, fence, true);
 		fence = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 4e558632a5d2..c84d5b843985 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -210,7 +210,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return -EINVAL;
 
 	/* always sync to the exclusive fence */
-	f = dma_resv_get_excl(resv);
+	f = dma_resv_exclusive(resv);
 	r = amdgpu_sync_fence(sync, f);
 
 	flist = dma_resv_get_list(resv);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index db69f19ab5bc..d4f54dea8ac1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -471,7 +471,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		}
 	}
 
-	fence = rcu_dereference(robj->fence_excl);
+	fence = dma_resv_exclusive(robj);
 	if (fence)
 		etnaviv_gem_describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 25235ef630c1..02312a0c3a36 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -113,8 +113,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	seq = raw_read_seqcount(&obj->base.resv->seq);
 
 	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy =
-		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+	args->busy = busy_check_writer(dma_resv_exclusive(obj->base.resv));
 
 	/* Translate shared fences to READ set of engines */
 	list = rcu_dereference(obj->base.resv->fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 56df86e5f740..54c1b53426d6 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -819,7 +819,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 
 	fobj = dma_resv_get_list(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
-		fence = dma_resv_get_excl(obj->resv);
+		fence = dma_resv_exclusive(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
 			ret = dma_fence_wait(fence, true);
@@ -1035,7 +1035,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 		}
 	}
 
-	fence = rcu_dereference(robj->fence_excl);
+	fence = dma_resv_exclusive(robj);
 	if (fence)
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e688ca77483d..ac0ebcc4ebb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -955,7 +955,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+	struct dma_fence *fence = dma_resv_exclusive(bo->base.resv);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5dcbf67de7e..a6cb35181aee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -356,7 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 	}
 
 	fobj = dma_resv_get_list(resv);
-	fence = dma_resv_get_excl(resv);
+	fence = dma_resv_exclusive(resv);
 
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
 		struct nouveau_channel *prev = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 652af7a134bd..57c910e5ae77 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
+	work->fence = dma_fence_get(dma_resv_exclusive(new_rbo->tbo.base.resv));
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 5d3302945076..e476f90ef1c1 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -98,7 +98,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 	int r = 0;
 
 	/* always sync to the exclusive fence */
-	f = dma_resv_get_excl(resv);
+	f = dma_resv_exclusive(resv);
 	fence = f ? to_radeon_fence(f) : NULL;
 	if (fence && fence->rdev == rdev)
 		radeon_sync_fence(sync, fence);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index dfa9fdbe98da..02d4bbdc9111 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		return -EINVAL;
 	}
 
-	f = dma_resv_get_excl(bo->tbo.base.resv);
+	f = dma_resv_exclusive(bo->tbo.base.resv);
 	if (f) {
 		r = radeon_fence_wait((struct radeon_fence *)f, false);
 		if (r) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5a7ab4b35b2d..92361556bf0b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -262,7 +262,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 
 	rcu_read_lock();
 	fobj = rcu_dereference(resv->fence);
-	fence = rcu_dereference(resv->fence_excl);
+	fence = dma_resv_exclusive(resv);
 	if (fence && !fence->ops->signaled)
 		dma_fence_enable_sw_signaling(fence);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 62ea920addc3..c78f38ee1c20 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
 		if (bo->moving)
 			dma_fence_put(bo->moving);
 		bo->moving = dma_fence_get
-			(dma_resv_get_excl(bo->base.resv));
+			(dma_resv_exclusive(bo->base.resv));
 	}
 
 	return 0;
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index f32a3d176513..7549ec5eb35c 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -226,22 +226,19 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
 }
 
 /**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
+ * dma_resv_exclusive - return the object's exclusive fence
  * @obj: the reservation object
  *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
+ * Returns the exclusive fence (if any). Caller must either hold the objects
+ * lock or the rcu read side lock.
  *
  * RETURNS
  * The exclusive fence or NULL
  */
 static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
+dma_resv_exclusive(struct dma_resv *obj)
 {
-	return rcu_dereference_protected(obj->fence_excl,
-					 dma_resv_held(obj));
+	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
 }
 
 /**
-- 
2.25.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list
  2021-06-02 11:17 [PATCH 1/7] dma-buf: fix inconsistent debug print Christian König
                   ` (2 preceding siblings ...)
  2021-06-02 11:17 ` [PATCH 4/7] dma-buf: rename and cleanup dma_resv_get_excl Christian König
@ 2021-06-02 11:17 ` Christian König
  2021-06-02 12:46   ` Daniel Vetter
  2021-06-02 20:22   ` Jason Ekstrand
  2021-06-02 11:17 ` [PATCH 6/7] dma-buf: rename dma_resv_get_excl_rcu to _unlocked Christian König
                   ` (2 subsequent siblings)
  6 siblings, 2 replies; 26+ messages in thread
From: Christian König @ 2021-06-02 11:17 UTC (permalink / raw)
  To: daniel, jason, dri-devel

When the comment needs to state explicitly that this doesn't get a reference
to the object then the function is named rather badly.

Rename the function and use it in even more places.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c                    | 32 +++++++++----------
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c      |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  2 +-
 drivers/gpu/drm/msm/msm_gem.c                 |  4 +--
 drivers/gpu/drm/nouveau/nouveau_fence.c       |  2 +-
 drivers/gpu/drm/qxl/qxl_debugfs.c             |  2 +-
 drivers/gpu/drm/radeon/radeon_sync.c          |  2 +-
 drivers/gpu/drm/ttm/ttm_bo.c                  |  2 +-
 include/linux/dma-resv.h                      | 25 +++++++--------
 13 files changed, 39 insertions(+), 42 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 81b032b43457..b1a1a31dc009 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -149,8 +149,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_get_list(obj);
-
+	old = dma_resv_shared(obj);
 	if (old && old->shared_max) {
 		if ((old->shared_count + num_fences) <= old->shared_max)
 			return 0;
@@ -217,12 +216,13 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
  */
 void dma_resv_reset_shared_max(struct dma_resv *obj)
 {
-	/* Test shared fence slot reservation */
-	if (rcu_access_pointer(obj->fence)) {
-		struct dma_resv_list *fence = dma_resv_get_list(obj);
+	struct dma_resv_list *fences = dma_resv_shared(obj);
 
-		fence->shared_max = fence->shared_count;
-	}
+	dma_resv_assert_held(obj);
+
+	/* Test shared fence slot reservation */
+	if (fences)
+		fences->shared_max = fences->shared_count;
 }
 #endif
 
@@ -244,7 +244,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 
 	dma_resv_assert_held(obj);
 
-	fobj = dma_resv_get_list(obj);
+	fobj = dma_resv_shared(obj);
 	count = fobj->shared_count;
 
 	write_seqcount_begin(&obj->seq);
@@ -287,7 +287,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_get_list(obj);
+	old = dma_resv_shared(obj);
 	if (old)
 		i = old->shared_count;
 
@@ -326,7 +326,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	dma_resv_assert_held(dst);
 
 	rcu_read_lock();
-	src_list = rcu_dereference(src->fence);
+	src_list = dma_resv_shared(src);
 
 retry:
 	if (src_list) {
@@ -339,7 +339,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 			return -ENOMEM;
 
 		rcu_read_lock();
-		src_list = rcu_dereference(src->fence);
+		src_list = dma_resv_shared(src);
 		if (!src_list || src_list->shared_count > shared_count) {
 			kfree(dst_list);
 			goto retry;
@@ -357,7 +357,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 			if (!dma_fence_get_rcu(fence)) {
 				dma_resv_list_free(dst_list);
-				src_list = rcu_dereference(src->fence);
+				src_list = dma_resv_shared(src);
 				goto retry;
 			}
 
@@ -376,7 +376,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	new = dma_fence_get_rcu_safe(&src->fence_excl);
 	rcu_read_unlock();
 
-	src_list = dma_resv_get_list(dst);
+	src_list = dma_resv_shared(dst);
 	old = dma_resv_exclusive(dst);
 
 	write_seqcount_begin(&dst->seq);
@@ -429,7 +429,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
-		fobj = rcu_dereference(obj->fence);
+		fobj = dma_resv_shared(obj);
 		if (fobj)
 			sz += sizeof(*shared) * fobj->shared_max;
 
@@ -535,7 +535,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	}
 
 	if (wait_all) {
-		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = dma_resv_shared(obj);
 
 		if (fobj)
 			shared_count = fobj->shared_count;
@@ -620,7 +620,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	seq = read_seqcount_begin(&obj->seq);
 
 	if (test_all) {
-		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = dma_resv_shared(obj);
 		unsigned int i;
 
 		if (fobj)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d5e6519bdea1..e90495ca49fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	if (!ef)
 		return -EINVAL;
 
-	old = dma_resv_get_list(resv);
+	old = dma_resv_shared(resv);
 	if (!old)
 		return 0;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 6dd0ea6e9e24..3b13c8a38c4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -49,7 +49,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
 	unsigned int count;
 	int r;
 
-	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
+	if (!dma_resv_shared(obj)) /* no shared fences to convert */
 		return 0;
 
 	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c84d5b843985..c50d9f92a0cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -213,7 +213,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	f = dma_resv_exclusive(resv);
 	r = amdgpu_sync_fence(sync, f);
 
-	flist = dma_resv_get_list(resv);
+	flist = dma_resv_shared(resv);
 	if (!flist || r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 663aa7d2e2ea..ddb6ce7d48bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1338,7 +1338,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = dma_resv_get_list(bo->base.resv);
+	flist = dma_resv_shared(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index d4f54dea8ac1..4d43b8630f0e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -461,7 +461,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 			off, etnaviv_obj->vaddr, obj->size);
 
 	rcu_read_lock();
-	fobj = rcu_dereference(robj->fence);
+	fobj = dma_resv_shared(robj);
 	if (fobj) {
 		unsigned int i, shared_count = fobj->shared_count;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 02312a0c3a36..3f94becac541 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -116,7 +116,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	args->busy = busy_check_writer(dma_resv_exclusive(obj->base.resv));
 
 	/* Translate shared fences to READ set of engines */
-	list = rcu_dereference(obj->base.resv->fence);
+	list = dma_resv_shared(obj->base.resv);
 	if (list) {
 		unsigned int shared_count = list->shared_count, i;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 54c1b53426d6..43af91df552e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -817,7 +817,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 	struct dma_fence *fence;
 	int i, ret;
 
-	fobj = dma_resv_get_list(obj->resv);
+	fobj = dma_resv_shared(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
 		fence = dma_resv_exclusive(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
@@ -1025,7 +1025,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 	}
 
 	rcu_read_lock();
-	fobj = rcu_dereference(robj->fence);
+	fobj = dma_resv_shared(robj);
 	if (fobj) {
 		unsigned int i, shared_count = fobj->shared_count;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index a6cb35181aee..5ce441c655ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -355,7 +355,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 			return ret;
 	}
 
-	fobj = dma_resv_get_list(resv);
+	fobj = dma_resv_shared(resv);
 	fence = dma_resv_exclusive(resv);
 
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 183d15e2cf58..0acc70a6d3dd 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 		int rel;
 
 		rcu_read_lock();
-		fobj = rcu_dereference(bo->tbo.base.resv->fence);
+		fobj = dma_resv_shared(bo->tbo.base.resv);
 		rel = fobj ? fobj->shared_count : 0;
 		rcu_read_unlock();
 
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index e476f90ef1c1..a9cdb88da173 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -105,7 +105,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 	else if (f)
 		r = dma_fence_wait(f, true);
 
-	flist = dma_resv_get_list(resv);
+	flist = dma_resv_shared(resv);
 	if (shared || !flist || r)
 		return r;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 92361556bf0b..c41ef0caa492 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -261,7 +261,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 	int i;
 
 	rcu_read_lock();
-	fobj = rcu_dereference(resv->fence);
+	fobj = dma_resv_shared(resv);
 	fence = dma_resv_exclusive(resv);
 	if (fence && !fence->ops->signaled)
 		dma_fence_enable_sw_signaling(fence);
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 7549ec5eb35c..98ac66fecb71 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -78,20 +78,6 @@ struct dma_resv {
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list.  Does NOT take references to
- * the fence.  The obj->lock must be held.
- */
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
-{
-	return rcu_dereference_protected(obj->fence,
-					 dma_resv_held(obj));
-}
-
 #ifdef CONFIG_DEBUG_MUTEXES
 void dma_resv_reset_shared_max(struct dma_resv *obj);
 #else
@@ -267,6 +253,17 @@ dma_resv_get_excl_rcu(struct dma_resv *obj)
 	return fence;
 }
 
+/**
+ * dma_resv_shared - get the reservation object's shared fence list
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list. The obj->lock or rcu read side must be held.
+ */
+static inline struct dma_resv_list *dma_resv_shared(struct dma_resv *obj)
+{
+	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
+}
+
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 6/7] dma-buf: rename dma_resv_get_excl_rcu to _unlocked
  2021-06-02 11:17 [PATCH 1/7] dma-buf: fix inconsistent debug print Christian König
                   ` (3 preceding siblings ...)
  2021-06-02 11:17 ` [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list Christian König
@ 2021-06-02 11:17 ` Christian König
  2021-06-02 12:47   ` Daniel Vetter
  2021-06-02 20:25   ` Jason Ekstrand
  2021-06-02 11:17 ` [PATCH 7/7] dma-buf: drop the _rcu postfix on function names Christian König
  2021-06-02 12:33 ` [PATCH 1/7] dma-buf: fix inconsistent debug print Daniel Vetter
  6 siblings, 2 replies; 26+ messages in thread
From: Christian König @ 2021-06-02 11:17 UTC (permalink / raw)
  To: daniel, jason, dri-devel

That much better describes what the function is doing here.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_gem.c                    | 2 +-
 drivers/gpu/drm/drm_gem_atomic_helper.c      | 2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2 +-
 drivers/gpu/drm/i915/display/intel_display.c | 2 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.h   | 2 +-
 drivers/gpu/drm/i915/gem/i915_gem_wait.c     | 4 ++--
 drivers/gpu/drm/i915/i915_request.c          | 2 +-
 drivers/gpu/drm/i915/i915_sw_fence.c         | 2 +-
 drivers/gpu/drm/nouveau/dispnv50/wndw.c      | 2 +-
 drivers/gpu/drm/panfrost/panfrost_job.c      | 2 +-
 include/linux/dma-resv.h                     | 4 ++--
 11 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9989425e9875..263b4fb03303 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1375,7 +1375,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 
 	if (!write) {
 		struct dma_fence *fence =
-			dma_resv_get_excl_rcu(obj->resv);
+			dma_resv_get_excl_unlocked(obj->resv);
 
 		return drm_gem_fence_array_add(fence_array, fence);
 	}
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index a005c5a0ba46..a27135084ae5 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -147,7 +147,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
 		return 0;
 
 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	fence = dma_resv_get_excl_rcu(obj->resv);
+	fence = dma_resv_get_excl_unlocked(obj->resv);
 	drm_atomic_set_fence_for_plane(state, fence);
 
 	return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index d05c35994579..c942d2a8c252 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -195,7 +195,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			if (ret)
 				return ret;
 		} else {
-			bo->excl = dma_resv_get_excl_rcu(robj);
+			bo->excl = dma_resv_get_excl_unlocked(robj);
 		}
 
 	}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 384ff0bb6e19..f17c5f54feb6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -11040,7 +11040,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 		if (ret < 0)
 			goto unpin_fb;
 
-		fence = dma_resv_get_excl_rcu(obj->base.resv);
+		fence = dma_resv_get_excl_unlocked(obj->base.resv);
 		if (fence) {
 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 						   fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 2ebd79537aea..7c0eb425cb3b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -500,7 +500,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 	struct dma_fence *fence;
 
 	rcu_read_lock();
-	fence = dma_resv_get_excl_rcu(obj->base.resv);
+	fence = dma_resv_get_excl_unlocked(obj->base.resv);
 	rcu_read_unlock();
 
 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 4b9856d5ba14..c13aeddf5aa7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -73,7 +73,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 		 */
 		prune_fences = count && timeout >= 0;
 	} else {
-		excl = dma_resv_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_unlocked(resv);
 	}
 
 	if (excl && timeout >= 0)
@@ -170,7 +170,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 
 		kfree(shared);
 	} else {
-		excl = dma_resv_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_unlocked(obj->base.resv);
 	}
 
 	if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index bec9c3652188..c85494f411f4 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1611,7 +1611,7 @@ i915_request_await_object(struct i915_request *to,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = dma_resv_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_unlocked(obj->base.resv);
 	}
 
 	if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 2744558f3050..7aaf74552d06 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -606,7 +606,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = dma_resv_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_unlocked(resv);
 	}
 
 	if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0cb1f9d848d3..8d048bacd6f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 			asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
-	asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+	asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
 	asyw->image.offset[0] = nvbo->offset;
 
 	if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 6003cfeb1322..2df3e999a38d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -203,7 +203,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
 	int i;
 
 	for (i = 0; i < bo_count; i++)
-		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
+		implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv);
 }
 
 static void panfrost_attach_object_fences(struct drm_gem_object **bos,
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 98ac66fecb71..f6b71712c029 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -228,7 +228,7 @@ dma_resv_exclusive(struct dma_resv *obj)
 }
 
 /**
- * dma_resv_get_excl_rcu - get the reservation object's
+ * dma_resv_get_excl_unlocked - get the reservation object's
  * exclusive fence, without lock held.
  * @obj: the reservation object
  *
@@ -239,7 +239,7 @@ dma_resv_exclusive(struct dma_resv *obj)
  * The exclusive fence or NULL if none
  */
 static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
+dma_resv_get_excl_unlocked(struct dma_resv *obj)
 {
 	struct dma_fence *fence;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 7/7] dma-buf: drop the _rcu postfix on function names
  2021-06-02 11:17 [PATCH 1/7] dma-buf: fix inconsistent debug print Christian König
                   ` (4 preceding siblings ...)
  2021-06-02 11:17 ` [PATCH 6/7] dma-buf: rename dma_resv_get_excl_rcu to _unlocked Christian König
@ 2021-06-02 11:17 ` Christian König
  2021-06-02 12:49   ` Daniel Vetter
  2021-06-02 20:34   ` Jason Ekstrand
  2021-06-02 12:33 ` [PATCH 1/7] dma-buf: fix inconsistent debug print Daniel Vetter
  6 siblings, 2 replies; 26+ messages in thread
From: Christian König @ 2021-06-02 11:17 UTC (permalink / raw)
  To: daniel, jason, dri-devel

The functions can be called both in RCU context and
while holding the reservation object's lock.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c                     |  3 +--
 drivers/dma-buf/dma-resv.c                    | 24 +++++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c       |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c        |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  8 +++----
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  2 +-
 drivers/gpu/drm/drm_gem.c                     |  4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  2 +-
 drivers/gpu/drm/i915/dma_resv_utils.c         |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  2 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_wait.c      |  4 ++--
 drivers/gpu/drm/i915/i915_request.c           |  2 +-
 drivers/gpu/drm/i915/i915_sw_fence.c          |  2 +-
 drivers/gpu/drm/msm/msm_gem.c                 |  2 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c         |  2 +-
 drivers/gpu/drm/panfrost/panfrost_drv.c       |  2 +-
 drivers/gpu/drm/radeon/radeon_gem.c           |  6 ++---
 drivers/gpu/drm/radeon/radeon_mn.c            |  2 +-
 drivers/gpu/drm/ttm/ttm_bo.c                  | 12 +++++-----
 drivers/gpu/drm/vgem/vgem_fence.c             |  2 +-
 drivers/gpu/drm/virtio/virtgpu_ioctl.c        |  4 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            |  2 +-
 include/linux/dma-resv.h                      | 17 ++++---------
 31 files changed, 60 insertions(+), 70 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4d0ddc712f1e..f92931d8db51 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = dma_resv_wait_timeout_rcu(resv, write, true,
-						  MAX_SCHEDULE_TIMEOUT);
+	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index b1a1a31dc009..74fe64dc1ce3 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -393,7 +393,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 EXPORT_SYMBOL(dma_resv_copy_fences);
 
 /**
- * dma_resv_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
  * @pfence_excl: the returned exclusive fence (or NULL)
@@ -405,10 +405,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned int *pshared_count,
-			    struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned int *pshared_count,
+			struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
 	struct dma_fence *fence_excl;
@@ -491,10 +490,10 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 	*pshared = shared;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences);
 
 /**
- * dma_resv_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout - Wait on reservation's objects
  * shared and/or exclusive fences.
  * @obj: the reservation object
  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
@@ -505,9 +504,8 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
  * greater than zer on success.
  */
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
-			       bool wait_all, bool intr,
-			       unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout)
 {
 	long ret = timeout ? timeout : 1;
 	unsigned int seq, shared_count;
@@ -579,7 +577,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	rcu_read_unlock();
 	goto retry;
 }
-EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 
 
 static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
@@ -608,7 +606,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  * RETURNS
  * true if all fences signaled, else false
  */
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
 {
 	unsigned int seq, shared_count;
 	int ret;
@@ -657,7 +655,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
 
 #if IS_ENABLED(CONFIG_LOCKDEP)
 static int __init dma_resv_lockdep(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 49f73b5b89b0..004d01d2e1d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -203,7 +203,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
+	r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 3b13c8a38c4e..615be1697d49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -52,7 +52,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
 	if (!dma_resv_shared(obj)) /* no shared fences to convert */
 		return 0;
 
-	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+	r = dma_resv_get_fences(obj, NULL, &count, &fences);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index cd5146fa6fb6..dafc96032d7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -526,7 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true,
 						  timeout);
 
 	/* ret == 0 means not signaled,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b4971e90b98c..65a3422ec078 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	unsigned count;
 	int r;
 
-	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences(resv, NULL, &count, &fences);
 	if (r)
 		goto fallback;
 
@@ -156,7 +156,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	dma_resv_wait_timeout_rcu(resv, true, false,
+	dma_resv_wait_timeout(resv, true, false,
 					    MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 2741c28ff1b5..86de11a86a3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,7 +75,7 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
 
 	mmu_interval_set_seq(mni, cur_seq);
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
 				      MAX_SCHEDULE_TIMEOUT);
 	mutex_unlock(&adev->notifier_lock);
 	if (r <= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 03c6b63d1d54..821dec6d2f73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -756,7 +756,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
 						MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 82f0542c7792..3773f5ff6f0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1126,7 +1126,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
+		r = dma_resv_wait_timeout(bo->tbo.base.resv,
 							true, false,
 							msecs_to_jiffies(10));
 		if (r == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bcfd4a8d0288..da716aa38085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2022,13 +2022,13 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	unsigned i, shared_count;
 	int r;
 
-	r = dma_resv_get_fences_rcu(resv, &excl,
+	r = dma_resv_get_fences(resv, &excl,
 					      &shared_count, &shared);
 	if (r) {
 		/* Not enough memory to grab the fence list, as last resort
 		 * block for all the fences to complete.
 		 */
-		dma_resv_wait_timeout_rcu(resv, true, false,
+		dma_resv_wait_timeout(resv, true, false,
 						    MAX_SCHEDULE_TIMEOUT);
 		return;
 	}
@@ -2640,7 +2640,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return true;
 
 	/* Don't evict VM page tables while they are busy */
-	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
 		return false;
 
 	/* Try to block ongoing updates */
@@ -2820,7 +2820,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv,
 					    true, true, timeout);
 	if (timeout <= 0)
 		return timeout;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3267eb2e35dd..1633afd3c03b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8400,7 +8400,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
+		r = dma_resv_wait_timeout(abo->tbo.base.resv, true,
 							false,
 							msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 263b4fb03303..11770da97dc0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -770,7 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 		return -EINVAL;
 	}
 
-	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
+	ret = dma_resv_wait_timeout(obj->resv, wait_all,
 						  true, timeout);
 	if (ret == 0)
 		ret = -ETIME;
@@ -1380,7 +1380,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 		return drm_gem_fence_array_add(fence_array, fence);
 	}
 
-	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
+	ret = dma_resv_get_fences(obj->resv, NULL,
 						&fence_count, &fences);
 	if (ret || !fence_count)
 		return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 4d43b8630f0e..e3c209628688 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -390,13 +390,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}
 
 	if (op & ETNA_PREP_NOSYNC) {
-		if (!dma_resv_test_signaled_rcu(obj->resv,
+		if (!dma_resv_test_signaled(obj->resv,
 							  write))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = dma_resv_wait_timeout_rcu(obj->resv,
+		ret = dma_resv_wait_timeout(obj->resv,
 							  write, true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index c942d2a8c252..9cc36bbc2502 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,7 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;
 
 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
+			ret = dma_resv_get_fences(robj, &bo->excl,
 								&bo->nr_shared,
 								&bo->shared);
 			if (ret)
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
index 9e508e7d4629..7df91b7e4ca8 100644
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -10,7 +10,7 @@
 void dma_resv_prune(struct dma_resv *resv)
 {
 	if (dma_resv_trylock(resv)) {
-		if (dma_resv_test_signaled_rcu(resv, true))
+		if (dma_resv_test_signaled(resv, true))
 			dma_resv_add_excl_fence(resv, NULL);
 		dma_resv_unlock(resv);
 	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 3f94becac541..0083a850f839 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
+	 *		!dma_resv_test_signaled(obj->resv, true);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 297143511f99..66789111a24b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
 	if (DBG_FORCE_RELOC)
 		return false;
 
-	return !dma_resv_test_signaled_rcu(vma->resv, true);
+	return !dma_resv_test_signaled(vma->resv, true);
 }
 
 static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index a657b99ec760..e78738aec7b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,7 +85,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
 		return true;
 
 	/* we will unbind on next submission, still have userptr pins */
-	r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
+	r = dma_resv_wait_timeout(obj->base.resv, true, false,
 				      MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index c13aeddf5aa7..e7aebb8fb468 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 		unsigned int count, i;
 		int ret;
 
-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;
 
@@ -158,7 +158,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 		unsigned int count, i;
 		int ret;
 
-		ret = dma_resv_get_fences_rcu(obj->base.resv,
+		ret = dma_resv_get_fences(obj->base.resv,
 					      &excl, &count, &shared);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c85494f411f4..4a70a1881d79 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1594,7 +1594,7 @@ i915_request_await_object(struct i915_request *to,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = dma_resv_get_fences_rcu(obj->base.resv,
+		ret = dma_resv_get_fences(obj->base.resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7aaf74552d06..c589a681da77 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 43af91df552e..ecd35986ddb5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -915,7 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
+	ret = dma_resv_wait_timeout(obj->resv, write,
 						  true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index d863e5ed954a..c59072f254f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -964,7 +964,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
+	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
 						   no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index ca07098a6141..0e6e893eb81d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -311,7 +311,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 	if (!gem_obj)
 		return -ENOENT;
 
-	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
+	ret = dma_resv_wait_timeout(gem_obj->resv, true,
 						  true, timeout);
 	if (!ret)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3272c33af8fe..458f92a70887 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;
 
@@ -523,7 +523,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
 	if (r == 0)
 		r = -EBUSY;
 	else
@@ -552,7 +552,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 	if (ret == 0)
 		r = -EBUSY;
 	else if (ret < 0)
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index e37c9a57a7c3..adb084e6ddbe 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,7 +66,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
 		return true;
 	}
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
 				      MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c41ef0caa492..32004cf37549 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -296,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	struct dma_resv *resv = &bo->base._resv;
 	int ret;
 
-	if (dma_resv_test_signaled_rcu(resv, true))
+	if (dma_resv_test_signaled(resv, true))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -308,7 +308,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&bo->bdev->lru_lock);
 
-		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
+		lret = dma_resv_wait_timeout(resv, true, interruptible,
 						 30 * HZ);
 
 		if (lret < 0)
@@ -411,7 +411,7 @@ static void ttm_bo_release(struct kref *kref)
 			/* Last resort, if we fail to allocate memory for the
 			 * fences block for the BO to become idle
 			 */
-			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
+			dma_resv_wait_timeout(bo->base.resv, true, false,
 						  30 * HZ);
 		}
 
@@ -422,7 +422,7 @@ static void ttm_bo_release(struct kref *kref)
 		ttm_mem_io_free(bdev, bo->resource);
 	}
 
-	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+	if (!dma_resv_test_signaled(bo->base.resv, true) ||
 	    !dma_resv_trylock(bo->base.resv)) {
 		/* The BO is not idle, resurrect it for delayed destroy */
 		ttm_bo_flush_all_fences(bo);
@@ -1121,13 +1121,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;
 
 	if (no_wait) {
-		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+		if (dma_resv_test_signaled(bo->base.resv, true))
 			return 0;
 		else
 			return -EBUSY;
 	}
 
-	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
+	timeout = dma_resv_wait_timeout(bo->base.resv, true,
 						      interruptible, timeout);
 	if (timeout < 0)
 		return timeout;
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 2902dc6e64fa..7f3125cf5358 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 
 	/* Check for a conflicting fence */
 	resv = obj->resv;
-	if (!dma_resv_test_signaled_rcu(resv,
+	if (!dma_resv_test_signaled(resv,
 						  arg->flags & VGEM_FENCE_WRITE)) {
 		ret = -EBUSY;
 		goto err_fence;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 669f2ee39515..190d9495dc0e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -451,9 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
-		ret = dma_resv_test_signaled_rcu(obj->resv, true);
+		ret = dma_resv_test_signaled(obj->resv, true);
 	} else {
-		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
+		ret = dma_resv_wait_timeout(obj->resv, true, true,
 						timeout);
 	}
 	if (ret == 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 176b6201ef2b..8faf1df027f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -743,7 +743,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 	if (flags & drm_vmw_synccpu_allow_cs) {
 		long lret;
 
-		lret = dma_resv_wait_timeout_rcu
+		lret = dma_resv_wait_timeout
 			(bo->base.resv, true, true,
 			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index f6b71712c029..22325dfa7744 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -268,19 +268,12 @@ void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
-			    struct dma_fence ***pshared);
-
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned *pshared_count, struct dma_fence ***pshared);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
-			       unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
 
 #endif /* _LINUX_RESERVATION_H */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 1/7] dma-buf: fix inconsistent debug print
  2021-06-02 11:17 [PATCH 1/7] dma-buf: fix inconsistent debug print Christian König
                   ` (5 preceding siblings ...)
  2021-06-02 11:17 ` [PATCH 7/7] dma-buf: drop the _rcu postfix on function names Christian König
@ 2021-06-02 12:33 ` Daniel Vetter
  2021-06-02 12:36   ` Christian König
  6 siblings, 1 reply; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:33 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 01:17:08PM +0200, Christian König wrote:
> The code tries to acquire the rcu protected fence list, but then ignores
> individual fences which have been modified while holding the rcu.
> 
> Stop that madness and just note cleanly that the list was concurrently modified.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Yeah it's debugfs, it's better not to be fancy here and if you race you
can just re-grab it all.

What's worse, we do grab the dma_resv_lock, which means no one should be
able to race with us. I think 100% right thing here is actually to drop
the rcu_read_lock too, and switch over to rcu_dereference_protected().

And also drop the seqcount check, that would be a bug. seqcount is only
to get a consistent snapshot of all fences on the read (i.e. protected by
rcu only) section. We hold the write lock with dma_resv_lock here.

Cheers, Daniel

> ---
>  drivers/dma-buf/dma-buf.c | 19 ++++++++-----------
>  1 file changed, 8 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index eadd1eaa2fb5..d3b4e370dbc1 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -1383,22 +1383,17 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>  				buf_obj->name ?: "");
>  
>  		robj = buf_obj->resv;
> -		while (true) {
> -			seq = read_seqcount_begin(&robj->seq);
> -			rcu_read_lock();
> -			fobj = rcu_dereference(robj->fence);
> -			shared_count = fobj ? fobj->shared_count : 0;
> -			fence = rcu_dereference(robj->fence_excl);
> -			if (!read_seqcount_retry(&robj->seq, seq))
> -				break;
> -			rcu_read_unlock();
> -		}
> -
> +		seq = read_seqcount_begin(&robj->seq);
> +		rcu_read_lock();
> +		fence = rcu_dereference(robj->fence_excl);
>  		if (fence)
>  			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
>  				   fence->ops->get_driver_name(fence),
>  				   fence->ops->get_timeline_name(fence),
>  				   dma_fence_is_signaled(fence) ? "" : "un");
> +
> +		fobj = rcu_dereference(robj->fence);
> +		shared_count = fobj ? fobj->shared_count : 0;
>  		for (i = 0; i < shared_count; i++) {
>  			fence = rcu_dereference(fobj->shared[i]);
>  			if (!dma_fence_get_rcu(fence))
> @@ -1410,6 +1405,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>  			dma_fence_put(fence);
>  		}
>  		rcu_read_unlock();
> +		if (read_seqcount_retry(&robj->seq, seq))
> +			seq_printf(s, "\tFences concurrently modified\n");
>  
>  		seq_puts(s, "\tAttached Devices:\n");
>  		attach_count = 0;
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 2/7] dma-buf: add SPDX header and fix style in dma-resv.c
  2021-06-02 11:17 ` [PATCH 2/7] dma-buf: add SPDX header and fix style in dma-resv.c Christian König
@ 2021-06-02 12:34   ` Daniel Vetter
  2021-06-02 12:47     ` Christian König
  0 siblings, 1 reply; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:34 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 01:17:09PM +0200, Christian König wrote:
> No functional change.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Maybe add the checkpatch warnings you're fixing here to the commit
message. I didn't know that initcalls should be at the bottom ...
-Daniel


> ---
>  drivers/dma-buf/dma-resv.c | 128 +++++++++++++++++++------------------
>  1 file changed, 65 insertions(+), 63 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 6ddbeb5dfbf6..87f5d82d992a 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -1,3 +1,4 @@
> +// SPDX-License-Identifier: MIT
>  /*
>   * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
>   *
> @@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
>  	kfree_rcu(list, rcu);
>  }
>  
> -#if IS_ENABLED(CONFIG_LOCKDEP)
> -static int __init dma_resv_lockdep(void)
> -{
> -	struct mm_struct *mm = mm_alloc();
> -	struct ww_acquire_ctx ctx;
> -	struct dma_resv obj;
> -	struct address_space mapping;
> -	int ret;
> -
> -	if (!mm)
> -		return -ENOMEM;
> -
> -	dma_resv_init(&obj);
> -	address_space_init_once(&mapping);
> -
> -	mmap_read_lock(mm);
> -	ww_acquire_init(&ctx, &reservation_ww_class);
> -	ret = dma_resv_lock(&obj, &ctx);
> -	if (ret == -EDEADLK)
> -		dma_resv_lock_slow(&obj, &ctx);
> -	fs_reclaim_acquire(GFP_KERNEL);
> -	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
> -	i_mmap_lock_write(&mapping);
> -	i_mmap_unlock_write(&mapping);
> -#ifdef CONFIG_MMU_NOTIFIER
> -	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
> -	__dma_fence_might_wait();
> -	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
> -#else
> -	__dma_fence_might_wait();
> -#endif
> -	fs_reclaim_release(GFP_KERNEL);
> -	ww_mutex_unlock(&obj.lock);
> -	ww_acquire_fini(&ctx);
> -	mmap_read_unlock(mm);
> -	
> -	mmput(mm);
> -
> -	return 0;
> -}
> -subsys_initcall(dma_resv_lockdep);
> -#endif
> -
>  /**
>   * dma_resv_init - initialize a reservation object
>   * @obj: the reservation object
> @@ -196,9 +154,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
>  	if (old && old->shared_max) {
>  		if ((old->shared_count + num_fences) <= old->shared_max)
>  			return 0;
> -		else
> -			max = max(old->shared_count + num_fences,
> -				  old->shared_max * 2);
> +		max = max(old->shared_count + num_fences, old->shared_max * 2);
>  	} else {
>  		max = max(4ul, roundup_pow_of_two(num_fences));
>  	}
> @@ -337,17 +293,17 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>  EXPORT_SYMBOL(dma_resv_add_excl_fence);
>  
>  /**
> -* dma_resv_copy_fences - Copy all fences from src to dst.
> -* @dst: the destination reservation object
> -* @src: the source reservation object
> -*
> -* Copy all fences from src to dst. dst-lock must be held.
> -*/
> + * dma_resv_copy_fences - Copy all fences from src to dst.
> + * @dst: the destination reservation object
> + * @src: the source reservation object
> + *
> + * Copy all fences from src to dst. dst-lock must be held.
> + */
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  {
>  	struct dma_resv_list *src_list, *dst_list;
>  	struct dma_fence *old, *new;
> -	unsigned i;
> +	unsigned int i;
>  
>  	dma_resv_assert_held(dst);
>  
> @@ -356,7 +312,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  
>  retry:
>  	if (src_list) {
> -		unsigned shared_count = src_list->shared_count;
> +		unsigned int shared_count = src_list->shared_count;
>  
>  		rcu_read_unlock();
>  
> @@ -373,6 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  
>  		dst_list->shared_count = 0;
>  		for (i = 0; i < src_list->shared_count; ++i) {
> +			struct dma_fence __rcu **dst;
>  			struct dma_fence *fence;
>  
>  			fence = rcu_dereference(src_list->shared[i]);
> @@ -391,7 +348,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  				continue;
>  			}
>  
> -			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
> +			dst = &dst_list->shared[dst_list->shared_count++];
> +			rcu_assign_pointer(*dst, fence);
>  		}
>  	} else {
>  		dst_list = NULL;
> @@ -431,7 +389,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
>   */
>  int dma_resv_get_fences_rcu(struct dma_resv *obj,
>  			    struct dma_fence **pfence_excl,
> -			    unsigned *pshared_count,
> +			    unsigned int *pshared_count,
>  			    struct dma_fence ***pshared)
>  {
>  	struct dma_fence **shared = NULL;
> @@ -533,9 +491,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>  			       bool wait_all, bool intr,
>  			       unsigned long timeout)
>  {
> -	struct dma_fence *fence;
> -	unsigned seq, shared_count;
>  	long ret = timeout ? timeout : 1;
> +	unsigned int seq, shared_count;
> +	struct dma_fence *fence;
>  	int i;
>  
>  retry:
> @@ -565,8 +523,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>  			shared_count = fobj->shared_count;
>  
>  		for (i = 0; !fence && i < shared_count; ++i) {
> -			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
> +			struct dma_fence *lfence;
>  
> +			lfence = rcu_dereference(fobj->shared[i]);
>  			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
>  				     &lfence->flags))
>  				continue;
> @@ -633,7 +592,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
>   */
>  bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>  {
> -	unsigned seq, shared_count;
> +	unsigned int seq, shared_count;
>  	int ret;
>  
>  	rcu_read_lock();
> @@ -643,16 +602,16 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>  	seq = read_seqcount_begin(&obj->seq);
>  
>  	if (test_all) {
> -		unsigned i;
> -
>  		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> +		unsigned int i;
>  
>  		if (fobj)
>  			shared_count = fobj->shared_count;
>  
>  		for (i = 0; i < shared_count; ++i) {
> -			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
> +			struct dma_fence *fence;
>  
> +			fence = rcu_dereference(fobj->shared[i]);
>  			ret = dma_resv_test_signaled_single(fence);
>  			if (ret < 0)
>  				goto retry;
> @@ -681,3 +640,46 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
> +
> +#if IS_ENABLED(CONFIG_LOCKDEP)
> +static int __init dma_resv_lockdep(void)
> +{
> +	struct mm_struct *mm = mm_alloc();
> +	struct ww_acquire_ctx ctx;
> +	struct dma_resv obj;
> +	struct address_space mapping;
> +	int ret;
> +
> +	if (!mm)
> +		return -ENOMEM;
> +
> +	dma_resv_init(&obj);
> +	address_space_init_once(&mapping);
> +
> +	mmap_read_lock(mm);
> +	ww_acquire_init(&ctx, &reservation_ww_class);
> +	ret = dma_resv_lock(&obj, &ctx);
> +	if (ret == -EDEADLK)
> +		dma_resv_lock_slow(&obj, &ctx);
> +	fs_reclaim_acquire(GFP_KERNEL);
> +	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
> +	i_mmap_lock_write(&mapping);
> +	i_mmap_unlock_write(&mapping);
> +#ifdef CONFIG_MMU_NOTIFIER
> +	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
> +	__dma_fence_might_wait();
> +	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
> +#else
> +	__dma_fence_might_wait();
> +#endif
> +	fs_reclaim_release(GFP_KERNEL);
> +	ww_mutex_unlock(&obj.lock);
> +	ww_acquire_fini(&ctx);
> +	mmap_read_unlock(mm);
> +
> +	mmput(mm);
> +
> +	return 0;
> +}
> +subsys_initcall(dma_resv_lockdep);
> +#endif
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 1/7] dma-buf: fix inconsistent debug print
  2021-06-02 12:33 ` [PATCH 1/7] dma-buf: fix inconsistent debug print Daniel Vetter
@ 2021-06-02 12:36   ` Christian König
  2021-06-02 12:50     ` Daniel Vetter
  0 siblings, 1 reply; 26+ messages in thread
From: Christian König @ 2021-06-02 12:36 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: dri-devel, jason



Am 02.06.21 um 14:33 schrieb Daniel Vetter:
> On Wed, Jun 02, 2021 at 01:17:08PM +0200, Christian König wrote:
>> The code tries to acquire the rcu protected fence list, but then ignores
>> individual fences which have been modified while holding the rcu.
>>
>> Stop that madness and just note cleanly that the list was concurrently modified.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
> Yeah it's debugfs, it's better not to be fancy here and if you race you
> can just re-grab it all.
>
> What's worse, we do grab the dma_resv_lock, which means no one should be
> able to race with us. I think 100% right thing here is actually to drop
> the rcu_read_lock too, and switch over to rcu_dereference_protected().
>
> And also drop the seqcount check, that would be a bug. seqcount is only
> to get a consistent snapshot of all fences on the read (i.e. protected by
> rcu only) section. We hold the write lock with dma_resv_lock here.

Yes that what I had in mind as alternative as well.

Just wasn't 100% sure which way to go here.

Going to adjust that,
Christian.

>
> Cheers, Daniel
>
>> ---
>>   drivers/dma-buf/dma-buf.c | 19 ++++++++-----------
>>   1 file changed, 8 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
>> index eadd1eaa2fb5..d3b4e370dbc1 100644
>> --- a/drivers/dma-buf/dma-buf.c
>> +++ b/drivers/dma-buf/dma-buf.c
>> @@ -1383,22 +1383,17 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>>   				buf_obj->name ?: "");
>>   
>>   		robj = buf_obj->resv;
>> -		while (true) {
>> -			seq = read_seqcount_begin(&robj->seq);
>> -			rcu_read_lock();
>> -			fobj = rcu_dereference(robj->fence);
>> -			shared_count = fobj ? fobj->shared_count : 0;
>> -			fence = rcu_dereference(robj->fence_excl);
>> -			if (!read_seqcount_retry(&robj->seq, seq))
>> -				break;
>> -			rcu_read_unlock();
>> -		}
>> -
>> +		seq = read_seqcount_begin(&robj->seq);
>> +		rcu_read_lock();
>> +		fence = rcu_dereference(robj->fence_excl);
>>   		if (fence)
>>   			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
>>   				   fence->ops->get_driver_name(fence),
>>   				   fence->ops->get_timeline_name(fence),
>>   				   dma_fence_is_signaled(fence) ? "" : "un");
>> +
>> +		fobj = rcu_dereference(robj->fence);
>> +		shared_count = fobj ? fobj->shared_count : 0;
>>   		for (i = 0; i < shared_count; i++) {
>>   			fence = rcu_dereference(fobj->shared[i]);
>>   			if (!dma_fence_get_rcu(fence))
>> @@ -1410,6 +1405,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>>   			dma_fence_put(fence);
>>   		}
>>   		rcu_read_unlock();
>> +		if (read_seqcount_retry(&robj->seq, seq))
>> +			seq_printf(s, "\tFences concurrently modified\n");
>>   
>>   		seq_puts(s, "\tAttached Devices:\n");
>>   		attach_count = 0;
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 3/7] dma-buf: cleanup dma-resv shared fence debugging a bit
  2021-06-02 11:17 ` [PATCH 3/7] dma-buf: cleanup dma-resv shared fence debugging a bit Christian König
@ 2021-06-02 12:41   ` Daniel Vetter
  0 siblings, 0 replies; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:41 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 01:17:10PM +0200, Christian König wrote:
> Make that a function instead of inline.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Hm yeah pulling that out makes some sense.

> ---
>  drivers/dma-buf/dma-resv.c | 18 ++++++++++++++++++
>  include/linux/dma-resv.h   | 15 +++++++--------
>  2 files changed, 25 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 87f5d82d992a..6c6195315e9f 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -208,6 +208,24 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
>  }
>  EXPORT_SYMBOL(dma_resv_reserve_shared);
>  
> +#ifdef CONFIG_DEBUG_MUTEXES
> +/**
> + * dma_resv_reset_shared_max - reset shared fences for debugging
> + * @obj: the dma_resv object to reset
> + *
> + * Reset the shared_max to test if drivers do correct slot allocation.

I'd phrase this as

"Reset the number of pre-reserved shared slots to test that drivers do
correct slot allocation using dma_resv_reserve_shared(). See also
&dma_resv_list.shared_max."

That way it's a bit clearer how it all ties together.

The kerneldoc for dma_resv_list is also a bit thin, but another patch for
that. With the kerneldoc polish:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>


> + */
> +void dma_resv_reset_shared_max(struct dma_resv *obj)
> +{
> +	/* Test shared fence slot reservation */
> +	if (rcu_access_pointer(obj->fence)) {
> +		struct dma_resv_list *fence = dma_resv_get_list(obj);
> +
> +		fence->shared_max = fence->shared_count;
> +	}
> +}
> +#endif
> +
>  /**
>   * dma_resv_add_shared_fence - Add a fence to a shared slot
>   * @obj: the reservation object
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index d44a77e8a7e3..f32a3d176513 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -92,6 +92,12 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
>  					 dma_resv_held(obj));
>  }
>  
> +#ifdef CONFIG_DEBUG_MUTEXES
> +void dma_resv_reset_shared_max(struct dma_resv *obj);
> +#else
> +static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
> +#endif
> +
>  /**
>   * dma_resv_lock - lock the reservation object
>   * @obj: the reservation object
> @@ -215,14 +221,7 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
>   */
>  static inline void dma_resv_unlock(struct dma_resv *obj)
>  {
> -#ifdef CONFIG_DEBUG_MUTEXES
> -	/* Test shared fence slot reservation */
> -	if (rcu_access_pointer(obj->fence)) {
> -		struct dma_resv_list *fence = dma_resv_get_list(obj);
> -
> -		fence->shared_max = fence->shared_count;
> -	}
> -#endif
> +	dma_resv_reset_shared_max(obj);
>  	ww_mutex_unlock(&obj->lock);
>  }
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 4/7] dma-buf: rename and cleanup dma_resv_get_excl
  2021-06-02 11:17 ` [PATCH 4/7] dma-buf: rename and cleanup dma_resv_get_excl Christian König
@ 2021-06-02 12:43   ` Daniel Vetter
  2021-06-02 20:04     ` Jason Ekstrand
  2021-06-02 12:46   ` Daniel Vetter
  1 sibling, 1 reply; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:43 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 01:17:11PM +0200, Christian König wrote:
> When the comment needs to state explicitly that this
> doesn't get a reference to the object then the function
> is named rather badly.
> 
> Rename the function and use rcu_dereference_check(), this
> way it can be used from both rcu as well as lock protected
> critical sections.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

I'd call this dma_resv_exclusive_fence, since without that it's a bit close
to dma_resv_make_exclusive or something like that. But this is definitely
better than the previous pointer deref in a "I'm totally getting you a
full reference" trenchcoat thing.

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  drivers/dma-buf/dma-buf.c                |  4 ++--
>  drivers/dma-buf/dma-resv.c               | 10 +++++-----
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c |  2 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem.c    |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_busy.c |  3 +--
>  drivers/gpu/drm/msm/msm_gem.c            |  4 ++--
>  drivers/gpu/drm/nouveau/nouveau_bo.c     |  2 +-
>  drivers/gpu/drm/nouveau/nouveau_fence.c  |  2 +-
>  drivers/gpu/drm/radeon/radeon_display.c  |  2 +-
>  drivers/gpu/drm/radeon/radeon_sync.c     |  2 +-
>  drivers/gpu/drm/radeon/radeon_uvd.c      |  2 +-
>  drivers/gpu/drm/ttm/ttm_bo.c             |  2 +-
>  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  2 +-
>  include/linux/dma-resv.h                 | 13 +++++--------
>  15 files changed, 25 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index d3b4e370dbc1..4d0ddc712f1e 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -234,7 +234,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>  		shared_count = fobj->shared_count;
>  	else
>  		shared_count = 0;
> -	fence_excl = rcu_dereference(resv->fence_excl);
> +	fence_excl = dma_resv_exclusive(resv);
>  	if (read_seqcount_retry(&resv->seq, seq)) {
>  		rcu_read_unlock();
>  		goto retry;
> @@ -1385,7 +1385,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>  		robj = buf_obj->resv;
>  		seq = read_seqcount_begin(&robj->seq);
>  		rcu_read_lock();
> -		fence = rcu_dereference(robj->fence_excl);
> +		fence = dma_resv_exclusive(robj);
>  		if (fence)
>  			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
>  				   fence->ops->get_driver_name(fence),
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 6c6195315e9f..81b032b43457 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -281,7 +281,7 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
>   */
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>  {
> -	struct dma_fence *old_fence = dma_resv_get_excl(obj);
> +	struct dma_fence *old_fence = dma_resv_exclusive(obj);
>  	struct dma_resv_list *old;
>  	u32 i = 0;
>  
> @@ -377,7 +377,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  	rcu_read_unlock();
>  
>  	src_list = dma_resv_get_list(dst);
> -	old = dma_resv_get_excl(dst);
> +	old = dma_resv_exclusive(dst);
>  
>  	write_seqcount_begin(&dst->seq);
>  	/* write_seqcount_begin provides the necessary memory barrier */
> @@ -425,7 +425,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>  		rcu_read_lock();
>  		seq = read_seqcount_begin(&obj->seq);
>  
> -		fence_excl = rcu_dereference(obj->fence_excl);
> +		fence_excl = dma_resv_exclusive(obj);
>  		if (fence_excl && !dma_fence_get_rcu(fence_excl))
>  			goto unlock;
>  
> @@ -520,7 +520,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>  	rcu_read_lock();
>  	i = -1;
>  
> -	fence = rcu_dereference(obj->fence_excl);
> +	fence = dma_resv_exclusive(obj);
>  	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
>  		if (!dma_fence_get_rcu(fence))
>  			goto unlock_retry;
> @@ -642,7 +642,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>  	}
>  
>  	if (!shared_count) {
> -		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
> +		struct dma_fence *fence_excl = dma_resv_exclusive(obj);
>  
>  		if (fence_excl) {
>  			ret = dma_resv_test_signaled_single(fence_excl);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index 73c76a3e2b12..cd5146fa6fb6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
>  	if (!amdgpu_vm_ready(vm))
>  		goto out_unlock;
>  
> -	fence = dma_resv_get_excl(bo->tbo.base.resv);
> +	fence = dma_resv_exclusive(bo->tbo.base.resv);
>  	if (fence) {
>  		amdgpu_bo_fence(bo, fence, true);
>  		fence = NULL;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> index 4e558632a5d2..c84d5b843985 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> @@ -210,7 +210,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
>  		return -EINVAL;
>  
>  	/* always sync to the exclusive fence */
> -	f = dma_resv_get_excl(resv);
> +	f = dma_resv_exclusive(resv);
>  	r = amdgpu_sync_fence(sync, f);
>  
>  	flist = dma_resv_get_list(resv);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index db69f19ab5bc..d4f54dea8ac1 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -471,7 +471,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
>  		}
>  	}
>  
> -	fence = rcu_dereference(robj->fence_excl);
> +	fence = dma_resv_exclusive(robj);
>  	if (fence)
>  		etnaviv_gem_describe_fence(fence, "Exclusive", m);
>  	rcu_read_unlock();
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> index 25235ef630c1..02312a0c3a36 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> @@ -113,8 +113,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>  	seq = raw_read_seqcount(&obj->base.resv->seq);
>  
>  	/* Translate the exclusive fence to the READ *and* WRITE engine */
> -	args->busy =
> -		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
> +	args->busy = busy_check_writer(dma_resv_exclusive(obj->base.resv));
>  
>  	/* Translate shared fences to READ set of engines */
>  	list = rcu_dereference(obj->base.resv->fence);
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 56df86e5f740..54c1b53426d6 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -819,7 +819,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
>  
>  	fobj = dma_resv_get_list(obj->resv);
>  	if (!fobj || (fobj->shared_count == 0)) {
> -		fence = dma_resv_get_excl(obj->resv);
> +		fence = dma_resv_exclusive(obj->resv);
>  		/* don't need to wait on our own fences, since ring is fifo */
>  		if (fence && (fence->context != fctx->context)) {
>  			ret = dma_fence_wait(fence, true);
> @@ -1035,7 +1035,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
>  		}
>  	}
>  
> -	fence = rcu_dereference(robj->fence_excl);
> +	fence = dma_resv_exclusive(robj);
>  	if (fence)
>  		describe_fence(fence, "Exclusive", m);
>  	rcu_read_unlock();
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
> index e688ca77483d..ac0ebcc4ebb7 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
> @@ -955,7 +955,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
>  {
>  	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
>  	struct drm_device *dev = drm->dev;
> -	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
> +	struct dma_fence *fence = dma_resv_exclusive(bo->base.resv);
>  
>  	nv10_bo_put_tile_region(dev, *old_tile, fence);
>  	*old_tile = new_tile;
> diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
> index e5dcbf67de7e..a6cb35181aee 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
> @@ -356,7 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
>  	}
>  
>  	fobj = dma_resv_get_list(resv);
> -	fence = dma_resv_get_excl(resv);
> +	fence = dma_resv_exclusive(resv);
>  
>  	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
>  		struct nouveau_channel *prev = NULL;
> diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
> index 652af7a134bd..57c910e5ae77 100644
> --- a/drivers/gpu/drm/radeon/radeon_display.c
> +++ b/drivers/gpu/drm/radeon/radeon_display.c
> @@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
>  		DRM_ERROR("failed to pin new rbo buffer before flip\n");
>  		goto cleanup;
>  	}
> -	work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
> +	work->fence = dma_fence_get(dma_resv_exclusive(new_rbo->tbo.base.resv));
>  	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
>  	radeon_bo_unreserve(new_rbo);
>  
> diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
> index 5d3302945076..e476f90ef1c1 100644
> --- a/drivers/gpu/drm/radeon/radeon_sync.c
> +++ b/drivers/gpu/drm/radeon/radeon_sync.c
> @@ -98,7 +98,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
>  	int r = 0;
>  
>  	/* always sync to the exclusive fence */
> -	f = dma_resv_get_excl(resv);
> +	f = dma_resv_exclusive(resv);
>  	fence = f ? to_radeon_fence(f) : NULL;
>  	if (fence && fence->rdev == rdev)
>  		radeon_sync_fence(sync, fence);
> diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
> index dfa9fdbe98da..02d4bbdc9111 100644
> --- a/drivers/gpu/drm/radeon/radeon_uvd.c
> +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
> @@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
>  		return -EINVAL;
>  	}
>  
> -	f = dma_resv_get_excl(bo->tbo.base.resv);
> +	f = dma_resv_exclusive(bo->tbo.base.resv);
>  	if (f) {
>  		r = radeon_fence_wait((struct radeon_fence *)f, false);
>  		if (r) {
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 5a7ab4b35b2d..92361556bf0b 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -262,7 +262,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
>  
>  	rcu_read_lock();
>  	fobj = rcu_dereference(resv->fence);
> -	fence = rcu_dereference(resv->fence_excl);
> +	fence = dma_resv_exclusive(resv);
>  	if (fence && !fence->ops->signaled)
>  		dma_fence_enable_sw_signaling(fence);
>  
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index 62ea920addc3..c78f38ee1c20 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
>  		if (bo->moving)
>  			dma_fence_put(bo->moving);
>  		bo->moving = dma_fence_get
> -			(dma_resv_get_excl(bo->base.resv));
> +			(dma_resv_exclusive(bo->base.resv));
>  	}
>  
>  	return 0;
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index f32a3d176513..7549ec5eb35c 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -226,22 +226,19 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
>  }
>  
>  /**
> - * dma_resv_get_excl - get the reservation object's
> - * exclusive fence, with update-side lock held
> + * dma_resv_exclusive - return the object's exclusive fence
>   * @obj: the reservation object
>   *
> - * Returns the exclusive fence (if any).  Does NOT take a
> - * reference. Writers must hold obj->lock, readers may only
> - * hold a RCU read side lock.
> + * Returns the exclusive fence (if any). Caller must either hold the objects
> + * lock or the rcu read side lock.
>   *
>   * RETURNS
>   * The exclusive fence or NULL
>   */
>  static inline struct dma_fence *
> -dma_resv_get_excl(struct dma_resv *obj)
> +dma_resv_exclusive(struct dma_resv *obj)
>  {
> -	return rcu_dereference_protected(obj->fence_excl,
> -					 dma_resv_held(obj));
> +	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
>  }
>  
>  /**
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 4/7] dma-buf: rename and cleanup dma_resv_get_excl
  2021-06-02 11:17 ` [PATCH 4/7] dma-buf: rename and cleanup dma_resv_get_excl Christian König
  2021-06-02 12:43   ` Daniel Vetter
@ 2021-06-02 12:46   ` Daniel Vetter
  1 sibling, 0 replies; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:46 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 01:17:11PM +0200, Christian König wrote:
> When the comment needs to state explicitly that this
> doesn't get a reference to the object then the function
> is named rather badly.
> 
> Rename the function and use rcu_dereference_check(), this
> way it can be used from both rcu as well as lock protected
> critical sections.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-buf.c                |  4 ++--
>  drivers/dma-buf/dma-resv.c               | 10 +++++-----
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c |  2 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem.c    |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_busy.c |  3 +--
>  drivers/gpu/drm/msm/msm_gem.c            |  4 ++--
>  drivers/gpu/drm/nouveau/nouveau_bo.c     |  2 +-
>  drivers/gpu/drm/nouveau/nouveau_fence.c  |  2 +-
>  drivers/gpu/drm/radeon/radeon_display.c  |  2 +-
>  drivers/gpu/drm/radeon/radeon_sync.c     |  2 +-
>  drivers/gpu/drm/radeon/radeon_uvd.c      |  2 +-
>  drivers/gpu/drm/ttm/ttm_bo.c             |  2 +-
>  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  2 +-
>  include/linux/dma-resv.h                 | 13 +++++--------
>  15 files changed, 25 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index d3b4e370dbc1..4d0ddc712f1e 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -234,7 +234,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>  		shared_count = fobj->shared_count;
>  	else
>  		shared_count = 0;
> -	fence_excl = rcu_dereference(resv->fence_excl);
> +	fence_excl = dma_resv_exclusive(resv);
>  	if (read_seqcount_retry(&resv->seq, seq)) {
>  		rcu_read_unlock();
>  		goto retry;
> @@ -1385,7 +1385,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>  		robj = buf_obj->resv;
>  		seq = read_seqcount_begin(&robj->seq);
>  		rcu_read_lock();
> -		fence = rcu_dereference(robj->fence_excl);
> +		fence = dma_resv_exclusive(robj);
>  		if (fence)
>  			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
>  				   fence->ops->get_driver_name(fence),
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 6c6195315e9f..81b032b43457 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -281,7 +281,7 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
>   */
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>  {
> -	struct dma_fence *old_fence = dma_resv_get_excl(obj);
> +	struct dma_fence *old_fence = dma_resv_exclusive(obj);
>  	struct dma_resv_list *old;
>  	u32 i = 0;
>  
> @@ -377,7 +377,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  	rcu_read_unlock();
>  
>  	src_list = dma_resv_get_list(dst);
> -	old = dma_resv_get_excl(dst);
> +	old = dma_resv_exclusive(dst);
>  
>  	write_seqcount_begin(&dst->seq);
>  	/* write_seqcount_begin provides the necessary memory barrier */
> @@ -425,7 +425,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>  		rcu_read_lock();
>  		seq = read_seqcount_begin(&obj->seq);
>  
> -		fence_excl = rcu_dereference(obj->fence_excl);
> +		fence_excl = dma_resv_exclusive(obj);
>  		if (fence_excl && !dma_fence_get_rcu(fence_excl))
>  			goto unlock;
>  
> @@ -520,7 +520,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>  	rcu_read_lock();
>  	i = -1;
>  
> -	fence = rcu_dereference(obj->fence_excl);
> +	fence = dma_resv_exclusive(obj);
>  	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
>  		if (!dma_fence_get_rcu(fence))
>  			goto unlock_retry;
> @@ -642,7 +642,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>  	}
>  
>  	if (!shared_count) {
> -		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
> +		struct dma_fence *fence_excl = dma_resv_exclusive(obj);
>  
>  		if (fence_excl) {
>  			ret = dma_resv_test_signaled_single(fence_excl);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index 73c76a3e2b12..cd5146fa6fb6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
>  	if (!amdgpu_vm_ready(vm))
>  		goto out_unlock;
>  
> -	fence = dma_resv_get_excl(bo->tbo.base.resv);
> +	fence = dma_resv_exclusive(bo->tbo.base.resv);
>  	if (fence) {
>  		amdgpu_bo_fence(bo, fence, true);
>  		fence = NULL;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> index 4e558632a5d2..c84d5b843985 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> @@ -210,7 +210,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
>  		return -EINVAL;
>  
>  	/* always sync to the exclusive fence */
> -	f = dma_resv_get_excl(resv);
> +	f = dma_resv_exclusive(resv);
>  	r = amdgpu_sync_fence(sync, f);
>  
>  	flist = dma_resv_get_list(resv);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index db69f19ab5bc..d4f54dea8ac1 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -471,7 +471,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
>  		}
>  	}
>  
> -	fence = rcu_dereference(robj->fence_excl);
> +	fence = dma_resv_exclusive(robj);
>  	if (fence)
>  		etnaviv_gem_describe_fence(fence, "Exclusive", m);
>  	rcu_read_unlock();
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> index 25235ef630c1..02312a0c3a36 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> @@ -113,8 +113,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>  	seq = raw_read_seqcount(&obj->base.resv->seq);
>  
>  	/* Translate the exclusive fence to the READ *and* WRITE engine */
> -	args->busy =
> -		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
> +	args->busy = busy_check_writer(dma_resv_exclusive(obj->base.resv));
>  
>  	/* Translate shared fences to READ set of engines */
>  	list = rcu_dereference(obj->base.resv->fence);
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 56df86e5f740..54c1b53426d6 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -819,7 +819,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
>  
>  	fobj = dma_resv_get_list(obj->resv);
>  	if (!fobj || (fobj->shared_count == 0)) {
> -		fence = dma_resv_get_excl(obj->resv);
> +		fence = dma_resv_exclusive(obj->resv);
>  		/* don't need to wait on our own fences, since ring is fifo */
>  		if (fence && (fence->context != fctx->context)) {
>  			ret = dma_fence_wait(fence, true);
> @@ -1035,7 +1035,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
>  		}
>  	}
>  
> -	fence = rcu_dereference(robj->fence_excl);
> +	fence = dma_resv_exclusive(robj);
>  	if (fence)
>  		describe_fence(fence, "Exclusive", m);
>  	rcu_read_unlock();
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
> index e688ca77483d..ac0ebcc4ebb7 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
> @@ -955,7 +955,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
>  {
>  	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
>  	struct drm_device *dev = drm->dev;
> -	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
> +	struct dma_fence *fence = dma_resv_exclusive(bo->base.resv);
>  
>  	nv10_bo_put_tile_region(dev, *old_tile, fence);
>  	*old_tile = new_tile;
> diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
> index e5dcbf67de7e..a6cb35181aee 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
> @@ -356,7 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
>  	}
>  
>  	fobj = dma_resv_get_list(resv);
> -	fence = dma_resv_get_excl(resv);
> +	fence = dma_resv_exclusive(resv);
>  
>  	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
>  		struct nouveau_channel *prev = NULL;
> diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
> index 652af7a134bd..57c910e5ae77 100644
> --- a/drivers/gpu/drm/radeon/radeon_display.c
> +++ b/drivers/gpu/drm/radeon/radeon_display.c
> @@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
>  		DRM_ERROR("failed to pin new rbo buffer before flip\n");
>  		goto cleanup;
>  	}
> -	work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
> +	work->fence = dma_fence_get(dma_resv_exclusive(new_rbo->tbo.base.resv));
>  	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
>  	radeon_bo_unreserve(new_rbo);
>  
> diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
> index 5d3302945076..e476f90ef1c1 100644
> --- a/drivers/gpu/drm/radeon/radeon_sync.c
> +++ b/drivers/gpu/drm/radeon/radeon_sync.c
> @@ -98,7 +98,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
>  	int r = 0;
>  
>  	/* always sync to the exclusive fence */
> -	f = dma_resv_get_excl(resv);
> +	f = dma_resv_exclusive(resv);
>  	fence = f ? to_radeon_fence(f) : NULL;
>  	if (fence && fence->rdev == rdev)
>  		radeon_sync_fence(sync, fence);
> diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
> index dfa9fdbe98da..02d4bbdc9111 100644
> --- a/drivers/gpu/drm/radeon/radeon_uvd.c
> +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
> @@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
>  		return -EINVAL;
>  	}
>  
> -	f = dma_resv_get_excl(bo->tbo.base.resv);
> +	f = dma_resv_exclusive(bo->tbo.base.resv);
>  	if (f) {
>  		r = radeon_fence_wait((struct radeon_fence *)f, false);
>  		if (r) {
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 5a7ab4b35b2d..92361556bf0b 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -262,7 +262,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
>  
>  	rcu_read_lock();
>  	fobj = rcu_dereference(resv->fence);
> -	fence = rcu_dereference(resv->fence_excl);
> +	fence = dma_resv_exclusive(resv);
>  	if (fence && !fence->ops->signaled)
>  		dma_fence_enable_sw_signaling(fence);
>  
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index 62ea920addc3..c78f38ee1c20 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
>  		if (bo->moving)
>  			dma_fence_put(bo->moving);
>  		bo->moving = dma_fence_get
> -			(dma_resv_get_excl(bo->base.resv));
> +			(dma_resv_exclusive(bo->base.resv));
>  	}
>  
>  	return 0;
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index f32a3d176513..7549ec5eb35c 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -226,22 +226,19 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
>  }
>  
>  /**
> - * dma_resv_get_excl - get the reservation object's
> - * exclusive fence, with update-side lock held
> + * dma_resv_exclusive - return the object's exclusive fence
>   * @obj: the reservation object
>   *
> - * Returns the exclusive fence (if any).  Does NOT take a
> - * reference. Writers must hold obj->lock, readers may only
> - * hold a RCU read side lock.
> + * Returns the exclusive fence (if any). Caller must either hold the objects
> + * lock or the rcu read side lock.

For some kerneldoc goodies, please spell this out with "object lock
through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
or one of the variants of each".

I just like to throw around a lot of hyperlinks to let people who
read docs navigate to all the pieces easily.
-Daniel

>   *
>   * RETURNS
>   * The exclusive fence or NULL
>   */
>  static inline struct dma_fence *
> -dma_resv_get_excl(struct dma_resv *obj)
> +dma_resv_exclusive(struct dma_resv *obj)
>  {
> -	return rcu_dereference_protected(obj->fence_excl,
> -					 dma_resv_held(obj));
> +	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
>  }
>  
>  /**
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list
  2021-06-02 11:17 ` [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list Christian König
@ 2021-06-02 12:46   ` Daniel Vetter
  2021-06-02 20:22   ` Jason Ekstrand
  1 sibling, 0 replies; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:46 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 01:17:12PM +0200, Christian König wrote:
> When the comment needs to state explicitly that this doesn't get a reference
> to the object then the function is named rather badly.
> 
> Rename the function and use it in even more places.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c                    | 32 +++++++++----------
>  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c      |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  2 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  2 +-
>  drivers/gpu/drm/msm/msm_gem.c                 |  4 +--
>  drivers/gpu/drm/nouveau/nouveau_fence.c       |  2 +-
>  drivers/gpu/drm/qxl/qxl_debugfs.c             |  2 +-
>  drivers/gpu/drm/radeon/radeon_sync.c          |  2 +-
>  drivers/gpu/drm/ttm/ttm_bo.c                  |  2 +-
>  include/linux/dma-resv.h                      | 25 +++++++--------
>  13 files changed, 39 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 81b032b43457..b1a1a31dc009 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -149,8 +149,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
>  
>  	dma_resv_assert_held(obj);
>  
> -	old = dma_resv_get_list(obj);
> -
> +	old = dma_resv_shared(obj);
>  	if (old && old->shared_max) {
>  		if ((old->shared_count + num_fences) <= old->shared_max)
>  			return 0;
> @@ -217,12 +216,13 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
>   */
>  void dma_resv_reset_shared_max(struct dma_resv *obj)
>  {
> -	/* Test shared fence slot reservation */
> -	if (rcu_access_pointer(obj->fence)) {
> -		struct dma_resv_list *fence = dma_resv_get_list(obj);
> +	struct dma_resv_list *fences = dma_resv_shared(obj);
>  
> -		fence->shared_max = fence->shared_count;
> -	}
> +	dma_resv_assert_held(obj);
> +
> +	/* Test shared fence slot reservation */
> +	if (fences)
> +		fences->shared_max = fences->shared_count;
>  }
>  #endif
>  
> @@ -244,7 +244,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>  
>  	dma_resv_assert_held(obj);
>  
> -	fobj = dma_resv_get_list(obj);
> +	fobj = dma_resv_shared(obj);
>  	count = fobj->shared_count;
>  
>  	write_seqcount_begin(&obj->seq);
> @@ -287,7 +287,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>  
>  	dma_resv_assert_held(obj);
>  
> -	old = dma_resv_get_list(obj);
> +	old = dma_resv_shared(obj);
>  	if (old)
>  		i = old->shared_count;
>  
> @@ -326,7 +326,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  	dma_resv_assert_held(dst);
>  
>  	rcu_read_lock();
> -	src_list = rcu_dereference(src->fence);
> +	src_list = dma_resv_shared(src);
>  
>  retry:
>  	if (src_list) {
> @@ -339,7 +339,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  			return -ENOMEM;
>  
>  		rcu_read_lock();
> -		src_list = rcu_dereference(src->fence);
> +		src_list = dma_resv_shared(src);
>  		if (!src_list || src_list->shared_count > shared_count) {
>  			kfree(dst_list);
>  			goto retry;
> @@ -357,7 +357,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  
>  			if (!dma_fence_get_rcu(fence)) {
>  				dma_resv_list_free(dst_list);
> -				src_list = rcu_dereference(src->fence);
> +				src_list = dma_resv_shared(src);
>  				goto retry;
>  			}
>  
> @@ -376,7 +376,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  	new = dma_fence_get_rcu_safe(&src->fence_excl);
>  	rcu_read_unlock();
>  
> -	src_list = dma_resv_get_list(dst);
> +	src_list = dma_resv_shared(dst);
>  	old = dma_resv_exclusive(dst);
>  
>  	write_seqcount_begin(&dst->seq);
> @@ -429,7 +429,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>  		if (fence_excl && !dma_fence_get_rcu(fence_excl))
>  			goto unlock;
>  
> -		fobj = rcu_dereference(obj->fence);
> +		fobj = dma_resv_shared(obj);
>  		if (fobj)
>  			sz += sizeof(*shared) * fobj->shared_max;
>  
> @@ -535,7 +535,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>  	}
>  
>  	if (wait_all) {
> -		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> +		struct dma_resv_list *fobj = dma_resv_shared(obj);
>  
>  		if (fobj)
>  			shared_count = fobj->shared_count;
> @@ -620,7 +620,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>  	seq = read_seqcount_begin(&obj->seq);
>  
>  	if (test_all) {
> -		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> +		struct dma_resv_list *fobj = dma_resv_shared(obj);
>  		unsigned int i;
>  
>  		if (fobj)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index d5e6519bdea1..e90495ca49fd 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>  	if (!ef)
>  		return -EINVAL;
>  
> -	old = dma_resv_get_list(resv);
> +	old = dma_resv_shared(resv);
>  	if (!old)
>  		return 0;
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index 6dd0ea6e9e24..3b13c8a38c4e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -49,7 +49,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
>  	unsigned int count;
>  	int r;
>  
> -	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
> +	if (!dma_resv_shared(obj)) /* no shared fences to convert */
>  		return 0;
>  
>  	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> index c84d5b843985..c50d9f92a0cd 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> @@ -213,7 +213,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
>  	f = dma_resv_exclusive(resv);
>  	r = amdgpu_sync_fence(sync, f);
>  
> -	flist = dma_resv_get_list(resv);
> +	flist = dma_resv_shared(resv);
>  	if (!flist || r)
>  		return r;
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 663aa7d2e2ea..ddb6ce7d48bc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -1338,7 +1338,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>  	 * If true, then return false as any KFD process needs all its BOs to
>  	 * be resident to run successfully
>  	 */
> -	flist = dma_resv_get_list(bo->base.resv);
> +	flist = dma_resv_shared(bo->base.resv);
>  	if (flist) {
>  		for (i = 0; i < flist->shared_count; ++i) {
>  			f = rcu_dereference_protected(flist->shared[i],
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index d4f54dea8ac1..4d43b8630f0e 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -461,7 +461,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
>  			off, etnaviv_obj->vaddr, obj->size);
>  
>  	rcu_read_lock();
> -	fobj = rcu_dereference(robj->fence);
> +	fobj = dma_resv_shared(robj);
>  	if (fobj) {
>  		unsigned int i, shared_count = fobj->shared_count;
>  
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> index 02312a0c3a36..3f94becac541 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> @@ -116,7 +116,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>  	args->busy = busy_check_writer(dma_resv_exclusive(obj->base.resv));
>  
>  	/* Translate shared fences to READ set of engines */
> -	list = rcu_dereference(obj->base.resv->fence);
> +	list = dma_resv_shared(obj->base.resv);
>  	if (list) {
>  		unsigned int shared_count = list->shared_count, i;
>  
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 54c1b53426d6..43af91df552e 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -817,7 +817,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
>  	struct dma_fence *fence;
>  	int i, ret;
>  
> -	fobj = dma_resv_get_list(obj->resv);
> +	fobj = dma_resv_shared(obj->resv);
>  	if (!fobj || (fobj->shared_count == 0)) {
>  		fence = dma_resv_exclusive(obj->resv);
>  		/* don't need to wait on our own fences, since ring is fifo */
> @@ -1025,7 +1025,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
>  	}
>  
>  	rcu_read_lock();
> -	fobj = rcu_dereference(robj->fence);
> +	fobj = dma_resv_shared(robj);
>  	if (fobj) {
>  		unsigned int i, shared_count = fobj->shared_count;
>  
> diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
> index a6cb35181aee..5ce441c655ea 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
> @@ -355,7 +355,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
>  			return ret;
>  	}
>  
> -	fobj = dma_resv_get_list(resv);
> +	fobj = dma_resv_shared(resv);
>  	fence = dma_resv_exclusive(resv);
>  
>  	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
> diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
> index 183d15e2cf58..0acc70a6d3dd 100644
> --- a/drivers/gpu/drm/qxl/qxl_debugfs.c
> +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
> @@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
>  		int rel;
>  
>  		rcu_read_lock();
> -		fobj = rcu_dereference(bo->tbo.base.resv->fence);
> +		fobj = dma_resv_shared(bo->tbo.base.resv);
>  		rel = fobj ? fobj->shared_count : 0;
>  		rcu_read_unlock();
>  
> diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
> index e476f90ef1c1..a9cdb88da173 100644
> --- a/drivers/gpu/drm/radeon/radeon_sync.c
> +++ b/drivers/gpu/drm/radeon/radeon_sync.c
> @@ -105,7 +105,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
>  	else if (f)
>  		r = dma_fence_wait(f, true);
>  
> -	flist = dma_resv_get_list(resv);
> +	flist = dma_resv_shared(resv);
>  	if (shared || !flist || r)
>  		return r;
>  
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 92361556bf0b..c41ef0caa492 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -261,7 +261,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
>  	int i;
>  
>  	rcu_read_lock();
> -	fobj = rcu_dereference(resv->fence);
> +	fobj = dma_resv_shared(resv);
>  	fence = dma_resv_exclusive(resv);
>  	if (fence && !fence->ops->signaled)
>  		dma_fence_enable_sw_signaling(fence);
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 7549ec5eb35c..98ac66fecb71 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -78,20 +78,6 @@ struct dma_resv {
>  #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>  #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>  
> -/**
> - * dma_resv_get_list - get the reservation object's
> - * shared fence list, with update-side lock held
> - * @obj: the reservation object
> - *
> - * Returns the shared fence list.  Does NOT take references to
> - * the fence.  The obj->lock must be held.
> - */
> -static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
> -{
> -	return rcu_dereference_protected(obj->fence,
> -					 dma_resv_held(obj));
> -}
> -
>  #ifdef CONFIG_DEBUG_MUTEXES
>  void dma_resv_reset_shared_max(struct dma_resv *obj);
>  #else
> @@ -267,6 +253,17 @@ dma_resv_get_excl_rcu(struct dma_resv *obj)
>  	return fence;
>  }
>  
> +/**
> + * dma_resv_shared - get the reservation object's shared fence list
> + * @obj: the reservation object
> + *
> + * Returns the shared fence list. The obj->lock or rcu read side must be held.

Please copypaste the same wording from dma_resv_exclusive with all the
links to dma_resv_lock() and rcu_read_lock() here to make the kerneldoc
more useful.

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> + */
> +static inline struct dma_resv_list *dma_resv_shared(struct dma_resv *obj)
> +{
> +	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
> +}
> +
>  void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 2/7] dma-buf: add SPDX header and fix style in dma-resv.c
  2021-06-02 12:34   ` Daniel Vetter
@ 2021-06-02 12:47     ` Christian König
  2021-06-02 12:55       ` Daniel Vetter
  0 siblings, 1 reply; 26+ messages in thread
From: Christian König @ 2021-06-02 12:47 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: dri-devel, jason



Am 02.06.21 um 14:34 schrieb Daniel Vetter:
> On Wed, Jun 02, 2021 at 01:17:09PM +0200, Christian König wrote:
>> No functional change.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
> Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>
> Maybe add the checkpatch warnings you're fixing here to the commit
> message. I didn't know that initcalls should be at the bottom ...

Well dma_resv_lockdep() had some tab/space mixup and moving it around 
was the easiest way to fix that in the editor :)

Moving it to the end seemed logical to me.

Christian.

> -Daniel
>
>
>> ---
>>   drivers/dma-buf/dma-resv.c | 128 +++++++++++++++++++------------------
>>   1 file changed, 65 insertions(+), 63 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 6ddbeb5dfbf6..87f5d82d992a 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -1,3 +1,4 @@
>> +// SPDX-License-Identifier: MIT
>>   /*
>>    * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
>>    *
>> @@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
>>   	kfree_rcu(list, rcu);
>>   }
>>   
>> -#if IS_ENABLED(CONFIG_LOCKDEP)
>> -static int __init dma_resv_lockdep(void)
>> -{
>> -	struct mm_struct *mm = mm_alloc();
>> -	struct ww_acquire_ctx ctx;
>> -	struct dma_resv obj;
>> -	struct address_space mapping;
>> -	int ret;
>> -
>> -	if (!mm)
>> -		return -ENOMEM;
>> -
>> -	dma_resv_init(&obj);
>> -	address_space_init_once(&mapping);
>> -
>> -	mmap_read_lock(mm);
>> -	ww_acquire_init(&ctx, &reservation_ww_class);
>> -	ret = dma_resv_lock(&obj, &ctx);
>> -	if (ret == -EDEADLK)
>> -		dma_resv_lock_slow(&obj, &ctx);
>> -	fs_reclaim_acquire(GFP_KERNEL);
>> -	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
>> -	i_mmap_lock_write(&mapping);
>> -	i_mmap_unlock_write(&mapping);
>> -#ifdef CONFIG_MMU_NOTIFIER
>> -	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
>> -	__dma_fence_might_wait();
>> -	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
>> -#else
>> -	__dma_fence_might_wait();
>> -#endif
>> -	fs_reclaim_release(GFP_KERNEL);
>> -	ww_mutex_unlock(&obj.lock);
>> -	ww_acquire_fini(&ctx);
>> -	mmap_read_unlock(mm);
>> -	
>> -	mmput(mm);
>> -
>> -	return 0;
>> -}
>> -subsys_initcall(dma_resv_lockdep);
>> -#endif
>> -
>>   /**
>>    * dma_resv_init - initialize a reservation object
>>    * @obj: the reservation object
>> @@ -196,9 +154,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
>>   	if (old && old->shared_max) {
>>   		if ((old->shared_count + num_fences) <= old->shared_max)
>>   			return 0;
>> -		else
>> -			max = max(old->shared_count + num_fences,
>> -				  old->shared_max * 2);
>> +		max = max(old->shared_count + num_fences, old->shared_max * 2);
>>   	} else {
>>   		max = max(4ul, roundup_pow_of_two(num_fences));
>>   	}
>> @@ -337,17 +293,17 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>>   EXPORT_SYMBOL(dma_resv_add_excl_fence);
>>   
>>   /**
>> -* dma_resv_copy_fences - Copy all fences from src to dst.
>> -* @dst: the destination reservation object
>> -* @src: the source reservation object
>> -*
>> -* Copy all fences from src to dst. dst-lock must be held.
>> -*/
>> + * dma_resv_copy_fences - Copy all fences from src to dst.
>> + * @dst: the destination reservation object
>> + * @src: the source reservation object
>> + *
>> + * Copy all fences from src to dst. dst-lock must be held.
>> + */
>>   int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>   {
>>   	struct dma_resv_list *src_list, *dst_list;
>>   	struct dma_fence *old, *new;
>> -	unsigned i;
>> +	unsigned int i;
>>   
>>   	dma_resv_assert_held(dst);
>>   
>> @@ -356,7 +312,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>   
>>   retry:
>>   	if (src_list) {
>> -		unsigned shared_count = src_list->shared_count;
>> +		unsigned int shared_count = src_list->shared_count;
>>   
>>   		rcu_read_unlock();
>>   
>> @@ -373,6 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>   
>>   		dst_list->shared_count = 0;
>>   		for (i = 0; i < src_list->shared_count; ++i) {
>> +			struct dma_fence __rcu **dst;
>>   			struct dma_fence *fence;
>>   
>>   			fence = rcu_dereference(src_list->shared[i]);
>> @@ -391,7 +348,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>   				continue;
>>   			}
>>   
>> -			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
>> +			dst = &dst_list->shared[dst_list->shared_count++];
>> +			rcu_assign_pointer(*dst, fence);
>>   		}
>>   	} else {
>>   		dst_list = NULL;
>> @@ -431,7 +389,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
>>    */
>>   int dma_resv_get_fences_rcu(struct dma_resv *obj,
>>   			    struct dma_fence **pfence_excl,
>> -			    unsigned *pshared_count,
>> +			    unsigned int *pshared_count,
>>   			    struct dma_fence ***pshared)
>>   {
>>   	struct dma_fence **shared = NULL;
>> @@ -533,9 +491,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>>   			       bool wait_all, bool intr,
>>   			       unsigned long timeout)
>>   {
>> -	struct dma_fence *fence;
>> -	unsigned seq, shared_count;
>>   	long ret = timeout ? timeout : 1;
>> +	unsigned int seq, shared_count;
>> +	struct dma_fence *fence;
>>   	int i;
>>   
>>   retry:
>> @@ -565,8 +523,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>>   			shared_count = fobj->shared_count;
>>   
>>   		for (i = 0; !fence && i < shared_count; ++i) {
>> -			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
>> +			struct dma_fence *lfence;
>>   
>> +			lfence = rcu_dereference(fobj->shared[i]);
>>   			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
>>   				     &lfence->flags))
>>   				continue;
>> @@ -633,7 +592,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
>>    */
>>   bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>>   {
>> -	unsigned seq, shared_count;
>> +	unsigned int seq, shared_count;
>>   	int ret;
>>   
>>   	rcu_read_lock();
>> @@ -643,16 +602,16 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>>   	seq = read_seqcount_begin(&obj->seq);
>>   
>>   	if (test_all) {
>> -		unsigned i;
>> -
>>   		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
>> +		unsigned int i;
>>   
>>   		if (fobj)
>>   			shared_count = fobj->shared_count;
>>   
>>   		for (i = 0; i < shared_count; ++i) {
>> -			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
>> +			struct dma_fence *fence;
>>   
>> +			fence = rcu_dereference(fobj->shared[i]);
>>   			ret = dma_resv_test_signaled_single(fence);
>>   			if (ret < 0)
>>   				goto retry;
>> @@ -681,3 +640,46 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>>   	return ret;
>>   }
>>   EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
>> +
>> +#if IS_ENABLED(CONFIG_LOCKDEP)
>> +static int __init dma_resv_lockdep(void)
>> +{
>> +	struct mm_struct *mm = mm_alloc();
>> +	struct ww_acquire_ctx ctx;
>> +	struct dma_resv obj;
>> +	struct address_space mapping;
>> +	int ret;
>> +
>> +	if (!mm)
>> +		return -ENOMEM;
>> +
>> +	dma_resv_init(&obj);
>> +	address_space_init_once(&mapping);
>> +
>> +	mmap_read_lock(mm);
>> +	ww_acquire_init(&ctx, &reservation_ww_class);
>> +	ret = dma_resv_lock(&obj, &ctx);
>> +	if (ret == -EDEADLK)
>> +		dma_resv_lock_slow(&obj, &ctx);
>> +	fs_reclaim_acquire(GFP_KERNEL);
>> +	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
>> +	i_mmap_lock_write(&mapping);
>> +	i_mmap_unlock_write(&mapping);
>> +#ifdef CONFIG_MMU_NOTIFIER
>> +	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
>> +	__dma_fence_might_wait();
>> +	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
>> +#else
>> +	__dma_fence_might_wait();
>> +#endif
>> +	fs_reclaim_release(GFP_KERNEL);
>> +	ww_mutex_unlock(&obj.lock);
>> +	ww_acquire_fini(&ctx);
>> +	mmap_read_unlock(mm);
>> +
>> +	mmput(mm);
>> +
>> +	return 0;
>> +}
>> +subsys_initcall(dma_resv_lockdep);
>> +#endif
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 6/7] dma-buf: rename dma_resv_get_excl_rcu to _unlocked
  2021-06-02 11:17 ` [PATCH 6/7] dma-buf: rename dma_resv_get_excl_rcu to _unlocked Christian König
@ 2021-06-02 12:47   ` Daniel Vetter
  2021-06-02 20:25   ` Jason Ekstrand
  1 sibling, 0 replies; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:47 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 01:17:13PM +0200, Christian König wrote:
> That describes much better what the function is doing here.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  drivers/gpu/drm/drm_gem.c                    | 2 +-
>  drivers/gpu/drm/drm_gem_atomic_helper.c      | 2 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2 +-
>  drivers/gpu/drm/i915/display/intel_display.c | 2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_object.h   | 2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_wait.c     | 4 ++--
>  drivers/gpu/drm/i915/i915_request.c          | 2 +-
>  drivers/gpu/drm/i915/i915_sw_fence.c         | 2 +-
>  drivers/gpu/drm/nouveau/dispnv50/wndw.c      | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_job.c      | 2 +-
>  include/linux/dma-resv.h                     | 4 ++--
>  11 files changed, 13 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 9989425e9875..263b4fb03303 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -1375,7 +1375,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>  
>  	if (!write) {
>  		struct dma_fence *fence =
> -			dma_resv_get_excl_rcu(obj->resv);
> +			dma_resv_get_excl_unlocked(obj->resv);
>  
>  		return drm_gem_fence_array_add(fence_array, fence);
>  	}
> diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
> index a005c5a0ba46..a27135084ae5 100644
> --- a/drivers/gpu/drm/drm_gem_atomic_helper.c
> +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
> @@ -147,7 +147,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
>  		return 0;
>  
>  	obj = drm_gem_fb_get_obj(state->fb, 0);
> -	fence = dma_resv_get_excl_rcu(obj->resv);
> +	fence = dma_resv_get_excl_unlocked(obj->resv);
>  	drm_atomic_set_fence_for_plane(state, fence);
>  
>  	return 0;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index d05c35994579..c942d2a8c252 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -195,7 +195,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>  			if (ret)
>  				return ret;
>  		} else {
> -			bo->excl = dma_resv_get_excl_rcu(robj);
> +			bo->excl = dma_resv_get_excl_unlocked(robj);
>  		}
>  
>  	}
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index 384ff0bb6e19..f17c5f54feb6 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -11040,7 +11040,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
>  		if (ret < 0)
>  			goto unpin_fb;
>  
> -		fence = dma_resv_get_excl_rcu(obj->base.resv);
> +		fence = dma_resv_get_excl_unlocked(obj->base.resv);
>  		if (fence) {
>  			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
>  						   fence);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index 2ebd79537aea..7c0eb425cb3b 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -500,7 +500,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
>  	struct dma_fence *fence;
>  
>  	rcu_read_lock();
> -	fence = dma_resv_get_excl_rcu(obj->base.resv);
> +	fence = dma_resv_get_excl_unlocked(obj->base.resv);
>  	rcu_read_unlock();
>  
>  	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> index 4b9856d5ba14..c13aeddf5aa7 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> @@ -73,7 +73,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
>  		 */
>  		prune_fences = count && timeout >= 0;
>  	} else {
> -		excl = dma_resv_get_excl_rcu(resv);
> +		excl = dma_resv_get_excl_unlocked(resv);
>  	}
>  
>  	if (excl && timeout >= 0)
> @@ -170,7 +170,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>  
>  		kfree(shared);
>  	} else {
> -		excl = dma_resv_get_excl_rcu(obj->base.resv);
> +		excl = dma_resv_get_excl_unlocked(obj->base.resv);
>  	}
>  
>  	if (excl) {
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index bec9c3652188..c85494f411f4 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1611,7 +1611,7 @@ i915_request_await_object(struct i915_request *to,
>  			dma_fence_put(shared[i]);
>  		kfree(shared);
>  	} else {
> -		excl = dma_resv_get_excl_rcu(obj->base.resv);
> +		excl = dma_resv_get_excl_unlocked(obj->base.resv);
>  	}
>  
>  	if (excl) {
> diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
> index 2744558f3050..7aaf74552d06 100644
> --- a/drivers/gpu/drm/i915/i915_sw_fence.c
> +++ b/drivers/gpu/drm/i915/i915_sw_fence.c
> @@ -606,7 +606,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
>  			dma_fence_put(shared[i]);
>  		kfree(shared);
>  	} else {
> -		excl = dma_resv_get_excl_rcu(resv);
> +		excl = dma_resv_get_excl_unlocked(resv);
>  	}
>  
>  	if (ret >= 0 && excl && excl->ops != exclude) {
> diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> index 0cb1f9d848d3..8d048bacd6f0 100644
> --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> @@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>  			asyw->image.handle[0] = ctxdma->object.handle;
>  	}
>  
> -	asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
> +	asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
>  	asyw->image.offset[0] = nvbo->offset;
>  
>  	if (wndw->func->prepare) {
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 6003cfeb1322..2df3e999a38d 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -203,7 +203,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
>  	int i;
>  
>  	for (i = 0; i < bo_count; i++)
> -		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
> +		implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv);
>  }
>  
>  static void panfrost_attach_object_fences(struct drm_gem_object **bos,
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 98ac66fecb71..f6b71712c029 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -228,7 +228,7 @@ dma_resv_exclusive(struct dma_resv *obj)
>  }
>  
>  /**
> - * dma_resv_get_excl_rcu - get the reservation object's
> + * dma_resv_get_excl_unlocked - get the reservation object's
>   * exclusive fence, without lock held.
>   * @obj: the reservation object
>   *
> @@ -239,7 +239,7 @@ dma_resv_exclusive(struct dma_resv *obj)
>   * The exclusive fence or NULL if none
>   */
>  static inline struct dma_fence *
> -dma_resv_get_excl_rcu(struct dma_resv *obj)
> +dma_resv_get_excl_unlocked(struct dma_resv *obj)
>  {
>  	struct dma_fence *fence;
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 7/7] dma-buf: drop the _rcu postfix on function names
  2021-06-02 11:17 ` [PATCH 7/7] dma-buf: drop the _rcu postfix on function names Christian König
@ 2021-06-02 12:49   ` Daniel Vetter
  2021-06-02 20:34   ` Jason Ekstrand
  1 sibling, 0 replies; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:49 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 01:17:14PM +0200, Christian König wrote:
> The functions can be called both in _rcu context as well
> as while holding the lock.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Please add to both functions some kerneldoc like "Callers are not required
to hold specific locks, but may already hold dma_resv_lock()". Or
something like that.

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Btw on the mass changes only an ack because I'm too lazy to check whether
you caught all the drivers myself, I'm trusting gcc and compile-checking
on x86/arm/arm64 for this to be enough :-)
-Daniel


> ---
>  drivers/dma-buf/dma-buf.c                     |  3 +--
>  drivers/dma-buf/dma-resv.c                    | 24 +++++++++----------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c       |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c       |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c        |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  8 +++----
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  2 +-
>  drivers/gpu/drm/drm_gem.c                     |  4 ++--
>  drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  4 ++--
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  2 +-
>  drivers/gpu/drm/i915/dma_resv_utils.c         |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  2 +-
>  .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_wait.c      |  4 ++--
>  drivers/gpu/drm/i915/i915_request.c           |  2 +-
>  drivers/gpu/drm/i915/i915_sw_fence.c          |  2 +-
>  drivers/gpu/drm/msm/msm_gem.c                 |  2 +-
>  drivers/gpu/drm/nouveau/nouveau_gem.c         |  2 +-
>  drivers/gpu/drm/panfrost/panfrost_drv.c       |  2 +-
>  drivers/gpu/drm/radeon/radeon_gem.c           |  6 ++---
>  drivers/gpu/drm/radeon/radeon_mn.c            |  2 +-
>  drivers/gpu/drm/ttm/ttm_bo.c                  | 12 +++++-----
>  drivers/gpu/drm/vgem/vgem_fence.c             |  2 +-
>  drivers/gpu/drm/virtio/virtgpu_ioctl.c        |  4 ++--
>  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            |  2 +-
>  include/linux/dma-resv.h                      | 17 ++++---------
>  31 files changed, 60 insertions(+), 70 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 4d0ddc712f1e..f92931d8db51 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
>  	long ret;
>  
>  	/* Wait on any implicit rendering fences */
> -	ret = dma_resv_wait_timeout_rcu(resv, write, true,
> -						  MAX_SCHEDULE_TIMEOUT);
> +	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
>  	if (ret < 0)
>  		return ret;
>  
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index b1a1a31dc009..74fe64dc1ce3 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -393,7 +393,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  EXPORT_SYMBOL(dma_resv_copy_fences);
>  
>  /**
> - * dma_resv_get_fences_rcu - Get an object's shared and exclusive
> + * dma_resv_get_fences - Get an object's shared and exclusive
>   * fences without update side lock held
>   * @obj: the reservation object
>   * @pfence_excl: the returned exclusive fence (or NULL)
> @@ -405,10 +405,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
>   * exclusive fence is not specified the fence is put into the array of the
>   * shared fences as well. Returns either zero or -ENOMEM.
>   */
> -int dma_resv_get_fences_rcu(struct dma_resv *obj,
> -			    struct dma_fence **pfence_excl,
> -			    unsigned int *pshared_count,
> -			    struct dma_fence ***pshared)
> +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
> +			unsigned int *pshared_count,
> +			struct dma_fence ***pshared)
>  {
>  	struct dma_fence **shared = NULL;
>  	struct dma_fence *fence_excl;
> @@ -491,10 +490,10 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>  	*pshared = shared;
>  	return ret;
>  }
> -EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
> +EXPORT_SYMBOL_GPL(dma_resv_get_fences);
>  
>  /**
> - * dma_resv_wait_timeout_rcu - Wait on reservation's objects
> + * dma_resv_wait_timeout - Wait on reservation's objects
>   * shared and/or exclusive fences.
>   * @obj: the reservation object
>   * @wait_all: if true, wait on all fences, else wait on just exclusive fence
> @@ -505,9 +504,8 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
>   * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
>   * greater than zero on success.
>   */
> -long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
> -			       bool wait_all, bool intr,
> -			       unsigned long timeout)
> +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
> +			   unsigned long timeout)
>  {
>  	long ret = timeout ? timeout : 1;
>  	unsigned int seq, shared_count;
> @@ -579,7 +577,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>  	rcu_read_unlock();
>  	goto retry;
>  }
> -EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
> +EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
>  
>  
>  static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
> @@ -608,7 +606,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
>   * RETURNS
>   * true if all fences signaled, else false
>   */
> -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
> +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
>  {
>  	unsigned int seq, shared_count;
>  	int ret;
> @@ -657,7 +655,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>  	rcu_read_unlock();
>  	return ret;
>  }
> -EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
> +EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
>  
>  #if IS_ENABLED(CONFIG_LOCKDEP)
>  static int __init dma_resv_lockdep(void)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> index 49f73b5b89b0..004d01d2e1d7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> @@ -203,7 +203,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
>  		goto unpin;
>  	}
>  
> -	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
> +	r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
>  					      &work->shared_count,
>  					      &work->shared);
>  	if (unlikely(r != 0)) {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index 3b13c8a38c4e..615be1697d49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -52,7 +52,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
>  	if (!dma_resv_shared(obj)) /* no shared fences to convert */
>  		return 0;
>  
> -	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
> +	r = dma_resv_get_fences(obj, NULL, &count, &fences);
>  	if (r)
>  		return r;
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index cd5146fa6fb6..dafc96032d7d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -526,7 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
>  		return -ENOENT;
>  	}
>  	robj = gem_to_amdgpu_bo(gobj);
> -	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
> +	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true,
>  						  timeout);
>  
>  	/* ret == 0 means not signaled,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index b4971e90b98c..65a3422ec078 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>  	unsigned count;
>  	int r;
>  
> -	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
> +	r = dma_resv_get_fences(resv, NULL, &count, &fences);
>  	if (r)
>  		goto fallback;
>  
> @@ -156,7 +156,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>  	/* Not enough memory for the delayed delete, as last resort
>  	 * block for all the fences to complete.
>  	 */
> -	dma_resv_wait_timeout_rcu(resv, true, false,
> +	dma_resv_wait_timeout(resv, true, false,
>  					    MAX_SCHEDULE_TIMEOUT);
>  	amdgpu_pasid_free(pasid);
>  }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> index 2741c28ff1b5..86de11a86a3e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> @@ -75,7 +75,7 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
>  
>  	mmu_interval_set_seq(mni, cur_seq);
>  
> -	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
> +	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
>  				      MAX_SCHEDULE_TIMEOUT);
>  	mutex_unlock(&adev->notifier_lock);
>  	if (r <= 0)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index 03c6b63d1d54..821dec6d2f73 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -756,7 +756,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
>  		return 0;
>  	}
>  
> -	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
> +	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
>  						MAX_SCHEDULE_TIMEOUT);
>  	if (r < 0)
>  		return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 82f0542c7792..3773f5ff6f0e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -1126,7 +1126,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>  	ib->length_dw = 16;
>  
>  	if (direct) {
> -		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> +		r = dma_resv_wait_timeout(bo->tbo.base.resv,
>  							true, false,
>  							msecs_to_jiffies(10));
>  		if (r == 0)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index bcfd4a8d0288..da716aa38085 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -2022,13 +2022,13 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>  	unsigned i, shared_count;
>  	int r;
>  
> -	r = dma_resv_get_fences_rcu(resv, &excl,
> +	r = dma_resv_get_fences(resv, &excl,
>  					      &shared_count, &shared);
>  	if (r) {
>  		/* Not enough memory to grab the fence list, as last resort
>  		 * block for all the fences to complete.
>  		 */
> -		dma_resv_wait_timeout_rcu(resv, true, false,
> +		dma_resv_wait_timeout(resv, true, false,
>  						    MAX_SCHEDULE_TIMEOUT);
>  		return;
>  	}
> @@ -2640,7 +2640,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
>  		return true;
>  
>  	/* Don't evict VM page tables while they are busy */
> -	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
> +	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
>  		return false;
>  
>  	/* Try to block ongoing updates */
> @@ -2820,7 +2820,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
>   */
>  long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
>  {
> -	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
> +	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv,
>  					    true, true, timeout);
>  	if (timeout <= 0)
>  		return timeout;
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 3267eb2e35dd..1633afd3c03b 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -8400,7 +8400,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
>  		 * deadlock during GPU reset when this fence will not signal
>  		 * but we hold reservation lock for the BO.
>  		 */
> -		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
> +		r = dma_resv_wait_timeout(abo->tbo.base.resv, true,
>  							false,
>  							msecs_to_jiffies(5000));
>  		if (unlikely(r <= 0))
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 263b4fb03303..11770da97dc0 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -770,7 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
>  		return -EINVAL;
>  	}
>  
> -	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
> +	ret = dma_resv_wait_timeout(obj->resv, wait_all,
>  						  true, timeout);
>  	if (ret == 0)
>  		ret = -ETIME;
> @@ -1380,7 +1380,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>  		return drm_gem_fence_array_add(fence_array, fence);
>  	}
>  
> -	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
> +	ret = dma_resv_get_fences(obj->resv, NULL,
>  						&fence_count, &fences);
>  	if (ret || !fence_count)
>  		return ret;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index 4d43b8630f0e..e3c209628688 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -390,13 +390,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
>  	}
>  
>  	if (op & ETNA_PREP_NOSYNC) {
> -		if (!dma_resv_test_signaled_rcu(obj->resv,
> +		if (!dma_resv_test_signaled(obj->resv,
>  							  write))
>  			return -EBUSY;
>  	} else {
>  		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
>  
> -		ret = dma_resv_wait_timeout_rcu(obj->resv,
> +		ret = dma_resv_wait_timeout(obj->resv,
>  							  write, true, remain);
>  		if (ret <= 0)
>  			return ret == 0 ? -ETIMEDOUT : ret;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index c942d2a8c252..9cc36bbc2502 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -189,7 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>  			continue;
>  
>  		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
> -			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
> +			ret = dma_resv_get_fences(robj, &bo->excl,
>  								&bo->nr_shared,
>  								&bo->shared);
>  			if (ret)
> diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
> index 9e508e7d4629..7df91b7e4ca8 100644
> --- a/drivers/gpu/drm/i915/dma_resv_utils.c
> +++ b/drivers/gpu/drm/i915/dma_resv_utils.c
> @@ -10,7 +10,7 @@
>  void dma_resv_prune(struct dma_resv *resv)
>  {
>  	if (dma_resv_trylock(resv)) {
> -		if (dma_resv_test_signaled_rcu(resv, true))
> +		if (dma_resv_test_signaled(resv, true))
>  			dma_resv_add_excl_fence(resv, NULL);
>  		dma_resv_unlock(resv);
>  	}
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> index 3f94becac541..0083a850f839 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> @@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>  	 * Alternatively, we can trade that extra information on read/write
>  	 * activity with
>  	 *	args->busy =
> -	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
> +	 *		!dma_resv_test_signaled(obj->resv, true);
>  	 * to report the overall busyness. This is what the wait-ioctl does.
>  	 *
>  	 */
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index 297143511f99..66789111a24b 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
>  	if (DBG_FORCE_RELOC)
>  		return false;
>  
> -	return !dma_resv_test_signaled_rcu(vma->resv, true);
> +	return !dma_resv_test_signaled(vma->resv, true);
>  }
>  
>  static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> index a657b99ec760..e78738aec7b2 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> @@ -85,7 +85,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
>  		return true;
>  
>  	/* we will unbind on next submission, still have userptr pins */
> -	r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
> +	r = dma_resv_wait_timeout(obj->base.resv, true, false,
>  				      MAX_SCHEDULE_TIMEOUT);
>  	if (r <= 0)
>  		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> index c13aeddf5aa7..e7aebb8fb468 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> @@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
>  		unsigned int count, i;
>  		int ret;
>  
> -		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
> +		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
>  		if (ret)
>  			return ret;
>  
> @@ -158,7 +158,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>  		unsigned int count, i;
>  		int ret;
>  
> -		ret = dma_resv_get_fences_rcu(obj->base.resv,
> +		ret = dma_resv_get_fences(obj->base.resv,
>  					      &excl, &count, &shared);
>  		if (ret)
>  			return ret;
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index c85494f411f4..4a70a1881d79 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1594,7 +1594,7 @@ i915_request_await_object(struct i915_request *to,
>  		struct dma_fence **shared;
>  		unsigned int count, i;
>  
> -		ret = dma_resv_get_fences_rcu(obj->base.resv,
> +		ret = dma_resv_get_fences(obj->base.resv,
>  							&excl, &count, &shared);
>  		if (ret)
>  			return ret;
> diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
> index 7aaf74552d06..c589a681da77 100644
> --- a/drivers/gpu/drm/i915/i915_sw_fence.c
> +++ b/drivers/gpu/drm/i915/i915_sw_fence.c
> @@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
>  		struct dma_fence **shared;
>  		unsigned int count, i;
>  
> -		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
> +		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
>  		if (ret)
>  			return ret;
>  
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 43af91df552e..ecd35986ddb5 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -915,7 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
>  		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
>  	long ret;
>  
> -	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
> +	ret = dma_resv_wait_timeout(obj->resv, write,
>  						  true,  remain);
>  	if (ret == 0)
>  		return remain == 0 ? -EBUSY : -ETIMEDOUT;
> diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
> index d863e5ed954a..c59072f254f1 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
> @@ -964,7 +964,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
>  		return -ENOENT;
>  	nvbo = nouveau_gem_object(gem);
>  
> -	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
> +	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
>  						   no_wait ? 0 : 30 * HZ);
>  	if (!lret)
>  		ret = -EBUSY;
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index ca07098a6141..0e6e893eb81d 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -311,7 +311,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
>  	if (!gem_obj)
>  		return -ENOENT;
>  
> -	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
> +	ret = dma_resv_wait_timeout(gem_obj->resv, true,
>  						  true, timeout);
>  	if (!ret)
>  		ret = timeout ? -ETIMEDOUT : -EBUSY;
> diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
> index 3272c33af8fe..458f92a70887 100644
> --- a/drivers/gpu/drm/radeon/radeon_gem.c
> +++ b/drivers/gpu/drm/radeon/radeon_gem.c
> @@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
>  	}
>  	if (domain == RADEON_GEM_DOMAIN_CPU) {
>  		/* Asking for cpu access wait for object idle */
> -		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
> +		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
>  		if (!r)
>  			r = -EBUSY;
>  
> @@ -523,7 +523,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
>  	}
>  	robj = gem_to_radeon_bo(gobj);
>  
> -	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
> +	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
>  	if (r == 0)
>  		r = -EBUSY;
>  	else
> @@ -552,7 +552,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
>  	}
>  	robj = gem_to_radeon_bo(gobj);
>  
> -	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
> +	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
>  	if (ret == 0)
>  		r = -EBUSY;
>  	else if (ret < 0)
> diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
> index e37c9a57a7c3..adb084e6ddbe 100644
> --- a/drivers/gpu/drm/radeon/radeon_mn.c
> +++ b/drivers/gpu/drm/radeon/radeon_mn.c
> @@ -66,7 +66,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
>  		return true;
>  	}
>  
> -	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
> +	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
>  				      MAX_SCHEDULE_TIMEOUT);
>  	if (r <= 0)
>  		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index c41ef0caa492..32004cf37549 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -296,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
>  	struct dma_resv *resv = &bo->base._resv;
>  	int ret;
>  
> -	if (dma_resv_test_signaled_rcu(resv, true))
> +	if (dma_resv_test_signaled(resv, true))
>  		ret = 0;
>  	else
>  		ret = -EBUSY;
> @@ -308,7 +308,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
>  			dma_resv_unlock(bo->base.resv);
>  		spin_unlock(&bo->bdev->lru_lock);
>  
> -		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
> +		lret = dma_resv_wait_timeout(resv, true, interruptible,
>  						 30 * HZ);
>  
>  		if (lret < 0)
> @@ -411,7 +411,7 @@ static void ttm_bo_release(struct kref *kref)
>  			/* Last resort, if we fail to allocate memory for the
>  			 * fences block for the BO to become idle
>  			 */
> -			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
> +			dma_resv_wait_timeout(bo->base.resv, true, false,
>  						  30 * HZ);
>  		}
>  
> @@ -422,7 +422,7 @@ static void ttm_bo_release(struct kref *kref)
>  		ttm_mem_io_free(bdev, bo->resource);
>  	}
>  
> -	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
> +	if (!dma_resv_test_signaled(bo->base.resv, true) ||
>  	    !dma_resv_trylock(bo->base.resv)) {
>  		/* The BO is not idle, resurrect it for delayed destroy */
>  		ttm_bo_flush_all_fences(bo);
> @@ -1121,13 +1121,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
>  	long timeout = 15 * HZ;
>  
>  	if (no_wait) {
> -		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
> +		if (dma_resv_test_signaled(bo->base.resv, true))
>  			return 0;
>  		else
>  			return -EBUSY;
>  	}
>  
> -	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
> +	timeout = dma_resv_wait_timeout(bo->base.resv, true,
>  						      interruptible, timeout);
>  	if (timeout < 0)
>  		return timeout;
> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
> index 2902dc6e64fa..7f3125cf5358 100644
> --- a/drivers/gpu/drm/vgem/vgem_fence.c
> +++ b/drivers/gpu/drm/vgem/vgem_fence.c
> @@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
>  
>  	/* Check for a conflicting fence */
>  	resv = obj->resv;
> -	if (!dma_resv_test_signaled_rcu(resv,
> +	if (!dma_resv_test_signaled(resv,
>  						  arg->flags & VGEM_FENCE_WRITE)) {
>  		ret = -EBUSY;
>  		goto err_fence;
> diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> index 669f2ee39515..190d9495dc0e 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> @@ -451,9 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
>  		return -ENOENT;
>  
>  	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
> -		ret = dma_resv_test_signaled_rcu(obj->resv, true);
> +		ret = dma_resv_test_signaled(obj->resv, true);
>  	} else {
> -		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
> +		ret = dma_resv_wait_timeout(obj->resv, true, true,
>  						timeout);
>  	}
>  	if (ret == 0)
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> index 176b6201ef2b..8faf1df027f3 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> @@ -743,7 +743,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
>  	if (flags & drm_vmw_synccpu_allow_cs) {
>  		long lret;
>  
> -		lret = dma_resv_wait_timeout_rcu
> +		lret = dma_resv_wait_timeout
>  			(bo->base.resv, true, true,
>  			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
>  		if (!lret)
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index f6b71712c029..22325dfa7744 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -268,19 +268,12 @@ void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>  void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
> -
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> -
> -int dma_resv_get_fences_rcu(struct dma_resv *obj,
> -			    struct dma_fence **pfence_excl,
> -			    unsigned *pshared_count,
> -			    struct dma_fence ***pshared);
> -
> +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
> +			unsigned *pshared_count, struct dma_fence ***pshared);
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
> -
> -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
> -			       unsigned long timeout);
> -
> -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
> +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
> +			   unsigned long timeout);
> +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
>  
>  #endif /* _LINUX_RESERVATION_H */
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 1/7] dma-buf: fix inconsistent debug print
  2021-06-02 12:36   ` Christian König
@ 2021-06-02 12:50     ` Daniel Vetter
  0 siblings, 0 replies; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:50 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 02:36:58PM +0200, Christian König wrote:
> 
> 
> Am 02.06.21 um 14:33 schrieb Daniel Vetter:
> > On Wed, Jun 02, 2021 at 01:17:08PM +0200, Christian König wrote:
> > > The code tries to acquire the rcu protected fence list, but then ignores
> > > individual fences which have been modified while holding the rcu.
> > > 
> > > Stop that madness and just note cleanly that the list was concurrently modified.
> > > 
> > > Signed-off-by: Christian König <christian.koenig@amd.com>
> > Yeah it's debugfs, it's better not to be fancy here and if you race you
> > can just re-grab it all.
> > 
> > What's worse, we do grab the dma_resv_lock, which means no one should be
> > able to race with us. I think 100% right thing here is actually to drop
> > the rcu_read_lock too, and switch over to rcu_dereference_protected().
> > 
> > And also drop the seqcount check, that would be a bug. seqcount is only
> > to get a consistent snapshot of all fences on the read (i.e. protected by
> > rcu only) section. We hold the write lock with dma_resv_lock here.
> 
> Yes that what I had in mind as alternative as well.
> 
> Just wasn't 100% sure which way to go here.

I think for paranoia we could read the seqcount and do a WARN_ON if it
ever increments while we hold dma_resv_lock ourselves. But I think this
one here is the only one where this applies, and it's debugfs only, so
meh?
-Daniel

> Going to adjust that,
> Christian.
> 
> > 
> > Cheers, Daniel
> > 
> > > ---
> > >   drivers/dma-buf/dma-buf.c | 19 ++++++++-----------
> > >   1 file changed, 8 insertions(+), 11 deletions(-)
> > > 
> > > diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> > > index eadd1eaa2fb5..d3b4e370dbc1 100644
> > > --- a/drivers/dma-buf/dma-buf.c
> > > +++ b/drivers/dma-buf/dma-buf.c
> > > @@ -1383,22 +1383,17 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
> > >   				buf_obj->name ?: "");
> > >   		robj = buf_obj->resv;
> > > -		while (true) {
> > > -			seq = read_seqcount_begin(&robj->seq);
> > > -			rcu_read_lock();
> > > -			fobj = rcu_dereference(robj->fence);
> > > -			shared_count = fobj ? fobj->shared_count : 0;
> > > -			fence = rcu_dereference(robj->fence_excl);
> > > -			if (!read_seqcount_retry(&robj->seq, seq))
> > > -				break;
> > > -			rcu_read_unlock();
> > > -		}
> > > -
> > > +		seq = read_seqcount_begin(&robj->seq);
> > > +		rcu_read_lock();
> > > +		fence = rcu_dereference(robj->fence_excl);
> > >   		if (fence)
> > >   			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
> > >   				   fence->ops->get_driver_name(fence),
> > >   				   fence->ops->get_timeline_name(fence),
> > >   				   dma_fence_is_signaled(fence) ? "" : "un");
> > > +
> > > +		fobj = rcu_dereference(robj->fence);
> > > +		shared_count = fobj ? fobj->shared_count : 0;
> > >   		for (i = 0; i < shared_count; i++) {
> > >   			fence = rcu_dereference(fobj->shared[i]);
> > >   			if (!dma_fence_get_rcu(fence))
> > > @@ -1410,6 +1405,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
> > >   			dma_fence_put(fence);
> > >   		}
> > >   		rcu_read_unlock();
> > > +		if (read_seqcount_retry(&robj->seq, seq))
> > > +			seq_printf(s, "\tFences concurrently modified\n");
> > >   		seq_puts(s, "\tAttached Devices:\n");
> > >   		attach_count = 0;
> > > -- 
> > > 2.25.1
> > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 2/7] dma-buf: add SPDX header and fix style in dma-resv.c
  2021-06-02 12:47     ` Christian König
@ 2021-06-02 12:55       ` Daniel Vetter
  0 siblings, 0 replies; 26+ messages in thread
From: Daniel Vetter @ 2021-06-02 12:55 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel, jason

On Wed, Jun 02, 2021 at 02:47:25PM +0200, Christian König wrote:
> 
> 
> Am 02.06.21 um 14:34 schrieb Daniel Vetter:
> > On Wed, Jun 02, 2021 at 01:17:09PM +0200, Christian König wrote:
> > > No functional change.
> > > 
> > > Signed-off-by: Christian König <christian.koenig@amd.com>
> > Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
> > 
> > Maybe add the checkpatch warnings you're fixing here to the commit
> > message. I didn't know that initcalls should be at the bottom ...
> 
> Well dma_resv_lockdep() had some tab/space mixup and moving it around was
> the easiest way to fix that in the editor :)
> 
> Moving it to the end seemed logical to me.

Ah whack that into the commit message then, I was confused for a bit :-)
-Daniel

> 
> Christian.
> 
> > -Daniel
> > 
> > 
> > > ---
> > >   drivers/dma-buf/dma-resv.c | 128 +++++++++++++++++++------------------
> > >   1 file changed, 65 insertions(+), 63 deletions(-)
> > > 
> > > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > > index 6ddbeb5dfbf6..87f5d82d992a 100644
> > > --- a/drivers/dma-buf/dma-resv.c
> > > +++ b/drivers/dma-buf/dma-resv.c
> > > @@ -1,3 +1,4 @@
> > > +// SPDX-License-Identifier: MIT
> > >   /*
> > >    * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
> > >    *
> > > @@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
> > >   	kfree_rcu(list, rcu);
> > >   }
> > > -#if IS_ENABLED(CONFIG_LOCKDEP)
> > > -static int __init dma_resv_lockdep(void)
> > > -{
> > > -	struct mm_struct *mm = mm_alloc();
> > > -	struct ww_acquire_ctx ctx;
> > > -	struct dma_resv obj;
> > > -	struct address_space mapping;
> > > -	int ret;
> > > -
> > > -	if (!mm)
> > > -		return -ENOMEM;
> > > -
> > > -	dma_resv_init(&obj);
> > > -	address_space_init_once(&mapping);
> > > -
> > > -	mmap_read_lock(mm);
> > > -	ww_acquire_init(&ctx, &reservation_ww_class);
> > > -	ret = dma_resv_lock(&obj, &ctx);
> > > -	if (ret == -EDEADLK)
> > > -		dma_resv_lock_slow(&obj, &ctx);
> > > -	fs_reclaim_acquire(GFP_KERNEL);
> > > -	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
> > > -	i_mmap_lock_write(&mapping);
> > > -	i_mmap_unlock_write(&mapping);
> > > -#ifdef CONFIG_MMU_NOTIFIER
> > > -	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
> > > -	__dma_fence_might_wait();
> > > -	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
> > > -#else
> > > -	__dma_fence_might_wait();
> > > -#endif
> > > -	fs_reclaim_release(GFP_KERNEL);
> > > -	ww_mutex_unlock(&obj.lock);
> > > -	ww_acquire_fini(&ctx);
> > > -	mmap_read_unlock(mm);
> > > -	
> > > -	mmput(mm);
> > > -
> > > -	return 0;
> > > -}
> > > -subsys_initcall(dma_resv_lockdep);
> > > -#endif
> > > -
> > >   /**
> > >    * dma_resv_init - initialize a reservation object
> > >    * @obj: the reservation object
> > > @@ -196,9 +154,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
> > >   	if (old && old->shared_max) {
> > >   		if ((old->shared_count + num_fences) <= old->shared_max)
> > >   			return 0;
> > > -		else
> > > -			max = max(old->shared_count + num_fences,
> > > -				  old->shared_max * 2);
> > > +		max = max(old->shared_count + num_fences, old->shared_max * 2);
> > >   	} else {
> > >   		max = max(4ul, roundup_pow_of_two(num_fences));
> > >   	}
> > > @@ -337,17 +293,17 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
> > >   EXPORT_SYMBOL(dma_resv_add_excl_fence);
> > >   /**
> > > -* dma_resv_copy_fences - Copy all fences from src to dst.
> > > -* @dst: the destination reservation object
> > > -* @src: the source reservation object
> > > -*
> > > -* Copy all fences from src to dst. dst-lock must be held.
> > > -*/
> > > + * dma_resv_copy_fences - Copy all fences from src to dst.
> > > + * @dst: the destination reservation object
> > > + * @src: the source reservation object
> > > + *
> > > + * Copy all fences from src to dst. dst-lock must be held.
> > > + */
> > >   int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> > >   {
> > >   	struct dma_resv_list *src_list, *dst_list;
> > >   	struct dma_fence *old, *new;
> > > -	unsigned i;
> > > +	unsigned int i;
> > >   	dma_resv_assert_held(dst);
> > > @@ -356,7 +312,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> > >   retry:
> > >   	if (src_list) {
> > > -		unsigned shared_count = src_list->shared_count;
> > > +		unsigned int shared_count = src_list->shared_count;
> > >   		rcu_read_unlock();
> > > @@ -373,6 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> > >   		dst_list->shared_count = 0;
> > >   		for (i = 0; i < src_list->shared_count; ++i) {
> > > +			struct dma_fence __rcu **dst;
> > >   			struct dma_fence *fence;
> > >   			fence = rcu_dereference(src_list->shared[i]);
> > > @@ -391,7 +348,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> > >   				continue;
> > >   			}
> > > -			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
> > > +			dst = &dst_list->shared[dst_list->shared_count++];
> > > +			rcu_assign_pointer(*dst, fence);
> > >   		}
> > >   	} else {
> > >   		dst_list = NULL;
> > > @@ -431,7 +389,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
> > >    */
> > >   int dma_resv_get_fences_rcu(struct dma_resv *obj,
> > >   			    struct dma_fence **pfence_excl,
> > > -			    unsigned *pshared_count,
> > > +			    unsigned int *pshared_count,
> > >   			    struct dma_fence ***pshared)
> > >   {
> > >   	struct dma_fence **shared = NULL;
> > > @@ -533,9 +491,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
> > >   			       bool wait_all, bool intr,
> > >   			       unsigned long timeout)
> > >   {
> > > -	struct dma_fence *fence;
> > > -	unsigned seq, shared_count;
> > >   	long ret = timeout ? timeout : 1;
> > > +	unsigned int seq, shared_count;
> > > +	struct dma_fence *fence;
> > >   	int i;
> > >   retry:
> > > @@ -565,8 +523,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
> > >   			shared_count = fobj->shared_count;
> > >   		for (i = 0; !fence && i < shared_count; ++i) {
> > > -			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
> > > +			struct dma_fence *lfence;
> > > +			lfence = rcu_dereference(fobj->shared[i]);
> > >   			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
> > >   				     &lfence->flags))
> > >   				continue;
> > > @@ -633,7 +592,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
> > >    */
> > >   bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
> > >   {
> > > -	unsigned seq, shared_count;
> > > +	unsigned int seq, shared_count;
> > >   	int ret;
> > >   	rcu_read_lock();
> > > @@ -643,16 +602,16 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
> > >   	seq = read_seqcount_begin(&obj->seq);
> > >   	if (test_all) {
> > > -		unsigned i;
> > > -
> > >   		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> > > +		unsigned int i;
> > >   		if (fobj)
> > >   			shared_count = fobj->shared_count;
> > >   		for (i = 0; i < shared_count; ++i) {
> > > -			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
> > > +			struct dma_fence *fence;
> > > +			fence = rcu_dereference(fobj->shared[i]);
> > >   			ret = dma_resv_test_signaled_single(fence);
> > >   			if (ret < 0)
> > >   				goto retry;
> > > @@ -681,3 +640,46 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
> > >   	return ret;
> > >   }
> > >   EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
> > > +
> > > +#if IS_ENABLED(CONFIG_LOCKDEP)
> > > +static int __init dma_resv_lockdep(void)
> > > +{
> > > +	struct mm_struct *mm = mm_alloc();
> > > +	struct ww_acquire_ctx ctx;
> > > +	struct dma_resv obj;
> > > +	struct address_space mapping;
> > > +	int ret;
> > > +
> > > +	if (!mm)
> > > +		return -ENOMEM;
> > > +
> > > +	dma_resv_init(&obj);
> > > +	address_space_init_once(&mapping);
> > > +
> > > +	mmap_read_lock(mm);
> > > +	ww_acquire_init(&ctx, &reservation_ww_class);
> > > +	ret = dma_resv_lock(&obj, &ctx);
> > > +	if (ret == -EDEADLK)
> > > +		dma_resv_lock_slow(&obj, &ctx);
> > > +	fs_reclaim_acquire(GFP_KERNEL);
> > > +	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
> > > +	i_mmap_lock_write(&mapping);
> > > +	i_mmap_unlock_write(&mapping);
> > > +#ifdef CONFIG_MMU_NOTIFIER
> > > +	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
> > > +	__dma_fence_might_wait();
> > > +	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
> > > +#else
> > > +	__dma_fence_might_wait();
> > > +#endif
> > > +	fs_reclaim_release(GFP_KERNEL);
> > > +	ww_mutex_unlock(&obj.lock);
> > > +	ww_acquire_fini(&ctx);
> > > +	mmap_read_unlock(mm);
> > > +
> > > +	mmput(mm);
> > > +
> > > +	return 0;
> > > +}
> > > +subsys_initcall(dma_resv_lockdep);
> > > +#endif
> > > -- 
> > > 2.25.1
> > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 4/7] dma-buf: rename and cleanup dma_resv_get_excl
  2021-06-02 12:43   ` Daniel Vetter
@ 2021-06-02 20:04     ` Jason Ekstrand
  0 siblings, 0 replies; 26+ messages in thread
From: Jason Ekstrand @ 2021-06-02 20:04 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: Christian König, Maling list - DRI developers

On Wed, Jun 2, 2021 at 7:43 AM Daniel Vetter <daniel@ffwll.ch> wrote:
>
> On Wed, Jun 02, 2021 at 01:17:11PM +0200, Christian König wrote:
> > When the comment needs to state explicitly that this
> > doesn't get a reference to the object then the function
> > is named rather badly.
> >
> > Rename the function and use rcu_dereference_check(), this
> > way it can be used from both rcu as well as lock protected
> > critical sections.
> >
> > Signed-off-by: Christian König <christian.koenig@amd.com>
>
> I'd call this dma_resv_exclusive_fence, since without that it's a bit close

Or, if we want to keep it shorter, dma_resv_excl_fence().  I don't
care much either way

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>

> to dma_resv_make_exclusive or something like that. But this is definitely
> better than the previous pointer deref in a "I'm totally getting you a
> full reference" trenchcoat thing.
>
> Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>
> > ---
> >  drivers/dma-buf/dma-buf.c                |  4 ++--
> >  drivers/dma-buf/dma-resv.c               | 10 +++++-----
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  |  2 +-
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c |  2 +-
> >  drivers/gpu/drm/etnaviv/etnaviv_gem.c    |  2 +-
> >  drivers/gpu/drm/i915/gem/i915_gem_busy.c |  3 +--
> >  drivers/gpu/drm/msm/msm_gem.c            |  4 ++--
> >  drivers/gpu/drm/nouveau/nouveau_bo.c     |  2 +-
> >  drivers/gpu/drm/nouveau/nouveau_fence.c  |  2 +-
> >  drivers/gpu/drm/radeon/radeon_display.c  |  2 +-
> >  drivers/gpu/drm/radeon/radeon_sync.c     |  2 +-
> >  drivers/gpu/drm/radeon/radeon_uvd.c      |  2 +-
> >  drivers/gpu/drm/ttm/ttm_bo.c             |  2 +-
> >  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  2 +-
> >  include/linux/dma-resv.h                 | 13 +++++--------
> >  15 files changed, 25 insertions(+), 29 deletions(-)
> >
> > diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> > index d3b4e370dbc1..4d0ddc712f1e 100644
> > --- a/drivers/dma-buf/dma-buf.c
> > +++ b/drivers/dma-buf/dma-buf.c
> > @@ -234,7 +234,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
> >               shared_count = fobj->shared_count;
> >       else
> >               shared_count = 0;
> > -     fence_excl = rcu_dereference(resv->fence_excl);
> > +     fence_excl = dma_resv_exclusive(resv);
> >       if (read_seqcount_retry(&resv->seq, seq)) {
> >               rcu_read_unlock();
> >               goto retry;
> > @@ -1385,7 +1385,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
> >               robj = buf_obj->resv;
> >               seq = read_seqcount_begin(&robj->seq);
> >               rcu_read_lock();
> > -             fence = rcu_dereference(robj->fence_excl);
> > +             fence = dma_resv_exclusive(robj);
> >               if (fence)
> >                       seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
> >                                  fence->ops->get_driver_name(fence),
> > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > index 6c6195315e9f..81b032b43457 100644
> > --- a/drivers/dma-buf/dma-resv.c
> > +++ b/drivers/dma-buf/dma-resv.c
> > @@ -281,7 +281,7 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
> >   */
> >  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
> >  {
> > -     struct dma_fence *old_fence = dma_resv_get_excl(obj);
> > +     struct dma_fence *old_fence = dma_resv_exclusive(obj);
> >       struct dma_resv_list *old;
> >       u32 i = 0;
> >
> > @@ -377,7 +377,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> >       rcu_read_unlock();
> >
> >       src_list = dma_resv_get_list(dst);
> > -     old = dma_resv_get_excl(dst);
> > +     old = dma_resv_exclusive(dst);
> >
> >       write_seqcount_begin(&dst->seq);
> >       /* write_seqcount_begin provides the necessary memory barrier */
> > @@ -425,7 +425,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
> >               rcu_read_lock();
> >               seq = read_seqcount_begin(&obj->seq);
> >
> > -             fence_excl = rcu_dereference(obj->fence_excl);
> > +             fence_excl = dma_resv_exclusive(obj);
> >               if (fence_excl && !dma_fence_get_rcu(fence_excl))
> >                       goto unlock;
> >
> > @@ -520,7 +520,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
> >       rcu_read_lock();
> >       i = -1;
> >
> > -     fence = rcu_dereference(obj->fence_excl);
> > +     fence = dma_resv_exclusive(obj);
> >       if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
> >               if (!dma_fence_get_rcu(fence))
> >                       goto unlock_retry;
> > @@ -642,7 +642,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
> >       }
> >
> >       if (!shared_count) {
> > -             struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
> > +             struct dma_fence *fence_excl = dma_resv_exclusive(obj);
> >
> >               if (fence_excl) {
> >                       ret = dma_resv_test_signaled_single(fence_excl);
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> > index 73c76a3e2b12..cd5146fa6fb6 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> > @@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
> >       if (!amdgpu_vm_ready(vm))
> >               goto out_unlock;
> >
> > -     fence = dma_resv_get_excl(bo->tbo.base.resv);
> > +     fence = dma_resv_exclusive(bo->tbo.base.resv);
> >       if (fence) {
> >               amdgpu_bo_fence(bo, fence, true);
> >               fence = NULL;
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> > index 4e558632a5d2..c84d5b843985 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> > @@ -210,7 +210,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
> >               return -EINVAL;
> >
> >       /* always sync to the exclusive fence */
> > -     f = dma_resv_get_excl(resv);
> > +     f = dma_resv_exclusive(resv);
> >       r = amdgpu_sync_fence(sync, f);
> >
> >       flist = dma_resv_get_list(resv);
> > diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> > index db69f19ab5bc..d4f54dea8ac1 100644
> > --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> > +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> > @@ -471,7 +471,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
> >               }
> >       }
> >
> > -     fence = rcu_dereference(robj->fence_excl);
> > +     fence = dma_resv_exclusive(robj);
> >       if (fence)
> >               etnaviv_gem_describe_fence(fence, "Exclusive", m);
> >       rcu_read_unlock();
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> > index 25235ef630c1..02312a0c3a36 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> > @@ -113,8 +113,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
> >       seq = raw_read_seqcount(&obj->base.resv->seq);
> >
> >       /* Translate the exclusive fence to the READ *and* WRITE engine */
> > -     args->busy =
> > -             busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
> > +     args->busy = busy_check_writer(dma_resv_exclusive(obj->base.resv));
> >
> >       /* Translate shared fences to READ set of engines */
> >       list = rcu_dereference(obj->base.resv->fence);
> > diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> > index 56df86e5f740..54c1b53426d6 100644
> > --- a/drivers/gpu/drm/msm/msm_gem.c
> > +++ b/drivers/gpu/drm/msm/msm_gem.c
> > @@ -819,7 +819,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
> >
> >       fobj = dma_resv_get_list(obj->resv);
> >       if (!fobj || (fobj->shared_count == 0)) {
> > -             fence = dma_resv_get_excl(obj->resv);
> > +             fence = dma_resv_exclusive(obj->resv);
> >               /* don't need to wait on our own fences, since ring is fifo */
> >               if (fence && (fence->context != fctx->context)) {
> >                       ret = dma_fence_wait(fence, true);
> > @@ -1035,7 +1035,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
> >               }
> >       }
> >
> > -     fence = rcu_dereference(robj->fence_excl);
> > +     fence = dma_resv_exclusive(robj);
> >       if (fence)
> >               describe_fence(fence, "Exclusive", m);
> >       rcu_read_unlock();
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
> > index e688ca77483d..ac0ebcc4ebb7 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
> > +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
> > @@ -955,7 +955,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
> >  {
> >       struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
> >       struct drm_device *dev = drm->dev;
> > -     struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
> > +     struct dma_fence *fence = dma_resv_exclusive(bo->base.resv);
> >
> >       nv10_bo_put_tile_region(dev, *old_tile, fence);
> >       *old_tile = new_tile;
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
> > index e5dcbf67de7e..a6cb35181aee 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
> > +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
> > @@ -356,7 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
> >       }
> >
> >       fobj = dma_resv_get_list(resv);
> > -     fence = dma_resv_get_excl(resv);
> > +     fence = dma_resv_exclusive(resv);
> >
> >       if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
> >               struct nouveau_channel *prev = NULL;
> > diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
> > index 652af7a134bd..57c910e5ae77 100644
> > --- a/drivers/gpu/drm/radeon/radeon_display.c
> > +++ b/drivers/gpu/drm/radeon/radeon_display.c
> > @@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
> >               DRM_ERROR("failed to pin new rbo buffer before flip\n");
> >               goto cleanup;
> >       }
> > -     work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
> > +     work->fence = dma_fence_get(dma_resv_exclusive(new_rbo->tbo.base.resv));
> >       radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
> >       radeon_bo_unreserve(new_rbo);
> >
> > diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
> > index 5d3302945076..e476f90ef1c1 100644
> > --- a/drivers/gpu/drm/radeon/radeon_sync.c
> > +++ b/drivers/gpu/drm/radeon/radeon_sync.c
> > @@ -98,7 +98,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
> >       int r = 0;
> >
> >       /* always sync to the exclusive fence */
> > -     f = dma_resv_get_excl(resv);
> > +     f = dma_resv_exclusive(resv);
> >       fence = f ? to_radeon_fence(f) : NULL;
> >       if (fence && fence->rdev == rdev)
> >               radeon_sync_fence(sync, fence);
> > diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
> > index dfa9fdbe98da..02d4bbdc9111 100644
> > --- a/drivers/gpu/drm/radeon/radeon_uvd.c
> > +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
> > @@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
> >               return -EINVAL;
> >       }
> >
> > -     f = dma_resv_get_excl(bo->tbo.base.resv);
> > +     f = dma_resv_exclusive(bo->tbo.base.resv);
> >       if (f) {
> >               r = radeon_fence_wait((struct radeon_fence *)f, false);
> >               if (r) {
> > diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> > index 5a7ab4b35b2d..92361556bf0b 100644
> > --- a/drivers/gpu/drm/ttm/ttm_bo.c
> > +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> > @@ -262,7 +262,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
> >
> >       rcu_read_lock();
> >       fobj = rcu_dereference(resv->fence);
> > -     fence = rcu_dereference(resv->fence_excl);
> > +     fence = dma_resv_exclusive(resv);
> >       if (fence && !fence->ops->signaled)
> >               dma_fence_enable_sw_signaling(fence);
> >
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> > index 62ea920addc3..c78f38ee1c20 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> > @@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
> >               if (bo->moving)
> >                       dma_fence_put(bo->moving);
> >               bo->moving = dma_fence_get
> > -                     (dma_resv_get_excl(bo->base.resv));
> > +                     (dma_resv_exclusive(bo->base.resv));
> >       }
> >
> >       return 0;
> > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> > index f32a3d176513..7549ec5eb35c 100644
> > --- a/include/linux/dma-resv.h
> > +++ b/include/linux/dma-resv.h
> > @@ -226,22 +226,19 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
> >  }
> >
> >  /**
> > - * dma_resv_get_excl - get the reservation object's
> > - * exclusive fence, with update-side lock held
> > + * dma_resv_exclusive - return the object's exclusive fence
> >   * @obj: the reservation object
> >   *
> > - * Returns the exclusive fence (if any).  Does NOT take a
> > - * reference. Writers must hold obj->lock, readers may only
> > - * hold a RCU read side lock.
> > + * Returns the exclusive fence (if any). Caller must either hold the objects
> > + * lock or the rcu read side lock.
> >   *
> >   * RETURNS
> >   * The exclusive fence or NULL
> >   */
> >  static inline struct dma_fence *
> > -dma_resv_get_excl(struct dma_resv *obj)
> > +dma_resv_exclusive(struct dma_resv *obj)
> >  {
> > -     return rcu_dereference_protected(obj->fence_excl,
> > -                                      dma_resv_held(obj));
> > +     return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
> >  }
> >
> >  /**
> > --
> > 2.25.1
> >
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list
  2021-06-02 11:17 ` [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list Christian König
  2021-06-02 12:46   ` Daniel Vetter
@ 2021-06-02 20:22   ` Jason Ekstrand
  2021-06-06  8:53     ` Christian König
  1 sibling, 1 reply; 26+ messages in thread
From: Jason Ekstrand @ 2021-06-02 20:22 UTC (permalink / raw)
  To: Christian König; +Cc: Maling list - DRI developers

On Wed, Jun 2, 2021 at 6:17 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> When the comment needs to state explicitly that this doesn't get a reference
> to the object then the function is named rather badly.
>
> Rename the function and use it in even more places.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c                    | 32 +++++++++----------
>  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c      |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  2 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  2 +-
>  drivers/gpu/drm/msm/msm_gem.c                 |  4 +--
>  drivers/gpu/drm/nouveau/nouveau_fence.c       |  2 +-
>  drivers/gpu/drm/qxl/qxl_debugfs.c             |  2 +-
>  drivers/gpu/drm/radeon/radeon_sync.c          |  2 +-
>  drivers/gpu/drm/ttm/ttm_bo.c                  |  2 +-
>  include/linux/dma-resv.h                      | 25 +++++++--------
>  13 files changed, 39 insertions(+), 42 deletions(-)
>
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 81b032b43457..b1a1a31dc009 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -149,8 +149,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
>
>         dma_resv_assert_held(obj);
>
> -       old = dma_resv_get_list(obj);
> -
> +       old = dma_resv_shared(obj);
>         if (old && old->shared_max) {
>                 if ((old->shared_count + num_fences) <= old->shared_max)
>                         return 0;
> @@ -217,12 +216,13 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
>   */
>  void dma_resv_reset_shared_max(struct dma_resv *obj)
>  {
> -       /* Test shared fence slot reservation */
> -       if (rcu_access_pointer(obj->fence)) {
> -               struct dma_resv_list *fence = dma_resv_get_list(obj);
> +       struct dma_resv_list *fences = dma_resv_shared(obj);
>
> -               fence->shared_max = fence->shared_count;
> -       }
> +       dma_resv_assert_held(obj);

Does it make sense to assert we hold the lock *before* we touch it
with something that requires that we do?  Maybe it doesn't matter?

> +
> +       /* Test shared fence slot reservation */
> +       if (fences)
> +               fences->shared_max = fences->shared_count;
>  }
>  #endif
>
> @@ -244,7 +244,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>
>         dma_resv_assert_held(obj);
>
> -       fobj = dma_resv_get_list(obj);
> +       fobj = dma_resv_shared(obj);
>         count = fobj->shared_count;
>
>         write_seqcount_begin(&obj->seq);
> @@ -287,7 +287,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>
>         dma_resv_assert_held(obj);
>
> -       old = dma_resv_get_list(obj);
> +       old = dma_resv_shared(obj);
>         if (old)
>                 i = old->shared_count;
>
> @@ -326,7 +326,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>         dma_resv_assert_held(dst);
>
>         rcu_read_lock();
> -       src_list = rcu_dereference(src->fence);
> +       src_list = dma_resv_shared(src);
>
>  retry:
>         if (src_list) {
> @@ -339,7 +339,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>                         return -ENOMEM;
>
>                 rcu_read_lock();
> -               src_list = rcu_dereference(src->fence);
> +               src_list = dma_resv_shared(src);
>                 if (!src_list || src_list->shared_count > shared_count) {
>                         kfree(dst_list);
>                         goto retry;
> @@ -357,7 +357,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>
>                         if (!dma_fence_get_rcu(fence)) {
>                                 dma_resv_list_free(dst_list);
> -                               src_list = rcu_dereference(src->fence);
> +                               src_list = dma_resv_shared(src);
>                                 goto retry;
>                         }
>
> @@ -376,7 +376,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>         new = dma_fence_get_rcu_safe(&src->fence_excl);
>         rcu_read_unlock();
>
> -       src_list = dma_resv_get_list(dst);
> +       src_list = dma_resv_shared(dst);
>         old = dma_resv_exclusive(dst);
>
>         write_seqcount_begin(&dst->seq);
> @@ -429,7 +429,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>                 if (fence_excl && !dma_fence_get_rcu(fence_excl))
>                         goto unlock;
>
> -               fobj = rcu_dereference(obj->fence);
> +               fobj = dma_resv_shared(obj);
>                 if (fobj)
>                         sz += sizeof(*shared) * fobj->shared_max;
>
> @@ -535,7 +535,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>         }
>
>         if (wait_all) {
> -               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> +               struct dma_resv_list *fobj = dma_resv_shared(obj);
>
>                 if (fobj)
>                         shared_count = fobj->shared_count;
> @@ -620,7 +620,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>         seq = read_seqcount_begin(&obj->seq);
>
>         if (test_all) {
> -               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> +               struct dma_resv_list *fobj = dma_resv_shared(obj);
>                 unsigned int i;
>
>                 if (fobj)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index d5e6519bdea1..e90495ca49fd 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>         if (!ef)
>                 return -EINVAL;
>
> -       old = dma_resv_get_list(resv);
> +       old = dma_resv_shared(resv);
>         if (!old)
>                 return 0;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index 6dd0ea6e9e24..3b13c8a38c4e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -49,7 +49,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
>         unsigned int count;
>         int r;
>
> -       if (!dma_resv_get_list(obj)) /* no shared fences to convert */
> +       if (!dma_resv_shared(obj)) /* no shared fences to convert */
>                 return 0;
>
>         r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> index c84d5b843985..c50d9f92a0cd 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> @@ -213,7 +213,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
>         f = dma_resv_exclusive(resv);
>         r = amdgpu_sync_fence(sync, f);
>
> -       flist = dma_resv_get_list(resv);
> +       flist = dma_resv_shared(resv);
>         if (!flist || r)
>                 return r;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 663aa7d2e2ea..ddb6ce7d48bc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -1338,7 +1338,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>          * If true, then return false as any KFD process needs all its BOs to
>          * be resident to run successfully
>          */
> -       flist = dma_resv_get_list(bo->base.resv);
> +       flist = dma_resv_shared(bo->base.resv);
>         if (flist) {
>                 for (i = 0; i < flist->shared_count; ++i) {
>                         f = rcu_dereference_protected(flist->shared[i],
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index d4f54dea8ac1..4d43b8630f0e 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -461,7 +461,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
>                         off, etnaviv_obj->vaddr, obj->size);
>
>         rcu_read_lock();
> -       fobj = rcu_dereference(robj->fence);
> +       fobj = dma_resv_shared(robj);
>         if (fobj) {
>                 unsigned int i, shared_count = fobj->shared_count;
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> index 02312a0c3a36..3f94becac541 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> @@ -116,7 +116,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>         args->busy = busy_check_writer(dma_resv_exclusive(obj->base.resv));
>
>         /* Translate shared fences to READ set of engines */
> -       list = rcu_dereference(obj->base.resv->fence);
> +       list = dma_resv_shared(obj->base.resv);
>         if (list) {
>                 unsigned int shared_count = list->shared_count, i;
>
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 54c1b53426d6..43af91df552e 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -817,7 +817,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
>         struct dma_fence *fence;
>         int i, ret;
>
> -       fobj = dma_resv_get_list(obj->resv);
> +       fobj = dma_resv_shared(obj->resv);
>         if (!fobj || (fobj->shared_count == 0)) {
>                 fence = dma_resv_exclusive(obj->resv);
>                 /* don't need to wait on our own fences, since ring is fifo */
> @@ -1025,7 +1025,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
>         }
>
>         rcu_read_lock();
> -       fobj = rcu_dereference(robj->fence);
> +       fobj = dma_resv_shared(robj);
>         if (fobj) {
>                 unsigned int i, shared_count = fobj->shared_count;
>
> diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
> index a6cb35181aee..5ce441c655ea 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
> @@ -355,7 +355,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
>                         return ret;
>         }
>
> -       fobj = dma_resv_get_list(resv);
> +       fobj = dma_resv_shared(resv);
>         fence = dma_resv_exclusive(resv);
>
>         if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
> diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
> index 183d15e2cf58..0acc70a6d3dd 100644
> --- a/drivers/gpu/drm/qxl/qxl_debugfs.c
> +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
> @@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
>                 int rel;
>
>                 rcu_read_lock();
> -               fobj = rcu_dereference(bo->tbo.base.resv->fence);
> +               fobj = dma_resv_shared(bo->tbo.base.resv);
>                 rel = fobj ? fobj->shared_count : 0;
>                 rcu_read_unlock();
>
> diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
> index e476f90ef1c1..a9cdb88da173 100644
> --- a/drivers/gpu/drm/radeon/radeon_sync.c
> +++ b/drivers/gpu/drm/radeon/radeon_sync.c
> @@ -105,7 +105,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
>         else if (f)
>                 r = dma_fence_wait(f, true);
>
> -       flist = dma_resv_get_list(resv);
> +       flist = dma_resv_shared(resv);
>         if (shared || !flist || r)
>                 return r;
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 92361556bf0b..c41ef0caa492 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -261,7 +261,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
>         int i;
>
>         rcu_read_lock();
> -       fobj = rcu_dereference(resv->fence);
> +       fobj = dma_resv_shared(resv);
>         fence = dma_resv_exclusive(resv);
>         if (fence && !fence->ops->signaled)
>                 dma_fence_enable_sw_signaling(fence);
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 7549ec5eb35c..98ac66fecb71 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -78,20 +78,6 @@ struct dma_resv {
>  #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>  #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>
> -/**
> - * dma_resv_get_list - get the reservation object's
> - * shared fence list, with update-side lock held
> - * @obj: the reservation object
> - *
> - * Returns the shared fence list.  Does NOT take references to
> - * the fence.  The obj->lock must be held.
> - */
> -static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
> -{
> -       return rcu_dereference_protected(obj->fence,
> -                                        dma_resv_held(obj));
> -}
> -
>  #ifdef CONFIG_DEBUG_MUTEXES
>  void dma_resv_reset_shared_max(struct dma_resv *obj);
>  #else
> @@ -267,6 +253,17 @@ dma_resv_get_excl_rcu(struct dma_resv *obj)
>         return fence;
>  }
>
> +/**
> + * dma_resv_shared - get the reservation object's shared fence list
> + * @obj: the reservation object
> + *
> + * Returns the shared fence list. The obj->lock or rcu read side must be held.
> + */
> +static inline struct dma_resv_list *dma_resv_shared(struct dma_resv *obj)

Maybe dma_resv_shared_list() just to be a little more clear what's
being returned?

--Jason

> +{
> +       return rcu_dereference_check(obj->fence, dma_resv_held(obj));
> +}
> +
>  void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 6/7] dma-buf: rename dma_resv_get_excl_rcu to _unlocked
  2021-06-02 11:17 ` [PATCH 6/7] dma-buf: rename dma_resv_get_excl_rcu to _unlocked Christian König
  2021-06-02 12:47   ` Daniel Vetter
@ 2021-06-02 20:25   ` Jason Ekstrand
  1 sibling, 0 replies; 26+ messages in thread
From: Jason Ekstrand @ 2021-06-02 20:25 UTC (permalink / raw)
  To: Christian König; +Cc: Maling list - DRI developers

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>

On Wed, Jun 2, 2021 at 6:17 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> That describes much better what the function is doing here.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/drm_gem.c                    | 2 +-
>  drivers/gpu/drm/drm_gem_atomic_helper.c      | 2 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2 +-
>  drivers/gpu/drm/i915/display/intel_display.c | 2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_object.h   | 2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_wait.c     | 4 ++--
>  drivers/gpu/drm/i915/i915_request.c          | 2 +-
>  drivers/gpu/drm/i915/i915_sw_fence.c         | 2 +-
>  drivers/gpu/drm/nouveau/dispnv50/wndw.c      | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_job.c      | 2 +-
>  include/linux/dma-resv.h                     | 4 ++--
>  11 files changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 9989425e9875..263b4fb03303 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -1375,7 +1375,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>
>         if (!write) {
>                 struct dma_fence *fence =
> -                       dma_resv_get_excl_rcu(obj->resv);
> +                       dma_resv_get_excl_unlocked(obj->resv);
>
>                 return drm_gem_fence_array_add(fence_array, fence);
>         }
> diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
> index a005c5a0ba46..a27135084ae5 100644
> --- a/drivers/gpu/drm/drm_gem_atomic_helper.c
> +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
> @@ -147,7 +147,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
>                 return 0;
>
>         obj = drm_gem_fb_get_obj(state->fb, 0);
> -       fence = dma_resv_get_excl_rcu(obj->resv);
> +       fence = dma_resv_get_excl_unlocked(obj->resv);
>         drm_atomic_set_fence_for_plane(state, fence);
>
>         return 0;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index d05c35994579..c942d2a8c252 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -195,7 +195,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>                         if (ret)
>                                 return ret;
>                 } else {
> -                       bo->excl = dma_resv_get_excl_rcu(robj);
> +                       bo->excl = dma_resv_get_excl_unlocked(robj);
>                 }
>
>         }
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index 384ff0bb6e19..f17c5f54feb6 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -11040,7 +11040,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
>                 if (ret < 0)
>                         goto unpin_fb;
>
> -               fence = dma_resv_get_excl_rcu(obj->base.resv);
> +               fence = dma_resv_get_excl_unlocked(obj->base.resv);
>                 if (fence) {
>                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
>                                                    fence);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index 2ebd79537aea..7c0eb425cb3b 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -500,7 +500,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
>         struct dma_fence *fence;
>
>         rcu_read_lock();
> -       fence = dma_resv_get_excl_rcu(obj->base.resv);
> +       fence = dma_resv_get_excl_unlocked(obj->base.resv);
>         rcu_read_unlock();
>
>         if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> index 4b9856d5ba14..c13aeddf5aa7 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> @@ -73,7 +73,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
>                  */
>                 prune_fences = count && timeout >= 0;
>         } else {
> -               excl = dma_resv_get_excl_rcu(resv);
> +               excl = dma_resv_get_excl_unlocked(resv);
>         }
>
>         if (excl && timeout >= 0)
> @@ -170,7 +170,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>
>                 kfree(shared);
>         } else {
> -               excl = dma_resv_get_excl_rcu(obj->base.resv);
> +               excl = dma_resv_get_excl_unlocked(obj->base.resv);
>         }
>
>         if (excl) {
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index bec9c3652188..c85494f411f4 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1611,7 +1611,7 @@ i915_request_await_object(struct i915_request *to,
>                         dma_fence_put(shared[i]);
>                 kfree(shared);
>         } else {
> -               excl = dma_resv_get_excl_rcu(obj->base.resv);
> +               excl = dma_resv_get_excl_unlocked(obj->base.resv);
>         }
>
>         if (excl) {
> diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
> index 2744558f3050..7aaf74552d06 100644
> --- a/drivers/gpu/drm/i915/i915_sw_fence.c
> +++ b/drivers/gpu/drm/i915/i915_sw_fence.c
> @@ -606,7 +606,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
>                         dma_fence_put(shared[i]);
>                 kfree(shared);
>         } else {
> -               excl = dma_resv_get_excl_rcu(resv);
> +               excl = dma_resv_get_excl_unlocked(resv);
>         }
>
>         if (ret >= 0 && excl && excl->ops != exclude) {
> diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> index 0cb1f9d848d3..8d048bacd6f0 100644
> --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> @@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>                         asyw->image.handle[0] = ctxdma->object.handle;
>         }
>
> -       asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
> +       asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
>         asyw->image.offset[0] = nvbo->offset;
>
>         if (wndw->func->prepare) {
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 6003cfeb1322..2df3e999a38d 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -203,7 +203,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
>         int i;
>
>         for (i = 0; i < bo_count; i++)
> -               implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
> +               implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv);
>  }
>
>  static void panfrost_attach_object_fences(struct drm_gem_object **bos,
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 98ac66fecb71..f6b71712c029 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -228,7 +228,7 @@ dma_resv_exclusive(struct dma_resv *obj)
>  }
>
>  /**
> - * dma_resv_get_excl_rcu - get the reservation object's
> + * dma_resv_get_excl_unlocked - get the reservation object's
>   * exclusive fence, without lock held.
>   * @obj: the reservation object
>   *
> @@ -239,7 +239,7 @@ dma_resv_exclusive(struct dma_resv *obj)
>   * The exclusive fence or NULL if none
>   */
>  static inline struct dma_fence *
> -dma_resv_get_excl_rcu(struct dma_resv *obj)
> +dma_resv_get_excl_unlocked(struct dma_resv *obj)
>  {
>         struct dma_fence *fence;
>
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 7/7] dma-buf: drop the _rcu postfix on function names
  2021-06-02 11:17 ` [PATCH 7/7] dma-buf: drop the _rcu postfix on function names Christian König
  2021-06-02 12:49   ` Daniel Vetter
@ 2021-06-02 20:34   ` Jason Ekstrand
  2021-06-06  9:08     ` Christian König
  1 sibling, 1 reply; 26+ messages in thread
From: Jason Ekstrand @ 2021-06-02 20:34 UTC (permalink / raw)
  To: Christian König; +Cc: Maling list - DRI developers

On Wed, Jun 2, 2021 at 6:17 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> The functions can be called both in _rcu context and
> while holding the lock.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-buf.c                     |  3 +--
>  drivers/dma-buf/dma-resv.c                    | 24 +++++++++----------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c       |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c       |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c        |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  8 +++----
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  2 +-
>  drivers/gpu/drm/drm_gem.c                     |  4 ++--
>  drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  4 ++--
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  2 +-
>  drivers/gpu/drm/i915/dma_resv_utils.c         |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  2 +-
>  .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_wait.c      |  4 ++--
>  drivers/gpu/drm/i915/i915_request.c           |  2 +-
>  drivers/gpu/drm/i915/i915_sw_fence.c          |  2 +-
>  drivers/gpu/drm/msm/msm_gem.c                 |  2 +-
>  drivers/gpu/drm/nouveau/nouveau_gem.c         |  2 +-
>  drivers/gpu/drm/panfrost/panfrost_drv.c       |  2 +-
>  drivers/gpu/drm/radeon/radeon_gem.c           |  6 ++---
>  drivers/gpu/drm/radeon/radeon_mn.c            |  2 +-
>  drivers/gpu/drm/ttm/ttm_bo.c                  | 12 +++++-----
>  drivers/gpu/drm/vgem/vgem_fence.c             |  2 +-
>  drivers/gpu/drm/virtio/virtgpu_ioctl.c        |  4 ++--
>  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            |  2 +-
>  include/linux/dma-resv.h                      | 17 ++++---------
>  31 files changed, 60 insertions(+), 70 deletions(-)
>
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 4d0ddc712f1e..f92931d8db51 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
>         long ret;
>
>         /* Wait on any implicit rendering fences */
> -       ret = dma_resv_wait_timeout_rcu(resv, write, true,
> -                                                 MAX_SCHEDULE_TIMEOUT);
> +       ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
>         if (ret < 0)
>                 return ret;
>
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index b1a1a31dc009..74fe64dc1ce3 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -393,7 +393,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  EXPORT_SYMBOL(dma_resv_copy_fences);
>
>  /**
> - * dma_resv_get_fences_rcu - Get an object's shared and exclusive
> + * dma_resv_get_fences - Get an object's shared and exclusive
>   * fences without update side lock held
>   * @obj: the reservation object
>   * @pfence_excl: the returned exclusive fence (or NULL)
> @@ -405,10 +405,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
>   * exclusive fence is not specified the fence is put into the array of the
>   * shared fences as well. Returns either zero or -ENOMEM.
>   */
> -int dma_resv_get_fences_rcu(struct dma_resv *obj,
> -                           struct dma_fence **pfence_excl,
> -                           unsigned int *pshared_count,
> -                           struct dma_fence ***pshared)
> +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
> +                       unsigned int *pshared_count,
> +                       struct dma_fence ***pshared)
>  {
>         struct dma_fence **shared = NULL;
>         struct dma_fence *fence_excl;
> @@ -491,10 +490,10 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>         *pshared = shared;
>         return ret;
>  }
> -EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
> +EXPORT_SYMBOL_GPL(dma_resv_get_fences);
>
>  /**
> - * dma_resv_wait_timeout_rcu - Wait on reservation's objects
> + * dma_resv_wait_timeout - Wait on reservation's objects
>   * shared and/or exclusive fences.
>   * @obj: the reservation object
>   * @wait_all: if true, wait on all fences, else wait on just exclusive fence
> @@ -505,9 +504,8 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
>   * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
>   * greater than zero on success.
>   */
> -long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
> -                              bool wait_all, bool intr,
> -                              unsigned long timeout)
> +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
> +                          unsigned long timeout)
>  {
>         long ret = timeout ? timeout : 1;
>         unsigned int seq, shared_count;
> @@ -579,7 +577,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>         rcu_read_unlock();
>         goto retry;
>  }
> -EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
> +EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
>
>
>  static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
> @@ -608,7 +606,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
>   * RETURNS
>   * true if all fences signaled, else false
>   */
> -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
> +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
>  {
>         unsigned int seq, shared_count;
>         int ret;
> @@ -657,7 +655,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>         rcu_read_unlock();
>         return ret;
>  }
> -EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
> +EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
>
>  #if IS_ENABLED(CONFIG_LOCKDEP)
>  static int __init dma_resv_lockdep(void)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> index 49f73b5b89b0..004d01d2e1d7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> @@ -203,7 +203,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
>                 goto unpin;
>         }
>
> -       r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
> +       r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
>                                               &work->shared_count,
>                                               &work->shared);
>         if (unlikely(r != 0)) {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index 3b13c8a38c4e..615be1697d49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -52,7 +52,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
>         if (!dma_resv_shared(obj)) /* no shared fences to convert */
>                 return 0;
>
> -       r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
> +       r = dma_resv_get_fences(obj, NULL, &count, &fences);
>         if (r)
>                 return r;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index cd5146fa6fb6..dafc96032d7d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -526,7 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
>                 return -ENOENT;
>         }
>         robj = gem_to_amdgpu_bo(gobj);
> -       ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
> +       ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true,
>                                                   timeout);
>
>         /* ret == 0 means not signaled,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index b4971e90b98c..65a3422ec078 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>         unsigned count;
>         int r;
>
> -       r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
> +       r = dma_resv_get_fences(resv, NULL, &count, &fences);
>         if (r)
>                 goto fallback;
>
> @@ -156,7 +156,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>         /* Not enough memory for the delayed delete, as last resort
>          * block for all the fences to complete.
>          */
> -       dma_resv_wait_timeout_rcu(resv, true, false,
> +       dma_resv_wait_timeout(resv, true, false,
>                                             MAX_SCHEDULE_TIMEOUT);
>         amdgpu_pasid_free(pasid);
>  }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> index 2741c28ff1b5..86de11a86a3e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> @@ -75,7 +75,7 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
>
>         mmu_interval_set_seq(mni, cur_seq);
>
> -       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
> +       r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
>                                       MAX_SCHEDULE_TIMEOUT);
>         mutex_unlock(&adev->notifier_lock);
>         if (r <= 0)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index 03c6b63d1d54..821dec6d2f73 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -756,7 +756,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
>                 return 0;
>         }
>
> -       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
> +       r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
>                                                 MAX_SCHEDULE_TIMEOUT);
>         if (r < 0)
>                 return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 82f0542c7792..3773f5ff6f0e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -1126,7 +1126,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>         ib->length_dw = 16;
>
>         if (direct) {
> -               r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> +               r = dma_resv_wait_timeout(bo->tbo.base.resv,
>                                                         true, false,
>                                                         msecs_to_jiffies(10));

Some kernel CI thing (not sure who runs it) is likely going to
complain at you about messing up indentation.  I don't know how much
you care.  I went ahead and fixed it all in my version of this change
but it's annoying and half of them were indented wrong before this
change so I'm a bit Meh.

Either way, assuming you've done a grep for the _rcu versions to
ensure you haven't missed any and assuming it all compiles,

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>

>                 if (r == 0)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index bcfd4a8d0288..da716aa38085 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -2022,13 +2022,13 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>         unsigned i, shared_count;
>         int r;
>
> -       r = dma_resv_get_fences_rcu(resv, &excl,
> +       r = dma_resv_get_fences(resv, &excl,
>                                               &shared_count, &shared);
>         if (r) {
>                 /* Not enough memory to grab the fence list, as last resort
>                  * block for all the fences to complete.
>                  */
> -               dma_resv_wait_timeout_rcu(resv, true, false,
> +               dma_resv_wait_timeout(resv, true, false,
>                                                     MAX_SCHEDULE_TIMEOUT);
>                 return;
>         }
> @@ -2640,7 +2640,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
>                 return true;
>
>         /* Don't evict VM page tables while they are busy */
> -       if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
> +       if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
>                 return false;
>
>         /* Try to block ongoing updates */
> @@ -2820,7 +2820,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
>   */
>  long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
>  {
> -       timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
> +       timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv,
>                                             true, true, timeout);
>         if (timeout <= 0)
>                 return timeout;
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 3267eb2e35dd..1633afd3c03b 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -8400,7 +8400,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
>                  * deadlock during GPU reset when this fence will not signal
>                  * but we hold reservation lock for the BO.
>                  */
> -               r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
> +               r = dma_resv_wait_timeout(abo->tbo.base.resv, true,
>                                                         false,
>                                                         msecs_to_jiffies(5000));
>                 if (unlikely(r <= 0))
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 263b4fb03303..11770da97dc0 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -770,7 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
>                 return -EINVAL;
>         }
>
> -       ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
> +       ret = dma_resv_wait_timeout(obj->resv, wait_all,
>                                                   true, timeout);
>         if (ret == 0)
>                 ret = -ETIME;
> @@ -1380,7 +1380,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>                 return drm_gem_fence_array_add(fence_array, fence);
>         }
>
> -       ret = dma_resv_get_fences_rcu(obj->resv, NULL,
> +       ret = dma_resv_get_fences(obj->resv, NULL,
>                                                 &fence_count, &fences);
>         if (ret || !fence_count)
>                 return ret;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index 4d43b8630f0e..e3c209628688 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -390,13 +390,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
>         }
>
>         if (op & ETNA_PREP_NOSYNC) {
> -               if (!dma_resv_test_signaled_rcu(obj->resv,
> +               if (!dma_resv_test_signaled(obj->resv,
>                                                           write))
>                         return -EBUSY;
>         } else {
>                 unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
>
> -               ret = dma_resv_wait_timeout_rcu(obj->resv,
> +               ret = dma_resv_wait_timeout(obj->resv,
>                                                           write, true, remain);
>                 if (ret <= 0)
>                         return ret == 0 ? -ETIMEDOUT : ret;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index c942d2a8c252..9cc36bbc2502 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -189,7 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>                         continue;
>
>                 if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
> -                       ret = dma_resv_get_fences_rcu(robj, &bo->excl,
> +                       ret = dma_resv_get_fences(robj, &bo->excl,
>                                                                 &bo->nr_shared,
>                                                                 &bo->shared);
>                         if (ret)
> diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
> index 9e508e7d4629..7df91b7e4ca8 100644
> --- a/drivers/gpu/drm/i915/dma_resv_utils.c
> +++ b/drivers/gpu/drm/i915/dma_resv_utils.c
> @@ -10,7 +10,7 @@
>  void dma_resv_prune(struct dma_resv *resv)
>  {
>         if (dma_resv_trylock(resv)) {
> -               if (dma_resv_test_signaled_rcu(resv, true))
> +               if (dma_resv_test_signaled(resv, true))
>                         dma_resv_add_excl_fence(resv, NULL);
>                 dma_resv_unlock(resv);
>         }
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> index 3f94becac541..0083a850f839 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> @@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>          * Alternatively, we can trade that extra information on read/write
>          * activity with
>          *      args->busy =
> -        *              !dma_resv_test_signaled_rcu(obj->resv, true);
> +        *              !dma_resv_test_signaled(obj->resv, true);
>          * to report the overall busyness. This is what the wait-ioctl does.
>          *
>          */
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index 297143511f99..66789111a24b 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
>         if (DBG_FORCE_RELOC)
>                 return false;
>
> -       return !dma_resv_test_signaled_rcu(vma->resv, true);
> +       return !dma_resv_test_signaled(vma->resv, true);
>  }
>
>  static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> index a657b99ec760..e78738aec7b2 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> @@ -85,7 +85,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
>                 return true;
>
>         /* we will unbind on next submission, still have userptr pins */
> -       r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
> +       r = dma_resv_wait_timeout(obj->base.resv, true, false,
>                                       MAX_SCHEDULE_TIMEOUT);
>         if (r <= 0)
>                 drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> index c13aeddf5aa7..e7aebb8fb468 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> @@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
>                 unsigned int count, i;
>                 int ret;
>
> -               ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
> +               ret = dma_resv_get_fences(resv, &excl, &count, &shared);
>                 if (ret)
>                         return ret;
>
> @@ -158,7 +158,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>                 unsigned int count, i;
>                 int ret;
>
> -               ret = dma_resv_get_fences_rcu(obj->base.resv,
> +               ret = dma_resv_get_fences(obj->base.resv,
>                                               &excl, &count, &shared);
>                 if (ret)
>                         return ret;
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index c85494f411f4..4a70a1881d79 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1594,7 +1594,7 @@ i915_request_await_object(struct i915_request *to,
>                 struct dma_fence **shared;
>                 unsigned int count, i;
>
> -               ret = dma_resv_get_fences_rcu(obj->base.resv,
> +               ret = dma_resv_get_fences(obj->base.resv,
>                                                         &excl, &count, &shared);
>                 if (ret)
>                         return ret;
> diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
> index 7aaf74552d06..c589a681da77 100644
> --- a/drivers/gpu/drm/i915/i915_sw_fence.c
> +++ b/drivers/gpu/drm/i915/i915_sw_fence.c
> @@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
>                 struct dma_fence **shared;
>                 unsigned int count, i;
>
> -               ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
> +               ret = dma_resv_get_fences(resv, &excl, &count, &shared);
>                 if (ret)
>                         return ret;
>
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 43af91df552e..ecd35986ddb5 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -915,7 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
>                 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
>         long ret;
>
> -       ret = dma_resv_wait_timeout_rcu(obj->resv, write,
> +       ret = dma_resv_wait_timeout(obj->resv, write,
>                                                   true,  remain);
>         if (ret == 0)
>                 return remain == 0 ? -EBUSY : -ETIMEDOUT;
> diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
> index d863e5ed954a..c59072f254f1 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
> @@ -964,7 +964,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
>                 return -ENOENT;
>         nvbo = nouveau_gem_object(gem);
>
> -       lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
> +       lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
>                                                    no_wait ? 0 : 30 * HZ);
>         if (!lret)
>                 ret = -EBUSY;
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index ca07098a6141..0e6e893eb81d 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -311,7 +311,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
>         if (!gem_obj)
>                 return -ENOENT;
>
> -       ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
> +       ret = dma_resv_wait_timeout(gem_obj->resv, true,
>                                                   true, timeout);
>         if (!ret)
>                 ret = timeout ? -ETIMEDOUT : -EBUSY;
> diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
> index 3272c33af8fe..458f92a70887 100644
> --- a/drivers/gpu/drm/radeon/radeon_gem.c
> +++ b/drivers/gpu/drm/radeon/radeon_gem.c
> @@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
>         }
>         if (domain == RADEON_GEM_DOMAIN_CPU) {
>                 /* Asking for cpu access wait for object idle */
> -               r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
> +               r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
>                 if (!r)
>                         r = -EBUSY;
>
> @@ -523,7 +523,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
>         }
>         robj = gem_to_radeon_bo(gobj);
>
> -       r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
> +       r = dma_resv_test_signaled(robj->tbo.base.resv, true);
>         if (r == 0)
>                 r = -EBUSY;
>         else
> @@ -552,7 +552,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
>         }
>         robj = gem_to_radeon_bo(gobj);
>
> -       ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
> +       ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
>         if (ret == 0)
>                 r = -EBUSY;
>         else if (ret < 0)
> diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
> index e37c9a57a7c3..adb084e6ddbe 100644
> --- a/drivers/gpu/drm/radeon/radeon_mn.c
> +++ b/drivers/gpu/drm/radeon/radeon_mn.c
> @@ -66,7 +66,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
>                 return true;
>         }
>
> -       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
> +       r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
>                                       MAX_SCHEDULE_TIMEOUT);
>         if (r <= 0)
>                 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index c41ef0caa492..32004cf37549 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -296,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
>         struct dma_resv *resv = &bo->base._resv;
>         int ret;
>
> -       if (dma_resv_test_signaled_rcu(resv, true))
> +       if (dma_resv_test_signaled(resv, true))
>                 ret = 0;
>         else
>                 ret = -EBUSY;
> @@ -308,7 +308,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
>                         dma_resv_unlock(bo->base.resv);
>                 spin_unlock(&bo->bdev->lru_lock);
>
> -               lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
> +               lret = dma_resv_wait_timeout(resv, true, interruptible,
>                                                  30 * HZ);
>
>                 if (lret < 0)
> @@ -411,7 +411,7 @@ static void ttm_bo_release(struct kref *kref)
>                         /* Last resort, if we fail to allocate memory for the
>                          * fences block for the BO to become idle
>                          */
> -                       dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
> +                       dma_resv_wait_timeout(bo->base.resv, true, false,
>                                                   30 * HZ);
>                 }
>
> @@ -422,7 +422,7 @@ static void ttm_bo_release(struct kref *kref)
>                 ttm_mem_io_free(bdev, bo->resource);
>         }
>
> -       if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
> +       if (!dma_resv_test_signaled(bo->base.resv, true) ||
>             !dma_resv_trylock(bo->base.resv)) {
>                 /* The BO is not idle, resurrect it for delayed destroy */
>                 ttm_bo_flush_all_fences(bo);
> @@ -1121,13 +1121,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
>         long timeout = 15 * HZ;
>
>         if (no_wait) {
> -               if (dma_resv_test_signaled_rcu(bo->base.resv, true))
> +               if (dma_resv_test_signaled(bo->base.resv, true))
>                         return 0;
>                 else
>                         return -EBUSY;
>         }
>
> -       timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
> +       timeout = dma_resv_wait_timeout(bo->base.resv, true,
>                                                       interruptible, timeout);
>         if (timeout < 0)
>                 return timeout;
> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
> index 2902dc6e64fa..7f3125cf5358 100644
> --- a/drivers/gpu/drm/vgem/vgem_fence.c
> +++ b/drivers/gpu/drm/vgem/vgem_fence.c
> @@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
>
>         /* Check for a conflicting fence */
>         resv = obj->resv;
> -       if (!dma_resv_test_signaled_rcu(resv,
> +       if (!dma_resv_test_signaled(resv,
>                                                   arg->flags & VGEM_FENCE_WRITE)) {
>                 ret = -EBUSY;
>                 goto err_fence;
> diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> index 669f2ee39515..190d9495dc0e 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> @@ -451,9 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
>                 return -ENOENT;
>
>         if (args->flags & VIRTGPU_WAIT_NOWAIT) {
> -               ret = dma_resv_test_signaled_rcu(obj->resv, true);
> +               ret = dma_resv_test_signaled(obj->resv, true);
>         } else {
> -               ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
> +               ret = dma_resv_wait_timeout(obj->resv, true, true,
>                                                 timeout);
>         }
>         if (ret == 0)
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> index 176b6201ef2b..8faf1df027f3 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> @@ -743,7 +743,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
>         if (flags & drm_vmw_synccpu_allow_cs) {
>                 long lret;
>
> -               lret = dma_resv_wait_timeout_rcu
> +               lret = dma_resv_wait_timeout
>                         (bo->base.resv, true, true,
>                          nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
>                 if (!lret)
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index f6b71712c029..22325dfa7744 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -268,19 +268,12 @@ void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>  void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
> -
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> -
> -int dma_resv_get_fences_rcu(struct dma_resv *obj,
> -                           struct dma_fence **pfence_excl,
> -                           unsigned *pshared_count,
> -                           struct dma_fence ***pshared);
> -
> +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
> +                       unsigned *pshared_count, struct dma_fence ***pshared);
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
> -
> -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
> -                              unsigned long timeout);
> -
> -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
> +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
> +                          unsigned long timeout);
> +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
>
>  #endif /* _LINUX_RESERVATION_H */
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list
  2021-06-02 20:22   ` Jason Ekstrand
@ 2021-06-06  8:53     ` Christian König
  2021-06-07 19:42       ` Jason Ekstrand
  0 siblings, 1 reply; 26+ messages in thread
From: Christian König @ 2021-06-06  8:53 UTC (permalink / raw)
  To: Jason Ekstrand; +Cc: Maling list - DRI developers

Am 02.06.21 um 22:22 schrieb Jason Ekstrand:
> On Wed, Jun 2, 2021 at 6:17 AM Christian König
> <ckoenig.leichtzumerken@gmail.com> wrote:
>> When the comment needs to state explicitly that this doesn't get a reference
>> to the object then the function is named rather badly.
>>
>> Rename the function and use it in even more places.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c                    | 32 +++++++++----------
>>   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c      |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  2 +-
>>   drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  2 +-
>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  2 +-
>>   drivers/gpu/drm/msm/msm_gem.c                 |  4 +--
>>   drivers/gpu/drm/nouveau/nouveau_fence.c       |  2 +-
>>   drivers/gpu/drm/qxl/qxl_debugfs.c             |  2 +-
>>   drivers/gpu/drm/radeon/radeon_sync.c          |  2 +-
>>   drivers/gpu/drm/ttm/ttm_bo.c                  |  2 +-
>>   include/linux/dma-resv.h                      | 25 +++++++--------
>>   13 files changed, 39 insertions(+), 42 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 81b032b43457..b1a1a31dc009 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -149,8 +149,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
>>
>>          dma_resv_assert_held(obj);
>>
>> -       old = dma_resv_get_list(obj);
>> -
>> +       old = dma_resv_shared(obj);
>>          if (old && old->shared_max) {
>>                  if ((old->shared_count + num_fences) <= old->shared_max)
>>                          return 0;
>> @@ -217,12 +216,13 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
>>    */
>>   void dma_resv_reset_shared_max(struct dma_resv *obj)
>>   {
>> -       /* Test shared fence slot reservation */
>> -       if (rcu_access_pointer(obj->fence)) {
>> -               struct dma_resv_list *fence = dma_resv_get_list(obj);
>> +       struct dma_resv_list *fences = dma_resv_shared(obj);
>>
>> -               fence->shared_max = fence->shared_count;
>> -       }
>> +       dma_resv_assert_held(obj);
> Does it make sense to assert we hold the lock *before* we touch it
> with something that requires that we do?  Maybe it doesn't matter?

As far as I can see that shouldn't matter.

>
>> +
>> +       /* Test shared fence slot reservation */
>> +       if (fences)
>> +               fences->shared_max = fences->shared_count;
>>   }
>>   #endif
>>
>> @@ -244,7 +244,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>>
>>          dma_resv_assert_held(obj);
>>
>> -       fobj = dma_resv_get_list(obj);
>> +       fobj = dma_resv_shared(obj);
>>          count = fobj->shared_count;
>>
>>          write_seqcount_begin(&obj->seq);
>> @@ -287,7 +287,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>>
>>          dma_resv_assert_held(obj);
>>
>> -       old = dma_resv_get_list(obj);
>> +       old = dma_resv_shared(obj);
>>          if (old)
>>                  i = old->shared_count;
>>
>> @@ -326,7 +326,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>          dma_resv_assert_held(dst);
>>
>>          rcu_read_lock();
>> -       src_list = rcu_dereference(src->fence);
>> +       src_list = dma_resv_shared(src);
>>
>>   retry:
>>          if (src_list) {
>> @@ -339,7 +339,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>                          return -ENOMEM;
>>
>>                  rcu_read_lock();
>> -               src_list = rcu_dereference(src->fence);
>> +               src_list = dma_resv_shared(src);
>>                  if (!src_list || src_list->shared_count > shared_count) {
>>                          kfree(dst_list);
>>                          goto retry;
>> @@ -357,7 +357,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>
>>                          if (!dma_fence_get_rcu(fence)) {
>>                                  dma_resv_list_free(dst_list);
>> -                               src_list = rcu_dereference(src->fence);
>> +                               src_list = dma_resv_shared(src);
>>                                  goto retry;
>>                          }
>>
>> @@ -376,7 +376,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>          new = dma_fence_get_rcu_safe(&src->fence_excl);
>>          rcu_read_unlock();
>>
>> -       src_list = dma_resv_get_list(dst);
>> +       src_list = dma_resv_shared(dst);
>>          old = dma_resv_exclusive(dst);
>>
>>          write_seqcount_begin(&dst->seq);
>> @@ -429,7 +429,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>>                  if (fence_excl && !dma_fence_get_rcu(fence_excl))
>>                          goto unlock;
>>
>> -               fobj = rcu_dereference(obj->fence);
>> +               fobj = dma_resv_shared(obj);
>>                  if (fobj)
>>                          sz += sizeof(*shared) * fobj->shared_max;
>>
>> @@ -535,7 +535,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>>          }
>>
>>          if (wait_all) {
>> -               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
>> +               struct dma_resv_list *fobj = dma_resv_shared(obj);
>>
>>                  if (fobj)
>>                          shared_count = fobj->shared_count;
>> @@ -620,7 +620,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>>          seq = read_seqcount_begin(&obj->seq);
>>
>>          if (test_all) {
>> -               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
>> +               struct dma_resv_list *fobj = dma_resv_shared(obj);
>>                  unsigned int i;
>>
>>                  if (fobj)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> index d5e6519bdea1..e90495ca49fd 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> @@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>>          if (!ef)
>>                  return -EINVAL;
>>
>> -       old = dma_resv_get_list(resv);
>> +       old = dma_resv_shared(resv);
>>          if (!old)
>>                  return 0;
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> index 6dd0ea6e9e24..3b13c8a38c4e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> @@ -49,7 +49,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
>>          unsigned int count;
>>          int r;
>>
>> -       if (!dma_resv_get_list(obj)) /* no shared fences to convert */
>> +       if (!dma_resv_shared(obj)) /* no shared fences to convert */
>>                  return 0;
>>
>>          r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
>> index c84d5b843985..c50d9f92a0cd 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
>> @@ -213,7 +213,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
>>          f = dma_resv_exclusive(resv);
>>          r = amdgpu_sync_fence(sync, f);
>>
>> -       flist = dma_resv_get_list(resv);
>> +       flist = dma_resv_shared(resv);
>>          if (!flist || r)
>>                  return r;
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> index 663aa7d2e2ea..ddb6ce7d48bc 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> @@ -1338,7 +1338,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>>           * If true, then return false as any KFD process needs all its BOs to
>>           * be resident to run successfully
>>           */
>> -       flist = dma_resv_get_list(bo->base.resv);
>> +       flist = dma_resv_shared(bo->base.resv);
>>          if (flist) {
>>                  for (i = 0; i < flist->shared_count; ++i) {
>>                          f = rcu_dereference_protected(flist->shared[i],
>> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>> index d4f54dea8ac1..4d43b8630f0e 100644
>> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>> @@ -461,7 +461,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
>>                          off, etnaviv_obj->vaddr, obj->size);
>>
>>          rcu_read_lock();
>> -       fobj = rcu_dereference(robj->fence);
>> +       fobj = dma_resv_shared(robj);
>>          if (fobj) {
>>                  unsigned int i, shared_count = fobj->shared_count;
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> index 02312a0c3a36..3f94becac541 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> @@ -116,7 +116,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>>          args->busy = busy_check_writer(dma_resv_exclusive(obj->base.resv));
>>
>>          /* Translate shared fences to READ set of engines */
>> -       list = rcu_dereference(obj->base.resv->fence);
>> +       list = dma_resv_shared(obj->base.resv);
>>          if (list) {
>>                  unsigned int shared_count = list->shared_count, i;
>>
>> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
>> index 54c1b53426d6..43af91df552e 100644
>> --- a/drivers/gpu/drm/msm/msm_gem.c
>> +++ b/drivers/gpu/drm/msm/msm_gem.c
>> @@ -817,7 +817,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
>>          struct dma_fence *fence;
>>          int i, ret;
>>
>> -       fobj = dma_resv_get_list(obj->resv);
>> +       fobj = dma_resv_shared(obj->resv);
>>          if (!fobj || (fobj->shared_count == 0)) {
>>                  fence = dma_resv_exclusive(obj->resv);
>>                  /* don't need to wait on our own fences, since ring is fifo */
>> @@ -1025,7 +1025,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
>>          }
>>
>>          rcu_read_lock();
>> -       fobj = rcu_dereference(robj->fence);
>> +       fobj = dma_resv_shared(robj);
>>          if (fobj) {
>>                  unsigned int i, shared_count = fobj->shared_count;
>>
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
>> index a6cb35181aee..5ce441c655ea 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
>> +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
>> @@ -355,7 +355,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
>>                          return ret;
>>          }
>>
>> -       fobj = dma_resv_get_list(resv);
>> +       fobj = dma_resv_shared(resv);
>>          fence = dma_resv_exclusive(resv);
>>
>>          if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
>> diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
>> index 183d15e2cf58..0acc70a6d3dd 100644
>> --- a/drivers/gpu/drm/qxl/qxl_debugfs.c
>> +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
>> @@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
>>                  int rel;
>>
>>                  rcu_read_lock();
>> -               fobj = rcu_dereference(bo->tbo.base.resv->fence);
>> +               fobj = dma_resv_shared(bo->tbo.base.resv);
>>                  rel = fobj ? fobj->shared_count : 0;
>>                  rcu_read_unlock();
>>
>> diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
>> index e476f90ef1c1..a9cdb88da173 100644
>> --- a/drivers/gpu/drm/radeon/radeon_sync.c
>> +++ b/drivers/gpu/drm/radeon/radeon_sync.c
>> @@ -105,7 +105,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
>>          else if (f)
>>                  r = dma_fence_wait(f, true);
>>
>> -       flist = dma_resv_get_list(resv);
>> +       flist = dma_resv_shared(resv);
>>          if (shared || !flist || r)
>>                  return r;
>>
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
>> index 92361556bf0b..c41ef0caa492 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
>> @@ -261,7 +261,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
>>          int i;
>>
>>          rcu_read_lock();
>> -       fobj = rcu_dereference(resv->fence);
>> +       fobj = dma_resv_shared(resv);
>>          fence = dma_resv_exclusive(resv);
>>          if (fence && !fence->ops->signaled)
>>                  dma_fence_enable_sw_signaling(fence);
>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>> index 7549ec5eb35c..98ac66fecb71 100644
>> --- a/include/linux/dma-resv.h
>> +++ b/include/linux/dma-resv.h
>> @@ -78,20 +78,6 @@ struct dma_resv {
>>   #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>>   #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>>
>> -/**
>> - * dma_resv_get_list - get the reservation object's
>> - * shared fence list, with update-side lock held
>> - * @obj: the reservation object
>> - *
>> - * Returns the shared fence list.  Does NOT take references to
>> - * the fence.  The obj->lock must be held.
>> - */
>> -static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
>> -{
>> -       return rcu_dereference_protected(obj->fence,
>> -                                        dma_resv_held(obj));
>> -}
>> -
>>   #ifdef CONFIG_DEBUG_MUTEXES
>>   void dma_resv_reset_shared_max(struct dma_resv *obj);
>>   #else
>> @@ -267,6 +253,17 @@ dma_resv_get_excl_rcu(struct dma_resv *obj)
>>          return fence;
>>   }
>>
>> +/**
>> + * dma_resv_shared - get the reservation object's shared fence list
>> + * @obj: the reservation object
>> + *
>> + * Returns the shared fence list. The obj->lock or rcu read side must be held.
>> + */
>> +static inline struct dma_resv_list *dma_resv_shared(struct dma_resv *obj)
> Maybe dma_resv_shared_list() just to be a little more clear what's
> being returned?

Ok, renamed this one and dma_resv_exclusive into dma_resv_excl_fence as 
well.

Christian.

>
> --Jason
>
>> +{
>> +       return rcu_dereference_check(obj->fence, dma_resv_held(obj));
>> +}
>> +
>>   void dma_resv_init(struct dma_resv *obj);
>>   void dma_resv_fini(struct dma_resv *obj);
>>   int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>> --
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 7/7] dma-buf: drop the _rcu postfix on function names
  2021-06-02 20:34   ` Jason Ekstrand
@ 2021-06-06  9:08     ` Christian König
  0 siblings, 0 replies; 26+ messages in thread
From: Christian König @ 2021-06-06  9:08 UTC (permalink / raw)
  To: Jason Ekstrand; +Cc: Maling list - DRI developers

Am 02.06.21 um 22:34 schrieb Jason Ekstrand:
> On Wed, Jun 2, 2021 at 6:17 AM Christian König
> <ckoenig.leichtzumerken@gmail.com> wrote:
>> The functions can be called both in _rcu context as well
>> as while holding the lock.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-buf.c                     |  3 +--
>>   drivers/dma-buf/dma-resv.c                    | 24 +++++++++----------
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_display.c   |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c       |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c       |  4 ++--
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c        |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  8 +++----
>>   .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  2 +-
>>   drivers/gpu/drm/drm_gem.c                     |  4 ++--
>>   drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  4 ++--
>>   drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  2 +-
>>   drivers/gpu/drm/i915/dma_resv_utils.c         |  2 +-
>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  2 +-
>>   .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  2 +-
>>   drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |  2 +-
>>   drivers/gpu/drm/i915/gem/i915_gem_wait.c      |  4 ++--
>>   drivers/gpu/drm/i915/i915_request.c           |  2 +-
>>   drivers/gpu/drm/i915/i915_sw_fence.c          |  2 +-
>>   drivers/gpu/drm/msm/msm_gem.c                 |  2 +-
>>   drivers/gpu/drm/nouveau/nouveau_gem.c         |  2 +-
>>   drivers/gpu/drm/panfrost/panfrost_drv.c       |  2 +-
>>   drivers/gpu/drm/radeon/radeon_gem.c           |  6 ++---
>>   drivers/gpu/drm/radeon/radeon_mn.c            |  2 +-
>>   drivers/gpu/drm/ttm/ttm_bo.c                  | 12 +++++-----
>>   drivers/gpu/drm/vgem/vgem_fence.c             |  2 +-
>>   drivers/gpu/drm/virtio/virtgpu_ioctl.c        |  4 ++--
>>   drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            |  2 +-
>>   include/linux/dma-resv.h                      | 17 ++++---------
>>   31 files changed, 60 insertions(+), 70 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
>> index 4d0ddc712f1e..f92931d8db51 100644
>> --- a/drivers/dma-buf/dma-buf.c
>> +++ b/drivers/dma-buf/dma-buf.c
>> @@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
>>          long ret;
>>
>>          /* Wait on any implicit rendering fences */
>> -       ret = dma_resv_wait_timeout_rcu(resv, write, true,
>> -                                                 MAX_SCHEDULE_TIMEOUT);
>> +       ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
>>          if (ret < 0)
>>                  return ret;
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index b1a1a31dc009..74fe64dc1ce3 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -393,7 +393,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>   EXPORT_SYMBOL(dma_resv_copy_fences);
>>
>>   /**
>> - * dma_resv_get_fences_rcu - Get an object's shared and exclusive
>> + * dma_resv_get_fences - Get an object's shared and exclusive
>>    * fences without update side lock held
>>    * @obj: the reservation object
>>    * @pfence_excl: the returned exclusive fence (or NULL)
>> @@ -405,10 +405,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
>>    * exclusive fence is not specified the fence is put into the array of the
>>    * shared fences as well. Returns either zero or -ENOMEM.
>>    */
>> -int dma_resv_get_fences_rcu(struct dma_resv *obj,
>> -                           struct dma_fence **pfence_excl,
>> -                           unsigned int *pshared_count,
>> -                           struct dma_fence ***pshared)
>> +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
>> +                       unsigned int *pshared_count,
>> +                       struct dma_fence ***pshared)
>>   {
>>          struct dma_fence **shared = NULL;
>>          struct dma_fence *fence_excl;
>> @@ -491,10 +490,10 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>>          *pshared = shared;
>>          return ret;
>>   }
>> -EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
>> +EXPORT_SYMBOL_GPL(dma_resv_get_fences);
>>
>>   /**
>> - * dma_resv_wait_timeout_rcu - Wait on reservation's objects
>> + * dma_resv_wait_timeout - Wait on reservation's objects
>>    * shared and/or exclusive fences.
>>    * @obj: the reservation object
>>    * @wait_all: if true, wait on all fences, else wait on just exclusive fence
>> @@ -505,9 +504,8 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
>>    * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
>>    * greater than zer on success.
>>    */
>> -long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>> -                              bool wait_all, bool intr,
>> -                              unsigned long timeout)
>> +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>> +                          unsigned long timeout)
>>   {
>>          long ret = timeout ? timeout : 1;
>>          unsigned int seq, shared_count;
>> @@ -579,7 +577,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>>          rcu_read_unlock();
>>          goto retry;
>>   }
>> -EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
>> +EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
>>
>>
>>   static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
>> @@ -608,7 +606,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
>>    * RETURNS
>>    * true if all fences signaled, else false
>>    */
>> -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>> +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
>>   {
>>          unsigned int seq, shared_count;
>>          int ret;
>> @@ -657,7 +655,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>>          rcu_read_unlock();
>>          return ret;
>>   }
>> -EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
>> +EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
>>
>>   #if IS_ENABLED(CONFIG_LOCKDEP)
>>   static int __init dma_resv_lockdep(void)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
>> index 49f73b5b89b0..004d01d2e1d7 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
>> @@ -203,7 +203,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
>>                  goto unpin;
>>          }
>>
>> -       r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
>> +       r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
>>                                                &work->shared_count,
>>                                                &work->shared);
>>          if (unlikely(r != 0)) {
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> index 3b13c8a38c4e..615be1697d49 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> @@ -52,7 +52,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
>>          if (!dma_resv_shared(obj)) /* no shared fences to convert */
>>                  return 0;
>>
>> -       r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
>> +       r = dma_resv_get_fences(obj, NULL, &count, &fences);
>>          if (r)
>>                  return r;
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> index cd5146fa6fb6..dafc96032d7d 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> @@ -526,7 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
>>                  return -ENOENT;
>>          }
>>          robj = gem_to_amdgpu_bo(gobj);
>> -       ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
>> +       ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true,
>>                                                    timeout);
>>
>>          /* ret == 0 means not signaled,
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> index b4971e90b98c..65a3422ec078 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>>          unsigned count;
>>          int r;
>>
>> -       r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
>> +       r = dma_resv_get_fences(resv, NULL, &count, &fences);
>>          if (r)
>>                  goto fallback;
>>
>> @@ -156,7 +156,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>>          /* Not enough memory for the delayed delete, as last resort
>>           * block for all the fences to complete.
>>           */
>> -       dma_resv_wait_timeout_rcu(resv, true, false,
>> +       dma_resv_wait_timeout(resv, true, false,
>>                                              MAX_SCHEDULE_TIMEOUT);
>>          amdgpu_pasid_free(pasid);
>>   }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
>> index 2741c28ff1b5..86de11a86a3e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
>> @@ -75,7 +75,7 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
>>
>>          mmu_interval_set_seq(mni, cur_seq);
>>
>> -       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
>> +       r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
>>                                        MAX_SCHEDULE_TIMEOUT);
>>          mutex_unlock(&adev->notifier_lock);
>>          if (r <= 0)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>> index 03c6b63d1d54..821dec6d2f73 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>> @@ -756,7 +756,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
>>                  return 0;
>>          }
>>
>> -       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
>> +       r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
>>                                                  MAX_SCHEDULE_TIMEOUT);
>>          if (r < 0)
>>                  return r;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> index 82f0542c7792..3773f5ff6f0e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> @@ -1126,7 +1126,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>>          ib->length_dw = 16;
>>
>>          if (direct) {
>> -               r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
>> +               r = dma_resv_wait_timeout(bo->tbo.base.resv,
>>                                                          true, false,
>>                                                          msecs_to_jiffies(10));
> Some kernel CI thing (not sure who runs it) is likely going to
> complain at you about messing up indentation.  I don't know how much
> you care.  I went ahead and fixed it all in my version of this change
> but it's annoying and half of them were indented wrong before this
> change so I'm a bit Meh.

Yeah, we messed those up in the reservation_object -> dma_resv rename 
already.

I've just gone over them once more and fixed the indentation, half 
automated.

> Either way, assuming you've done a grep for the _rcu versions to
> ensure you haven't missed any and assuming it all compiles,

That was a completely automated rename anyway. It could be that I didn't 
catch all the subdirs where the dma_resv object is used, but that is rather 
unlikely.

> Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>

Thanks going to push that now,
Christian.

>
>>                  if (r == 0)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index bcfd4a8d0288..da716aa38085 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -2022,13 +2022,13 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>          unsigned i, shared_count;
>>          int r;
>>
>> -       r = dma_resv_get_fences_rcu(resv, &excl,
>> +       r = dma_resv_get_fences(resv, &excl,
>>                                                &shared_count, &shared);
>>          if (r) {
>>                  /* Not enough memory to grab the fence list, as last resort
>>                   * block for all the fences to complete.
>>                   */
>> -               dma_resv_wait_timeout_rcu(resv, true, false,
>> +               dma_resv_wait_timeout(resv, true, false,
>>                                                      MAX_SCHEDULE_TIMEOUT);
>>                  return;
>>          }
>> @@ -2640,7 +2640,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
>>                  return true;
>>
>>          /* Don't evict VM page tables while they are busy */
>> -       if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
>> +       if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
>>                  return false;
>>
>>          /* Try to block ongoing updates */
>> @@ -2820,7 +2820,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
>>    */
>>   long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
>>   {
>> -       timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
>> +       timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv,
>>                                              true, true, timeout);
>>          if (timeout <= 0)
>>                  return timeout;
>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> index 3267eb2e35dd..1633afd3c03b 100644
>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> @@ -8400,7 +8400,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
>>                   * deadlock during GPU reset when this fence will not signal
>>                   * but we hold reservation lock for the BO.
>>                   */
>> -               r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
>> +               r = dma_resv_wait_timeout(abo->tbo.base.resv, true,
>>                                                          false,
>>                                                          msecs_to_jiffies(5000));
>>                  if (unlikely(r <= 0))
>> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
>> index 263b4fb03303..11770da97dc0 100644
>> --- a/drivers/gpu/drm/drm_gem.c
>> +++ b/drivers/gpu/drm/drm_gem.c
>> @@ -770,7 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
>>                  return -EINVAL;
>>          }
>>
>> -       ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
>> +       ret = dma_resv_wait_timeout(obj->resv, wait_all,
>>                                                    true, timeout);
>>          if (ret == 0)
>>                  ret = -ETIME;
>> @@ -1380,7 +1380,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>>                  return drm_gem_fence_array_add(fence_array, fence);
>>          }
>>
>> -       ret = dma_resv_get_fences_rcu(obj->resv, NULL,
>> +       ret = dma_resv_get_fences(obj->resv, NULL,
>>                                                  &fence_count, &fences);
>>          if (ret || !fence_count)
>>                  return ret;
>> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>> index 4d43b8630f0e..e3c209628688 100644
>> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>> @@ -390,13 +390,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
>>          }
>>
>>          if (op & ETNA_PREP_NOSYNC) {
>> -               if (!dma_resv_test_signaled_rcu(obj->resv,
>> +               if (!dma_resv_test_signaled(obj->resv,
>>                                                            write))
>>                          return -EBUSY;
>>          } else {
>>                  unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
>>
>> -               ret = dma_resv_wait_timeout_rcu(obj->resv,
>> +               ret = dma_resv_wait_timeout(obj->resv,
>>                                                            write, true, remain);
>>                  if (ret <= 0)
>>                          return ret == 0 ? -ETIMEDOUT : ret;
>> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
>> index c942d2a8c252..9cc36bbc2502 100644
>> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
>> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
>> @@ -189,7 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>>                          continue;
>>
>>                  if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
>> -                       ret = dma_resv_get_fences_rcu(robj, &bo->excl,
>> +                       ret = dma_resv_get_fences(robj, &bo->excl,
>>                                                                  &bo->nr_shared,
>>                                                                  &bo->shared);
>>                          if (ret)
>> diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
>> index 9e508e7d4629..7df91b7e4ca8 100644
>> --- a/drivers/gpu/drm/i915/dma_resv_utils.c
>> +++ b/drivers/gpu/drm/i915/dma_resv_utils.c
>> @@ -10,7 +10,7 @@
>>   void dma_resv_prune(struct dma_resv *resv)
>>   {
>>          if (dma_resv_trylock(resv)) {
>> -               if (dma_resv_test_signaled_rcu(resv, true))
>> +               if (dma_resv_test_signaled(resv, true))
>>                          dma_resv_add_excl_fence(resv, NULL);
>>                  dma_resv_unlock(resv);
>>          }
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> index 3f94becac541..0083a850f839 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> @@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>>           * Alternatively, we can trade that extra information on read/write
>>           * activity with
>>           *      args->busy =
>> -        *              !dma_resv_test_signaled_rcu(obj->resv, true);
>> +        *              !dma_resv_test_signaled(obj->resv, true);
>>           * to report the overall busyness. This is what the wait-ioctl does.
>>           *
>>           */
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>> index 297143511f99..66789111a24b 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>> @@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
>>          if (DBG_FORCE_RELOC)
>>                  return false;
>>
>> -       return !dma_resv_test_signaled_rcu(vma->resv, true);
>> +       return !dma_resv_test_signaled(vma->resv, true);
>>   }
>>
>>   static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
>> index a657b99ec760..e78738aec7b2 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
>> @@ -85,7 +85,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
>>                  return true;
>>
>>          /* we will unbind on next submission, still have userptr pins */
>> -       r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
>> +       r = dma_resv_wait_timeout(obj->base.resv, true, false,
>>                                        MAX_SCHEDULE_TIMEOUT);
>>          if (r <= 0)
>>                  drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
>> index c13aeddf5aa7..e7aebb8fb468 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
>> @@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
>>                  unsigned int count, i;
>>                  int ret;
>>
>> -               ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
>> +               ret = dma_resv_get_fences(resv, &excl, &count, &shared);
>>                  if (ret)
>>                          return ret;
>>
>> @@ -158,7 +158,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>>                  unsigned int count, i;
>>                  int ret;
>>
>> -               ret = dma_resv_get_fences_rcu(obj->base.resv,
>> +               ret = dma_resv_get_fences(obj->base.resv,
>>                                                &excl, &count, &shared);
>>                  if (ret)
>>                          return ret;
>> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
>> index c85494f411f4..4a70a1881d79 100644
>> --- a/drivers/gpu/drm/i915/i915_request.c
>> +++ b/drivers/gpu/drm/i915/i915_request.c
>> @@ -1594,7 +1594,7 @@ i915_request_await_object(struct i915_request *to,
>>                  struct dma_fence **shared;
>>                  unsigned int count, i;
>>
>> -               ret = dma_resv_get_fences_rcu(obj->base.resv,
>> +               ret = dma_resv_get_fences(obj->base.resv,
>>                                                          &excl, &count, &shared);
>>                  if (ret)
>>                          return ret;
>> diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
>> index 7aaf74552d06..c589a681da77 100644
>> --- a/drivers/gpu/drm/i915/i915_sw_fence.c
>> +++ b/drivers/gpu/drm/i915/i915_sw_fence.c
>> @@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
>>                  struct dma_fence **shared;
>>                  unsigned int count, i;
>>
>> -               ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
>> +               ret = dma_resv_get_fences(resv, &excl, &count, &shared);
>>                  if (ret)
>>                          return ret;
>>
>> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
>> index 43af91df552e..ecd35986ddb5 100644
>> --- a/drivers/gpu/drm/msm/msm_gem.c
>> +++ b/drivers/gpu/drm/msm/msm_gem.c
>> @@ -915,7 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
>>                  op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
>>          long ret;
>>
>> -       ret = dma_resv_wait_timeout_rcu(obj->resv, write,
>> +       ret = dma_resv_wait_timeout(obj->resv, write,
>>                                                    true,  remain);
>>          if (ret == 0)
>>                  return remain == 0 ? -EBUSY : -ETIMEDOUT;
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
>> index d863e5ed954a..c59072f254f1 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
>> +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
>> @@ -964,7 +964,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
>>                  return -ENOENT;
>>          nvbo = nouveau_gem_object(gem);
>>
>> -       lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
>> +       lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
>>                                                     no_wait ? 0 : 30 * HZ);
>>          if (!lret)
>>                  ret = -EBUSY;
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
>> index ca07098a6141..0e6e893eb81d 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
>> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
>> @@ -311,7 +311,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
>>          if (!gem_obj)
>>                  return -ENOENT;
>>
>> -       ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
>> +       ret = dma_resv_wait_timeout(gem_obj->resv, true,
>>                                                    true, timeout);
>>          if (!ret)
>>                  ret = timeout ? -ETIMEDOUT : -EBUSY;
>> diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
>> index 3272c33af8fe..458f92a70887 100644
>> --- a/drivers/gpu/drm/radeon/radeon_gem.c
>> +++ b/drivers/gpu/drm/radeon/radeon_gem.c
>> @@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
>>          }
>>          if (domain == RADEON_GEM_DOMAIN_CPU) {
>>                  /* Asking for cpu access wait for object idle */
>> -               r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
>> +               r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
>>                  if (!r)
>>                          r = -EBUSY;
>>
>> @@ -523,7 +523,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
>>          }
>>          robj = gem_to_radeon_bo(gobj);
>>
>> -       r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
>> +       r = dma_resv_test_signaled(robj->tbo.base.resv, true);
>>          if (r == 0)
>>                  r = -EBUSY;
>>          else
>> @@ -552,7 +552,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
>>          }
>>          robj = gem_to_radeon_bo(gobj);
>>
>> -       ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
>> +       ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
>>          if (ret == 0)
>>                  r = -EBUSY;
>>          else if (ret < 0)
>> diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
>> index e37c9a57a7c3..adb084e6ddbe 100644
>> --- a/drivers/gpu/drm/radeon/radeon_mn.c
>> +++ b/drivers/gpu/drm/radeon/radeon_mn.c
>> @@ -66,7 +66,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
>>                  return true;
>>          }
>>
>> -       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
>> +       r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
>>                                        MAX_SCHEDULE_TIMEOUT);
>>          if (r <= 0)
>>                  DRM_ERROR("(%ld) failed to wait for user bo\n", r);
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
>> index c41ef0caa492..32004cf37549 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
>> @@ -296,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
>>          struct dma_resv *resv = &bo->base._resv;
>>          int ret;
>>
>> -       if (dma_resv_test_signaled_rcu(resv, true))
>> +       if (dma_resv_test_signaled(resv, true))
>>                  ret = 0;
>>          else
>>                  ret = -EBUSY;
>> @@ -308,7 +308,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
>>                          dma_resv_unlock(bo->base.resv);
>>                  spin_unlock(&bo->bdev->lru_lock);
>>
>> -               lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
>> +               lret = dma_resv_wait_timeout(resv, true, interruptible,
>>                                                   30 * HZ);
>>
>>                  if (lret < 0)
>> @@ -411,7 +411,7 @@ static void ttm_bo_release(struct kref *kref)
>>                          /* Last resort, if we fail to allocate memory for the
>>                           * fences block for the BO to become idle
>>                           */
>> -                       dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
>> +                       dma_resv_wait_timeout(bo->base.resv, true, false,
>>                                                    30 * HZ);
>>                  }
>>
>> @@ -422,7 +422,7 @@ static void ttm_bo_release(struct kref *kref)
>>                  ttm_mem_io_free(bdev, bo->resource);
>>          }
>>
>> -       if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
>> +       if (!dma_resv_test_signaled(bo->base.resv, true) ||
>>              !dma_resv_trylock(bo->base.resv)) {
>>                  /* The BO is not idle, resurrect it for delayed destroy */
>>                  ttm_bo_flush_all_fences(bo);
>> @@ -1121,13 +1121,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
>>          long timeout = 15 * HZ;
>>
>>          if (no_wait) {
>> -               if (dma_resv_test_signaled_rcu(bo->base.resv, true))
>> +               if (dma_resv_test_signaled(bo->base.resv, true))
>>                          return 0;
>>                  else
>>                          return -EBUSY;
>>          }
>>
>> -       timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
>> +       timeout = dma_resv_wait_timeout(bo->base.resv, true,
>>                                                        interruptible, timeout);
>>          if (timeout < 0)
>>                  return timeout;
>> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
>> index 2902dc6e64fa..7f3125cf5358 100644
>> --- a/drivers/gpu/drm/vgem/vgem_fence.c
>> +++ b/drivers/gpu/drm/vgem/vgem_fence.c
>> @@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
>>
>>          /* Check for a conflicting fence */
>>          resv = obj->resv;
>> -       if (!dma_resv_test_signaled_rcu(resv,
>> +       if (!dma_resv_test_signaled(resv,
>>                                                    arg->flags & VGEM_FENCE_WRITE)) {
>>                  ret = -EBUSY;
>>                  goto err_fence;
>> diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
>> index 669f2ee39515..190d9495dc0e 100644
>> --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
>> +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
>> @@ -451,9 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
>>                  return -ENOENT;
>>
>>          if (args->flags & VIRTGPU_WAIT_NOWAIT) {
>> -               ret = dma_resv_test_signaled_rcu(obj->resv, true);
>> +               ret = dma_resv_test_signaled(obj->resv, true);
>>          } else {
>> -               ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
>> +               ret = dma_resv_wait_timeout(obj->resv, true, true,
>>                                                  timeout);
>>          }
>>          if (ret == 0)
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
>> index 176b6201ef2b..8faf1df027f3 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
>> @@ -743,7 +743,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
>>          if (flags & drm_vmw_synccpu_allow_cs) {
>>                  long lret;
>>
>> -               lret = dma_resv_wait_timeout_rcu
>> +               lret = dma_resv_wait_timeout
>>                          (bo->base.resv, true, true,
>>                           nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
>>                  if (!lret)
>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>> index f6b71712c029..22325dfa7744 100644
>> --- a/include/linux/dma-resv.h
>> +++ b/include/linux/dma-resv.h
>> @@ -268,19 +268,12 @@ void dma_resv_init(struct dma_resv *obj);
>>   void dma_resv_fini(struct dma_resv *obj);
>>   int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>>   void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
>> -
>>   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>> -
>> -int dma_resv_get_fences_rcu(struct dma_resv *obj,
>> -                           struct dma_fence **pfence_excl,
>> -                           unsigned *pshared_count,
>> -                           struct dma_fence ***pshared);
>> -
>> +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
>> +                       unsigned *pshared_count, struct dma_fence ***pshared);
>>   int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
>> -
>> -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
>> -                              unsigned long timeout);
>> -
>> -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
>> +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>> +                          unsigned long timeout);
>> +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
>>
>>   #endif /* _LINUX_RESERVATION_H */
>> --
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list
  2021-06-06  8:53     ` Christian König
@ 2021-06-07 19:42       ` Jason Ekstrand
  0 siblings, 0 replies; 26+ messages in thread
From: Jason Ekstrand @ 2021-06-07 19:42 UTC (permalink / raw)
  To: Christian König; +Cc: Maling list - DRI developers

On Sun, Jun 6, 2021 at 3:53 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Am 02.06.21 um 22:22 schrieb Jason Ekstrand:
> > On Wed, Jun 2, 2021 at 6:17 AM Christian König
> > <ckoenig.leichtzumerken@gmail.com> wrote:
> >> When the comment needs to state explicitly that this doesn't get a reference
> >> to the object then the function is named rather badly.
> >>
> >> Rename the function and use it in even more places.
> >>
> >> Signed-off-by: Christian König <christian.koenig@amd.com>
> >> ---
> >>   drivers/dma-buf/dma-resv.c                    | 32 +++++++++----------
> >>   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  2 +-
> >>   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
> >>   drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c      |  2 +-
> >>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  2 +-
> >>   drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  2 +-
> >>   drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  2 +-
> >>   drivers/gpu/drm/msm/msm_gem.c                 |  4 +--
> >>   drivers/gpu/drm/nouveau/nouveau_fence.c       |  2 +-
> >>   drivers/gpu/drm/qxl/qxl_debugfs.c             |  2 +-
> >>   drivers/gpu/drm/radeon/radeon_sync.c          |  2 +-
> >>   drivers/gpu/drm/ttm/ttm_bo.c                  |  2 +-
> >>   include/linux/dma-resv.h                      | 25 +++++++--------
> >>   13 files changed, 39 insertions(+), 42 deletions(-)
> >>
> >> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> >> index 81b032b43457..b1a1a31dc009 100644
> >> --- a/drivers/dma-buf/dma-resv.c
> >> +++ b/drivers/dma-buf/dma-resv.c
> >> @@ -149,8 +149,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
> >>
> >>          dma_resv_assert_held(obj);
> >>
> >> -       old = dma_resv_get_list(obj);
> >> -
> >> +       old = dma_resv_shared(obj);
> >>          if (old && old->shared_max) {
> >>                  if ((old->shared_count + num_fences) <= old->shared_max)
> >>                          return 0;
> >> @@ -217,12 +216,13 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
> >>    */
> >>   void dma_resv_reset_shared_max(struct dma_resv *obj)
> >>   {
> >> -       /* Test shared fence slot reservation */
> >> -       if (rcu_access_pointer(obj->fence)) {
> >> -               struct dma_resv_list *fence = dma_resv_get_list(obj);
> >> +       struct dma_resv_list *fences = dma_resv_shared(obj);
> >>
> >> -               fence->shared_max = fence->shared_count;
> >> -       }
> >> +       dma_resv_assert_held(obj);
> > Does it make sense to assert we hold the lock *before* we touch it
> > with something that requires that we do?  Maybe it doesn't matter?
>
> As far as I can see that shouldn't matter.
>
> >
> >> +
> >> +       /* Test shared fence slot reservation */
> >> +       if (fences)
> >> +               fences->shared_max = fences->shared_count;
> >>   }
> >>   #endif
> >>
> >> @@ -244,7 +244,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
> >>
> >>          dma_resv_assert_held(obj);
> >>
> >> -       fobj = dma_resv_get_list(obj);
> >> +       fobj = dma_resv_shared(obj);
> >>          count = fobj->shared_count;
> >>
> >>          write_seqcount_begin(&obj->seq);
> >> @@ -287,7 +287,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
> >>
> >>          dma_resv_assert_held(obj);
> >>
> >> -       old = dma_resv_get_list(obj);
> >> +       old = dma_resv_shared(obj);
> >>          if (old)
> >>                  i = old->shared_count;
> >>
> >> @@ -326,7 +326,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> >>          dma_resv_assert_held(dst);
> >>
> >>          rcu_read_lock();
> >> -       src_list = rcu_dereference(src->fence);
> >> +       src_list = dma_resv_shared(src);
> >>
> >>   retry:
> >>          if (src_list) {
> >> @@ -339,7 +339,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> >>                          return -ENOMEM;
> >>
> >>                  rcu_read_lock();
> >> -               src_list = rcu_dereference(src->fence);
> >> +               src_list = dma_resv_shared(src);
> >>                  if (!src_list || src_list->shared_count > shared_count) {
> >>                          kfree(dst_list);
> >>                          goto retry;
> >> @@ -357,7 +357,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> >>
> >>                          if (!dma_fence_get_rcu(fence)) {
> >>                                  dma_resv_list_free(dst_list);
> >> -                               src_list = rcu_dereference(src->fence);
> >> +                               src_list = dma_resv_shared(src);
> >>                                  goto retry;
> >>                          }
> >>
> >> @@ -376,7 +376,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> >>          new = dma_fence_get_rcu_safe(&src->fence_excl);
> >>          rcu_read_unlock();
> >>
> >> -       src_list = dma_resv_get_list(dst);
> >> +       src_list = dma_resv_shared(dst);
> >>          old = dma_resv_exclusive(dst);
> >>
> >>          write_seqcount_begin(&dst->seq);
> >> @@ -429,7 +429,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
> >>                  if (fence_excl && !dma_fence_get_rcu(fence_excl))
> >>                          goto unlock;
> >>
> >> -               fobj = rcu_dereference(obj->fence);
> >> +               fobj = dma_resv_shared(obj);
> >>                  if (fobj)
> >>                          sz += sizeof(*shared) * fobj->shared_max;
> >>
> >> @@ -535,7 +535,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
> >>          }
> >>
> >>          if (wait_all) {
> >> -               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> >> +               struct dma_resv_list *fobj = dma_resv_shared(obj);
> >>
> >>                  if (fobj)
> >>                          shared_count = fobj->shared_count;
> >> @@ -620,7 +620,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
> >>          seq = read_seqcount_begin(&obj->seq);
> >>
> >>          if (test_all) {
> >> -               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> >> +               struct dma_resv_list *fobj = dma_resv_shared(obj);
> >>                  unsigned int i;
> >>
> >>                  if (fobj)
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> >> index d5e6519bdea1..e90495ca49fd 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> >> @@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
> >>          if (!ef)
> >>                  return -EINVAL;
> >>
> >> -       old = dma_resv_get_list(resv);
> >> +       old = dma_resv_shared(resv);
> >>          if (!old)
> >>                  return 0;
> >>
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> >> index 6dd0ea6e9e24..3b13c8a38c4e 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> >> @@ -49,7 +49,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
> >>          unsigned int count;
> >>          int r;
> >>
> >> -       if (!dma_resv_get_list(obj)) /* no shared fences to convert */
> >> +       if (!dma_resv_shared(obj)) /* no shared fences to convert */
> >>                  return 0;
> >>
> >>          r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> >> index c84d5b843985..c50d9f92a0cd 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> >> @@ -213,7 +213,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
> >>          f = dma_resv_exclusive(resv);
> >>          r = amdgpu_sync_fence(sync, f);
> >>
> >> -       flist = dma_resv_get_list(resv);
> >> +       flist = dma_resv_shared(resv);
> >>          if (!flist || r)
> >>                  return r;
> >>
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> >> index 663aa7d2e2ea..ddb6ce7d48bc 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> >> @@ -1338,7 +1338,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
> >>           * If true, then return false as any KFD process needs all its BOs to
> >>           * be resident to run successfully
> >>           */
> >> -       flist = dma_resv_get_list(bo->base.resv);
> >> +       flist = dma_resv_shared(bo->base.resv);
> >>          if (flist) {
> >>                  for (i = 0; i < flist->shared_count; ++i) {
> >>                          f = rcu_dereference_protected(flist->shared[i],
> >> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> >> index d4f54dea8ac1..4d43b8630f0e 100644
> >> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> >> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> >> @@ -461,7 +461,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
> >>                          off, etnaviv_obj->vaddr, obj->size);
> >>
> >>          rcu_read_lock();
> >> -       fobj = rcu_dereference(robj->fence);
> >> +       fobj = dma_resv_shared(robj);
> >>          if (fobj) {
> >>                  unsigned int i, shared_count = fobj->shared_count;
> >>
> >> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> >> index 02312a0c3a36..3f94becac541 100644
> >> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> >> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> >> @@ -116,7 +116,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
> >>          args->busy = busy_check_writer(dma_resv_exclusive(obj->base.resv));
> >>
> >>          /* Translate shared fences to READ set of engines */
> >> -       list = rcu_dereference(obj->base.resv->fence);
> >> +       list = dma_resv_shared(obj->base.resv);
> >>          if (list) {
> >>                  unsigned int shared_count = list->shared_count, i;
> >>
> >> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> >> index 54c1b53426d6..43af91df552e 100644
> >> --- a/drivers/gpu/drm/msm/msm_gem.c
> >> +++ b/drivers/gpu/drm/msm/msm_gem.c
> >> @@ -817,7 +817,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
> >>          struct dma_fence *fence;
> >>          int i, ret;
> >>
> >> -       fobj = dma_resv_get_list(obj->resv);
> >> +       fobj = dma_resv_shared(obj->resv);
> >>          if (!fobj || (fobj->shared_count == 0)) {
> >>                  fence = dma_resv_exclusive(obj->resv);
> >>                  /* don't need to wait on our own fences, since ring is fifo */
> >> @@ -1025,7 +1025,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
> >>          }
> >>
> >>          rcu_read_lock();
> >> -       fobj = rcu_dereference(robj->fence);
> >> +       fobj = dma_resv_shared(robj);
> >>          if (fobj) {
> >>                  unsigned int i, shared_count = fobj->shared_count;
> >>
> >> diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
> >> index a6cb35181aee..5ce441c655ea 100644
> >> --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
> >> +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
> >> @@ -355,7 +355,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
> >>                          return ret;
> >>          }
> >>
> >> -       fobj = dma_resv_get_list(resv);
> >> +       fobj = dma_resv_shared(resv);
> >>          fence = dma_resv_exclusive(resv);
> >>
> >>          if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
> >> diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
> >> index 183d15e2cf58..0acc70a6d3dd 100644
> >> --- a/drivers/gpu/drm/qxl/qxl_debugfs.c
> >> +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
> >> @@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
> >>                  int rel;
> >>
> >>                  rcu_read_lock();
> >> -               fobj = rcu_dereference(bo->tbo.base.resv->fence);
> >> +               fobj = dma_resv_shared(bo->tbo.base.resv);
> >>                  rel = fobj ? fobj->shared_count : 0;
> >>                  rcu_read_unlock();
> >>
> >> diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
> >> index e476f90ef1c1..a9cdb88da173 100644
> >> --- a/drivers/gpu/drm/radeon/radeon_sync.c
> >> +++ b/drivers/gpu/drm/radeon/radeon_sync.c
> >> @@ -105,7 +105,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
> >>          else if (f)
> >>                  r = dma_fence_wait(f, true);
> >>
> >> -       flist = dma_resv_get_list(resv);
> >> +       flist = dma_resv_shared(resv);
> >>          if (shared || !flist || r)
> >>                  return r;
> >>
> >> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> >> index 92361556bf0b..c41ef0caa492 100644
> >> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> >> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> >> @@ -261,7 +261,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
> >>          int i;
> >>
> >>          rcu_read_lock();
> >> -       fobj = rcu_dereference(resv->fence);
> >> +       fobj = dma_resv_shared(resv);
> >>          fence = dma_resv_exclusive(resv);
> >>          if (fence && !fence->ops->signaled)
> >>                  dma_fence_enable_sw_signaling(fence);
> >> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> >> index 7549ec5eb35c..98ac66fecb71 100644
> >> --- a/include/linux/dma-resv.h
> >> +++ b/include/linux/dma-resv.h
> >> @@ -78,20 +78,6 @@ struct dma_resv {
> >>   #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
> >>   #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
> >>
> >> -/**
> >> - * dma_resv_get_list - get the reservation object's
> >> - * shared fence list, with update-side lock held
> >> - * @obj: the reservation object
> >> - *
> >> - * Returns the shared fence list.  Does NOT take references to
> >> - * the fence.  The obj->lock must be held.
> >> - */
> >> -static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
> >> -{
> >> -       return rcu_dereference_protected(obj->fence,
> >> -                                        dma_resv_held(obj));
> >> -}
> >> -
> >>   #ifdef CONFIG_DEBUG_MUTEXES
> >>   void dma_resv_reset_shared_max(struct dma_resv *obj);
> >>   #else
> >> @@ -267,6 +253,17 @@ dma_resv_get_excl_rcu(struct dma_resv *obj)
> >>          return fence;
> >>   }
> >>
> >> +/**
> >> + * dma_resv_shared - get the reservation object's shared fence list
> >> + * @obj: the reservation object
> >> + *
> >> + * Returns the shared fence list. The obj->lock or rcu read side must be held.
> >> + */
> >> +static inline struct dma_resv_list *dma_resv_shared(struct dma_resv *obj)
> > Maybe dma_resv_shared_list() just to be a little more clear what's
> > being returned?
>
> Ok, renamed this one and dma_resv_exclusive into dma_resv_excl_fence as
> well.

Cool

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>

then.  Any more left needing review?

--Jason

> Christian.
>
> >
> > --Jason
> >
> >> +{
> >> +       return rcu_dereference_check(obj->fence, dma_resv_held(obj));
> >> +}
> >> +
> >>   void dma_resv_init(struct dma_resv *obj);
> >>   void dma_resv_fini(struct dma_resv *obj);
> >>   int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> >> --
> >> 2.25.1
> >>
>

^ permalink raw reply	[flat|nested] 26+ messages in thread

end of thread, other threads:[~2021-06-07 19:43 UTC | newest]

Thread overview: 26+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-06-02 11:17 [PATCH 1/7] dma-buf: fix inconsistent debug print Christian König
2021-06-02 11:17 ` [PATCH 2/7] dma-buf: add SPDX header and fix style in dma-resv.c Christian König
2021-06-02 12:34   ` Daniel Vetter
2021-06-02 12:47     ` Christian König
2021-06-02 12:55       ` Daniel Vetter
2021-06-02 11:17 ` [PATCH 3/7] dma-buf: cleanup dma-resv shared fence debugging a bit Christian König
2021-06-02 12:41   ` Daniel Vetter
2021-06-02 11:17 ` [PATCH 4/7] dma-buf: rename and cleanup dma_resv_get_excl Christian König
2021-06-02 12:43   ` Daniel Vetter
2021-06-02 20:04     ` Jason Ekstrand
2021-06-02 12:46   ` Daniel Vetter
2021-06-02 11:17 ` [PATCH 5/7] dma-buf: rename and cleanup dma_resv_get_list Christian König
2021-06-02 12:46   ` Daniel Vetter
2021-06-02 20:22   ` Jason Ekstrand
2021-06-06  8:53     ` Christian König
2021-06-07 19:42       ` Jason Ekstrand
2021-06-02 11:17 ` [PATCH 6/7] dma-buf: rename dma_resv_get_excl_rcu to _unlocked Christian König
2021-06-02 12:47   ` Daniel Vetter
2021-06-02 20:25   ` Jason Ekstrand
2021-06-02 11:17 ` [PATCH 7/7] dma-buf: drop the _rcu postfix on function names Christian König
2021-06-02 12:49   ` Daniel Vetter
2021-06-02 20:34   ` Jason Ekstrand
2021-06-06  9:08     ` Christian König
2021-06-02 12:33 ` [PATCH 1/7] dma-buf: fix inconsistent debug print Daniel Vetter
2021-06-02 12:36   ` Christian König
2021-06-02 12:50     ` Daniel Vetter

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.