All of lore.kernel.org
* [PATCH] dma-buf: fix and rework dma_buf_poll v5
@ 2021-07-08 11:19 Christian König
  2021-07-08 11:41 ` Christian König
  2021-07-09 12:50   ` kernel test robot
  0 siblings, 2 replies; 7+ messages in thread
From: Christian König @ 2021-07-08 11:19 UTC (permalink / raw)
  To: dri-devel, roberto.sassu

Daniel pointed me towards this function and there are multiple obvious problems
in the implementation.

First of all, the retry loop is not working as intended. In general the retry
only makes sense if you grab the reference first and then check the sequence
values.
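
For illustration only and not part of the patch, the intended ordering
would look roughly like the sketch below. It only uses helpers already
visible in the removed code further down and takes the exclusive fence
as an example:

        unsigned seq;
        struct dma_fence *fence;

retry:
        seq = read_seqcount_begin(&resv->seq);
        rcu_read_lock();
        fence = rcu_dereference(resv->fence_excl);
        if (fence && !dma_fence_get_rcu(fence))
                fence = NULL;   /* refcount already dropped to zero */
        rcu_read_unlock();
        if (read_seqcount_retry(&resv->seq, seq)) {
                dma_fence_put(fence);   /* dma_fence_put() ignores NULL */
                goto retry;
        }
        /* only now is it safe to use the reference we grabbed */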

Then we should always wait for the exclusive fence as well.

It's also good practice to keep the reference around when installing callbacks
to fences you don't own.
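
In practice that just means taking the reference before calling
dma_fence_add_callback() and dropping it either when the callback could
not be installed or in the callback itself. A minimal sketch (the real
helpers are in the diff below):

        dma_fence_get(fence);
        if (dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb))
                /* not installed, e.g. fence already signaled: drop ref */
                dma_fence_put(fence);
        /* otherwise dma_buf_poll_cb() drops the reference when it runs */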

And last, the whole implementation was unnecessarily complex and rather hard to
understand, which could lead to unexpected behavior of the IOCTL.

Fix all this by reworking the implementation from scratch, dropping the
whole RCU approach and taking the lock instead.

Only mildly tested and needs a thoughtful review of the code.

v2: fix the reference counting as well
v3: keep the excl fence handling as is for stable
v4: back to testing all fences, drop RCU
v5: handle in and out separately

Signed-off-by: Christian König <christian.koenig@amd.com>
CC: stable@vger.kernel.org
---
 drivers/dma-buf/dma-buf.c | 152 +++++++++++++++++---------------------
 include/linux/dma-buf.h   |   2 +-
 2 files changed, 68 insertions(+), 86 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index eadd1eaa2fb5..439e2379e1cb 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -72,7 +72,7 @@ static void dma_buf_release(struct dentry *dentry)
 	 * If you hit this BUG() it means someone dropped their ref to the
 	 * dma-buf while still having pending operation to the buffer.
 	 */
-	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
 
 	dmabuf->ops->release(dmabuf);
 
@@ -202,16 +202,57 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 	wake_up_locked_poll(dcb->poll, dcb->active);
 	dcb->active = 0;
 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
+	dma_fence_put(fence);
+}
+
+static bool dma_buf_poll_shared(struct dma_resv *resv,
+				struct dma_buf_poll_cb_t *dcb)
+{
+	struct dma_resv_list *fobj = dma_resv_get_list(resv);
+	struct dma_fence *fence;
+	int i, r;
+
+	if (!fobj)
+		return false;
+
+	for (i = 0; i < fobj->shared_count; ++i) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+						  dma_resv_held(resv));
+		dma_fence_get(fence);
+		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+		if (!r)
+			return true;
+		dma_fence_put(fence);
+	}
+
+	return false;
+}
+
+static bool dma_buf_poll_excl(struct dma_resv *resv,
+			      struct dma_buf_poll_cb_t *dcb)
+{
+	struct dma_fence *fence = dma_resv_get_excl(resv);
+	int r;
+
+	if (!fence)
+		return false;
+
+	dma_fence_get(fence);
+	r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+	if (!r)
+		return true;
+	dma_fence_put(fence);
+
+	return false;
 }
 
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
 	struct dma_resv *resv;
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence_excl;
+	unsigned shared_count;
 	__poll_t events;
-	unsigned shared_count, seq;
+	int r, i;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -225,101 +266,42 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
-retry:
-	seq = read_seqcount_begin(&resv->seq);
-	rcu_read_lock();
-
-	fobj = rcu_dereference(resv->fence);
-	if (fobj)
-		shared_count = fobj->shared_count;
-	else
-		shared_count = 0;
-	fence_excl = rcu_dereference(resv->fence_excl);
-	if (read_seqcount_retry(&resv->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
-
-	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-		__poll_t pevents = EPOLLIN;
+	dma_resv_lock(resv, NULL);
 
-		if (shared_count == 0)
-			pevents |= EPOLLOUT;
+	if (events & EPOLLOUT) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
 
+		/* Check that callback isn't busy */
 		spin_lock_irq(&dmabuf->poll.lock);
-		if (dcb->active) {
-			dcb->active |= pevents;
-			events &= ~pevents;
-		} else
-			dcb->active = pevents;
+		if (dcb->active)
+			events &= ~EPOLLOUT;
+		else
+			dcb->active = EPOLLOUT;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (events & pevents) {
-			if (!dma_fence_get_rcu(fence_excl)) {
-				/* force a recheck */
-				events &= ~pevents;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
-							   dma_buf_poll_cb)) {
-				events &= ~pevents;
-				dma_fence_put(fence_excl);
-			} else {
-				/*
-				 * No callback queued, wake up any additional
-				 * waiters.
-				 */
-				dma_fence_put(fence_excl);
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			}
-		}
+		if (events & EPOLLOUT && !dma_buf_poll_shared(resv, dcb) &&
+		    !dma_buf_poll_excl(resv, dcb))
+			/* No callback queued, wake up any other waiters */
+			dma_buf_poll_cb(NULL, &dcb->cb);
 	}
 
-	if ((events & EPOLLOUT) && shared_count > 0) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
-		int i;
+	if (events & EPOLLIN) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
 
-		/* Only queue a new callback if no event has fired yet */
+		/* Check that callback isn't busy */
 		spin_lock_irq(&dmabuf->poll.lock);
 		if (dcb->active)
-			events &= ~EPOLLOUT;
+			events &= ~EPOLLIN;
 		else
-			dcb->active = EPOLLOUT;
+			dcb->active = EPOLLIN;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (!(events & EPOLLOUT))
-			goto out;
-
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
-			if (!dma_fence_get_rcu(fence)) {
-				/*
-				 * fence refcount dropped to zero, this means
-				 * that fobj has been freed
-				 *
-				 * call dma_buf_poll_cb and force a recheck!
-				 */
-				events &= ~EPOLLOUT;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-				break;
-			}
-			if (!dma_fence_add_callback(fence, &dcb->cb,
-						    dma_buf_poll_cb)) {
-				dma_fence_put(fence);
-				events &= ~EPOLLOUT;
-				break;
-			}
-			dma_fence_put(fence);
-		}
-
-		/* No callback queued, wake up any additional waiters. */
-		if (i == shared_count)
+		if (events & EPOLLIN && !dma_buf_poll_excl(resv, dcb))
+			/* No callback queued, wake up any other waiters */
 			dma_buf_poll_cb(NULL, &dcb->cb);
 	}
 
-out:
-	rcu_read_unlock();
+	dma_resv_unlock(resv);
 	return events;
 }
 
@@ -562,8 +544,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->owner = exp_info->owner;
 	spin_lock_init(&dmabuf->name_lock);
 	init_waitqueue_head(&dmabuf->poll);
-	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
-	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
 
 	if (!resv) {
 		resv = (struct dma_resv *)&dmabuf[1];
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index efdc56b9d95f..7e747ad54c81 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -329,7 +329,7 @@ struct dma_buf {
 		wait_queue_head_t *poll;
 
 		__poll_t active;
-	} cb_excl, cb_shared;
+	} cb_in, cb_out;
 };
 
 /**
-- 
2.25.1


* Re: [PATCH] dma-buf: fix and rework dma_buf_poll v5
@ 2021-07-02 16:00 kernel test robot
  0 siblings, 0 replies; 7+ messages in thread
From: kernel test robot @ 2021-07-02 16:00 UTC (permalink / raw)
  To: kbuild


CC: kbuild-all(a)lists.01.org
In-Reply-To: <20210702103143.16824-1-christian.koenig@amd.com>
References: <20210702103143.16824-1-christian.koenig@amd.com>
TO: "Christian König" <ckoenig.leichtzumerken@gmail.com>
TO: dri-devel(a)lists.freedesktop.org
TO: daniel.vetter(a)ffwll.ch

Hi "Christian,

I love your patch! Perhaps something to improve:

[auto build test WARNING on tegra-drm/drm/tegra/for-next]
[also build test WARNING on v5.13]
[cannot apply to linus/master next-20210701]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting the patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Christian-K-nig/dma-buf-fix-and-rework-dma_buf_poll-v5/20210702-183256
base:   git://anongit.freedesktop.org/tegra/linux.git drm/tegra/for-next
:::::: branch date: 5 hours ago
:::::: commit date: 5 hours ago
compiler: arm-linux-gnueabi-gcc (GCC) 9.3.0

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>


cppcheck possible warnings: (new ones prefixed by >>, may not be real problems)

>> drivers/dma-buf/dma-buf.c:253:11: warning: Unused variable: shared_count [unusedVariable]
    unsigned shared_count;
             ^
>> drivers/dma-buf/dma-buf.c:255:6: warning: Unused variable: r [unusedVariable]
    int r, i;
        ^
>> drivers/dma-buf/dma-buf.c:255:9: warning: Unused variable: i [unusedVariable]
    int r, i;
           ^
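
These look like leftovers of the old implementation; presumably the fix
is simply to drop the now-unused declarations from dma_buf_poll(), along
the lines of the following untested snippet:

 	struct dma_buf *dmabuf;
 	struct dma_resv *resv;
-	unsigned shared_count;
 	__poll_t events;
-	int r, i;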

vim +253 drivers/dma-buf/dma-buf.c

9b495a5887994a Maarten Lankhorst 2014-07-01  248  
afc9a42b7464f7 Al Viro           2017-07-03  249  static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
9b495a5887994a Maarten Lankhorst 2014-07-01  250  {
9b495a5887994a Maarten Lankhorst 2014-07-01  251  	struct dma_buf *dmabuf;
52791eeec1d9f4 Christian König   2019-08-11  252  	struct dma_resv *resv;
5bb0f0637b179f Christian König   2021-07-02 @253  	unsigned shared_count;
01699437758328 Al Viro           2017-07-03  254  	__poll_t events;
5bb0f0637b179f Christian König   2021-07-02 @255  	int r, i;
9b495a5887994a Maarten Lankhorst 2014-07-01  256  
9b495a5887994a Maarten Lankhorst 2014-07-01  257  	dmabuf = file->private_data;
9b495a5887994a Maarten Lankhorst 2014-07-01  258  	if (!dmabuf || !dmabuf->resv)
a9a08845e9acbd Linus Torvalds    2018-02-11  259  		return EPOLLERR;
9b495a5887994a Maarten Lankhorst 2014-07-01  260  
9b495a5887994a Maarten Lankhorst 2014-07-01  261  	resv = dmabuf->resv;
9b495a5887994a Maarten Lankhorst 2014-07-01  262  
9b495a5887994a Maarten Lankhorst 2014-07-01  263  	poll_wait(file, &dmabuf->poll, poll);
9b495a5887994a Maarten Lankhorst 2014-07-01  264  
a9a08845e9acbd Linus Torvalds    2018-02-11  265  	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
9b495a5887994a Maarten Lankhorst 2014-07-01  266  	if (!events)
9b495a5887994a Maarten Lankhorst 2014-07-01  267  		return 0;
9b495a5887994a Maarten Lankhorst 2014-07-01  268  
5bb0f0637b179f Christian König   2021-07-02  269  	dma_resv_lock(resv, NULL);
9b495a5887994a Maarten Lankhorst 2014-07-01  270  
5bb0f0637b179f Christian König   2021-07-02  271  	if (events & EPOLLOUT) {
5bb0f0637b179f Christian König   2021-07-02  272  		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
9b495a5887994a Maarten Lankhorst 2014-07-01  273  
5bb0f0637b179f Christian König   2021-07-02  274  		/* Check that callback isn't busy */
9b495a5887994a Maarten Lankhorst 2014-07-01  275  		spin_lock_irq(&dmabuf->poll.lock);
5bb0f0637b179f Christian König   2021-07-02  276  		if (dcb->active)
5bb0f0637b179f Christian König   2021-07-02  277  			events &= ~EPOLLOUT;
5bb0f0637b179f Christian König   2021-07-02  278  		else
5bb0f0637b179f Christian König   2021-07-02  279  			dcb->active = EPOLLOUT;
9b495a5887994a Maarten Lankhorst 2014-07-01  280  		spin_unlock_irq(&dmabuf->poll.lock);
9b495a5887994a Maarten Lankhorst 2014-07-01  281  
5bb0f0637b179f Christian König   2021-07-02  282  		if (events & EPOLLOUT && !dma_buf_poll_shared(resv, dcb) &&
5bb0f0637b179f Christian König   2021-07-02  283  		    !dma_buf_poll_excl(resv, dcb))
5bb0f0637b179f Christian König   2021-07-02  284  			/* No callback queued, wake up any other waiters */
3c3b177a9369b2 Maarten Lankhorst 2014-07-01  285  			dma_buf_poll_cb(NULL, &dcb->cb);
04a5faa8cbe5a8 Maarten Lankhorst 2014-07-01  286  	}
9b495a5887994a Maarten Lankhorst 2014-07-01  287  
5bb0f0637b179f Christian König   2021-07-02  288  	if (events & EPOLLIN) {
5bb0f0637b179f Christian König   2021-07-02  289  		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
9b495a5887994a Maarten Lankhorst 2014-07-01  290  
5bb0f0637b179f Christian König   2021-07-02  291  		/* Check that callback isn't busy */
9b495a5887994a Maarten Lankhorst 2014-07-01  292  		spin_lock_irq(&dmabuf->poll.lock);
9b495a5887994a Maarten Lankhorst 2014-07-01  293  		if (dcb->active)
5bb0f0637b179f Christian König   2021-07-02  294  			events &= ~EPOLLIN;
9b495a5887994a Maarten Lankhorst 2014-07-01  295  		else
5bb0f0637b179f Christian König   2021-07-02  296  			dcb->active = EPOLLIN;
9b495a5887994a Maarten Lankhorst 2014-07-01  297  		spin_unlock_irq(&dmabuf->poll.lock);
9b495a5887994a Maarten Lankhorst 2014-07-01  298  
5bb0f0637b179f Christian König   2021-07-02  299  		if (events & EPOLLIN && !dma_buf_poll_excl(resv, dcb))
5bb0f0637b179f Christian König   2021-07-02  300  			/* No callback queued, wake up any other waiters */
9b495a5887994a Maarten Lankhorst 2014-07-01  301  			dma_buf_poll_cb(NULL, &dcb->cb);
9b495a5887994a Maarten Lankhorst 2014-07-01  302  	}
9b495a5887994a Maarten Lankhorst 2014-07-01  303  
5bb0f0637b179f Christian König   2021-07-02  304  	dma_resv_unlock(resv);
9b495a5887994a Maarten Lankhorst 2014-07-01  305  	return events;
9b495a5887994a Maarten Lankhorst 2014-07-01  306  }
9b495a5887994a Maarten Lankhorst 2014-07-01  307  

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org

* [PATCH] dma-buf: fix and rework dma_buf_poll v5
@ 2021-07-02 10:31 Christian König
  2021-07-02 19:45 ` Daniel Vetter
  0 siblings, 1 reply; 7+ messages in thread
From: Christian König @ 2021-07-02 10:31 UTC (permalink / raw)
  To: dri-devel, daniel.vetter

Daniel pointed me towards this function and there are multiple obvious problems
in the implementation.

First of all, the retry loop is not working as intended. In general the retry
only makes sense if you grab the reference first and then check the sequence
values.

Then we should always wait for the exclusive fence as well.

It's also good practice to keep the reference around when installing callbacks
to fences you don't own.

And last, the whole implementation was unnecessarily complex and rather hard to
understand, which could lead to unexpected behavior of the IOCTL.

Fix all this by reworking the implementation from scratch, dropping the
whole RCU approach and taking the lock instead.

Only mildly tested and needs a thoughtful review of the code.

v2: fix the reference counting as well
v3: keep the excl fence handling as is for stable
v4: back to testing all fences, drop RCU
v5: handle in and out separately

Signed-off-by: Christian König <christian.koenig@amd.com>
CC: stable@vger.kernel.org
---
 drivers/dma-buf/dma-buf.c | 152 +++++++++++++++++---------------------
 include/linux/dma-buf.h   |   2 +-
 2 files changed, 68 insertions(+), 86 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index eadd1eaa2fb5..439e2379e1cb 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -72,7 +72,7 @@ static void dma_buf_release(struct dentry *dentry)
 	 * If you hit this BUG() it means someone dropped their ref to the
 	 * dma-buf while still having pending operation to the buffer.
 	 */
-	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
 
 	dmabuf->ops->release(dmabuf);
 
@@ -202,16 +202,57 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 	wake_up_locked_poll(dcb->poll, dcb->active);
 	dcb->active = 0;
 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
+	dma_fence_put(fence);
+}
+
+static bool dma_buf_poll_shared(struct dma_resv *resv,
+				struct dma_buf_poll_cb_t *dcb)
+{
+	struct dma_resv_list *fobj = dma_resv_get_list(resv);
+	struct dma_fence *fence;
+	int i, r;
+
+	if (!fobj)
+		return false;
+
+	for (i = 0; i < fobj->shared_count; ++i) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+						  dma_resv_held(resv));
+		dma_fence_get(fence);
+		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+		if (!r)
+			return true;
+		dma_fence_put(fence);
+	}
+
+	return false;
+}
+
+static bool dma_buf_poll_excl(struct dma_resv *resv,
+			      struct dma_buf_poll_cb_t *dcb)
+{
+	struct dma_fence *fence = dma_resv_get_excl(resv);
+	int r;
+
+	if (!fence)
+		return false;
+
+	dma_fence_get(fence);
+	r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+	if (!r)
+		return true;
+	dma_fence_put(fence);
+
+	return false;
 }
 
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
 	struct dma_resv *resv;
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence_excl;
+	unsigned shared_count;
 	__poll_t events;
-	unsigned shared_count, seq;
+	int r, i;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -225,101 +266,42 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
-retry:
-	seq = read_seqcount_begin(&resv->seq);
-	rcu_read_lock();
-
-	fobj = rcu_dereference(resv->fence);
-	if (fobj)
-		shared_count = fobj->shared_count;
-	else
-		shared_count = 0;
-	fence_excl = rcu_dereference(resv->fence_excl);
-	if (read_seqcount_retry(&resv->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
-
-	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-		__poll_t pevents = EPOLLIN;
+	dma_resv_lock(resv, NULL);
 
-		if (shared_count == 0)
-			pevents |= EPOLLOUT;
+	if (events & EPOLLOUT) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
 
+		/* Check that callback isn't busy */
 		spin_lock_irq(&dmabuf->poll.lock);
-		if (dcb->active) {
-			dcb->active |= pevents;
-			events &= ~pevents;
-		} else
-			dcb->active = pevents;
+		if (dcb->active)
+			events &= ~EPOLLOUT;
+		else
+			dcb->active = EPOLLOUT;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (events & pevents) {
-			if (!dma_fence_get_rcu(fence_excl)) {
-				/* force a recheck */
-				events &= ~pevents;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
-							   dma_buf_poll_cb)) {
-				events &= ~pevents;
-				dma_fence_put(fence_excl);
-			} else {
-				/*
-				 * No callback queued, wake up any additional
-				 * waiters.
-				 */
-				dma_fence_put(fence_excl);
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			}
-		}
+		if (events & EPOLLOUT && !dma_buf_poll_shared(resv, dcb) &&
+		    !dma_buf_poll_excl(resv, dcb))
+			/* No callback queued, wake up any other waiters */
+			dma_buf_poll_cb(NULL, &dcb->cb);
 	}
 
-	if ((events & EPOLLOUT) && shared_count > 0) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
-		int i;
+	if (events & EPOLLIN) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
 
-		/* Only queue a new callback if no event has fired yet */
+		/* Check that callback isn't busy */
 		spin_lock_irq(&dmabuf->poll.lock);
 		if (dcb->active)
-			events &= ~EPOLLOUT;
+			events &= ~EPOLLIN;
 		else
-			dcb->active = EPOLLOUT;
+			dcb->active = EPOLLIN;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (!(events & EPOLLOUT))
-			goto out;
-
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
-			if (!dma_fence_get_rcu(fence)) {
-				/*
-				 * fence refcount dropped to zero, this means
-				 * that fobj has been freed
-				 *
-				 * call dma_buf_poll_cb and force a recheck!
-				 */
-				events &= ~EPOLLOUT;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-				break;
-			}
-			if (!dma_fence_add_callback(fence, &dcb->cb,
-						    dma_buf_poll_cb)) {
-				dma_fence_put(fence);
-				events &= ~EPOLLOUT;
-				break;
-			}
-			dma_fence_put(fence);
-		}
-
-		/* No callback queued, wake up any additional waiters. */
-		if (i == shared_count)
+		if (events & EPOLLIN && !dma_buf_poll_excl(resv, dcb))
+			/* No callback queued, wake up any other waiters */
 			dma_buf_poll_cb(NULL, &dcb->cb);
 	}
 
-out:
-	rcu_read_unlock();
+	dma_resv_unlock(resv);
 	return events;
 }
 
@@ -562,8 +544,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->owner = exp_info->owner;
 	spin_lock_init(&dmabuf->name_lock);
 	init_waitqueue_head(&dmabuf->poll);
-	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
-	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
 
 	if (!resv) {
 		resv = (struct dma_resv *)&dmabuf[1];
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index efdc56b9d95f..7e747ad54c81 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -329,7 +329,7 @@ struct dma_buf {
 		wait_queue_head_t *poll;
 
 		__poll_t active;
-	} cb_excl, cb_shared;
+	} cb_in, cb_out;
 };
 
 /**
-- 
2.25.1



end of thread, other threads:[~2021-07-09 12:50 UTC | newest]

Thread overview: 7+ messages
2021-07-08 11:19 [PATCH] dma-buf: fix and rework dma_buf_poll v5 Christian König
2021-07-08 11:41 ` Christian König
2021-07-09 12:50 ` kernel test robot
2021-07-09 12:50   ` kernel test robot
  -- strict thread matches above, loose matches on Subject: below --
2021-07-02 16:00 kernel test robot
2021-07-02 10:31 Christian König
2021-07-02 19:45 ` Daniel Vetter
