All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] virtiofsd: vu_dispatch locking should never fail
@ 2021-01-29 15:53 Greg Kurz
  2021-02-03 14:57 ` Stefan Hajnoczi
  2021-02-03 15:59 ` Vivek Goyal
  0 siblings, 2 replies; 7+ messages in thread
From: Greg Kurz @ 2021-01-29 15:53 UTC (permalink / raw)
  To: qemu-devel
  Cc: Stefan Hajnoczi, Dr. David Alan Gilbert, Vivek Goyal, Greg Kurz

pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if a
deadlock condition is detected or the current thread already owns
the lock. They can also fail, like pthread_rwlock_unlock(), if the
rwlock wasn't properly initialized. None of these are ever expected
to happen with fv_VuDev::vu_dispatch_rwlock.

Some users already check the return value and assert, some others
don't. Introduce rdlock/wrlock/unlock wrappers that just do the
former and use them everywhere.

Signed-off-by: Greg Kurz <groug@kaod.org>
---
 tools/virtiofsd/fuse_virtio.c | 42 +++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 14 deletions(-)

diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index ddcefee4272f..7ea269c4b65d 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -187,6 +187,24 @@ static void copy_iov(struct iovec *src_iov, int src_count,
     }
 }
 
+/*
+ * pthread_rwlock_rdlock() and pthread_rwlock_wrlock can fail if
+ * a deadlock condition is detected or the current thread already
+ * owns the lock. They can also fail, like pthread_rwlock_unlock(),
+ * if the mutex wasn't properly initialized. None of these are ever
+ * expected to happen.
+ */
+#define VU_DISPATCH_LOCK_OP(op)                              \
+static inline void vu_dispatch_##op(struct fv_VuDev *vud)    \
+{                                                            \
+    int ret = pthread_rwlock_##op(&vud->vu_dispatch_rwlock); \
+    assert(ret == 0);                                        \
+}
+
+VU_DISPATCH_LOCK_OP(rdlock);
+VU_DISPATCH_LOCK_OP(wrlock);
+VU_DISPATCH_LOCK_OP(unlock);
+
 /*
  * Called back by ll whenever it wants to send a reply/message back
  * The 1st element of the iov starts with the fuse_out_header
@@ -240,12 +258,12 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
 
     copy_iov(iov, count, in_sg, in_num, tosend_len);
 
-    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
+    vu_dispatch_rdlock(qi->virtio_dev);
     pthread_mutex_lock(&qi->vq_lock);
     vu_queue_push(dev, q, elem, tosend_len);
     vu_queue_notify(dev, q);
     pthread_mutex_unlock(&qi->vq_lock);
-    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
+    vu_dispatch_unlock(qi->virtio_dev);
 
     req->reply_sent = true;
 
@@ -403,12 +421,12 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
 
     ret = 0;
 
-    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
+    vu_dispatch_rdlock(qi->virtio_dev);
     pthread_mutex_lock(&qi->vq_lock);
     vu_queue_push(dev, q, elem, tosend_len);
     vu_queue_notify(dev, q);
     pthread_mutex_unlock(&qi->vq_lock);
-    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
+    vu_dispatch_unlock(qi->virtio_dev);
 
 err:
     if (ret == 0) {
@@ -558,12 +576,12 @@ out:
         fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__,
                  elem->index);
 
-        pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
+        vu_dispatch_rdlock(qi->virtio_dev);
         pthread_mutex_lock(&qi->vq_lock);
         vu_queue_push(dev, q, elem, 0);
         vu_queue_notify(dev, q);
         pthread_mutex_unlock(&qi->vq_lock);
-        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
+        vu_dispatch_unlock(qi->virtio_dev);
     }
 
     pthread_mutex_destroy(&req->ch.lock);
@@ -596,7 +614,6 @@ static void *fv_queue_thread(void *opaque)
              qi->qidx, qi->kick_fd);
     while (1) {
         struct pollfd pf[2];
-        int ret;
 
         pf[0].fd = qi->kick_fd;
         pf[0].events = POLLIN;
@@ -645,8 +662,7 @@ static void *fv_queue_thread(void *opaque)
             break;
         }
         /* Mutual exclusion with virtio_loop() */
-        ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
-        assert(ret == 0); /* there is no possible error case */
+        vu_dispatch_wrlock(qi->virtio_dev);
         pthread_mutex_lock(&qi->vq_lock);
         /* out is from guest, in is too guest */
         unsigned int in_bytes, out_bytes;
@@ -672,7 +688,7 @@ static void *fv_queue_thread(void *opaque)
         }
 
         pthread_mutex_unlock(&qi->vq_lock);
-        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
+        vu_dispatch_unlock(qi->virtio_dev);
 
         /* Process all the requests. */
         if (!se->thread_pool_size && req_list != NULL) {
@@ -799,7 +815,6 @@ int virtio_loop(struct fuse_session *se)
     while (!fuse_session_exited(se)) {
         struct pollfd pf[1];
         bool ok;
-        int ret;
         pf[0].fd = se->vu_socketfd;
         pf[0].events = POLLIN;
         pf[0].revents = 0;
@@ -825,12 +840,11 @@ int virtio_loop(struct fuse_session *se)
         assert(pf[0].revents & POLLIN);
         fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
         /* Mutual exclusion with fv_queue_thread() */
-        ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
-        assert(ret == 0); /* there is no possible error case */
+        vu_dispatch_wrlock(se->virtio_dev);
 
         ok = vu_dispatch(&se->virtio_dev->dev);
 
-        pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);
+        vu_dispatch_unlock(se->virtio_dev);
 
         if (!ok) {
             fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
-- 
2.26.2



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH] virtiofsd: vu_dispatch locking should never fail
  2021-01-29 15:53 [PATCH] virtiofsd: vu_dispatch locking should never fail Greg Kurz
@ 2021-02-03 14:57 ` Stefan Hajnoczi
  2021-02-03 15:35   ` Greg Kurz
  2021-02-03 15:59 ` Vivek Goyal
  1 sibling, 1 reply; 7+ messages in thread
From: Stefan Hajnoczi @ 2021-02-03 14:57 UTC (permalink / raw)
  To: Greg Kurz; +Cc: qemu-devel, Vivek Goyal, Dr. David Alan Gilbert

[-- Attachment #1: Type: text/plain, Size: 1337 bytes --]

On Fri, Jan 29, 2021 at 04:53:12PM +0100, Greg Kurz wrote:
> pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if a
> deadlock condition is detected or the current thread already owns
> the lock. They can also fail, like pthread_rwlock_unlock(), if the
> mutex wasn't properly initialized. None of these are ever expected
> to happen with fv_VuDev::vu_dispatch_rwlock.
> 
> Some users already check the return value and assert, some others
> don't. Introduce rdlock/wrlock/unlock wrappers that just do the
> former and use them everywhere.
> 
> Signed-off-by: Greg Kurz <groug@kaod.org>

What is the purpose of this commit:
1. Code cleanup/consistency?
2. Helps debug an issue that doesn't occur in production but you hit
   during development?
3. Needed to diagnose a real-world issue? How do you reproduce it?

I wanted to check to make sure I'm not missing an issue you found with
production workloads.

> @@ -645,8 +662,7 @@ static void *fv_queue_thread(void *opaque)
>              break;
>          }
>          /* Mutual exclusion with virtio_loop() */
> -        ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> -        assert(ret == 0); /* there is no possible error case */
> +        vu_dispatch_wrlock(qi->virtio_dev);

s/vu_dispatch_wrlock/vu_dispatch_rdlock/ ?

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] virtiofsd: vu_dispatch locking should never fail
  2021-02-03 14:57 ` Stefan Hajnoczi
@ 2021-02-03 15:35   ` Greg Kurz
  0 siblings, 0 replies; 7+ messages in thread
From: Greg Kurz @ 2021-02-03 15:35 UTC (permalink / raw)
  To: Stefan Hajnoczi; +Cc: qemu-devel, Vivek Goyal, Dr. David Alan Gilbert

[-- Attachment #1: Type: text/plain, Size: 1645 bytes --]

On Wed, 3 Feb 2021 14:57:23 +0000
Stefan Hajnoczi <stefanha@redhat.com> wrote:

> On Fri, Jan 29, 2021 at 04:53:12PM +0100, Greg Kurz wrote:
> > pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if a
> > deadlock condition is detected or the current thread already owns
> > the lock. They can also fail, like pthread_rwlock_unlock(), if the
> > mutex wasn't properly initialized. None of these are ever expected
> > to happen with fv_VuDev::vu_dispatch_rwlock.
> > 
> > Some users already check the return value and assert, some others
> > don't. Introduce rdlock/wrlock/unlock wrappers that just do the
> > former and use them everywhere.
> > 
> > Signed-off-by: Greg Kurz <groug@kaod.org>
> 
> What is the purpose of this commit:
> 1. Code cleanup/consistency?
> 2. Helps debug an issue that doesn't occur in production but you hit
>    during development?
> 3. Needed to diagnose a real-world issue? How do you reproduce it?
> 
> I wanted to check to make sure I'm not missing an issue you found with
> production workloads.
> 

I would have provided more details if this came from an actual issue,
but you're definitely right to ask: this falls more under 1.

> > @@ -645,8 +662,7 @@ static void *fv_queue_thread(void *opaque)
> >              break;
> >          }
> >          /* Mutual exclusion with virtio_loop() */
> > -        ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > -        assert(ret == 0); /* there is no possible error case */
> > +        vu_dispatch_wrlock(qi->virtio_dev);
> 
> s/vu_dispatch_wrlock/vu_dispatch_rdlock/ ?

Oops... definitely...

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 833 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] virtiofsd: vu_dispatch locking should never fail
  2021-01-29 15:53 [PATCH] virtiofsd: vu_dispatch locking should never fail Greg Kurz
  2021-02-03 14:57 ` Stefan Hajnoczi
@ 2021-02-03 15:59 ` Vivek Goyal
  2021-02-03 16:08   ` Greg Kurz
  1 sibling, 1 reply; 7+ messages in thread
From: Vivek Goyal @ 2021-02-03 15:59 UTC (permalink / raw)
  To: Greg Kurz; +Cc: qemu-devel, Stefan Hajnoczi, Dr. David Alan Gilbert

On Fri, Jan 29, 2021 at 04:53:12PM +0100, Greg Kurz wrote:
> pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if a
> deadlock condition is detected or the current thread already owns
> the lock. They can also fail, like pthread_rwlock_unlock(), if the
> mutex wasn't properly initialized. None of these are ever expected
> to happen with fv_VuDev::vu_dispatch_rwlock.
> 
> Some users already check the return value and assert, some others
> don't. Introduce rdlock/wrlock/unlock wrappers that just do the
> former and use them everywhere.
> 
> Signed-off-by: Greg Kurz <groug@kaod.org>
> ---
>  tools/virtiofsd/fuse_virtio.c | 42 +++++++++++++++++++++++------------
>  1 file changed, 28 insertions(+), 14 deletions(-)
> 
> diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
> index ddcefee4272f..7ea269c4b65d 100644
> --- a/tools/virtiofsd/fuse_virtio.c
> +++ b/tools/virtiofsd/fuse_virtio.c
> @@ -187,6 +187,24 @@ static void copy_iov(struct iovec *src_iov, int src_count,
>      }
>  }
>  
> +/*
> + * pthread_rwlock_rdlock() and pthread_rwlock_wrlock can fail if
> + * a deadlock condition is detected or the current thread already
> + * owns the lock. They can also fail, like pthread_rwlock_unlock(),
> + * if the mutex wasn't properly initialized. None of these are ever
> + * expected to happen.
> + */
> +#define VU_DISPATCH_LOCK_OP(op)                              \
> +static inline void vu_dispatch_##op(struct fv_VuDev *vud)    \
> +{                                                            \
> +    int ret = pthread_rwlock_##op(&vud->vu_dispatch_rwlock); \
> +    assert(ret == 0);                                        \
> +}
> +
> +VU_DISPATCH_LOCK_OP(rdlock);
> +VU_DISPATCH_LOCK_OP(wrlock);
> +VU_DISPATCH_LOCK_OP(unlock);
> +

I generally do not prefer using macros to define functions, as searching
for function declarations/definitions becomes harder. But I see that a lot
of people prefer it because it reduces the number of lines of code.

Apart from that one issue of using wrlock instead of rdlock in
fv_queue_thread() that Stefan pointed out, it looks good to me.

Reviewed-by: Vivek Goyal <vgoyal@redhat.com>

Vivek
>  /*
>   * Called back by ll whenever it wants to send a reply/message back
>   * The 1st element of the iov starts with the fuse_out_header
> @@ -240,12 +258,12 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
>  
>      copy_iov(iov, count, in_sg, in_num, tosend_len);
>  
> -    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> +    vu_dispatch_rdlock(qi->virtio_dev);
>      pthread_mutex_lock(&qi->vq_lock);
>      vu_queue_push(dev, q, elem, tosend_len);
>      vu_queue_notify(dev, q);
>      pthread_mutex_unlock(&qi->vq_lock);
> -    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> +    vu_dispatch_unlock(qi->virtio_dev);
>  
>      req->reply_sent = true;
>  
> @@ -403,12 +421,12 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
>  
>      ret = 0;
>  
> -    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> +    vu_dispatch_rdlock(qi->virtio_dev);
>      pthread_mutex_lock(&qi->vq_lock);
>      vu_queue_push(dev, q, elem, tosend_len);
>      vu_queue_notify(dev, q);
>      pthread_mutex_unlock(&qi->vq_lock);
> -    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> +    vu_dispatch_unlock(qi->virtio_dev);
>  
>  err:
>      if (ret == 0) {
> @@ -558,12 +576,12 @@ out:
>          fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__,
>                   elem->index);
>  
> -        pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> +        vu_dispatch_rdlock(qi->virtio_dev);
>          pthread_mutex_lock(&qi->vq_lock);
>          vu_queue_push(dev, q, elem, 0);
>          vu_queue_notify(dev, q);
>          pthread_mutex_unlock(&qi->vq_lock);
> -        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> +        vu_dispatch_unlock(qi->virtio_dev);
>      }
>  
>      pthread_mutex_destroy(&req->ch.lock);
> @@ -596,7 +614,6 @@ static void *fv_queue_thread(void *opaque)
>               qi->qidx, qi->kick_fd);
>      while (1) {
>          struct pollfd pf[2];
> -        int ret;
>  
>          pf[0].fd = qi->kick_fd;
>          pf[0].events = POLLIN;
> @@ -645,8 +662,7 @@ static void *fv_queue_thread(void *opaque)
>              break;
>          }
>          /* Mutual exclusion with virtio_loop() */
> -        ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> -        assert(ret == 0); /* there is no possible error case */
> +        vu_dispatch_wrlock(qi->virtio_dev);
>          pthread_mutex_lock(&qi->vq_lock);
>          /* out is from guest, in is too guest */
>          unsigned int in_bytes, out_bytes;
> @@ -672,7 +688,7 @@ static void *fv_queue_thread(void *opaque)
>          }
>  
>          pthread_mutex_unlock(&qi->vq_lock);
> -        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> +        vu_dispatch_unlock(qi->virtio_dev);
>  
>          /* Process all the requests. */
>          if (!se->thread_pool_size && req_list != NULL) {
> @@ -799,7 +815,6 @@ int virtio_loop(struct fuse_session *se)
>      while (!fuse_session_exited(se)) {
>          struct pollfd pf[1];
>          bool ok;
> -        int ret;
>          pf[0].fd = se->vu_socketfd;
>          pf[0].events = POLLIN;
>          pf[0].revents = 0;
> @@ -825,12 +840,11 @@ int virtio_loop(struct fuse_session *se)
>          assert(pf[0].revents & POLLIN);
>          fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
>          /* Mutual exclusion with fv_queue_thread() */
> -        ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
> -        assert(ret == 0); /* there is no possible error case */
> +        vu_dispatch_wrlock(se->virtio_dev);
>  
>          ok = vu_dispatch(&se->virtio_dev->dev);
>  
> -        pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);
> +        vu_dispatch_unlock(se->virtio_dev);
>  
>          if (!ok) {
>              fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
> -- 
> 2.26.2
> 



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] virtiofsd: vu_dispatch locking should never fail
  2021-02-03 15:59 ` Vivek Goyal
@ 2021-02-03 16:08   ` Greg Kurz
  2021-02-03 16:29     ` Vivek Goyal
  0 siblings, 1 reply; 7+ messages in thread
From: Greg Kurz @ 2021-02-03 16:08 UTC (permalink / raw)
  To: Vivek Goyal; +Cc: qemu-devel, Stefan Hajnoczi, Dr. David Alan Gilbert

On Wed, 3 Feb 2021 10:59:34 -0500
Vivek Goyal <vgoyal@redhat.com> wrote:

> On Fri, Jan 29, 2021 at 04:53:12PM +0100, Greg Kurz wrote:
> > pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if a
> > deadlock condition is detected or the current thread already owns
> > the lock. They can also fail, like pthread_rwlock_unlock(), if the
> > mutex wasn't properly initialized. None of these are ever expected
> > to happen with fv_VuDev::vu_dispatch_rwlock.
> > 
> > Some users already check the return value and assert, some others
> > don't. Introduce rdlock/wrlock/unlock wrappers that just do the
> > former and use them everywhere.
> > 
> > Signed-off-by: Greg Kurz <groug@kaod.org>
> > ---
> >  tools/virtiofsd/fuse_virtio.c | 42 +++++++++++++++++++++++------------
> >  1 file changed, 28 insertions(+), 14 deletions(-)
> > 
> > diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
> > index ddcefee4272f..7ea269c4b65d 100644
> > --- a/tools/virtiofsd/fuse_virtio.c
> > +++ b/tools/virtiofsd/fuse_virtio.c
> > @@ -187,6 +187,24 @@ static void copy_iov(struct iovec *src_iov, int src_count,
> >      }
> >  }
> >  
> > +/*
> > + * pthread_rwlock_rdlock() and pthread_rwlock_wrlock can fail if
> > + * a deadlock condition is detected or the current thread already
> > + * owns the lock. They can also fail, like pthread_rwlock_unlock(),
> > + * if the mutex wasn't properly initialized. None of these are ever
> > + * expected to happen.
> > + */
> > +#define VU_DISPATCH_LOCK_OP(op)                              \
> > +static inline void vu_dispatch_##op(struct fv_VuDev *vud)    \
> > +{                                                            \
> > +    int ret = pthread_rwlock_##op(&vud->vu_dispatch_rwlock); \
> > +    assert(ret == 0);                                        \
> > +}
> > +
> > +VU_DISPATCH_LOCK_OP(rdlock);
> > +VU_DISPATCH_LOCK_OP(wrlock);
> > +VU_DISPATCH_LOCK_OP(unlock);
> > +
> 
> I generally do not prefer using macros to define functions as searching
> to functions declarations/definitions becomes harder. But I see lot
> of people prefer that because they can reduce number of lines of code.
> 

Well, I must admit I hesitated since this doesn't gain much in
terms of LoC compared to the expanded version. I'm perfectly
fine with dropping the macro in my v2 if this looks better
to you.

> Apart from that one issue of using rdlock in fv_queue_thread(), stefan
> pointed, it looks good to me.
> 
> Reviewed-by: Vivek Goyal <vgoyal@redhat.com>
> 
> Vivek
> >  /*
> >   * Called back by ll whenever it wants to send a reply/message back
> >   * The 1st element of the iov starts with the fuse_out_header
> > @@ -240,12 +258,12 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
> >  
> >      copy_iov(iov, count, in_sg, in_num, tosend_len);
> >  
> > -    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +    vu_dispatch_rdlock(qi->virtio_dev);
> >      pthread_mutex_lock(&qi->vq_lock);
> >      vu_queue_push(dev, q, elem, tosend_len);
> >      vu_queue_notify(dev, q);
> >      pthread_mutex_unlock(&qi->vq_lock);
> > -    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +    vu_dispatch_unlock(qi->virtio_dev);
> >  
> >      req->reply_sent = true;
> >  
> > @@ -403,12 +421,12 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
> >  
> >      ret = 0;
> >  
> > -    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +    vu_dispatch_rdlock(qi->virtio_dev);
> >      pthread_mutex_lock(&qi->vq_lock);
> >      vu_queue_push(dev, q, elem, tosend_len);
> >      vu_queue_notify(dev, q);
> >      pthread_mutex_unlock(&qi->vq_lock);
> > -    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +    vu_dispatch_unlock(qi->virtio_dev);
> >  
> >  err:
> >      if (ret == 0) {
> > @@ -558,12 +576,12 @@ out:
> >          fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__,
> >                   elem->index);
> >  
> > -        pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +        vu_dispatch_rdlock(qi->virtio_dev);
> >          pthread_mutex_lock(&qi->vq_lock);
> >          vu_queue_push(dev, q, elem, 0);
> >          vu_queue_notify(dev, q);
> >          pthread_mutex_unlock(&qi->vq_lock);
> > -        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +        vu_dispatch_unlock(qi->virtio_dev);
> >      }
> >  
> >      pthread_mutex_destroy(&req->ch.lock);
> > @@ -596,7 +614,6 @@ static void *fv_queue_thread(void *opaque)
> >               qi->qidx, qi->kick_fd);
> >      while (1) {
> >          struct pollfd pf[2];
> > -        int ret;
> >  
> >          pf[0].fd = qi->kick_fd;
> >          pf[0].events = POLLIN;
> > @@ -645,8 +662,7 @@ static void *fv_queue_thread(void *opaque)
> >              break;
> >          }
> >          /* Mutual exclusion with virtio_loop() */
> > -        ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > -        assert(ret == 0); /* there is no possible error case */
> > +        vu_dispatch_wrlock(qi->virtio_dev);
> >          pthread_mutex_lock(&qi->vq_lock);
> >          /* out is from guest, in is too guest */
> >          unsigned int in_bytes, out_bytes;
> > @@ -672,7 +688,7 @@ static void *fv_queue_thread(void *opaque)
> >          }
> >  
> >          pthread_mutex_unlock(&qi->vq_lock);
> > -        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +        vu_dispatch_unlock(qi->virtio_dev);
> >  
> >          /* Process all the requests. */
> >          if (!se->thread_pool_size && req_list != NULL) {
> > @@ -799,7 +815,6 @@ int virtio_loop(struct fuse_session *se)
> >      while (!fuse_session_exited(se)) {
> >          struct pollfd pf[1];
> >          bool ok;
> > -        int ret;
> >          pf[0].fd = se->vu_socketfd;
> >          pf[0].events = POLLIN;
> >          pf[0].revents = 0;
> > @@ -825,12 +840,11 @@ int virtio_loop(struct fuse_session *se)
> >          assert(pf[0].revents & POLLIN);
> >          fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
> >          /* Mutual exclusion with fv_queue_thread() */
> > -        ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
> > -        assert(ret == 0); /* there is no possible error case */
> > +        vu_dispatch_wrlock(se->virtio_dev);
> >  
> >          ok = vu_dispatch(&se->virtio_dev->dev);
> >  
> > -        pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);
> > +        vu_dispatch_unlock(se->virtio_dev);
> >  
> >          if (!ok) {
> >              fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
> > -- 
> > 2.26.2
> > 
> 



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] virtiofsd: vu_dispatch locking should never fail
  2021-02-03 16:08   ` Greg Kurz
@ 2021-02-03 16:29     ` Vivek Goyal
  2021-02-03 16:53       ` Stefan Hajnoczi
  0 siblings, 1 reply; 7+ messages in thread
From: Vivek Goyal @ 2021-02-03 16:29 UTC (permalink / raw)
  To: Greg Kurz; +Cc: qemu-devel, Stefan Hajnoczi, Dr. David Alan Gilbert

On Wed, Feb 03, 2021 at 05:08:57PM +0100, Greg Kurz wrote:
> On Wed, 3 Feb 2021 10:59:34 -0500
> Vivek Goyal <vgoyal@redhat.com> wrote:
> 
> > On Fri, Jan 29, 2021 at 04:53:12PM +0100, Greg Kurz wrote:
> > > pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if a
> > > deadlock condition is detected or the current thread already owns
> > > the lock. They can also fail, like pthread_rwlock_unlock(), if the
> > > mutex wasn't properly initialized. None of these are ever expected
> > > to happen with fv_VuDev::vu_dispatch_rwlock.
> > > 
> > > Some users already check the return value and assert, some others
> > > don't. Introduce rdlock/wrlock/unlock wrappers that just do the
> > > former and use them everywhere.
> > > 
> > > Signed-off-by: Greg Kurz <groug@kaod.org>
> > > ---
> > >  tools/virtiofsd/fuse_virtio.c | 42 +++++++++++++++++++++++------------
> > >  1 file changed, 28 insertions(+), 14 deletions(-)
> > > 
> > > diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
> > > index ddcefee4272f..7ea269c4b65d 100644
> > > --- a/tools/virtiofsd/fuse_virtio.c
> > > +++ b/tools/virtiofsd/fuse_virtio.c
> > > @@ -187,6 +187,24 @@ static void copy_iov(struct iovec *src_iov, int src_count,
> > >      }
> > >  }
> > >  
> > > +/*
> > > + * pthread_rwlock_rdlock() and pthread_rwlock_wrlock can fail if
> > > + * a deadlock condition is detected or the current thread already
> > > + * owns the lock. They can also fail, like pthread_rwlock_unlock(),
> > > + * if the mutex wasn't properly initialized. None of these are ever
> > > + * expected to happen.
> > > + */
> > > +#define VU_DISPATCH_LOCK_OP(op)                              \
> > > +static inline void vu_dispatch_##op(struct fv_VuDev *vud)    \
> > > +{                                                            \
> > > +    int ret = pthread_rwlock_##op(&vud->vu_dispatch_rwlock); \
> > > +    assert(ret == 0);                                        \
> > > +}
> > > +
> > > +VU_DISPATCH_LOCK_OP(rdlock);
> > > +VU_DISPATCH_LOCK_OP(wrlock);
> > > +VU_DISPATCH_LOCK_OP(unlock);
> > > +
> > 
> > I generally do not prefer using macros to define functions as searching
> > to functions declarations/definitions becomes harder. But I see lot
> > of people prefer that because they can reduce number of lines of code.
> > 
> 
> Well, I must admit I hesitated since this doesn't gain much in
> terms of LoC compared to the expanded version. I'm perfectly
> fine with dropping the macro in my v2 if this looks better
> to you.

If you are posting a V2 anyway, let's do it. Agreed, we are not saving
many lines here, so why use macros to define functions.

Vivek

> 
> > Apart from that one issue of using rdlock in fv_queue_thread(), stefan
> > pointed, it looks good to me.
> > 
> > Reviewed-by: Vivek Goyal <vgoyal@redhat.com>
> > 
> > Vivek
> > >  /*
> > >   * Called back by ll whenever it wants to send a reply/message back
> > >   * The 1st element of the iov starts with the fuse_out_header
> > > @@ -240,12 +258,12 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
> > >  
> > >      copy_iov(iov, count, in_sg, in_num, tosend_len);
> > >  
> > > -    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > > +    vu_dispatch_rdlock(qi->virtio_dev);
> > >      pthread_mutex_lock(&qi->vq_lock);
> > >      vu_queue_push(dev, q, elem, tosend_len);
> > >      vu_queue_notify(dev, q);
> > >      pthread_mutex_unlock(&qi->vq_lock);
> > > -    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > > +    vu_dispatch_unlock(qi->virtio_dev);
> > >  
> > >      req->reply_sent = true;
> > >  
> > > @@ -403,12 +421,12 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
> > >  
> > >      ret = 0;
> > >  
> > > -    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > > +    vu_dispatch_rdlock(qi->virtio_dev);
> > >      pthread_mutex_lock(&qi->vq_lock);
> > >      vu_queue_push(dev, q, elem, tosend_len);
> > >      vu_queue_notify(dev, q);
> > >      pthread_mutex_unlock(&qi->vq_lock);
> > > -    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > > +    vu_dispatch_unlock(qi->virtio_dev);
> > >  
> > >  err:
> > >      if (ret == 0) {
> > > @@ -558,12 +576,12 @@ out:
> > >          fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__,
> > >                   elem->index);
> > >  
> > > -        pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > > +        vu_dispatch_rdlock(qi->virtio_dev);
> > >          pthread_mutex_lock(&qi->vq_lock);
> > >          vu_queue_push(dev, q, elem, 0);
> > >          vu_queue_notify(dev, q);
> > >          pthread_mutex_unlock(&qi->vq_lock);
> > > -        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > > +        vu_dispatch_unlock(qi->virtio_dev);
> > >      }
> > >  
> > >      pthread_mutex_destroy(&req->ch.lock);
> > > @@ -596,7 +614,6 @@ static void *fv_queue_thread(void *opaque)
> > >               qi->qidx, qi->kick_fd);
> > >      while (1) {
> > >          struct pollfd pf[2];
> > > -        int ret;
> > >  
> > >          pf[0].fd = qi->kick_fd;
> > >          pf[0].events = POLLIN;
> > > @@ -645,8 +662,7 @@ static void *fv_queue_thread(void *opaque)
> > >              break;
> > >          }
> > >          /* Mutual exclusion with virtio_loop() */
> > > -        ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > > -        assert(ret == 0); /* there is no possible error case */
> > > +        vu_dispatch_wrlock(qi->virtio_dev);
> > >          pthread_mutex_lock(&qi->vq_lock);
> > >          /* out is from guest, in is too guest */
> > >          unsigned int in_bytes, out_bytes;
> > > @@ -672,7 +688,7 @@ static void *fv_queue_thread(void *opaque)
> > >          }
> > >  
> > >          pthread_mutex_unlock(&qi->vq_lock);
> > > -        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > > +        vu_dispatch_unlock(qi->virtio_dev);
> > >  
> > >          /* Process all the requests. */
> > >          if (!se->thread_pool_size && req_list != NULL) {
> > > @@ -799,7 +815,6 @@ int virtio_loop(struct fuse_session *se)
> > >      while (!fuse_session_exited(se)) {
> > >          struct pollfd pf[1];
> > >          bool ok;
> > > -        int ret;
> > >          pf[0].fd = se->vu_socketfd;
> > >          pf[0].events = POLLIN;
> > >          pf[0].revents = 0;
> > > @@ -825,12 +840,11 @@ int virtio_loop(struct fuse_session *se)
> > >          assert(pf[0].revents & POLLIN);
> > >          fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
> > >          /* Mutual exclusion with fv_queue_thread() */
> > > -        ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
> > > -        assert(ret == 0); /* there is no possible error case */
> > > +        vu_dispatch_wrlock(se->virtio_dev);
> > >  
> > >          ok = vu_dispatch(&se->virtio_dev->dev);
> > >  
> > > -        pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);
> > > +        vu_dispatch_unlock(se->virtio_dev);
> > >  
> > >          if (!ok) {
> > >              fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
> > > -- 
> > > 2.26.2
> > > 
> > 
> 



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] virtiofsd: vu_dispatch locking should never fail
  2021-02-03 16:29     ` Vivek Goyal
@ 2021-02-03 16:53       ` Stefan Hajnoczi
  0 siblings, 0 replies; 7+ messages in thread
From: Stefan Hajnoczi @ 2021-02-03 16:53 UTC (permalink / raw)
  To: Vivek Goyal; +Cc: Greg Kurz, Dr. David Alan Gilbert, qemu-devel

[-- Attachment #1: Type: text/plain, Size: 3011 bytes --]

On Wed, Feb 03, 2021 at 11:29:15AM -0500, Vivek Goyal wrote:
> On Wed, Feb 03, 2021 at 05:08:57PM +0100, Greg Kurz wrote:
> > On Wed, 3 Feb 2021 10:59:34 -0500
> > Vivek Goyal <vgoyal@redhat.com> wrote:
> > 
> > > On Fri, Jan 29, 2021 at 04:53:12PM +0100, Greg Kurz wrote:
> > > > pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if a
> > > > deadlock condition is detected or the current thread already owns
> > > > the lock. They can also fail, like pthread_rwlock_unlock(), if the
> > > > mutex wasn't properly initialized. None of these are ever expected
> > > > to happen with fv_VuDev::vu_dispatch_rwlock.
> > > > 
> > > > Some users already check the return value and assert, some others
> > > > don't. Introduce rdlock/wrlock/unlock wrappers that just do the
> > > > former and use them everywhere.
> > > > 
> > > > Signed-off-by: Greg Kurz <groug@kaod.org>
> > > > ---
> > > >  tools/virtiofsd/fuse_virtio.c | 42 +++++++++++++++++++++++------------
> > > >  1 file changed, 28 insertions(+), 14 deletions(-)
> > > > 
> > > > diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
> > > > index ddcefee4272f..7ea269c4b65d 100644
> > > > --- a/tools/virtiofsd/fuse_virtio.c
> > > > +++ b/tools/virtiofsd/fuse_virtio.c
> > > > @@ -187,6 +187,24 @@ static void copy_iov(struct iovec *src_iov, int src_count,
> > > >      }
> > > >  }
> > > >  
> > > > +/*
> > > > + * pthread_rwlock_rdlock() and pthread_rwlock_wrlock can fail if
> > > > + * a deadlock condition is detected or the current thread already
> > > > + * owns the lock. They can also fail, like pthread_rwlock_unlock(),
> > > > + * if the mutex wasn't properly initialized. None of these are ever
> > > > + * expected to happen.
> > > > + */
> > > > +#define VU_DISPATCH_LOCK_OP(op)                              \
> > > > +static inline void vu_dispatch_##op(struct fv_VuDev *vud)    \
> > > > +{                                                            \
> > > > +    int ret = pthread_rwlock_##op(&vud->vu_dispatch_rwlock); \
> > > > +    assert(ret == 0);                                        \
> > > > +}
> > > > +
> > > > +VU_DISPATCH_LOCK_OP(rdlock);
> > > > +VU_DISPATCH_LOCK_OP(wrlock);
> > > > +VU_DISPATCH_LOCK_OP(unlock);
> > > > +
> > > 
> > > I generally do not prefer using macros to define functions as searching
> > > to functions declarations/definitions becomes harder. But I see lot
> > > of people prefer that because they can reduce number of lines of code.
> > > 
> > 
> > Well, I must admit I hesitated since this doesn't gain much in
> > terms of LoC compared to the expanded version. I'm perfectly
> > fine with dropping the macro in my v2 if this looks better
> > to you.
> 
> If you are posting V2 anyway, so lets do it. Agreed, we are not saving
> many lines where so why to use macros to define functions.

Nice. I also prefer the open-coded version because ctags won't be able
to interpret the macros :).

Stefan

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2021-02-03 16:55 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-01-29 15:53 [PATCH] virtiofsd: vu_dispatch locking should never fail Greg Kurz
2021-02-03 14:57 ` Stefan Hajnoczi
2021-02-03 15:35   ` Greg Kurz
2021-02-03 15:59 ` Vivek Goyal
2021-02-03 16:08   ` Greg Kurz
2021-02-03 16:29     ` Vivek Goyal
2021-02-03 16:53       ` Stefan Hajnoczi

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.