qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
To: "Michael S. Tsirkin" <mst@redhat.com>
Cc: qemu-devel@nongnu.org, stefanha@redhat.com
Subject: Re: [PATCH] virtio: Use auto rcu_read macros
Date: Mon, 28 Oct 2019 16:13:00 +0000	[thread overview]
Message-ID: <20191028161300.GD2961@work-vm> (raw)
In-Reply-To: <20191025075225-mutt-send-email-mst@kernel.org>

* Michael S. Tsirkin (mst@redhat.com) wrote:
> On Mon, Oct 14, 2019 at 06:54:40PM +0100, Dr. David Alan Gilbert (git) wrote:
> > From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
> > 
> > Use RCU_READ_LOCK_GUARD and WITH_RCU_READ_LOCK_GUARD
> > to replace the manual rcu_read_(un)lock calls.
> > 
> > I think the only change is virtio_load which was missing unlocks
> > in error paths; those end up being fatal errors so it's not
> > that important anyway.
> > 
> > Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> 
> Can you rebase on top of packed ring support please?
> They are in my queue now.

Sent; fixed up in about the same way but not tested much.

Dave

> 
> > ---
> >  hw/virtio/virtio.c | 46 ++++++++++++++++------------------------------
> >  1 file changed, 16 insertions(+), 30 deletions(-)
> > 
> > diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> > index 527df03bfd..15ae9da60b 100644
> > --- a/hw/virtio/virtio.c
> > +++ b/hw/virtio/virtio.c
> > @@ -337,7 +337,7 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable)
> >          return;
> >      }
> >  
> > -    rcu_read_lock();
> > +    RCU_READ_LOCK_GUARD();
> >      if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
> >          vring_set_avail_event(vq, vring_avail_idx(vq));
> >      } else if (enable) {
> > @@ -349,7 +349,6 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable)
> >          /* Expose avail event/used flags before caller checks the avail idx. */
> >          smp_mb();
> >      }
> > -    rcu_read_unlock();
> >  }
> >  
> >  int virtio_queue_ready(VirtQueue *vq)
> > @@ -393,9 +392,8 @@ int virtio_queue_empty(VirtQueue *vq)
> >          return 0;
> >      }
> >  
> > -    rcu_read_lock();
> > +    RCU_READ_LOCK_GUARD();
> >      empty = vring_avail_idx(vq) == vq->last_avail_idx;
> > -    rcu_read_unlock();
> >      return empty;
> >  }
> >  
> > @@ -530,10 +528,9 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
> >  void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
> >                      unsigned int len)
> >  {
> > -    rcu_read_lock();
> > +    RCU_READ_LOCK_GUARD();
> >      virtqueue_fill(vq, elem, len, 0);
> >      virtqueue_flush(vq, 1);
> > -    rcu_read_unlock();
> >  }
> >  
> >  /* Called within rcu_read_lock().  */
> > @@ -624,7 +621,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
> >          return;
> >      }
> >  
> > -    rcu_read_lock();
> > +    RCU_READ_LOCK_GUARD();
> >      idx = vq->last_avail_idx;
> >      total_bufs = in_total = out_total = 0;
> >  
> > @@ -719,7 +716,6 @@ done:
> >      if (out_bytes) {
> >          *out_bytes = out_total;
> >      }
> > -    rcu_read_unlock();
> >      return;
> >  
> >  err:
> > @@ -869,7 +865,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
> >      if (unlikely(vdev->broken)) {
> >          return NULL;
> >      }
> > -    rcu_read_lock();
> > +    RCU_READ_LOCK_GUARD();
> >      if (virtio_queue_empty_rcu(vq)) {
> >          goto done;
> >      }
> > @@ -977,7 +973,6 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
> >      trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
> >  done:
> >      address_space_cache_destroy(&indirect_desc_cache);
> > -    rcu_read_unlock();
> >  
> >      return elem;
> >  
> > @@ -1680,13 +1675,10 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> >  
> >  void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
> >  {
> > -    bool should_notify;
> > -    rcu_read_lock();
> > -    should_notify = virtio_should_notify(vdev, vq);
> > -    rcu_read_unlock();
> > -
> > -    if (!should_notify) {
> > -        return;
> > +    WITH_RCU_READ_LOCK_GUARD() {
> > +        if (!virtio_should_notify(vdev, vq)) {
> > +            return;
> > +        }
> >      }
> >  
> >      trace_virtio_notify_irqfd(vdev, vq);
> > @@ -1718,13 +1710,10 @@ static void virtio_irq(VirtQueue *vq)
> >  
> >  void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
> >  {
> > -    bool should_notify;
> > -    rcu_read_lock();
> > -    should_notify = virtio_should_notify(vdev, vq);
> > -    rcu_read_unlock();
> > -
> > -    if (!should_notify) {
> > -        return;
> > +    WITH_RCU_READ_LOCK_GUARD() {
> > +        if (!virtio_should_notify(vdev, vq)) {
> > +            return;
> > +        }
> >      }
> >  
> >      trace_virtio_notify(vdev, vq);
> > @@ -2241,7 +2230,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
> >          vdev->start_on_kick = true;
> >      }
> >  
> > -    rcu_read_lock();
> > +    RCU_READ_LOCK_GUARD();
> >      for (i = 0; i < num; i++) {
> >          if (vdev->vq[i].vring.desc) {
> >              uint16_t nheads;
> > @@ -2289,7 +2278,6 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
> >              }
> >          }
> >      }
> > -    rcu_read_unlock();
> >  
> >      return 0;
> >  }
> > @@ -2422,21 +2410,19 @@ void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
> >  
> >  void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
> >  {
> > -    rcu_read_lock();
> > +    RCU_READ_LOCK_GUARD();
> >      if (vdev->vq[n].vring.desc) {
> >          vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
> >          vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
> >      }
> > -    rcu_read_unlock();
> >  }
> >  
> >  void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
> >  {
> > -    rcu_read_lock();
> > +    RCU_READ_LOCK_GUARD();
> >      if (vdev->vq[n].vring.desc) {
> >          vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
> >      }
> > -    rcu_read_unlock();
> >  }
> >  
> >  void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
> > -- 
> > 2.23.0
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK



      reply	other threads:[~2019-10-28 17:24 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-10-14 17:54 [PATCH] virtio: Use auto rcu_read macros Dr. David Alan Gilbert (git)
2019-10-14 17:57 ` Dr. David Alan Gilbert
2019-10-16 18:44 ` Stefan Hajnoczi
2019-10-25 11:54 ` Michael S. Tsirkin
2019-10-28 16:13   ` Dr. David Alan Gilbert [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20191028161300.GD2961@work-vm \
    --to=dgilbert@redhat.com \
    --cc=mst@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=stefanha@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for the NNTP newsgroup(s).