All of lore.kernel.org
 help / color / mirror / Atom feed
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
To: Mahmoud Mandour <ma.mandourr@gmail.com>, qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
	"open list:CURL" <qemu-block@nongnu.org>,
	Max Reitz <mreitz@redhat.com>
Subject: Re: [PATCH 2/9] block: Replaced qemu_mutex_lock calls with QEMU_LOCK_GUARD
Date: Fri, 12 Mar 2021 13:23:48 +0300	[thread overview]
Message-ID: <d74ef980-ad9b-8a97-0bc8-1ecc60a28c65@virtuozzo.com> (raw)
In-Reply-To: <20210311031538.5325-3-ma.mandourr@gmail.com>

11.03.2021 06:15, Mahmoud Mandour wrote:
> Replaced various qemu_mutex_lock/qemu_mutex_unlock calls with
> lock guard macros (QEMU_LOCK_GUARD() and WITH_QEMU_LOCK_GUARD).
> This slightly simplifies the code by eliminating calls to
> qemu_mutex_unlock and eliminates goto paths.
> 
> Signed-off-by: Mahmoud Mandour <ma.mandourr@gmail.com>
> ---
>   block/curl.c |  13 ++--
>   block/nbd.c  | 188 ++++++++++++++++++++++++---------------------------

Better would be make two separate patches I think.

>   2 files changed, 95 insertions(+), 106 deletions(-)
> 
> diff --git a/block/curl.c b/block/curl.c
> index 4ff895df8f..56a217951a 100644
> --- a/block/curl.c
> +++ b/block/curl.c
> @@ -832,12 +832,12 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
>       uint64_t start = acb->offset;
>       uint64_t end;
>   
> -    qemu_mutex_lock(&s->mutex);
> +    QEMU_LOCK_GUARD(&s->mutex);
>   
>       // In case we have the requested data already (e.g. read-ahead),
>       // we can just call the callback and be done.
>       if (curl_find_buf(s, start, acb->bytes, acb)) {
> -        goto out;
> +        return;
>       }
>   
>       // No cache found, so let's start a new request
> @@ -852,7 +852,7 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
>       if (curl_init_state(s, state) < 0) {
>           curl_clean_state(state);
>           acb->ret = -EIO;
> -        goto out;
> +        return;
>       }
>   
>       acb->start = 0;
> @@ -867,7 +867,7 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
>       if (state->buf_len && state->orig_buf == NULL) {
>           curl_clean_state(state);
>           acb->ret = -ENOMEM;
> -        goto out;
> +        return;
>       }
>       state->acb[0] = acb;
>   
> @@ -880,14 +880,11 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
>           acb->ret = -EIO;
>   
>           curl_clean_state(state);
> -        goto out;
> +        return;
>       }
>   
>       /* Tell curl it needs to kick things off */
>       curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
> -
> -out:
> -    qemu_mutex_unlock(&s->mutex);
>   }

This change is obvious and good.

>   
>   static int coroutine_fn curl_co_preadv(BlockDriverState *bs,
> diff --git a/block/nbd.c b/block/nbd.c
> index c26dc5a54f..28ba7aad61 100644
> --- a/block/nbd.c
> +++ b/block/nbd.c
> @@ -407,27 +407,25 @@ static void *connect_thread_func(void *opaque)
>           thr->sioc = NULL;
>       }
>   
> -    qemu_mutex_lock(&thr->mutex);
> -
> -    switch (thr->state) {
> -    case CONNECT_THREAD_RUNNING:
> -        thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
> -        if (thr->bh_ctx) {
> -            aio_bh_schedule_oneshot(thr->bh_ctx, thr->bh_func, thr->bh_opaque);
> -
> -            /* play safe, don't reuse bh_ctx on further connection attempts */
> -            thr->bh_ctx = NULL;
> +    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
> +        switch (thr->state) {
> +        case CONNECT_THREAD_RUNNING:
> +            thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
> +            if (thr->bh_ctx) {
> +                aio_bh_schedule_oneshot(thr->bh_ctx, thr->bh_func, thr->bh_opaque);

over-80 line

> +
> +                /* play safe, don't reuse bh_ctx on further connection attempts */

and here

> +                thr->bh_ctx = NULL;
> +            }
> +            break;
> +        case CONNECT_THREAD_RUNNING_DETACHED:
> +            do_free = true;
> +            break;
> +        default:
> +            abort();
>           }
> -        break;
> -    case CONNECT_THREAD_RUNNING_DETACHED:
> -        do_free = true;
> -        break;
> -    default:
> -        abort();
>       }
>   
> -    qemu_mutex_unlock(&thr->mutex);
> -
>       if (do_free) {
>           nbd_free_connect_thread(thr);
>       }
> @@ -443,36 +441,33 @@ nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
>       BDRVNBDState *s = bs->opaque;
>       NBDConnectThread *thr = s->connect_thread;
>   
> -    qemu_mutex_lock(&thr->mutex);
> -
> -    switch (thr->state) {
> -    case CONNECT_THREAD_FAIL:
> -    case CONNECT_THREAD_NONE:
> -        error_free(thr->err);
> -        thr->err = NULL;
> -        thr->state = CONNECT_THREAD_RUNNING;
> -        qemu_thread_create(&thread, "nbd-connect",
> -                           connect_thread_func, thr, QEMU_THREAD_DETACHED);
> -        break;
> -    case CONNECT_THREAD_SUCCESS:
> -        /* Previous attempt finally succeeded in background */
> -        thr->state = CONNECT_THREAD_NONE;
> -        s->sioc = thr->sioc;
> -        thr->sioc = NULL;
> -        yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
> -                               nbd_yank, bs);
> -        qemu_mutex_unlock(&thr->mutex);
> -        return 0;
> -    case CONNECT_THREAD_RUNNING:
> -        /* Already running, will wait */
> -        break;
> -    default:
> -        abort();
> -    }
> -
> -    thr->bh_ctx = qemu_get_current_aio_context();
> +    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
> +        switch (thr->state) {
> +        case CONNECT_THREAD_FAIL:
> +        case CONNECT_THREAD_NONE:
> +            error_free(thr->err);
> +            thr->err = NULL;
> +            thr->state = CONNECT_THREAD_RUNNING;
> +            qemu_thread_create(&thread, "nbd-connect",
> +                               connect_thread_func, thr, QEMU_THREAD_DETACHED);
> +            break;
> +        case CONNECT_THREAD_SUCCESS:
> +            /* Previous attempt finally succeeded in background */
> +            thr->state = CONNECT_THREAD_NONE;
> +            s->sioc = thr->sioc;
> +            thr->sioc = NULL;
> +            yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
> +                                   nbd_yank, bs);
> +            return 0;
> +        case CONNECT_THREAD_RUNNING:
> +            /* Already running, will wait */
> +            break;
> +        default:
> +            abort();
> +        }
>   
> -    qemu_mutex_unlock(&thr->mutex);
> +        thr->bh_ctx = qemu_get_current_aio_context();
> +    }
>   
>   
>       /*
> @@ -486,46 +481,45 @@ nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
>       s->wait_connect = true;
>       qemu_coroutine_yield();
>   
> -    qemu_mutex_lock(&thr->mutex);
>   
> -    switch (thr->state) {
> -    case CONNECT_THREAD_SUCCESS:
> -    case CONNECT_THREAD_FAIL:
> -        thr->state = CONNECT_THREAD_NONE;
> -        error_propagate(errp, thr->err);
> -        thr->err = NULL;
> -        s->sioc = thr->sioc;
> -        thr->sioc = NULL;
> -        if (s->sioc) {
> -            yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
> -                                   nbd_yank, bs);
> -        }
> -        ret = (s->sioc ? 0 : -1);
> -        break;
> -    case CONNECT_THREAD_RUNNING:
> -    case CONNECT_THREAD_RUNNING_DETACHED:
> -        /*
> -         * Obviously, drained section wants to start. Report the attempt as
> -         * failed. Still connect thread is executing in background, and its
> -         * result may be used for next connection attempt.
> -         */
> -        ret = -1;
> -        error_setg(errp, "Connection attempt cancelled by other operation");
> -        break;
> +    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
> +        switch (thr->state) {
> +        case CONNECT_THREAD_SUCCESS:
> +        case CONNECT_THREAD_FAIL:
> +            thr->state = CONNECT_THREAD_NONE;
> +            error_propagate(errp, thr->err);
> +            thr->err = NULL;
> +            s->sioc = thr->sioc;
> +            thr->sioc = NULL;
> +            if (s->sioc) {
> +                yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
> +                                       nbd_yank, bs);
> +            }
> +            ret = (s->sioc ? 0 : -1);
> +            break;
> +        case CONNECT_THREAD_RUNNING:
> +        case CONNECT_THREAD_RUNNING_DETACHED:
> +            /*
> +             * Obviously, drained section wants to start. Report the attempt as
> +             * failed. Still connect thread is executing in background, and its
> +             * result may be used for next connection attempt.
> +             */
> +            ret = -1;
> +            error_setg(errp, "Connection attempt cancelled by other operation");
> +            break;
>   
> -    case CONNECT_THREAD_NONE:
> -        /*
> -         * Impossible. We've seen this thread running. So it should be
> -         * running or at least give some results.
> -         */
> -        abort();
> +        case CONNECT_THREAD_NONE:
> +            /*
> +             * Impossible. We've seen this thread running. So it should be
> +             * running or at least give some results.
> +             */
> +            abort();
>   
> -    default:
> -        abort();
> +        default:
> +            abort();
> +        }
>       }
>   
> -    qemu_mutex_unlock(&thr->mutex);
> -
>       return ret;
>   }
>   
> @@ -546,25 +540,23 @@ static void nbd_co_establish_connection_cancel(BlockDriverState *bs,
>       bool wake = false;
>       bool do_free = false;
>   
> -    qemu_mutex_lock(&thr->mutex);
> -
> -    if (thr->state == CONNECT_THREAD_RUNNING) {
> -        /* We can cancel only in running state, when bh is not yet scheduled */
> -        thr->bh_ctx = NULL;
> -        if (s->wait_connect) {
> -            s->wait_connect = false;
> -            wake = true;
> -        }
> -        if (detach) {
> -            thr->state = CONNECT_THREAD_RUNNING_DETACHED;
> -            s->connect_thread = NULL;
> +    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
> +        if (thr->state == CONNECT_THREAD_RUNNING) {
> +            /* We can cancel only in running state, when bh is not yet scheduled */

over-80 line

> +            thr->bh_ctx = NULL;
> +            if (s->wait_connect) {
> +                s->wait_connect = false;
> +                wake = true;
> +            }
> +            if (detach) {
> +                thr->state = CONNECT_THREAD_RUNNING_DETACHED;
> +                s->connect_thread = NULL;
> +            }
> +        } else if (detach) {
> +            do_free = true;
>           }
> -    } else if (detach) {
> -        do_free = true;
>       }
>   
> -    qemu_mutex_unlock(&thr->mutex);
> -
>       if (do_free) {
>           nbd_free_connect_thread(thr);
>           s->connect_thread = NULL;
> 


For nbd.c we mostly change simple critical sections

qemu_mutex_lock()
...
qemu_mutex_unlock()

into

WITH_QEMU_LOCK_GUARD() {
...
}

And don't eliminate any gotos… Hmm. At first sight, increasing the nesting of the code doesn't make it more beautiful.
But I understand that the context-manager concept is safer than calling unlock() by hand, and with the nested block the critical section becomes more obvious. So, with the over-80-column lines fixed:

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

-- 
Best regards,
Vladimir


  reply	other threads:[~2021-03-12 10:33 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-03-11  3:15 [PATCH 0/9] Changing qemu_mutex_locks to lock guard macros Mahmoud Mandour
2021-03-11  3:15 ` [PATCH 1/9] tpm: Changed a qemu_mutex_lock to QEMU_LOCK_GUARD Mahmoud Mandour
2021-03-11 10:04   ` Marc-André Lureau
2021-03-23  2:58   ` Stefan Berger
2021-03-11  3:15 ` [PATCH 2/9] block: Replaced qemu_mutex_lock calls with QEMU_LOCK_GUARD Mahmoud Mandour
2021-03-12 10:23   ` Vladimir Sementsov-Ogievskiy [this message]
2021-03-13  5:51     ` Mahmoud Mandour
2021-03-15 14:08       ` Vladimir Sementsov-Ogievskiy
2021-03-16 13:29     ` Eric Blake
2021-03-11  3:15 ` [PATCH 3/9] char: Replaced a qemu_mutex_lock " Mahmoud Mandour
2021-03-11 10:05   ` Marc-André Lureau
2021-03-11  3:15 ` [PATCH 4/9] util: Replaced qemu_mutex_lock with QEMU_LOCK_GUARDs Mahmoud Mandour
2021-03-11  3:15 ` [PATCH 5/9] monitor: Replaced qemu_mutex_lock calls with QEMU_LOCK_GUARD Mahmoud Mandour
2021-03-11  9:50   ` Dr. David Alan Gilbert
2021-03-11  3:15 ` [PATCH 6/9] migration: " Mahmoud Mandour
2021-03-11  9:44   ` Dr. David Alan Gilbert
2021-03-15 18:01     ` Dr. David Alan Gilbert
2021-03-11  3:15 ` [PATCH 7/9] virtio-iommu: " Mahmoud Mandour
2021-03-11  3:15 ` [PATCH 8/9] hw/9pfs/9p-synth: Replaced qemu_mutex_lock " Mahmoud Mandour
2021-03-11  7:43   ` Greg Kurz
2021-03-11 10:49   ` Christian Schoenebeck
2021-03-11 11:52     ` Greg Kurz
2021-03-11 11:59       ` Christian Schoenebeck
2021-03-13  5:43         ` Mahmoud Mandour
2021-03-13  7:51           ` Greg Kurz
2021-03-15 16:07             ` Christian Schoenebeck
2021-03-15 20:31               ` Greg Kurz
2021-03-11  3:15 ` [PATCH 9/9] hw/hyperv/vmbus: replaced " Mahmoud Mandour

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=d74ef980-ad9b-8a97-0bc8-1ecc60a28c65@virtuozzo.com \
    --to=vsementsov@virtuozzo.com \
    --cc=kwolf@redhat.com \
    --cc=ma.mandourr@gmail.com \
    --cc=mreitz@redhat.com \
    --cc=qemu-block@nongnu.org \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.