From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
To: qemu-block@nongnu.org
Cc: qemu-devel@nongnu.org, vsementsov@virtuozzo.com,
	eblake@redhat.com, mreitz@redhat.com, kwolf@redhat.com,
	rvkagan@yandex-team.ru, den@openvz.org
Subject: [PATCH v3 33/33] block/nbd: drop connection_co
Date: Fri, 16 Apr 2021 11:09:11 +0300
Message-ID: <20210416080911.83197-34-vsementsov@virtuozzo.com>
In-Reply-To: <20210416080911.83197-1-vsementsov@virtuozzo.com>

OK, that's a big rewrite of the logic.

Pre-patch we have an always-running coroutine, connection_co, which
handles both reply receiving and reconnecting. This leads to a lot of
difficult and unobvious code around drained sections and context
switching. We also abuse the bs->in_flight counter: it is increased
for connection_co and temporarily decreased at the points where we
want to allow a drained section to begin. One of these places is even
in another file: nbd_read_eof() in nbd/client.c.

We also cancel reconnect attempts and the requests waiting for a
reconnect on drained-section begin, which is not correct.

Let's finally drop this always-running coroutine and take another approach:

1. nbd_reconnect_attempt() is moved into nbd_co_send_request() and is
   called under send_mutex.

2. Reply headers are now received in the request coroutine itself. But
   we should also dispatch replies for the other pending requests. So
   nbd_connection_entry() is turned into nbd_receive_replies(), which
   keeps dispatching replies for other requests until it receives the
   header of the requested one, and then returns (see the condensed
   sketch after this list).

3. All the old stuff around drained sections and context switching is
   dropped.
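
Here is a condensed C sketch of the new dispatch flow. The names are
taken from the patch below, but error handling, channel shutdown and
the send_mutex interaction are omitted, so treat it as an illustration
rather than compilable code:

    static coroutine_fn void
    nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
    {
        uint64_t i = HANDLE_TO_INDEX(s, handle);

        if (s->receive_co) {
            /* Another request coroutine owns the channel: wait our turn */
            s->requests[i].receiving = true;
            qemu_coroutine_yield();
            if (s->receive_co != qemu_coroutine_self()) {
                /* Our reply was dispatched to us while we waited */
                return;
            }
        }
        s->receive_co = qemu_coroutine_self();

        while (nbd_client_connected(s)) {
            nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
            if (s->reply.handle == handle) {
                break;  /* the header we were waiting for */
            }
            /*
             * A reply for another pending request: wake its coroutine;
             * it wakes us again once it has consumed its reply.
             */
            i = HANDLE_TO_INDEX(s, s->reply.handle);
            s->requests[i].receiving = false;
            aio_co_wake(s->requests[i].coroutine);
            qemu_coroutine_yield();
        }

        /* Hand the receiver role over to the next waiting request */
        s->receive_co = NULL;
        for (i = 0; i < MAX_NBD_REQUESTS; i++) {
            if (s->requests[i].coroutine && s->requests[i].receiving) {
                s->requests[i].receiving = false;
                s->receive_co = s->requests[i].coroutine;
                aio_co_wake(s->requests[i].coroutine);
                break;
            }
        }
    }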

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 block/nbd.c  | 376 ++++++++++++++++-----------------------------------
 nbd/client.c |   2 -
 2 files changed, 119 insertions(+), 259 deletions(-)

diff --git a/block/nbd.c b/block/nbd.c
index 03391bb231..3a7b532790 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -59,7 +59,7 @@
 typedef struct {
     Coroutine *coroutine;
     uint64_t offset;        /* original offset of the request */
-    bool receiving;         /* waiting for connection_co? */
+    bool receiving; /* waiting in first yield of nbd_receive_replies() */
 } NBDClientRequest;
 
 typedef enum NBDClientState {
@@ -75,14 +75,10 @@ typedef struct BDRVNBDState {
 
     CoMutex send_mutex;
     CoQueue free_sema;
-    Coroutine *connection_co;
-    Coroutine *teardown_co;
-    QemuCoSleepState *connection_co_sleep_ns_state;
-    bool drained;
-    bool wait_drained_end;
+    Coroutine *receive_co;
+    Coroutine *in_flight_waiter;
     int in_flight;
     NBDClientState state;
-    bool wait_in_flight;
 
     QEMUTimer *reconnect_delay_timer;
 
@@ -131,33 +127,20 @@ static bool nbd_client_connected(BDRVNBDState *s)
 
 static void nbd_channel_error(BDRVNBDState *s, int ret)
 {
+    if (nbd_client_connected(s)) {
+        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
+    }
+
     if (ret == -EIO) {
         if (nbd_client_connected(s)) {
             s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                             NBD_CLIENT_CONNECTING_NOWAIT;
         }
     } else {
-        if (nbd_client_connected(s)) {
-            qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
-        }
         s->state = NBD_CLIENT_QUIT;
     }
 }
 
-static void nbd_recv_coroutines_wake_all(BDRVNBDState *s)
-{
-    int i;
-
-    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
-        NBDClientRequest *req = &s->requests[i];
-
-        if (req->coroutine && req->receiving) {
-            req->receiving = false;
-            aio_co_wake(req->coroutine);
-        }
-    }
-}
-
 static void reconnect_delay_timer_del(BDRVNBDState *s)
 {
     if (s->reconnect_delay_timer) {
@@ -194,117 +177,23 @@ static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
     timer_mod(s->reconnect_delay_timer, expire_time_ns);
 }
 
-static void nbd_client_detach_aio_context(BlockDriverState *bs)
-{
-    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
-
-    /* Timer is deleted in nbd_client_co_drain_begin() */
-    assert(!s->reconnect_delay_timer);
-    /*
-     * If reconnect is in progress we may have no ->ioc.  It will be
-     * re-instantiated in the proper aio context once the connection is
-     * reestablished.
-     */
-    if (s->ioc) {
-        qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
-    }
-}
-
-static void nbd_client_attach_aio_context_bh(void *opaque)
-{
-    BlockDriverState *bs = opaque;
-    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
-
-    if (s->connection_co) {
-        /*
-         * The node is still drained, so we know the coroutine has yielded in
-         * nbd_read_eof(), the only place where bs->in_flight can reach 0, or
-         * it is entered for the first time. Both places are safe for entering
-         * the coroutine.
-         */
-        qemu_aio_coroutine_enter(bs->aio_context, s->connection_co);
-    }
-    bdrv_dec_in_flight(bs);
-}
-
-static void nbd_client_attach_aio_context(BlockDriverState *bs,
-                                          AioContext *new_context)
-{
-    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
-
-    /*
-     * s->connection_co is either yielded from nbd_receive_reply or from
-     * nbd_co_reconnect_loop()
-     */
-    if (nbd_client_connected(s)) {
-        qio_channel_attach_aio_context(QIO_CHANNEL(s->ioc), new_context);
-    }
-
-    bdrv_inc_in_flight(bs);
-
-    /*
-     * Need to wait here for the BH to run because the BH must run while the
-     * node is still drained.
-     */
-    aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs);
-}
-
-static void coroutine_fn nbd_client_co_drain_begin(BlockDriverState *bs)
-{
-    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
-
-    s->drained = true;
-    if (s->connection_co_sleep_ns_state) {
-        qemu_co_sleep_wake(s->connection_co_sleep_ns_state);
-    }
-
-    nbd_co_establish_connection_cancel(s->conn);
-
-    reconnect_delay_timer_del(s);
-
-    if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
-        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
-        qemu_co_queue_restart_all(&s->free_sema);
-    }
-}
-
-static void coroutine_fn nbd_client_co_drain_end(BlockDriverState *bs)
-{
-    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
-
-    s->drained = false;
-    if (s->wait_drained_end) {
-        s->wait_drained_end = false;
-        aio_co_wake(s->connection_co);
-    }
-}
-
-
 static void nbd_teardown_connection(BlockDriverState *bs)
 {
     BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
 
+    assert(!s->in_flight);
+    assert(!s->receive_co);
+
     if (s->ioc) {
         /* finish any pending coroutines */
         qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
+        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
+                                 nbd_yank, s->bs);
+        object_unref(OBJECT(s->ioc));
+        s->ioc = NULL;
     }
 
     s->state = NBD_CLIENT_QUIT;
-    if (s->connection_co) {
-        if (s->connection_co_sleep_ns_state) {
-            qemu_co_sleep_wake(s->connection_co_sleep_ns_state);
-        }
-        nbd_co_establish_connection_cancel(s->conn);
-    }
-    if (qemu_in_coroutine()) {
-        s->teardown_co = qemu_coroutine_self();
-        /* connection_co resumes us when it terminates */
-        qemu_coroutine_yield();
-        s->teardown_co = NULL;
-    } else {
-        BDRV_POLL_WHILE(bs, s->connection_co);
-    }
-    assert(!s->connection_co);
 }
 
 static bool nbd_client_connecting(BDRVNBDState *s)
@@ -367,10 +256,11 @@ int nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp)
 {
     BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
     int ret;
+    bool blocking = nbd_client_connecting_wait(s);
 
     assert(!s->ioc);
 
-    s->ioc = nbd_co_establish_connection(s->conn, &s->info, true, errp);
+    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
     if (!s->ioc) {
         return -ECONNREFUSED;
     }
@@ -404,6 +294,7 @@ int nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp)
     return 0;
 }
 
+/* called under s->send_mutex */
 static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
 {
     if (!nbd_client_connecting(s)) {
@@ -412,23 +303,29 @@ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
 
     /* Wait for completion of all in-flight requests */
 
-    qemu_co_mutex_lock(&s->send_mutex);
-
-    while (s->in_flight > 0) {
-        qemu_co_mutex_unlock(&s->send_mutex);
-        nbd_recv_coroutines_wake_all(s);
-        s->wait_in_flight = true;
+    if (s->in_flight) {
+        s->in_flight_waiter = qemu_coroutine_self();
         qemu_coroutine_yield();
-        s->wait_in_flight = false;
-        qemu_co_mutex_lock(&s->send_mutex);
+        assert(!s->in_flight_waiter);
+        assert(!s->in_flight);
     }
 
-    qemu_co_mutex_unlock(&s->send_mutex);
-
     if (!nbd_client_connecting(s)) {
         return;
     }
 
+    if (nbd_client_connecting_wait(s) && s->reconnect_delay &&
+        !s->reconnect_delay_timer)
+    {
+        /*
+         * It's the first reconnect attempt after switching to
+         * NBD_CLIENT_CONNECTING_WAIT
+         */
+        reconnect_delay_timer_init(s,
+            qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
+            s->reconnect_delay * NANOSECONDS_PER_SECOND);
+    }
+
     /*
      * Now we are sure that nobody is accessing the channel, and no one will
      * try until we set the state to CONNECTED.
@@ -446,73 +343,34 @@ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
     nbd_co_do_establish_connection(s->bs, NULL);
 }
 
-static coroutine_fn void nbd_co_reconnect_loop(BDRVNBDState *s)
-{
-    uint64_t timeout = 1 * NANOSECONDS_PER_SECOND;
-    uint64_t max_timeout = 16 * NANOSECONDS_PER_SECOND;
-
-    if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
-        reconnect_delay_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
-                                   s->reconnect_delay * NANOSECONDS_PER_SECOND);
-    }
-
-    nbd_reconnect_attempt(s);
-
-    while (nbd_client_connecting(s)) {
-        if (s->drained) {
-            bdrv_dec_in_flight(s->bs);
-            s->wait_drained_end = true;
-            while (s->drained) {
-                /*
-                 * We may be entered once from nbd_client_attach_aio_context_bh
-                 * and then from nbd_client_co_drain_end. So here is a loop.
-                 */
-                qemu_coroutine_yield();
-            }
-            bdrv_inc_in_flight(s->bs);
-        } else {
-            qemu_co_sleep_ns_wakeable(QEMU_CLOCK_REALTIME, timeout,
-                                      &s->connection_co_sleep_ns_state);
-            if (s->drained) {
-                continue;
-            }
-            if (timeout < max_timeout) {
-                timeout *= 2;
-            }
-        }
-
-        nbd_reconnect_attempt(s);
-    }
-
-    reconnect_delay_timer_del(s);
-}
-
-static coroutine_fn void nbd_connection_entry(void *opaque)
+static coroutine_fn void nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
 {
-    BDRVNBDState *s = opaque;
     uint64_t i;
     int ret = 0;
     Error *local_err = NULL;
 
-    while (qatomic_load_acquire(&s->state) != NBD_CLIENT_QUIT) {
-        /*
-         * The NBD client can only really be considered idle when it has
-         * yielded from qio_channel_readv_all_eof(), waiting for data. This is
-         * the point where the additional scheduled coroutine entry happens
-         * after nbd_client_attach_aio_context().
-         *
-         * Therefore we keep an additional in_flight reference all the time and
-         * only drop it temporarily here.
-         */
+    i = HANDLE_TO_INDEX(s, handle);
+    if (s->receive_co) {
+        assert(s->receive_co != qemu_coroutine_self());
 
-        if (nbd_client_connecting(s)) {
-            nbd_co_reconnect_loop(s);
-        }
+        /* Another request coroutine is receiving now */
+        s->requests[i].receiving = true;
+        qemu_coroutine_yield();
+        assert(!s->requests[i].receiving);
 
-        if (!nbd_client_connected(s)) {
-            continue;
+        if (s->receive_co != qemu_coroutine_self()) {
+            /*
+             * We have either failed or finished; the caller uses
+             * nbd_client_connected() to distinguish.
+             */
+            return;
         }
+    }
+
+    assert(s->receive_co == 0 || s->receive_co == qemu_coroutine_self());
+    s->receive_co = qemu_coroutine_self();
 
+    while (nbd_client_connected(s)) {
         assert(s->reply.handle == 0);
         ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err);
 
@@ -522,8 +380,21 @@ static coroutine_fn void nbd_connection_entry(void *opaque)
             local_err = NULL;
         }
         if (ret <= 0) {
-            nbd_channel_error(s, ret ? ret : -EIO);
-            continue;
+            ret = ret ? ret : -EIO;
+            nbd_channel_error(s, ret);
+            goto out;
+        }
+
+        if (!nbd_client_connected(s)) {
+            ret = -EIO;
+            goto out;
+        }
+
+        i = HANDLE_TO_INDEX(s, s->reply.handle);
+
+        if (s->reply.handle == handle) {
+            ret = 0;
+            goto out;
         }
 
         /*
@@ -531,50 +402,49 @@ static coroutine_fn void nbd_connection_entry(void *opaque)
          * handler acts as a synchronization point and ensures that only
          * one coroutine is called until the reply finishes.
          */
-        i = HANDLE_TO_INDEX(s, s->reply.handle);
         if (i >= MAX_NBD_REQUESTS ||
             !s->requests[i].coroutine ||
             !s->requests[i].receiving ||
             (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
         {
             nbd_channel_error(s, -EINVAL);
-            continue;
+            ret = -EINVAL;
+            goto out;
         }
 
-        /*
-         * We're woken up again by the request itself.  Note that there
-         * is no race between yielding and reentering connection_co.  This
-         * is because:
-         *
-         * - if the request runs on the same AioContext, it is only
-         *   entered after we yield
-         *
-         * - if the request runs on a different AioContext, reentering
-         *   connection_co happens through a bottom half, which can only
-         *   run after we yield.
-         */
         s->requests[i].receiving = false;
         aio_co_wake(s->requests[i].coroutine);
         qemu_coroutine_yield();
     }
 
-    qemu_co_queue_restart_all(&s->free_sema);
-    nbd_recv_coroutines_wake_all(s);
-    bdrv_dec_in_flight(s->bs);
-
-    s->connection_co = NULL;
-    if (s->ioc) {
-        qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
-        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
-                                 nbd_yank, s->bs);
-        object_unref(OBJECT(s->ioc));
-        s->ioc = NULL;
-    }
+out:
+    if (ret < 0) {
+        s->receive_co = NULL;
+        for (i = 0; i < MAX_NBD_REQUESTS; i++) {
+            NBDClientRequest *req = &s->requests[i];
 
-    if (s->teardown_co) {
-        aio_co_wake(s->teardown_co);
+            if (req->coroutine && req->receiving) {
+                req->receiving = false;
+                aio_co_wake(req->coroutine);
+            }
+        }
+    } else {
+        /*
+         * If some request is still waiting to receive, it should
+         * become the next "receive_co".
+         */
+        for (i = 0; i < MAX_NBD_REQUESTS; i++) {
+            NBDClientRequest *req = &s->requests[i];
+
+            if (req->coroutine && req->receiving) {
+                req->receiving = false;
+                s->receive_co = req->coroutine;
+                aio_co_wake(req->coroutine);
+                return;
+            }
+        }
+        s->receive_co = NULL;
     }
-    aio_wait_kick();
 }
 
 static int nbd_co_send_request(BlockDriverState *bs,
@@ -585,7 +455,15 @@ static int nbd_co_send_request(BlockDriverState *bs,
     int rc, i = -1;
 
     qemu_co_mutex_lock(&s->send_mutex);
-    while (s->in_flight == MAX_NBD_REQUESTS || nbd_client_connecting_wait(s)) {
+
+    nbd_reconnect_attempt(s);
+
+    if (!nbd_client_connected(s)) {
+        qemu_co_mutex_unlock(&s->send_mutex);
+        return -EIO;
+    }
+
+    while (s->in_flight == MAX_NBD_REQUESTS) {
         qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
     }
 
@@ -636,10 +514,10 @@ err:
             s->requests[i].coroutine = NULL;
             s->in_flight--;
         }
-        if (s->in_flight == 0 && s->wait_in_flight) {
-            aio_co_wake(s->connection_co);
-        } else {
-            qemu_co_queue_next(&s->free_sema);
+        if (s->in_flight == 0 && s->in_flight_waiter) {
+            Coroutine *co = s->in_flight_waiter;
+            s->in_flight_waiter = NULL;
+            aio_co_wake(co);
         }
     }
     qemu_co_mutex_unlock(&s->send_mutex);
@@ -938,9 +816,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
     *request_ret = 0;
 
     /* Wait until we're woken up by nbd_connection_entry.  */
-    s->requests[i].receiving = true;
-    qemu_coroutine_yield();
-    assert(!s->requests[i].receiving);
+    nbd_receive_replies(s, handle);
     if (!nbd_client_connected(s)) {
         error_setg(errp, "Connection closed");
         return -EIO;
@@ -1033,13 +909,8 @@ static coroutine_fn int nbd_co_receive_one_chunk(
     }
     s->reply.handle = 0;
 
-    if (s->connection_co && !s->wait_in_flight) {
-        /*
-         * We must check s->wait_in_flight, because we may entered by
-         * nbd_recv_coroutines_wake_all(), in this case we should not
-         * wake connection_co here, it will woken by last request.
-         */
-        aio_co_wake(s->connection_co);
+    if (s->receive_co) {
+        aio_co_wake(s->receive_co);
     }
 
     return ret;
@@ -1151,8 +1022,10 @@ break_loop:
 
     qemu_co_mutex_lock(&s->send_mutex);
     s->in_flight--;
-    if (s->in_flight == 0 && s->wait_in_flight) {
-        aio_co_wake(s->connection_co);
+    if (s->in_flight == 0 && s->in_flight_waiter) {
+        Coroutine *co = s->in_flight_waiter;
+        s->in_flight_waiter = NULL;
+        aio_co_wake(co);
     } else {
         qemu_co_queue_next(&s->free_sema);
     }
@@ -1980,14 +1853,13 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
                                         monitor_cur());
 
     /* TODO: Configurable retry-until-timeout behaviour.*/
+    s->state = NBD_CLIENT_CONNECTING_WAIT;
     ret = nbd_do_establish_connection(bs, errp);
     if (ret < 0) {
         goto fail;
     }
 
-    s->connection_co = qemu_coroutine_create(nbd_connection_entry, s);
-    bdrv_inc_in_flight(bs);
-    aio_co_schedule(bdrv_get_aio_context(bs), s->connection_co);
+    nbd_client_connection_enable_retry(s->conn);
 
     return 0;
 
@@ -2141,6 +2013,8 @@ static void nbd_cancel_in_flight(BlockDriverState *bs)
         s->state = NBD_CLIENT_CONNECTING_NOWAIT;
         qemu_co_queue_restart_all(&s->free_sema);
     }
+
+    nbd_co_establish_connection_cancel(s->conn);
 }
 
 static BlockDriver bdrv_nbd = {
@@ -2161,10 +2035,6 @@ static BlockDriver bdrv_nbd = {
     .bdrv_refresh_limits        = nbd_refresh_limits,
     .bdrv_co_truncate           = nbd_co_truncate,
     .bdrv_getlength             = nbd_getlength,
-    .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
-    .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
-    .bdrv_co_drain_begin        = nbd_client_co_drain_begin,
-    .bdrv_co_drain_end          = nbd_client_co_drain_end,
     .bdrv_refresh_filename      = nbd_refresh_filename,
     .bdrv_co_block_status       = nbd_client_co_block_status,
     .bdrv_dirname               = nbd_dirname,
@@ -2190,10 +2060,6 @@ static BlockDriver bdrv_nbd_tcp = {
     .bdrv_refresh_limits        = nbd_refresh_limits,
     .bdrv_co_truncate           = nbd_co_truncate,
     .bdrv_getlength             = nbd_getlength,
-    .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
-    .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
-    .bdrv_co_drain_begin        = nbd_client_co_drain_begin,
-    .bdrv_co_drain_end          = nbd_client_co_drain_end,
     .bdrv_refresh_filename      = nbd_refresh_filename,
     .bdrv_co_block_status       = nbd_client_co_block_status,
     .bdrv_dirname               = nbd_dirname,
@@ -2219,10 +2085,6 @@ static BlockDriver bdrv_nbd_unix = {
     .bdrv_refresh_limits        = nbd_refresh_limits,
     .bdrv_co_truncate           = nbd_co_truncate,
     .bdrv_getlength             = nbd_getlength,
-    .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
-    .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
-    .bdrv_co_drain_begin        = nbd_client_co_drain_begin,
-    .bdrv_co_drain_end          = nbd_client_co_drain_end,
     .bdrv_refresh_filename      = nbd_refresh_filename,
     .bdrv_co_block_status       = nbd_client_co_block_status,
     .bdrv_dirname               = nbd_dirname,
diff --git a/nbd/client.c b/nbd/client.c
index 0c2db4bcba..30d5383cb1 100644
--- a/nbd/client.c
+++ b/nbd/client.c
@@ -1434,9 +1434,7 @@ nbd_read_eof(BlockDriverState *bs, QIOChannel *ioc, void *buffer, size_t size,
 
         len = qio_channel_readv(ioc, &iov, 1, errp);
         if (len == QIO_CHANNEL_ERR_BLOCK) {
-            bdrv_dec_in_flight(bs);
             qio_channel_yield(ioc, G_IO_IN);
-            bdrv_inc_in_flight(bs);
             continue;
         } else if (len < 0) {
             return -EIO;
-- 
2.29.2


