From: Eric Blake <eblake@redhat.com>
To: qemu-devel@nongnu.org
Cc: Roman Kagan <rvkagan@yandex-team.ru>,
	Kevin Wolf <kwolf@redhat.com>,
	Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>,
	"open list:Network Block Dev..." <qemu-block@nongnu.org>,
	Max Reitz <mreitz@redhat.com>
Subject: [PULL 24/34] block/nbd: split nbd_handle_updated_info out of nbd_client_handshake()
Date: Tue, 15 Jun 2021 15:47:46 -0500
Message-ID: <20210615204756.281505-25-eblake@redhat.com>
In-Reply-To: <20210615204756.281505-1-eblake@redhat.com>

From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

Split the post-negotiation handling of server-advertised options out of
nbd_client_handshake() into a new helper, nbd_handle_updated_info(), to
be reused in the following patch.
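
For reference, a minimal sketch of the intended call pattern (the caller
name and surrounding error handling are illustrative assumptions, not
taken from this patch or the follow-up):

    /*
     * Hypothetical reuse site: once negotiation has completed elsewhere
     * (e.g. in a connection helper), apply the negotiated info to the
     * BDS and bail out if the server's options do not fit our needs.
     * caller_after_negotiation() is a made-up name for illustration.
     */
    static int caller_after_negotiation(BlockDriverState *bs, Error **errp)
    {
        int ret = nbd_handle_updated_info(bs, errp);
        if (ret < 0) {
            /* Caller is responsible for tearing the connection down. */
            return ret;
        }
        return 0;
    }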

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Roman Kagan <rvkagan@yandex-team.ru>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20210610100802.5888-23-vsementsov@virtuozzo.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
---
 block/nbd.c | 112 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 64 insertions(+), 48 deletions(-)

diff --git a/block/nbd.c b/block/nbd.c
index df9d241313f4..240c6e1b3d72 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -314,6 +314,51 @@ static bool nbd_client_connecting_wait(BDRVNBDState *s)
     return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT;
 }

+/*
+ * Update @bs with information learned during a completed negotiation process.
+ * Return failure if the server's advertised options are incompatible with the
+ * client's needs.
+ */
+static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
+{
+    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
+    int ret;
+
+    if (s->x_dirty_bitmap) {
+        if (!s->info.base_allocation) {
+            error_setg(errp, "requested x-dirty-bitmap %s not found",
+                       s->x_dirty_bitmap);
+            return -EINVAL;
+        }
+        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
+            s->alloc_depth = true;
+        }
+    }
+
+    if (s->info.flags & NBD_FLAG_READ_ONLY) {
+        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    if (s->info.flags & NBD_FLAG_SEND_FUA) {
+        bs->supported_write_flags = BDRV_REQ_FUA;
+        bs->supported_zero_flags |= BDRV_REQ_FUA;
+    }
+
+    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
+        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
+        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
+            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
+        }
+    }
+
+    trace_nbd_client_handshake_success(s->export);
+
+    return 0;
+}
+
 static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
 {
     int ret;
@@ -1575,32 +1620,25 @@ static int nbd_client_handshake(BlockDriverState *bs, Error **errp)
         s->sioc = NULL;
         return ret;
     }
-    if (s->x_dirty_bitmap) {
-        if (!s->info.base_allocation) {
-            error_setg(errp, "requested x-dirty-bitmap %s not found",
-                       s->x_dirty_bitmap);
-            ret = -EINVAL;
-            goto fail;
-        }
-        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
-            s->alloc_depth = true;
-        }
-    }
-    if (s->info.flags & NBD_FLAG_READ_ONLY) {
-        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
-        if (ret < 0) {
-            goto fail;
-        }
-    }
-    if (s->info.flags & NBD_FLAG_SEND_FUA) {
-        bs->supported_write_flags = BDRV_REQ_FUA;
-        bs->supported_zero_flags |= BDRV_REQ_FUA;
-    }
-    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
-        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
-        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
-            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
-        }
+
+    ret = nbd_handle_updated_info(bs, errp);
+    if (ret < 0) {
+        /*
+         * We have connected, but must fail for other reasons.
+         * Send NBD_CMD_DISC as a courtesy to the server.
+         */
+        NBDRequest request = { .type = NBD_CMD_DISC };
+
+        nbd_send_request(s->ioc ?: QIO_CHANNEL(s->sioc), &request);
+
+        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
+                                 nbd_yank, bs);
+        object_unref(OBJECT(s->sioc));
+        s->sioc = NULL;
+        object_unref(OBJECT(s->ioc));
+        s->ioc = NULL;
+
+        return ret;
     }

     if (!s->ioc) {
@@ -1608,29 +1646,7 @@ static int nbd_client_handshake(BlockDriverState *bs, Error **errp)
         object_ref(OBJECT(s->ioc));
     }

-    trace_nbd_client_handshake_success(s->export);
-
     return 0;
-
- fail:
-    /*
-     * We have connected, but must fail for other reasons.
-     * Send NBD_CMD_DISC as a courtesy to the server.
-     */
-    {
-        NBDRequest request = { .type = NBD_CMD_DISC };
-
-        nbd_send_request(s->ioc ?: QIO_CHANNEL(s->sioc), &request);
-
-        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
-                                 nbd_yank, bs);
-        object_unref(OBJECT(s->sioc));
-        s->sioc = NULL;
-        object_unref(OBJECT(s->ioc));
-        s->ioc = NULL;
-
-        return ret;
-    }
 }

 /*
-- 
2.31.1



