From: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
To: Eric Blake <eblake@redhat.com>, qemu-devel@nongnu.org
Cc: libguestfs@redhat.com, Kevin Wolf <kwolf@redhat.com>,
	Hanna Reitz <hreitz@redhat.com>,
	"open list:Network Block Dev..." <qemu-block@nongnu.org>
Subject: Re: [PATCH v3 13/14] nbd/server: Prepare for per-request filtering of BLOCK_STATUS
Date: Thu, 1 Jun 2023 12:57:44 +0300
Message-ID: <789718bc-1348-77d4-134b-9e9356aa7378@yandex-team.ru>
In-Reply-To: <20230515195343.1915857-14-eblake@redhat.com>

On 15.05.23 22:53, Eric Blake wrote:
> The next commit will add support for the new addition of
> NBD_CMD_FLAG_PAYLOAD during NBD_CMD_BLOCK_STATUS, where the client can
> request that the server only return a subset of negotiated contexts,
> rather than all contexts.  To make that task easier, this patch
> populates the list of contexts to return on a per-command basis (for
> now, identical to the full set of negotiated contexts).
> 
> Signed-off-by: Eric Blake <eblake@redhat.com>
> ---
>   include/block/nbd.h |  15 ++++++
>   nbd/server.c        | 108 +++++++++++++++++++++++---------------------
>   2 files changed, 72 insertions(+), 51 deletions(-)
> 
> diff --git a/include/block/nbd.h b/include/block/nbd.h
> index 865bb4ee2e1..6696d61bd59 100644
> --- a/include/block/nbd.h
> +++ b/include/block/nbd.h
> @@ -60,6 +60,20 @@ typedef enum NBDHeaderStyle {
>       NBD_HEADER_EXTENDED,    /* NBD_OPT_EXTENDED_HEADERS negotiated */
>   } NBDHeaderStyle;
> 
> +/*
> + * NBDMetaContexts represents a list of meta contexts in use, as
> + * selected by NBD_OPT_SET_META_CONTEXT. Also used for
> + * NBD_OPT_LIST_META_CONTEXT, and payload filtering in
> + * NBD_CMD_BLOCK_STATUS.
> + */
> +typedef struct NBDMetaContexts {
> +    size_t count; /* number of negotiated contexts */
> +    bool base_allocation; /* export base:allocation context (block status) */
> +    bool allocation_depth; /* export qemu:allocation-depth */
> +    size_t nr_bitmaps; /* Length of bitmaps array */
> +    bool *bitmaps; /* export qemu:dirty-bitmap:<export bitmap name> */
> +} NBDMetaContexts;
> +
>   /*
>    * Note: NBDRequest is _NOT_ the same as the network representation of an NBD
>    * request!
> @@ -70,6 +84,7 @@ typedef struct NBDRequest {
>       uint64_t len;   /* Effect length; 32 bit limit without extended headers */
>       uint16_t flags; /* NBD_CMD_FLAG_* */
>       uint16_t type;  /* NBD_CMD_* */
> +    NBDMetaContexts contexts; /* Used by NBD_CMD_BLOCK_STATUS */
>   } NBDRequest;
> 
>   typedef struct NBDSimpleReply {
> diff --git a/nbd/server.c b/nbd/server.c
> index 6475a76c1f0..db550c82cd2 100644
> --- a/nbd/server.c
> +++ b/nbd/server.c
> @@ -105,20 +105,6 @@ struct NBDExport {
> 
>   static QTAILQ_HEAD(, NBDExport) exports = QTAILQ_HEAD_INITIALIZER(exports);
> 
> -/* NBDExportMetaContexts represents a list of contexts to be exported,
> - * as selected by NBD_OPT_SET_META_CONTEXT. Also used for
> - * NBD_OPT_LIST_META_CONTEXT. */
> -typedef struct NBDExportMetaContexts {
> -    NBDExport *exp;
> -    size_t count; /* number of negotiated contexts */
> -    bool base_allocation; /* export base:allocation context (block status) */
> -    bool allocation_depth; /* export qemu:allocation-depth */
> -    bool *bitmaps; /*
> -                    * export qemu:dirty-bitmap:<export bitmap name>,
> -                    * sized by exp->nr_export_bitmaps
> -                    */
> -} NBDExportMetaContexts;
> -
>   struct NBDClient {
>       int refcount;
>       void (*close_fn)(NBDClient *client, bool negotiated);
> @@ -144,7 +130,8 @@ struct NBDClient {
>       uint32_t check_align; /* If non-zero, check for aligned client requests */
> 
>       NBDHeaderStyle header_style;
> -    NBDExportMetaContexts export_meta;
> +    NBDExport *context_exp; /* export of last OPT_SET_META_CONTEXT */
> +    NBDMetaContexts contexts; /* Negotiated meta contexts */
> 
>       uint32_t opt; /* Current option being negotiated */
>       uint32_t optlen; /* remaining length of data in ioc for the option being
> @@ -457,8 +444,8 @@ static int nbd_negotiate_handle_list(NBDClient *client, Error **errp)
> 
>   static void nbd_check_meta_export(NBDClient *client)
>   {
> -    if (client->exp != client->export_meta.exp) {
> -        client->export_meta.count = 0;
> +    if (client->exp != client->context_exp) {
> +        client->contexts.count = 0;
>       }
>   }
> 
> @@ -852,7 +839,7 @@ static bool nbd_strshift(const char **str, const char *prefix)
>    * Handle queries to 'base' namespace. For now, only the base:allocation
>    * context is available.  Return true if @query has been handled.
>    */
> -static bool nbd_meta_base_query(NBDClient *client, NBDExportMetaContexts *meta,
> +static bool nbd_meta_base_query(NBDClient *client, NBDMetaContexts *meta,
>                                   const char *query)
>   {
>       if (!nbd_strshift(&query, "base:")) {
> @@ -872,8 +859,8 @@ static bool nbd_meta_base_query(NBDClient *client, NBDExportMetaContexts *meta,
>    * and qemu:allocation-depth contexts are available.  Return true if @query
>    * has been handled.
>    */
> -static bool nbd_meta_qemu_query(NBDClient *client, NBDExportMetaContexts *meta,
> -                                const char *query)
> +static bool nbd_meta_qemu_query(NBDClient *client, NBDExport *exp,
> +                                NBDMetaContexts *meta, const char *query)
>   {
>       size_t i;
> 
> @@ -884,9 +871,9 @@ static bool nbd_meta_qemu_query(NBDClient *client, NBDExportMetaContexts *meta,
> 
>       if (!*query) {
>           if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
> -            meta->allocation_depth = meta->exp->allocation_depth;
> -            if (meta->exp->nr_export_bitmaps) {
> -                memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
> +            meta->allocation_depth = exp->allocation_depth;
> +            if (meta->nr_bitmaps) {
> +                memset(meta->bitmaps, 1, meta->nr_bitmaps);
>               }
>           }
>           trace_nbd_negotiate_meta_query_parse("empty");
> @@ -895,7 +882,7 @@ static bool nbd_meta_qemu_query(NBDClient *client, NBDExportMetaContexts *meta,
> 
>       if (strcmp(query, "allocation-depth") == 0) {
>           trace_nbd_negotiate_meta_query_parse("allocation-depth");
> -        meta->allocation_depth = meta->exp->allocation_depth;
> +        meta->allocation_depth = exp->allocation_depth;
>           return true;
>       }
> 
> @@ -903,17 +890,17 @@ static bool nbd_meta_qemu_query(NBDClient *client, NBDExportMetaContexts *meta,
>           trace_nbd_negotiate_meta_query_parse("dirty-bitmap:");
>           if (!*query) {
>               if (client->opt == NBD_OPT_LIST_META_CONTEXT &&
> -                meta->exp->nr_export_bitmaps) {
> -                memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
> +                exp->nr_export_bitmaps) {
> +                memset(meta->bitmaps, 1, exp->nr_export_bitmaps);

Sometimes you change s/meta->exp->nr_export_bitmaps/meta->nr_bitmaps/ and sometimes s/meta->exp->nr_export_bitmaps/exp->nr_export_bitmaps/; the choice looks inconsistent.

>               }
>               trace_nbd_negotiate_meta_query_parse("empty");
>               return true;
>           }
> 
> -        for (i = 0; i < meta->exp->nr_export_bitmaps; i++) {
> +        for (i = 0; i < meta->nr_bitmaps; i++) {
>               const char *bm_name;
> 
> -            bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]);
> +            bm_name = bdrv_dirty_bitmap_name(exp->export_bitmaps[i]);

I don't like this change: we iterate through exp->export_bitmaps, so it seems more correct to use exp->nr_export_bitmaps as the limit.

I understand why you want to duplicate nr_export_bitmaps in NBDMetaContexts, but the result doesn't seem better than just using exp->nr_export_bitmaps.

Probably, if we want to duplicate nr_export_bitmaps, we should duplicate export_bitmaps as well. But that effectively leads back to keeping meta->exp as is.

Hm, maybe just do

    size_t nr_bitmaps = meta->nr_bitmaps;
    assert(nr_bitmaps == exp->nr_export_bitmaps);

at the top of the function, and use nr_bitmaps everywhere?
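
To illustrate what I mean (just a sketch, not tested), the top of nbd_meta_qemu_query would become:

    static bool nbd_meta_qemu_query(NBDClient *client, NBDExport *exp,
                                    NBDMetaContexts *meta, const char *query)
    {
        size_t i;
        size_t nr_bitmaps = meta->nr_bitmaps;

        /* catch any divergence between the duplicated count and the export */
        assert(nr_bitmaps == exp->nr_export_bitmaps);

and then the loops would consistently use nr_bitmaps as the limit while indexing exp->export_bitmaps[i].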

But this again makes me doubt that meta.nr_bitmaps is a good idea: we have to pass exp along anyway and check meta.nr_bitmaps == exp->nr_export_bitmaps every time.

So, I understand that

     bool *bitmaps; /*
                     * export qemu:dirty-bitmap:<export bitmap name>,
                     * sized by exp->nr_export_bitmaps
                     */

sounds like a poor design: we have an array sized by a field of another object. But at least we have a link to that object (meta->exp).

But your way runs into bad design too: we have a duplicated field and have to make sure the two never diverge. And we iterate over data stored in exp using a field of meta.


I looked through the patch, and it seems that everywhere we use nr_bitmaps we also have access to nr_export_bitmaps. So maybe just drop nr_bitmaps?
And why not keep meta.exp? With it, meta is a self-sufficient object that fully describes the selected contexts.

For per-request filtering, meta.exp would just have to always point to the current export. And anyway, it would be good to split the refactoring of the meta structure (if we need it) from the new logic of per-command filtering.
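
Just to illustrate (a rough sketch, not a tested patch), keeping the export link would give something like:

    typedef struct NBDMetaContexts {
        NBDExport *exp;        /* export whose contexts were negotiated */
        size_t count;          /* number of negotiated contexts */
        bool base_allocation;  /* export base:allocation context (block status) */
        bool allocation_depth; /* export qemu:allocation-depth */
        bool *bitmaps;         /* export qemu:dirty-bitmap:<export bitmap name>,
                                * sized by exp->nr_export_bitmaps */
    } NBDMetaContexts;

With that, the separate client->context_exp field added in this patch would presumably become unnecessary, and the per-request copy (request->contexts = client->contexts) could stay as it is.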

>               if (strcmp(bm_name, query) == 0) {
>                   meta->bitmaps[i] = true;
>                   trace_nbd_negotiate_meta_query_parse(query);
> @@ -937,8 +924,8 @@ static bool nbd_meta_qemu_query(NBDClient *client, NBDExportMetaContexts *meta,
>    *
>    * Return -errno on I/O error, 0 if option was completely handled by
>    * sending a reply about inconsistent lengths, or 1 on success. */
> -static int nbd_negotiate_meta_query(NBDClient *client,
> -                                    NBDExportMetaContexts *meta, Error **errp)
> +static int nbd_negotiate_meta_query(NBDClient *client, NBDExport *exp,
> +                                    NBDMetaContexts *meta, Error **errp)
>   {
>       int ret;
>       g_autofree char *query = NULL;
> @@ -965,7 +952,7 @@ static int nbd_negotiate_meta_query(NBDClient *client,
>       if (nbd_meta_base_query(client, meta, query)) {
>           return 1;
>       }
> -    if (nbd_meta_qemu_query(client, meta, query)) {
> +    if (nbd_meta_qemu_query(client, exp, meta, query)) {
>           return 1;
>       }
> 
> @@ -977,14 +964,15 @@ static int nbd_negotiate_meta_query(NBDClient *client,
>    * Handle NBD_OPT_LIST_META_CONTEXT and NBD_OPT_SET_META_CONTEXT
>    *
>    * Return -errno on I/O error, or 0 if option was completely handled. */
> -static int nbd_negotiate_meta_queries(NBDClient *client,
> -                                      NBDExportMetaContexts *meta, Error **errp)
> +static int nbd_negotiate_meta_queries(NBDClient *client, Error **errp)
>   {
>       int ret;
>       g_autofree char *export_name = NULL;
>       /* Mark unused to work around https://bugs.llvm.org/show_bug.cgi?id=3888 */
>       g_autofree G_GNUC_UNUSED bool *bitmaps = NULL;
> -    NBDExportMetaContexts local_meta = {0};
> +    NBDMetaContexts local_meta = {0};
> +    NBDMetaContexts *meta;
> +    NBDExport *exp;
>       uint32_t nb_queries;
>       size_t i;
>       size_t count = 0;
> @@ -1000,6 +988,9 @@ static int nbd_negotiate_meta_queries(NBDClient *client,
>       if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
>           /* Only change the caller's meta on SET. */
>           meta = &local_meta;
> +    } else {
> +        meta = &client->contexts;
> +        client->context_exp = NULL;
>       }
> 
>       g_free(meta->bitmaps);
> @@ -1010,14 +1001,15 @@ static int nbd_negotiate_meta_queries(NBDClient *client,
>           return ret;
>       }
> 
> -    meta->exp = nbd_export_find(export_name);
> -    if (meta->exp == NULL) {
> +    exp = nbd_export_find(export_name);
> +    if (exp == NULL) {
>           g_autofree char *sane_name = nbd_sanitize_name(export_name);
> 
>           return nbd_opt_drop(client, NBD_REP_ERR_UNKNOWN, errp,
>                               "export '%s' not present", sane_name);
>       }
> -    meta->bitmaps = g_new0(bool, meta->exp->nr_export_bitmaps);
> +    meta->nr_bitmaps = exp->nr_export_bitmaps;
> +    meta->bitmaps = g_new0(bool, exp->nr_export_bitmaps);
>       if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
>           bitmaps = meta->bitmaps;
>       }
> @@ -1033,13 +1025,13 @@ static int nbd_negotiate_meta_queries(NBDClient *client,
>       if (client->opt == NBD_OPT_LIST_META_CONTEXT && !nb_queries) {
>           /* enable all known contexts */
>           meta->base_allocation = true;
> -        meta->allocation_depth = meta->exp->allocation_depth;
> -        if (meta->exp->nr_export_bitmaps) {
> -            memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
> +        meta->allocation_depth = exp->allocation_depth;
> +        if (exp->nr_export_bitmaps) {
> +            memset(meta->bitmaps, 1, meta->nr_bitmaps);
>           }
>       } else {
>           for (i = 0; i < nb_queries; ++i) {
> -            ret = nbd_negotiate_meta_query(client, meta, errp);
> +            ret = nbd_negotiate_meta_query(client, exp, meta, errp);
>               if (ret <= 0) {
>                   return ret;
>               }
> @@ -1066,7 +1058,7 @@ static int nbd_negotiate_meta_queries(NBDClient *client,
>           count++;
>       }
> 
> -    for (i = 0; i < meta->exp->nr_export_bitmaps; i++) {
> +    for (i = 0; i < meta->nr_bitmaps; i++) {
>           const char *bm_name;
>           g_autofree char *context = NULL;
> 
> @@ -1074,7 +1066,7 @@ static int nbd_negotiate_meta_queries(NBDClient *client,
>               continue;
>           }
> 
> -        bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]);
> +        bm_name = bdrv_dirty_bitmap_name(exp->export_bitmaps[i]);
>           context = g_strdup_printf("qemu:dirty-bitmap:%s", bm_name);
> 
>           ret = nbd_negotiate_send_meta_context(client, context,
> @@ -1089,6 +1081,9 @@ static int nbd_negotiate_meta_queries(NBDClient *client,
>       ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
>       if (ret == 0) {
>           meta->count = count;
> +        if (client->opt == NBD_OPT_SET_META_CONTEXT) {
> +            client->context_exp = exp;
> +        }
>       }
> 
>       return ret;
> @@ -1282,8 +1277,7 @@ static int nbd_negotiate_options(NBDClient *client, Error **errp)
> 
>               case NBD_OPT_LIST_META_CONTEXT:
>               case NBD_OPT_SET_META_CONTEXT:
> -                ret = nbd_negotiate_meta_queries(client, &client->export_meta,
> -                                                 errp);
> +                ret = nbd_negotiate_meta_queries(client, errp);
>                   break;
> 
>               case NBD_OPT_EXTENDED_HEADERS:
> @@ -1514,7 +1508,7 @@ void nbd_client_put(NBDClient *client)
>               QTAILQ_REMOVE(&client->exp->clients, client, next);
>               blk_exp_unref(&client->exp->common);
>           }
> -        g_free(client->export_meta.bitmaps);
> +        g_free(client->contexts.bitmaps);
>           g_free(client);
>       }
>   }
> @@ -2489,6 +2483,8 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *
>                   return -ENOMEM;
>               }
>           }
> +    } else if (request->type == NBD_CMD_BLOCK_STATUS) {
> +        request->contexts = client->contexts;
>       }
> 
>       if (payload_len) {
> @@ -2715,11 +2711,11 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
>           }
>           assert(client->header_style >= NBD_HEADER_EXTENDED ||
>                  request->len <= UINT32_MAX);
> -        if (client->export_meta.count) {
> +        if (request->contexts.count) {
>               bool dont_fragment = request->flags & NBD_CMD_FLAG_REQ_ONE;
> -            int contexts_remaining = client->export_meta.count;
> +            int contexts_remaining = request->contexts.count;
> 
> -            if (client->export_meta.base_allocation) {
> +            if (request->contexts.base_allocation) {
>                   ret = nbd_co_send_block_status(client, request,
>                                                  exp->common.blk,
>                                                  request->from,
> @@ -2732,7 +2728,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
>                   }
>               }
> 
> -            if (client->export_meta.allocation_depth) {
> +            if (request->contexts.allocation_depth) {
>                   ret = nbd_co_send_block_status(client, request,
>                                                  exp->common.blk,
>                                                  request->from, request->len,
> @@ -2745,8 +2741,10 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
>                   }
>               }
> 
> +            assert(request->contexts.nr_bitmaps ==
> +                   client->exp->nr_export_bitmaps);
>               for (i = 0; i < client->exp->nr_export_bitmaps; i++) {
> -                if (!client->export_meta.bitmaps[i]) {
> +                if (!request->contexts.bitmaps[i]) {
>                       continue;
>                   }
>                   ret = nbd_co_send_bitmap(client, request,
> @@ -2762,6 +2760,10 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
>               assert(!contexts_remaining);
> 
>               return 0;
> +        } else if (client->contexts.count) {
> +            return nbd_send_generic_reply(client, request, -EINVAL,
> +                                          "CMD_BLOCK_STATUS payload not valid",
> +                                          errp);
>           } else {
>               return nbd_send_generic_reply(client, request, -EINVAL,
>                                             "CMD_BLOCK_STATUS not negotiated",
> @@ -2840,6 +2842,10 @@ static coroutine_fn void nbd_trip(void *opaque)
>       } else {
>           ret = nbd_handle_request(client, &request, req->data, &local_err);
>       }
> +    if (request.type == NBD_CMD_BLOCK_STATUS &&
> +        request.contexts.bitmaps != client->contexts.bitmaps) {
> +        g_free(request.contexts.bitmaps);
> +    }
>       if (ret < 0) {
>           error_prepend(&local_err, "Failed to send reply: ");
>           goto disconnect;

-- 
Best regards,
Vladimir



