From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, vsementsov@virtuozzo.com, ehabkost@redhat.com,
	wencongyang2@huawei.com, xiechanglong.d@gmail.com,
	armbru@redhat.com, qemu-devel@nongnu.org, jsnow@redhat.com,
	crosa@redhat.com, den@openvz.org, mreitz@redhat.com
Subject: [RFC 07/24] block/block-copy: hide structure definitions
Date: Fri, 15 Nov 2019 17:14:27 +0300
Message-ID: <20191115141444.24155-8-vsementsov@virtuozzo.com>
In-Reply-To: <20191115141444.24155-1-vsementsov@virtuozzo.com>

Hide the structure definitions and add an explicit API instead, to keep
an eye on the scope of the shared fields.
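
For illustration only (not part of the patch): a minimal sketch of the
opaque-type pattern applied here, with hypothetical names. The struct
definition moves out of the header into the .c file, and the header
exposes only a forward declaration plus small accessor functions, so
every external use of a field has to go through a named entry point:

    /* foo.h -- only a forward declaration and accessors are public */
    #include <stdbool.h>

    typedef struct FooState FooState;           /* definition is hidden */

    int foo_counter(FooState *s);               /* read one field */
    void foo_set_flag(FooState *s, bool flag);  /* controlled write */

    /* foo.c -- the full definition stays private to the module */
    struct FooState {
        int counter;
        bool flag;
    };

    int foo_counter(FooState *s)
    {
        return s->counter;
    }

    void foo_set_flag(FooState *s, bool flag)
    {
        s->flag = flag;
    }

This mirrors what block_copy_dirty_bitmap() and
block_copy_set_skip_unallocated() do for BlockCopyState below.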

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 include/block/block-copy.h | 57 +++------------------------------
 block/backup-top.c         |  6 ++--
 block/backup.c             | 27 ++++++++--------
 block/block-copy.c         | 64 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 86 insertions(+), 68 deletions(-)

diff --git a/include/block/block-copy.h b/include/block/block-copy.h
index d96b097267..753fa663ac 100644
--- a/include/block/block-copy.h
+++ b/include/block/block-copy.h
@@ -18,61 +18,9 @@
 #include "block/block.h"
 #include "qemu/co-shared-resource.h"
 
-typedef struct BlockCopyInFlightReq {
-    int64_t offset;
-    int64_t bytes;
-    QLIST_ENTRY(BlockCopyInFlightReq) list;
-    CoQueue wait_queue; /* coroutines blocked on this request */
-} BlockCopyInFlightReq;
-
 typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
 typedef void (*ProgressResetCallbackFunc)(void *opaque);
-typedef struct BlockCopyState {
-    /*
-     * BdrvChild objects are not owned or managed by block-copy. They are
-     * provided by block-copy user and user is responsible for appropriate
-     * permissions on these children.
-     */
-    BdrvChild *source;
-    BdrvChild *target;
-    BdrvDirtyBitmap *copy_bitmap;
-    int64_t cluster_size;
-    bool use_copy_range;
-    int64_t copy_size;
-    uint64_t len;
-    QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
-
-    BdrvRequestFlags write_flags;
-
-    /*
-     * skip_unallocated:
-     *
-     * Used by sync=top jobs, which first scan the source node for unallocated
-     * areas and clear them in the copy_bitmap.  During this process, the bitmap
-     * is thus not fully initialized: It may still have bits set for areas that
-     * are unallocated and should actually not be copied.
-     *
-     * This is indicated by skip_unallocated.
-     *
-     * In this case, block_copy() will query the source’s allocation status,
-     * skip unallocated regions, clear them in the copy_bitmap, and invoke
-     * block_copy_reset_unallocated() every time it does.
-     */
-    bool skip_unallocated;
-
-    /* progress_bytes_callback: called when some copying progress is done. */
-    ProgressBytesCallbackFunc progress_bytes_callback;
-
-    /*
-     * progress_reset_callback: called when some bytes reset from copy_bitmap
-     * (see @skip_unallocated above). The callee is assumed to recalculate how
-     * many bytes remain based on the dirty bit count of copy_bitmap.
-     */
-    ProgressResetCallbackFunc progress_reset_callback;
-    void *progress_opaque;
-
-    SharedResource *mem;
-} BlockCopyState;
+typedef struct BlockCopyState BlockCopyState;
 
 BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                      int64_t cluster_size,
@@ -93,4 +41,7 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
 int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes,
                             bool *error_is_read);
 
+BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s);
+void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip);
+
 #endif /* BLOCK_COPY_H */
diff --git a/block/backup-top.c b/block/backup-top.c
index 7cdb1f8eba..1026628b57 100644
--- a/block/backup-top.c
+++ b/block/backup-top.c
@@ -38,6 +38,7 @@ typedef struct BDRVBackupTopState {
     BlockCopyState *bcs;
     BdrvChild *target;
     bool active;
+    int64_t cluster_size;
 } BDRVBackupTopState;
 
 static coroutine_fn int backup_top_co_preadv(
@@ -51,8 +52,8 @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset,
                                        uint64_t bytes)
 {
     BDRVBackupTopState *s = bs->opaque;
-    uint64_t end = QEMU_ALIGN_UP(offset + bytes, s->bcs->cluster_size);
-    uint64_t off = QEMU_ALIGN_DOWN(offset, s->bcs->cluster_size);
+    uint64_t end = QEMU_ALIGN_UP(offset + bytes, s->cluster_size);
+    uint64_t off = QEMU_ALIGN_DOWN(offset, s->cluster_size);
 
     return block_copy(s->bcs, off, end - off, NULL);
 }
@@ -227,6 +228,7 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
         goto failed_after_append;
     }
 
+    state->cluster_size = cluster_size;
     state->bcs = block_copy_state_new(top->backing, state->target,
                                       cluster_size, write_flags, &local_err);
     if (local_err) {
diff --git a/block/backup.c b/block/backup.c
index cf62b1a38c..acab0d08da 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -48,6 +48,7 @@ typedef struct BackupBlockJob {
     int64_t cluster_size;
 
     BlockCopyState *bcs;
+    BdrvDirtyBitmap *bcs_bitmap;
 } BackupBlockJob;
 
 static const BlockJobDriver backup_job_driver;
@@ -63,7 +64,7 @@ static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
 static void backup_progress_reset_callback(void *opaque)
 {
     BackupBlockJob *s = opaque;
-    uint64_t estimate = bdrv_get_dirty_count(s->bcs->copy_bitmap);
+    uint64_t estimate = bdrv_get_dirty_count(s->bcs_bitmap);
 
     job_progress_set_remaining(&s->common.job, estimate);
 }
@@ -111,8 +112,7 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
 
     if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
         /* If we failed and synced, merge in the bits we didn't copy: */
-        bdrv_dirty_bitmap_merge_internal(bm, job->bcs->copy_bitmap,
-                                         NULL, true);
+        bdrv_dirty_bitmap_merge_internal(bm, job->bcs_bitmap, NULL, true);
     }
 }
 
@@ -151,7 +151,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
         return;
     }
 
-    bdrv_set_dirty_bitmap(backup_job->bcs->copy_bitmap, 0, backup_job->len);
+    bdrv_set_dirty_bitmap(backup_job->bcs_bitmap, 0, backup_job->len);
 }
 
 static BlockErrorAction backup_error_action(BackupBlockJob *job,
@@ -196,7 +196,7 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
     BdrvDirtyBitmapIter *bdbi;
     int ret = 0;
 
-    bdbi = bdrv_dirty_iter_new(job->bcs->copy_bitmap);
+    bdbi = bdrv_dirty_iter_new(job->bcs_bitmap);
     while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
         do {
             if (yield_and_check(job)) {
@@ -216,13 +216,13 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
     return ret;
 }
 
-static void backup_init_copy_bitmap(BackupBlockJob *job)
+static void backup_init_bcs_bitmap(BackupBlockJob *job)
 {
     bool ret;
     uint64_t estimate;
 
     if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
-        ret = bdrv_dirty_bitmap_merge_internal(job->bcs->copy_bitmap,
+        ret = bdrv_dirty_bitmap_merge_internal(job->bcs_bitmap,
                                                job->sync_bitmap,
                                                NULL, true);
         assert(ret);
@@ -232,12 +232,12 @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
              * We can't hog the coroutine to initialize this thoroughly.
              * Set a flag and resume work when we are able to yield safely.
              */
-            job->bcs->skip_unallocated = true;
+            block_copy_set_skip_unallocated(job->bcs, true);
         }
-        bdrv_set_dirty_bitmap(job->bcs->copy_bitmap, 0, job->len);
+        bdrv_set_dirty_bitmap(job->bcs_bitmap, 0, job->len);
     }
 
-    estimate = bdrv_get_dirty_count(job->bcs->copy_bitmap);
+    estimate = bdrv_get_dirty_count(job->bcs_bitmap);
     job_progress_set_remaining(&job->common.job, estimate);
 }
 
@@ -246,7 +246,7 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     int ret = 0;
 
-    backup_init_copy_bitmap(s);
+    backup_init_bcs_bitmap(s);
 
     if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
         int64_t offset = 0;
@@ -265,12 +265,12 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
 
             offset += count;
         }
-        s->bcs->skip_unallocated = false;
+        block_copy_set_skip_unallocated(s->bcs, false);
     }
 
     if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
         /*
-         * All bits are set in copy_bitmap to allow any cluster to be copied.
+         * All bits are set in bcs_bitmap to allow any cluster to be copied.
          * This does not actually require them to be copied.
          */
         while (!job_is_cancelled(job)) {
@@ -458,6 +458,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     job->sync_bitmap = sync_bitmap;
     job->bitmap_mode = bitmap_mode;
     job->bcs = bcs;
+    job->bcs_bitmap = block_copy_dirty_bitmap(bcs);
     job->cluster_size = cluster_size;
     job->len = len;
 
diff --git a/block/block-copy.c b/block/block-copy.c
index aca44b13fb..7e14e86a2d 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -24,6 +24,60 @@
 #define BLOCK_COPY_MAX_BUFFER (1 * MiB)
 #define BLOCK_COPY_MAX_MEM (128 * MiB)
 
+typedef struct BlockCopyInFlightReq {
+    int64_t offset;
+    int64_t bytes;
+    QLIST_ENTRY(BlockCopyInFlightReq) list;
+    CoQueue wait_queue; /* coroutines blocked on this request */
+} BlockCopyInFlightReq;
+
+typedef struct BlockCopyState {
+    /*
+     * BdrvChild objects are not owned or managed by block-copy. They are
+     * provided by block-copy user and user is responsible for appropriate
+     * permissions on these children.
+     */
+    BdrvChild *source;
+    BdrvChild *target;
+    BdrvDirtyBitmap *copy_bitmap;
+    int64_t cluster_size;
+    bool use_copy_range;
+    int64_t copy_size;
+    uint64_t len;
+    QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
+
+    BdrvRequestFlags write_flags;
+
+    /*
+     * skip_unallocated:
+     *
+     * Used by sync=top jobs, which first scan the source node for unallocated
+     * areas and clear them in the copy_bitmap.  During this process, the bitmap
+     * is thus not fully initialized: It may still have bits set for areas that
+     * are unallocated and should actually not be copied.
+     *
+     * This is indicated by skip_unallocated.
+     *
+     * In this case, block_copy() will query the source’s allocation status,
+     * skip unallocated regions, clear them in the copy_bitmap, and invoke
+     * block_copy_reset_unallocated() every time it does.
+     */
+    bool skip_unallocated;
+
+    /* progress_bytes_callback: called when some copying progress is done. */
+    ProgressBytesCallbackFunc progress_bytes_callback;
+
+    /*
+     * progress_reset_callback: called when some bytes reset from copy_bitmap
+     * (see @skip_unallocated above). The callee is assumed to recalculate how
+     * many bytes remain based on the dirty bit count of copy_bitmap.
+     */
+    ProgressResetCallbackFunc progress_reset_callback;
+    void *progress_opaque;
+
+    SharedResource *mem;
+} BlockCopyState;
+
 static BlockCopyInFlightReq *block_copy_find_inflight_req(BlockCopyState *s,
                                                           int64_t offset,
                                                           int64_t bytes)
@@ -492,3 +546,13 @@ int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes,
 
     return 0;
 }
+
+BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
+{
+    return s->copy_bitmap;
+}
+
+void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
+{
+    s->skip_unallocated = skip;
+}
-- 
2.21.0



Thread overview: 29+ messages
2019-11-15 14:14 [RFC 00/24] backup performance: block_status + async Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 01/24] block/block-copy: specialcase first copy_range request Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 02/24] block/block-copy: use block_status Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 03/24] block/block-copy: factor out block_copy_find_inflight_req Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 04/24] block/block-copy: refactor interfaces to use bytes instead of end Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 05/24] block/block-copy: rename start to offset in interfaces Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 06/24] block/block-copy: reduce intersecting request lock Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 07/24] block/block-copy: hide structure definitions Vladimir Sementsov-Ogievskiy [this message]
2019-11-15 14:14 ` [RFC 08/24] block/block-copy: rename in-flight requests to tasks Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 09/24] block/block-copy: alloc task on each iteration Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 10/24] block/block-copy: add state pointer to BlockCopyTask Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 11/24] block/block-copy: move task size initial calculation to _task_create Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 12/24] block/block-copy: move block_copy_task_create down Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 13/24] block/block-copy: use aio-task-pool API Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 14/24] block/block-copy: More explicit call_state Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 15/24] block/block-copy: implement block_copy_async Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 16/24] block/block-copy: add max_chunk and max_workers paramters Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 17/24] block/block-copy: add ratelimit to block-copy Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 18/24] block/block-copy: add block_copy_cancel Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 19/24] blockjob: add set_speed to BlockJobDriver Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 20/24] job: call job_enter from job_user_pause Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 21/24] backup: move to block-copy Vladimir Sementsov-Ogievskiy
2019-11-15 17:58   ` Eric Blake
2019-11-16  9:28     ` Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 22/24] python: add simplebench.py Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 23/24] python: add qemu/bench_block_job.py Vladimir Sementsov-Ogievskiy
2019-11-15 14:14 ` [RFC 24/24] python: benchmark new backup architecture Vladimir Sementsov-Ogievskiy
2019-11-15 17:30 ` [RFC 00/24] backup performance: block_status + async no-reply
2019-11-15 17:33   ` Vladimir Sementsov-Ogievskiy
