From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
To: qemu-block@nongnu.org
Cc: fam@euphon.net, kwolf@redhat.com, vsementsov@virtuozzo.com,
	wencongyang2@huawei.com, xiechanglong.d@gmail.com,
	qemu-devel@nongnu.org, armbru@redhat.com, jsnow@redhat.com,
	stefanha@redhat.com, den@openvz.org, mreitz@redhat.com
Subject: [Qemu-devel] [PATCH v9 03/13] block/backup: introduce BlockCopyState
Date: Mon, 26 Aug 2019 19:13:02 +0300
Message-ID: <20190826161312.489398-4-vsementsov@virtuozzo.com>
In-Reply-To: <20190826161312.489398-1-vsementsov@virtuozzo.com>

Split the copying code out of backup into "block-copy", with a separate
state structure and renamed functions. This is needed to share it with
the backup-top filter driver in further commits.

Notes:

1. As BlockCopyState keeps its own BlockBackend objects, the remaining
job->common.blk users only use it to get the bs via blk_bs(), so clear
the job->common.blk permissions that were set in block_job_create().

2. Rename s/initializing_bitmap/skip_unallocated/, which reads better
as part of the BlockCopyState interface.

3. The split is not very clean: some duplicated fields remain, and the
backup code still uses some BlockCopyState fields directly. Let's
postpone that to further improvements and keep this commit simpler for
review.
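
For reviewers, the progress callback contract introduced here (see the
ProgressCallbackFunc comment in the diff below) can be illustrated with a
minimal standalone mock. This is only a sketch: MockJob and the byte counts
are hypothetical stand-ins for BackupBlockJob state, not QEMU code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the typedef added by this patch. */
typedef void (*ProgressCallbackFunc)(int64_t bytes, void *opaque);

/* Hypothetical stand-in for the job's progress state. */
typedef struct MockJob {
    uint64_t bytes_read;
    uint64_t remaining;
} MockJob;

/* Same two-case protocol as backup_progress_callback() below. */
static void mock_progress(int64_t bytes, void *opaque)
{
    MockJob *job = opaque;

    if (bytes > 0) {
        /* Case 1: @bytes were copied to the target. */
        job->bytes_read += bytes;
        job->remaining -= bytes;
    } else {
        /*
         * Case 2: bits were reset in copy_bitmap without copying, so the
         * estimate must be recomputed. A real caller would query
         * bdrv_get_dirty_count(copy_bitmap); here we just fake it.
         */
        job->remaining = 0;
    }
}

int main(void)
{
    MockJob job = { .bytes_read = 0, .remaining = 1024 * 1024 };
    ProgressCallbackFunc cb = mock_progress;

    cb(65536, &job); /* 64K copied */
    cb(0, &job);     /* remaining bitmap reset (e.g. unallocated clusters) */
    printf("read=%" PRIu64 " remaining=%" PRIu64 "\n",
           job.bytes_read, job.remaining);
    return 0;
}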

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 block/backup.c     | 324 +++++++++++++++++++++++++++------------------
 block/trace-events |  12 +-
 2 files changed, 200 insertions(+), 136 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index 13a1d80157..f52ac622e0 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -35,12 +35,35 @@ typedef struct CowRequest {
     CoQueue wait_queue; /* coroutines blocked on this request */
 } CowRequest;
 
+/*
+ * ProgressCallbackFunc
+ *
+ * Called when progress is made in the context of BlockCopyState:
+ *  1. When some bytes are copied, called with @bytes > 0.
+ *  2. When some bytes are reset in copy_bitmap, called with @bytes = 0
+ *     (user may recalculate remaining bytes from copy_bitmap dirty count).
+ */
+typedef void (*ProgressCallbackFunc)(int64_t bytes, void *opaque);
+typedef struct BlockCopyState {
+    BlockBackend *source;
+    BlockBackend *target;
+    BdrvDirtyBitmap *copy_bitmap;
+    int64_t cluster_size;
+    bool use_copy_range;
+    int64_t copy_range_size;
+    uint64_t len;
+
+    BdrvRequestFlags write_flags;
+    bool skip_unallocated;
+
+    ProgressCallbackFunc progress_callback;
+    void *progress_opaque;
+} BlockCopyState;
+
 typedef struct BackupBlockJob {
     BlockJob common;
-    BlockBackend *target;
 
     BdrvDirtyBitmap *sync_bitmap;
-    BdrvDirtyBitmap *copy_bitmap;
 
     MirrorSyncMode sync_mode;
     BitmapSyncMode bitmap_mode;
@@ -53,11 +76,7 @@ typedef struct BackupBlockJob {
     NotifierWithReturn before_write;
     QLIST_HEAD(, CowRequest) inflight_reqs;
 
-    bool use_copy_range;
-    int64_t copy_range_size;
-
-    BdrvRequestFlags write_flags;
-    bool initializing_bitmap;
+    BlockCopyState *bcs;
 } BackupBlockJob;
 
 static const BlockJobDriver backup_job_driver;
@@ -99,9 +118,83 @@ static void cow_request_end(CowRequest *req)
     qemu_co_queue_restart_all(&req->wait_queue);
 }
 
+static void block_copy_state_free(BlockCopyState *s)
+{
+    if (!s) {
+        return;
+    }
+
+    bdrv_release_dirty_bitmap(blk_bs(s->source), s->copy_bitmap);
+    blk_unref(s->source);
+    s->source = NULL;
+    blk_unref(s->target);
+    s->target = NULL;
+    g_free(s);
+}
+
+static BlockCopyState *block_copy_state_new(
+        BlockDriverState *source, BlockDriverState *target,
+        int64_t cluster_size, BdrvRequestFlags write_flags,
+        ProgressCallbackFunc progress_callback, void *progress_opaque,
+        Error **errp)
+{
+    BlockCopyState *s;
+    int ret;
+    uint64_t no_resize = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
+                         BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD;
+    BdrvDirtyBitmap *copy_bitmap =
+            bdrv_create_dirty_bitmap(source, cluster_size, NULL, errp);
+
+    if (!copy_bitmap) {
+        return NULL;
+    }
+    bdrv_disable_dirty_bitmap(copy_bitmap);
+
+    s = g_new0(BlockCopyState, 1);
+    *s = (BlockCopyState) {
+        .source = blk_new(bdrv_get_aio_context(source),
+                          BLK_PERM_CONSISTENT_READ, no_resize),
+        .target = blk_new(bdrv_get_aio_context(target),
+                          BLK_PERM_WRITE, no_resize),
+        .copy_bitmap = copy_bitmap,
+        .cluster_size = cluster_size,
+        .len = bdrv_dirty_bitmap_size(copy_bitmap),
+        .write_flags = write_flags,
+        .use_copy_range = !(write_flags & BDRV_REQ_WRITE_COMPRESSED),
+        .progress_callback = progress_callback,
+        .progress_opaque = progress_opaque,
+    };
+
+    s->copy_range_size = QEMU_ALIGN_UP(MIN(blk_get_max_transfer(s->source),
+                                           blk_get_max_transfer(s->target)),
+                                       s->cluster_size);
+
+    blk_set_disable_request_queuing(s->source, true);
+    blk_set_allow_aio_context_change(s->source, true);
+    blk_set_disable_request_queuing(s->target, true);
+    blk_set_allow_aio_context_change(s->target, true);
+
+    ret = blk_insert_bs(s->source, source, errp);
+    if (ret < 0) {
+        goto fail;
+    }
+
+    ret = blk_insert_bs(s->target, target, errp);
+    if (ret < 0) {
+        goto fail;
+    }
+
+    return s;
+
+fail:
+    block_copy_state_free(s);
+
+    return NULL;
+}
+
 /* Copy range to target with a bounce buffer and return the bytes copied. If
  * error occurred, return a negative error number */
-static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
+static int coroutine_fn block_copy_with_bounce_buffer(BlockCopyState *s,
                                                       int64_t start,
                                                       int64_t end,
                                                       bool is_write_notifier,
@@ -109,30 +202,29 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                       void **bounce_buffer)
 {
     int ret;
-    BlockBackend *blk = job->common.blk;
     int nbytes;
     int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
 
-    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
-    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
-    nbytes = MIN(job->cluster_size, job->len - start);
+    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
+    bdrv_reset_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
+    nbytes = MIN(s->cluster_size, s->len - start);
     if (!*bounce_buffer) {
-        *bounce_buffer = blk_blockalign(blk, job->cluster_size);
+        *bounce_buffer = blk_blockalign(s->source, s->cluster_size);
     }
 
-    ret = blk_co_pread(blk, start, nbytes, *bounce_buffer, read_flags);
+    ret = blk_co_pread(s->source, start, nbytes, *bounce_buffer, read_flags);
     if (ret < 0) {
-        trace_backup_do_cow_read_fail(job, start, ret);
+        trace_block_copy_with_bounce_buffer_read_fail(s, start, ret);
         if (error_is_read) {
             *error_is_read = true;
         }
         goto fail;
     }
 
-    ret = blk_co_pwrite(job->target, start, nbytes, *bounce_buffer,
-                        job->write_flags);
+    ret = blk_co_pwrite(s->target, start, nbytes, *bounce_buffer,
+                        s->write_flags);
     if (ret < 0) {
-        trace_backup_do_cow_write_fail(job, start, ret);
+        trace_block_copy_with_bounce_buffer_write_fail(s, start, ret);
         if (error_is_read) {
             *error_is_read = false;
         }
@@ -141,36 +233,35 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
 
     return nbytes;
 fail:
-    bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
+    bdrv_set_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
     return ret;
 
 }
 
 /* Copy range to target and return the bytes copied. If error occurred, return a
  * negative error number. */
-static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
+static int coroutine_fn block_copy_with_offload(BlockCopyState *s,
                                                 int64_t start,
                                                 int64_t end,
                                                 bool is_write_notifier)
 {
     int ret;
     int nr_clusters;
-    BlockBackend *blk = job->common.blk;
     int nbytes;
     int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
 
-    assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
-    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
-    nbytes = MIN(job->copy_range_size, MIN(end - start, job->len - start));
-    nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
-    bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
-                            job->cluster_size * nr_clusters);
-    ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
-                            read_flags, job->write_flags);
+    assert(QEMU_IS_ALIGNED(s->copy_range_size, s->cluster_size));
+    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
+    nbytes = MIN(s->copy_range_size, MIN(end - start, s->len - start));
+    nr_clusters = DIV_ROUND_UP(nbytes, s->cluster_size);
+    bdrv_reset_dirty_bitmap(s->copy_bitmap, start,
+                            s->cluster_size * nr_clusters);
+    ret = blk_co_copy_range(s->source, start, s->target, start, nbytes,
+                            read_flags, s->write_flags);
     if (ret < 0) {
-        trace_backup_do_cow_copy_range_fail(job, start, ret);
-        bdrv_set_dirty_bitmap(job->copy_bitmap, start,
-                              job->cluster_size * nr_clusters);
+        trace_block_copy_with_offload_fail(s, start, ret);
+        bdrv_set_dirty_bitmap(s->copy_bitmap, start,
+                              s->cluster_size * nr_clusters);
         return ret;
     }
 
@@ -181,10 +272,10 @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
  * Check if the cluster starting at offset is allocated or not.
  * return via pnum the number of contiguous clusters sharing this allocation.
  */
-static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
-                                       int64_t *pnum)
+static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
+                                           int64_t *pnum)
 {
-    BlockDriverState *bs = blk_bs(s->common.blk);
+    BlockDriverState *bs = blk_bs(s->source);
     int64_t count, total_count = 0;
     int64_t bytes = s->len - offset;
     int ret;
@@ -225,13 +316,13 @@ static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
  * @return 0 when the cluster at @offset was unallocated,
  *         1 otherwise, and -ret on error.
  */
-static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
-                                               int64_t offset, int64_t *count)
+static int64_t block_copy_reset_unallocated(BlockCopyState *s,
+                                            int64_t offset, int64_t *count)
 {
     int ret;
-    int64_t clusters, bytes, estimate;
+    int64_t clusters, bytes;
 
-    ret = backup_is_cluster_allocated(s, offset, &clusters);
+    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
     if (ret < 0) {
         return ret;
     }
@@ -240,46 +331,45 @@ static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
 
     if (!ret) {
         bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
-        estimate = bdrv_get_dirty_count(s->copy_bitmap);
-        job_progress_set_remaining(&s->common.job, estimate);
+        s->progress_callback(0, s->progress_opaque);
     }
 
     *count = bytes;
     return ret;
 }
 
-static int coroutine_fn backup_do_copy(BackupBlockJob *job,
-                                       int64_t offset, uint64_t bytes,
-                                       bool *error_is_read,
-                                       bool is_write_notifier)
+static int coroutine_fn block_copy(BlockCopyState *s,
+                                   int64_t offset, uint64_t bytes,
+                                   bool *error_is_read,
+                                   bool is_write_notifier)
 {
     int ret = 0;
     int64_t start = offset, end = bytes + offset; /* bytes */
     void *bounce_buffer = NULL;
     int64_t status_bytes;
 
-    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
-    assert(QEMU_IS_ALIGNED(end, job->cluster_size));
+    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
+    assert(QEMU_IS_ALIGNED(end, s->cluster_size));
 
     while (start < end) {
         int64_t dirty_end;
 
-        if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
-            trace_backup_do_cow_skip(job, start);
-            start += job->cluster_size;
+        if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
+            trace_block_copy_skip(s, start);
+            start += s->cluster_size;
             continue; /* already copied */
         }
 
-        dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
+        dirty_end = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
                                                 (end - start));
         if (dirty_end < 0) {
             dirty_end = end;
         }
 
-        if (job->initializing_bitmap) {
-            ret = backup_bitmap_reset_unallocated(job, start, &status_bytes);
+        if (s->skip_unallocated) {
+            ret = block_copy_reset_unallocated(s, start, &status_bytes);
             if (ret == 0) {
-                trace_backup_do_cow_skip_range(job, start, status_bytes);
+                trace_block_copy_skip_range(s, start, status_bytes);
                 start += status_bytes;
                 continue;
             }
@@ -287,17 +377,17 @@ static int coroutine_fn backup_do_copy(BackupBlockJob *job,
             dirty_end = MIN(dirty_end, start + status_bytes);
         }
 
-        trace_backup_do_cow_process(job, start);
+        trace_block_copy_process(s, start);
 
-        if (job->use_copy_range) {
-            ret = backup_cow_with_offload(job, start, dirty_end,
+        if (s->use_copy_range) {
+            ret = block_copy_with_offload(s, start, dirty_end,
                                           is_write_notifier);
             if (ret < 0) {
-                job->use_copy_range = false;
+                s->use_copy_range = false;
             }
         }
-        if (!job->use_copy_range) {
-            ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
+        if (!s->use_copy_range) {
+            ret = block_copy_with_bounce_buffer(s, start, dirty_end,
                                                 is_write_notifier,
                                                 error_is_read, &bounce_buffer);
         }
@@ -305,12 +395,8 @@ static int coroutine_fn backup_do_copy(BackupBlockJob *job,
             break;
         }
 
-        /* Publish progress, guest I/O counts as progress too.  Note that the
-         * offset field is an opaque progress value, it is not a disk offset.
-         */
         start += ret;
-        job->bytes_read += ret;
-        job_progress_update(&job->common.job, ret);
+        s->progress_callback(ret, s->progress_opaque);
         ret = 0;
     }
 
@@ -321,6 +407,20 @@ static int coroutine_fn backup_do_copy(BackupBlockJob *job,
     return ret;
 }
 
+static void backup_progress_callback(int64_t bytes, void *opaque)
+{
+    BackupBlockJob *s = opaque;
+    uint64_t estimate;
+
+    if (bytes > 0) {
+        s->bytes_read += bytes;
+        job_progress_update(&s->common.job, bytes);
+    } else {
+        estimate = bdrv_get_dirty_count(s->bcs->copy_bitmap);
+        job_progress_set_remaining(&s->common.job, estimate);
+    }
+}
+
 static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                       int64_t offset, uint64_t bytes,
                                       bool *error_is_read,
@@ -340,8 +440,8 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     wait_for_overlapping_requests(job, start, end);
     cow_request_begin(&cow_request, job, start, end);
 
-    ret = backup_do_copy(job, start, end - start, error_is_read,
-                         is_write_notifier);
+    ret = block_copy(job->bcs, start, end - start, error_is_read,
+                     is_write_notifier);
 
     cow_request_end(&cow_request);
 
@@ -391,7 +491,7 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
 
     if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
         /* If we failed and synced, merge in the bits we didn't copy: */
-        bdrv_dirty_bitmap_merge_internal(bm, job->copy_bitmap,
+        bdrv_dirty_bitmap_merge_internal(bm, job->bcs->copy_bitmap,
                                          NULL, true);
     }
 }
@@ -415,16 +515,9 @@ static void backup_abort(Job *job)
 static void backup_clean(Job *job)
 {
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
-    BlockDriverState *bs = blk_bs(s->common.blk);
 
-    if (s->copy_bitmap) {
-        bdrv_release_dirty_bitmap(bs, s->copy_bitmap);
-        s->copy_bitmap = NULL;
-    }
-
-    assert(s->target);
-    blk_unref(s->target);
-    s->target = NULL;
+    block_copy_state_free(s->bcs);
+    s->bcs = NULL;
 }
 
 void backup_do_checkpoint(BlockJob *job, Error **errp)
@@ -439,7 +532,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
         return;
     }
 
-    bdrv_set_dirty_bitmap(backup_job->copy_bitmap, 0, backup_job->len);
+    bdrv_set_dirty_bitmap(backup_job->bcs->copy_bitmap, 0, backup_job->len);
 }
 
 static void backup_drain(BlockJob *job)
@@ -449,8 +542,8 @@ static void backup_drain(BlockJob *job)
     /* Need to keep a reference in case blk_drain triggers execution
      * of backup_complete...
      */
-    if (s->target) {
-        BlockBackend *target = s->target;
+    if (s->bcs && s->bcs->target) {
+        BlockBackend *target = s->bcs->target;
         blk_ref(target);
         blk_drain(target);
         blk_unref(target);
@@ -497,7 +590,7 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
     BdrvDirtyBitmapIter *bdbi;
     int ret = 0;
 
-    bdbi = bdrv_dirty_iter_new(job->copy_bitmap);
+    bdbi = bdrv_dirty_iter_new(job->bcs->copy_bitmap);
     while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
         do {
             if (yield_and_check(job)) {
@@ -524,7 +617,7 @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
     uint64_t estimate;
 
     if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
-        ret = bdrv_dirty_bitmap_merge_internal(job->copy_bitmap,
+        ret = bdrv_dirty_bitmap_merge_internal(job->bcs->copy_bitmap,
                                                job->sync_bitmap,
                                                NULL, true);
         assert(ret);
@@ -534,12 +627,12 @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
              * We can't hog the coroutine to initialize this thoroughly.
              * Set a flag and resume work when we are able to yield safely.
              */
-            job->initializing_bitmap = true;
+            job->bcs->skip_unallocated = true;
         }
-        bdrv_set_dirty_bitmap(job->copy_bitmap, 0, job->len);
+        bdrv_set_dirty_bitmap(job->bcs->copy_bitmap, 0, job->len);
     }
 
-    estimate = bdrv_get_dirty_count(job->copy_bitmap);
+    estimate = bdrv_get_dirty_count(job->bcs->copy_bitmap);
     job_progress_set_remaining(&job->common.job, estimate);
 }
 
@@ -567,14 +660,14 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
                 goto out;
             }
 
-            ret = backup_bitmap_reset_unallocated(s, offset, &count);
+            ret = block_copy_reset_unallocated(s->bcs, offset, &count);
             if (ret < 0) {
                 goto out;
             }
 
             offset += count;
         }
-        s->initializing_bitmap = false;
+        s->bcs->skip_unallocated = false;
     }
 
     if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
@@ -663,9 +756,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
 {
     int64_t len;
     BackupBlockJob *job = NULL;
-    int ret;
     int64_t cluster_size;
-    BdrvDirtyBitmap *copy_bitmap = NULL;
 
     assert(bs);
     assert(target);
@@ -730,57 +821,34 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         goto error;
     }
 
-    copy_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
-    if (!copy_bitmap) {
-        goto error;
-    }
-    bdrv_disable_dirty_bitmap(copy_bitmap);
-
     /* job->len is fixed, so we can't allow resize */
-    job = block_job_create(job_id, &backup_job_driver, txn, bs,
-                           BLK_PERM_CONSISTENT_READ,
-                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
-                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
+    job = block_job_create(job_id, &backup_job_driver, txn, bs, 0, BLK_PERM_ALL,
                            speed, creation_flags, cb, opaque, errp);
     if (!job) {
         goto error;
     }
 
-    /* The target must match the source in size, so no resize here either */
-    job->target = blk_new(job->common.job.aio_context,
-                          BLK_PERM_WRITE,
-                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
-                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
-    ret = blk_insert_bs(job->target, target, errp);
-    if (ret < 0) {
-        goto error;
-    }
-    blk_set_disable_request_queuing(job->target, true);
-
     job->on_source_error = on_source_error;
     job->on_target_error = on_target_error;
     job->sync_mode = sync_mode;
     job->sync_bitmap = sync_bitmap;
     job->bitmap_mode = bitmap_mode;
 
-    /*
-     * Set write flags:
-     * 1. Detect image-fleecing (and similar) schemes
-     * 2. Handle compression
-     */
-    job->write_flags =
-        (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
-        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
+    job->bcs = block_copy_state_new(
+            bs, target, cluster_size,
+            /*
+             * Set write flags:
+             * 1. Detect image-fleecing (and similar) schemes
+             * 2. Handle compression
+             */
+            (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
+            (compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
+            backup_progress_callback, job, errp);
+    if (!job->bcs) {
+        goto error;
+    }
 
     job->cluster_size = cluster_size;
-    job->copy_bitmap = copy_bitmap;
-    copy_bitmap = NULL;
-    job->use_copy_range = !compress; /* compression isn't supported for it */
-    job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
-                                        blk_get_max_transfer(job->target));
-    job->copy_range_size = MAX(job->cluster_size,
-                               QEMU_ALIGN_UP(job->copy_range_size,
-                                             job->cluster_size));
 
     /* Required permissions are already taken with target's blk_new() */
     block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
@@ -790,10 +858,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     return &job->common;
 
  error:
-    if (copy_bitmap) {
-        assert(!job || !job->copy_bitmap);
-        bdrv_release_dirty_bitmap(bs, copy_bitmap);
-    }
     if (sync_bitmap) {
         bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
     }
diff --git a/block/trace-events b/block/trace-events
index 04209f058d..ad1454f539 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -40,12 +40,12 @@ mirror_yield_in_flight(void *s, int64_t offset, int in_flight) "s %p offset %" P
 # backup.c
 backup_do_cow_enter(void *job, int64_t start, int64_t offset, uint64_t bytes) "job %p start %" PRId64 " offset %" PRId64 " bytes %" PRIu64
 backup_do_cow_return(void *job, int64_t offset, uint64_t bytes, int ret) "job %p offset %" PRId64 " bytes %" PRIu64 " ret %d"
-backup_do_cow_skip(void *job, int64_t start) "job %p start %"PRId64
-backup_do_cow_skip_range(void *job, int64_t start, uint64_t bytes) "job %p start %"PRId64" bytes %"PRId64
-backup_do_cow_process(void *job, int64_t start) "job %p start %"PRId64
-backup_do_cow_read_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
-backup_do_cow_write_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
-backup_do_cow_copy_range_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
+block_copy_skip(void *job, int64_t start) "job %p start %"PRId64
+block_copy_skip_range(void *job, int64_t start, uint64_t bytes) "job %p start %"PRId64" bytes %"PRIu64
+block_copy_process(void *job, int64_t start) "job %p start %"PRId64
+block_copy_with_bounce_buffer_read_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
+block_copy_with_bounce_buffer_write_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
+block_copy_with_offload_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
 
 # ../blockdev.c
 qmp_block_job_cancel(void *job) "job %p"
-- 
2.18.0
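
Aside for reviewers: the copy_range_size computation in
block_copy_state_new() above boils down to aligning the smaller of the two
BlockBackends' max-transfer limits up to the cluster size. Below is a
standalone sketch of that arithmetic; the macros are simplified equivalents
of QEMU's osdep.h versions, and the transfer limits are hypothetical values
in place of blk_get_max_transfer() results.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified equivalents of QEMU's osdep.h macros. */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define QEMU_ALIGN_UP(n, m) ((((n) + (m) - 1) / (m)) * (m))

int main(void)
{
    /* Hypothetical per-BlockBackend transfer limits. */
    int64_t max_src = 8 * 1024 * 1024 + 4096;  /* 8M + 4K */
    int64_t max_tgt = 16 * 1024 * 1024;        /* 16M */
    int64_t cluster_size = 64 * 1024;          /* 64K, the backup default */

    int64_t copy_range_size =
        QEMU_ALIGN_UP(MIN(max_src, max_tgt), cluster_size);

    /* 8M + 4K rounds up to the next 64K boundary: 8M + 64K = 8454144. */
    printf("copy_range_size = %" PRId64 "\n", copy_range_size);
    return 0;
}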


