From mboxrd@z Thu Jan 1 00:00:00 1970
From: Paolo Bonzini
Date: Fri, 15 Jun 2012 17:05:59 +0200
Message-Id: <1339772759-31004-37-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1339772759-31004-1-git-send-email-pbonzini@redhat.com>
References: <1339772759-31004-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [RFC PATCH 36/36] mirror: allow customizing the granularity
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, stefanha@linux.vnet.ibm.com, lcapitulino@redhat.com

The desired granularity may be very different depending on the kind of
operation (e.g. continuous replication vs. collapse-to-raw) and whether
the VM is expected to perform lots of I/O while mirroring is in
progress.  Allow the user to customize it.

Signed-off-by: Paolo Bonzini
---
 block/mirror.c   |   38 +++++++++++++++++++-------------------
 block_int.h      |    3 ++-
 blockdev.c       |    7 ++++++-
 hmp.c            |    2 +-
 qapi-schema.json |    5 ++++-
 qmp-commands.hx  |    4 +++-
 6 files changed, 35 insertions(+), 24 deletions(-)

diff --git a/block/mirror.c b/block/mirror.c
index e09beaf..4e6aa81 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -17,9 +17,6 @@
 #include "qemu/ratelimit.h"
 #include "bitmap.h"
 
-#define BLOCK_SIZE (1 << 20)
-#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLOCK_SIZE >> BDRV_SECTOR_BITS)
-
 #define SLICE_TIME 100000000ULL /* ns */
 
 typedef struct MirrorBlockJob {
@@ -31,6 +28,7 @@ typedef struct MirrorBlockJob {
     bool synced;
     bool complete;
     int64_t sector_num;
+    int64_t granularity;
     int64_t buf_size;
     unsigned long *cow_bitmap;
     HBitmapIter hbi;
@@ -43,7 +41,7 @@ static int coroutine_fn mirror_iteration(MirrorBlockJob *s,
     BlockDriverState *source = s->common.bs;
     BlockDriverState *target = s->target;
     QEMUIOVector qiov;
-    int ret, nb_sectors;
+    int ret, nb_sectors, nb_sectors_chunk;
     int64_t end, sector_num, cluster_num;
     struct iovec iov;
 
@@ -58,24 +56,24 @@ static int coroutine_fn mirror_iteration(MirrorBlockJob *s,
      * is very large, we need to do COW ourselves. The first time a cluster is
      * copied, copy it entirely.
      *
-     * Because both BDRV_SECTORS_PER_DIRTY_CHUNK and the cluster size are
-     * powers of two, the number of sectors to copy cannot exceed one cluster.
+     * Because both the granularity and the cluster size are powers of two, the
+     * number of sectors to copy cannot exceed one cluster.
      */
     sector_num = s->sector_num;
-    nb_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
-    cluster_num = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
+    nb_sectors_chunk = nb_sectors = s->granularity >> BDRV_SECTOR_BITS;
+    cluster_num = sector_num / nb_sectors_chunk;
     if (s->cow_bitmap && !test_bit(cluster_num, s->cow_bitmap)) {
         bdrv_round_to_clusters(s->target,
-                               sector_num, BDRV_SECTORS_PER_DIRTY_CHUNK,
+                               sector_num, nb_sectors_chunk,
                                &sector_num, &nb_sectors);
-        bitmap_set(s->cow_bitmap, sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK,
-                   nb_sectors / BDRV_SECTORS_PER_DIRTY_CHUNK);
+        bitmap_set(s->cow_bitmap, sector_num / nb_sectors_chunk,
+                   nb_sectors / nb_sectors_chunk);
     }
 
     end = s->common.len >> BDRV_SECTOR_BITS;
     nb_sectors = MIN(nb_sectors, end - sector_num);
     trace_mirror_one_iteration(s, sector_num);
-    bdrv_reset_dirty(source, sector_num, BDRV_SECTORS_PER_DIRTY_CHUNK);
+    bdrv_reset_dirty(source, sector_num, nb_sectors);
 
     /* Copy the dirty cluster. */
     iov.iov_base = s->buf;
@@ -107,7 +105,7 @@ static void coroutine_fn mirror_run(void *opaque)
 {
     MirrorBlockJob *s = opaque;
     BlockDriverState *bs = s->common.bs;
-    int64_t sector_num, end;
+    int64_t sector_num, end, nb_sectors_chunk;
     int ret = 0;
     int n;
 
@@ -123,13 +121,14 @@ static void coroutine_fn mirror_run(void *opaque)
 
     end = s->common.len >> BDRV_SECTOR_BITS;
     s->buf = qemu_blockalign(bs, s->buf_size);
+    nb_sectors_chunk = s->granularity >> BDRV_SECTOR_BITS;
 
     if (s->mode == MIRROR_SYNC_MODE_FULL || s->mode == MIRROR_SYNC_MODE_TOP) {
         /* First part, loop on the sectors and initialize the dirty bitmap. */
         BlockDriverState *base;
         base = s->mode == MIRROR_SYNC_MODE_FULL ? NULL : bs->backing_hd;
         for (sector_num = 0; sector_num < end; ) {
-            int64_t next = (sector_num | (BDRV_SECTORS_PER_DIRTY_CHUNK - 1)) + 1;
+            int64_t next = (sector_num | (nb_sectors_chunk - 1)) + 1;
             ret = bdrv_co_is_allocated_above(bs, base,
                                              sector_num, next - sector_num, &n);
 
@@ -209,7 +208,7 @@ static void coroutine_fn mirror_run(void *opaque)
             s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;
 
             if (s->common.speed) {
-                delay_ns = ratelimit_calculate_delay(&s->limit, BDRV_SECTORS_PER_DIRTY_CHUNK);
+                delay_ns = ratelimit_calculate_delay(&s->limit, nb_sectors_chunk);
             } else {
                 delay_ns = 0;
             }
@@ -301,7 +300,7 @@ static BlockJobType mirror_job_type = {
 };
 
 void mirror_start(BlockDriverState *bs, BlockDriverState *target,
-                  int64_t speed, MirrorSyncMode mode,
+                  int64_t speed, int64_t granularity, MirrorSyncMode mode,
                   BlockdevOnError on_source_error,
                   BlockdevOnError on_target_error,
                   BlockDriverCompletionFunc *cb,
@@ -326,19 +325,20 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
      * the destination do COW.  Instead, we copy sectors around the
      * dirty data if needed.
      */
-    s->buf_size = BLOCK_SIZE;
+    s->granularity = granularity;
+    s->buf_size = granularity;
     bdrv_get_backing_filename(s->target, backing_filename,
                               sizeof(backing_filename));
     if (backing_filename[0] && !s->target->backing_hd) {
         bdrv_get_info(s->target, &bdi);
         if (s->buf_size < bdi.cluster_size) {
             s->buf_size = bdi.cluster_size;
-            length = (bdrv_getlength(bs) + BLOCK_SIZE - 1) / BLOCK_SIZE;
+            length = (bdrv_getlength(bs) + granularity - 1) / granularity;
             s->cow_bitmap = bitmap_new(length);
         }
     }
 
-    bdrv_set_dirty_tracking(bs, BDRV_SECTORS_PER_DIRTY_CHUNK);
+    bdrv_set_dirty_tracking(bs, granularity >> BDRV_SECTOR_BITS);
     bdrv_set_on_error(s->target, on_target_error, on_target_error);
     bdrv_iostatus_enable(s->target);
     s->common.co = qemu_coroutine_create(mirror_run);
diff --git a/block_int.h b/block_int.h
index 6d36fe7..8bbe479 100644
--- a/block_int.h
+++ b/block_int.h
@@ -311,6 +311,7 @@ void stream_start(BlockDriverState *bs, BlockDriverState *base,
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @granularity: The chosen granularity for the dirty bitmap.
 * @mode: Whether to collapse all images in the chain to the target.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
@@ -324,7 +325,7 @@ void stream_start(BlockDriverState *bs, BlockDriverState *base,
 * @bs will be switched to read from @target.
 */
 void mirror_start(BlockDriverState *bs, BlockDriverState *target,
-                  int64_t speed, MirrorSyncMode mode,
+                  int64_t speed, int64_t granularity, MirrorSyncMode mode,
                   BlockdevOnError on_source_error,
                   BlockdevOnError on_target_error,
                   BlockDriverCompletionFunc *cb,
diff --git a/blockdev.c b/blockdev.c
index f940e8f..a1b2b1b 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -836,6 +836,7 @@ void qmp_drive_mirror(const char *device, const char *target,
                       enum MirrorSyncMode sync,
                       bool has_mode, enum NewImageMode mode,
                       bool has_speed, int64_t speed,
+                      bool has_granularity, int64_t granularity,
                       bool has_on_source_error, BlockdevOnError on_source_error,
                       bool has_on_target_error, BlockdevOnError on_target_error,
                       Error **errp)
@@ -858,6 +859,9 @@ void qmp_drive_mirror(const char *device, const char *target,
     if (!has_mode) {
         mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
     }
+    if (!has_granularity) {
+        granularity = 65536;
+    }
 
     bs = bdrv_find(device);
     if (!bs) {
@@ -936,7 +940,8 @@ void qmp_drive_mirror(const char *device, const char *target,
         return;
     }
 
-    mirror_start(bs, target_bs, speed, sync, on_source_error, on_target_error,
+    mirror_start(bs, target_bs, speed, granularity, sync,
+                 on_source_error, on_target_error,
                  block_job_cb, bs, &local_err);
     if (local_err != NULL) {
         bdrv_delete(target_bs);
diff --git a/hmp.c b/hmp.c
index ef0b87f..f01d608 100644
--- a/hmp.c
+++ b/hmp.c
@@ -717,7 +717,7 @@ void hmp_drive_mirror(Monitor *mon, const QDict *qdict)
 
     qmp_drive_mirror(device, filename, !!format, format,
                      full ? MIRROR_SYNC_MODE_FULL : MIRROR_SYNC_MODE_TOP,
-                     true, mode, false, 0,
+                     true, mode, false, 0, false, 0,
                      false, 0, false, 0, &errp);
     hmp_handle_error(mon, &errp);
 }
diff --git a/qapi-schema.json b/qapi-schema.json
index bbcfa0e..b579581 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -1383,6 +1383,8 @@
 #        (all the disk, only the sectors allocated in the topmost image, or
 #        only new I/O).
 #
+# @granularity: #optional granularity of the dirty bitmap, default is 64K.
+#
 # @on_source_error: #optional the action to take on an error on the source
 #
 # @on_target_error: #optional the action to take on an error on the target
@@ -1397,7 +1399,8 @@
 { 'command': 'drive-mirror',
   'data': { 'device': 'str', 'target': 'str', '*format': 'str',
             'sync': 'MirrorSyncMode', '*mode': 'NewImageMode',
-            '*speed': 'int', '*on_source_error': 'BlockdevOnError',
+            '*speed': 'int', '*granularity': 'int',
+            '*on_source_error': 'BlockdevOnError',
             '*on_target_error': 'BlockdevOnError' } }
 
 ##
diff --git a/qmp-commands.hx b/qmp-commands.hx
index f1b0f90..40ef5ab 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -839,7 +839,8 @@ EQMP
     {
         .name       = "drive-mirror",
         .args_type  = "sync:s,device:B,target:s,sync:s?,format:s?,"
-                      "on_source_error:s?,on_target_error:s?",
+                      "on_source_error:s?,on_target_error:s?,"
+                      "granularity:i?",
         .mhandler.cmd_new = qmp_marshal_input_drive_mirror,
     },
 
@@ -862,6 +863,7 @@ Arguments:
            file/device (NewImageMode, optional, default 'absolute-paths')
 - "speed": maximum speed of the streaming job, in bytes per second
   (json-int)
+- "granularity": granularity of the dirty bitmap (json-int, default 64k)
 - "sync": what parts of the disk image should be copied to the destination;
   possibilities include "full" for all the disk, "top" for only the sectors
   allocated in the topmost image, or "none" to only replicate new I/O
-- 
1.7.10.2
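
For reference (not part of the patch itself), a QMP invocation that passes the
new optional argument could look roughly like the sketch below; the device
name, target path and granularity value are only illustrative:

-> { "execute": "drive-mirror",
     "arguments": { "device": "ide0-hd0",
                    "target": "/some/place/my-image",
                    "sync": "full",
                    "granularity": 1048576 } }
<- { "return": {} }

Any power-of-two granularity would be passed the same way; when the argument
is omitted, blockdev.c falls back to the 65536-byte default shown above.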