* [PATCH v3 0/5] block-copy: use aio-task-pool
@ 2020-04-29 6:10 Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 1/5] block/block-copy: rename in-flight requests to tasks Vladimir Sementsov-Ogievskiy
` (4 more replies)
0 siblings, 5 replies; 13+ messages in thread
From: Vladimir Sementsov-Ogievskiy @ 2020-04-29 6:10 UTC (permalink / raw)
To: qemu-block; +Cc: kwolf, den, vsementsov, qemu-devel, mreitz
Hi all!
v3:
01: drop extra line "+ s->in_flight_bytes -= task->bytes - new_bytes;"
02: add Max's r-b
03: rebased on 01 fix (dropped line not updated now). keep Max's r-b
04: more refactoring:
don't require offset argument of block_copy_task_create being dirty,
use !bdrv_dirty_bitmap_next_dirty_area() in block_copy_task_create()
05: declare block_copy_task_entry (as moving patch dropped)
rebased on 04 changes
==
This is the next step of improving block-copy: use aio task pool.
The async copying loop has better performance than the linear one, as
shown in the original series
("[RFC 00/24] backup performance: block_status + async").
Vladimir Sementsov-Ogievskiy (5):
block/block-copy: rename in-flight requests to tasks
block/block-copy: alloc task on each iteration
block/block-copy: add state pointer to BlockCopyTask
block/block-copy: refactor task creation
block/block-copy: use aio-task-pool API
block/block-copy.c | 262 ++++++++++++++++++++++++++++++---------------
1 file changed, 177 insertions(+), 85 deletions(-)
--
2.21.0
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v3 1/5] block/block-copy: rename in-flight requests to tasks
2020-04-29 6:10 [PATCH v3 0/5] block-copy: use aio-task-pool Vladimir Sementsov-Ogievskiy
@ 2020-04-29 6:10 ` Vladimir Sementsov-Ogievskiy
2020-04-29 10:57 ` Max Reitz
2020-04-29 6:10 ` [PATCH v3 2/5] block/block-copy: alloc task on each iteration Vladimir Sementsov-Ogievskiy
` (3 subsequent siblings)
4 siblings, 1 reply; 13+ messages in thread
From: Vladimir Sementsov-Ogievskiy @ 2020-04-29 6:10 UTC (permalink / raw)
To: qemu-block; +Cc: kwolf, den, vsementsov, qemu-devel, mreitz
We are going to use aio-task-pool API and extend in-flight request
structure to be a successor of AioTask, so rename things appropriately.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
block/block-copy.c | 98 +++++++++++++++++++++++-----------------------
1 file changed, 48 insertions(+), 50 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index 05227e18bf..bbb29366dc 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -24,12 +24,12 @@
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
-typedef struct BlockCopyInFlightReq {
+typedef struct BlockCopyTask {
int64_t offset;
int64_t bytes;
- QLIST_ENTRY(BlockCopyInFlightReq) list;
- CoQueue wait_queue; /* coroutines blocked on this request */
-} BlockCopyInFlightReq;
+ QLIST_ENTRY(BlockCopyTask) list;
+ CoQueue wait_queue; /* coroutines blocked on this task */
+} BlockCopyTask;
typedef struct BlockCopyState {
/*
@@ -45,7 +45,7 @@ typedef struct BlockCopyState {
bool use_copy_range;
int64_t copy_size;
uint64_t len;
- QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
+ QLIST_HEAD(, BlockCopyTask) tasks;
BdrvRequestFlags write_flags;
@@ -73,15 +73,14 @@ typedef struct BlockCopyState {
SharedResource *mem;
} BlockCopyState;
-static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
- int64_t offset,
- int64_t bytes)
+static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
+ int64_t offset, int64_t bytes)
{
- BlockCopyInFlightReq *req;
+ BlockCopyTask *t;
- QLIST_FOREACH(req, &s->inflight_reqs, list) {
- if (offset + bytes > req->offset && offset < req->offset + req->bytes) {
- return req;
+ QLIST_FOREACH(t, &s->tasks, list) {
+ if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
+ return t;
}
}
@@ -89,73 +88,72 @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
}
/*
- * If there are no intersecting requests return false. Otherwise, wait for the
- * first found intersecting request to finish and return true.
+ * If there are no intersecting tasks return false. Otherwise, wait for the
+ * first found intersecting tasks to finish and return true.
*/
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
int64_t bytes)
{
- BlockCopyInFlightReq *req = find_conflicting_inflight_req(s, offset, bytes);
+ BlockCopyTask *task = find_conflicting_task(s, offset, bytes);
- if (!req) {
+ if (!task) {
return false;
}
- qemu_co_queue_wait(&req->wait_queue, NULL);
+ qemu_co_queue_wait(&task->wait_queue, NULL);
return true;
}
/* Called only on full-dirty region */
-static void block_copy_inflight_req_begin(BlockCopyState *s,
- BlockCopyInFlightReq *req,
- int64_t offset, int64_t bytes)
+static void block_copy_task_begin(BlockCopyState *s, BlockCopyTask *task,
+ int64_t offset, int64_t bytes)
{
- assert(!find_conflicting_inflight_req(s, offset, bytes));
+ assert(!find_conflicting_task(s, offset, bytes));
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
s->in_flight_bytes += bytes;
- req->offset = offset;
- req->bytes = bytes;
- qemu_co_queue_init(&req->wait_queue);
- QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
+ task->offset = offset;
+ task->bytes = bytes;
+ qemu_co_queue_init(&task->wait_queue);
+ QLIST_INSERT_HEAD(&s->tasks, task, list);
}
/*
- * block_copy_inflight_req_shrink
+ * block_copy_task_shrink
*
- * Drop the tail of the request to be handled later. Set dirty bits back and
- * wake up all requests waiting for us (may be some of them are not intersecting
- * with shrunk request)
+ * Drop the tail of the task to be handled later. Set dirty bits back and
+ * wake up all tasks waiting for us (may be some of them are not intersecting
+ * with shrunk task)
*/
-static void coroutine_fn block_copy_inflight_req_shrink(BlockCopyState *s,
- BlockCopyInFlightReq *req, int64_t new_bytes)
+static void coroutine_fn block_copy_task_shrink(BlockCopyState *s,
+ BlockCopyTask *task,
+ int64_t new_bytes)
{
- if (new_bytes == req->bytes) {
+ if (new_bytes == task->bytes) {
return;
}
- assert(new_bytes > 0 && new_bytes < req->bytes);
+ assert(new_bytes > 0 && new_bytes < task->bytes);
- s->in_flight_bytes -= req->bytes - new_bytes;
+ s->in_flight_bytes -= task->bytes - new_bytes;
bdrv_set_dirty_bitmap(s->copy_bitmap,
- req->offset + new_bytes, req->bytes - new_bytes);
+ task->offset + new_bytes, task->bytes - new_bytes);
- req->bytes = new_bytes;
- qemu_co_queue_restart_all(&req->wait_queue);
+ task->bytes = new_bytes;
+ qemu_co_queue_restart_all(&task->wait_queue);
}
-static void coroutine_fn block_copy_inflight_req_end(BlockCopyState *s,
- BlockCopyInFlightReq *req,
- int ret)
+static void coroutine_fn block_copy_task_end(BlockCopyState *s,
+ BlockCopyTask *task, int ret)
{
- s->in_flight_bytes -= req->bytes;
+ s->in_flight_bytes -= task->bytes;
if (ret < 0) {
- bdrv_set_dirty_bitmap(s->copy_bitmap, req->offset, req->bytes);
+ bdrv_set_dirty_bitmap(s->copy_bitmap, task->offset, task->bytes);
}
- QLIST_REMOVE(req, list);
- qemu_co_queue_restart_all(&req->wait_queue);
+ QLIST_REMOVE(task, list);
+ qemu_co_queue_restart_all(&task->wait_queue);
}
void block_copy_state_free(BlockCopyState *s)
@@ -223,7 +221,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
}
- QLIST_INIT(&s->inflight_reqs);
+ QLIST_INIT(&s->tasks);
return s;
}
@@ -474,7 +472,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
while (bytes) {
- BlockCopyInFlightReq req;
+ BlockCopyTask task;
int64_t next_zero, cur_bytes, status_bytes;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
@@ -495,14 +493,14 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
cur_bytes = next_zero - offset;
}
- block_copy_inflight_req_begin(s, &req, offset, cur_bytes);
+ block_copy_task_begin(s, &task, offset, cur_bytes);
ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
assert(ret >= 0); /* never fail */
cur_bytes = MIN(cur_bytes, status_bytes);
- block_copy_inflight_req_shrink(s, &req, cur_bytes);
+ block_copy_task_shrink(s, &task, cur_bytes);
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
- block_copy_inflight_req_end(s, &req, 0);
+ block_copy_task_end(s, &task, 0);
progress_set_remaining(s->progress,
bdrv_get_dirty_count(s->copy_bitmap) +
s->in_flight_bytes);
@@ -518,7 +516,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
error_is_read);
co_put_to_shres(s->mem, cur_bytes);
- block_copy_inflight_req_end(s, &req, ret);
+ block_copy_task_end(s, &task, ret);
if (ret < 0) {
return ret;
}
--
2.21.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v3 2/5] block/block-copy: alloc task on each iteration
2020-04-29 6:10 [PATCH v3 0/5] block-copy: use aio-task-pool Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 1/5] block/block-copy: rename in-flight requests to tasks Vladimir Sementsov-Ogievskiy
@ 2020-04-29 6:10 ` Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 3/5] block/block-copy: add state pointer to BlockCopyTask Vladimir Sementsov-Ogievskiy
` (2 subsequent siblings)
4 siblings, 0 replies; 13+ messages in thread
From: Vladimir Sementsov-Ogievskiy @ 2020-04-29 6:10 UTC (permalink / raw)
To: qemu-block; +Cc: kwolf, den, vsementsov, qemu-devel, mreitz
We are going to use the aio-task-pool API, so tasks will be handled in
parallel. We therefore need a separately allocated task on each iteration.
Introduce this logic now.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---
block/block-copy.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index bbb29366dc..8d1b9ab9f0 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -106,9 +106,11 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
}
/* Called only on full-dirty region */
-static void block_copy_task_begin(BlockCopyState *s, BlockCopyTask *task,
- int64_t offset, int64_t bytes)
+static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
+ int64_t offset, int64_t bytes)
{
+ BlockCopyTask *task = g_new(BlockCopyTask, 1);
+
assert(!find_conflicting_task(s, offset, bytes));
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
@@ -118,6 +120,8 @@ static void block_copy_task_begin(BlockCopyState *s, BlockCopyTask *task,
task->bytes = bytes;
qemu_co_queue_init(&task->wait_queue);
QLIST_INSERT_HEAD(&s->tasks, task, list);
+
+ return task;
}
/*
@@ -472,7 +476,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
while (bytes) {
- BlockCopyTask task;
+ g_autofree BlockCopyTask *task = NULL;
int64_t next_zero, cur_bytes, status_bytes;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
@@ -493,14 +497,14 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
cur_bytes = next_zero - offset;
}
- block_copy_task_begin(s, &task, offset, cur_bytes);
+ task = block_copy_task_create(s, offset, cur_bytes);
ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
assert(ret >= 0); /* never fail */
cur_bytes = MIN(cur_bytes, status_bytes);
- block_copy_task_shrink(s, &task, cur_bytes);
+ block_copy_task_shrink(s, task, cur_bytes);
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
- block_copy_task_end(s, &task, 0);
+ block_copy_task_end(s, task, 0);
progress_set_remaining(s->progress,
bdrv_get_dirty_count(s->copy_bitmap) +
s->in_flight_bytes);
@@ -516,7 +520,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
error_is_read);
co_put_to_shres(s->mem, cur_bytes);
- block_copy_task_end(s, &task, ret);
+ block_copy_task_end(s, task, ret);
if (ret < 0) {
return ret;
}
--
2.21.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v3 3/5] block/block-copy: add state pointer to BlockCopyTask
2020-04-29 6:10 [PATCH v3 0/5] block-copy: use aio-task-pool Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 1/5] block/block-copy: rename in-flight requests to tasks Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 2/5] block/block-copy: alloc task on each iteration Vladimir Sementsov-Ogievskiy
@ 2020-04-29 6:10 ` Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 4/5] block/block-copy: refactor task creation Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 5/5] block/block-copy: use aio-task-pool API Vladimir Sementsov-Ogievskiy
4 siblings, 0 replies; 13+ messages in thread
From: Vladimir Sementsov-Ogievskiy @ 2020-04-29 6:10 UTC (permalink / raw)
To: qemu-block; +Cc: kwolf, den, vsementsov, qemu-devel, mreitz
We are going to use the aio-task-pool API, so we'll need a state pointer
in BlockCopyTask anyway. Add it now and use it where possible.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---
block/block-copy.c | 28 +++++++++++++++-------------
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index 8d1b9ab9f0..35ff9cc3ef 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -25,6 +25,7 @@
#define BLOCK_COPY_MAX_MEM (128 * MiB)
typedef struct BlockCopyTask {
+ BlockCopyState *s;
int64_t offset;
int64_t bytes;
QLIST_ENTRY(BlockCopyTask) list;
@@ -116,8 +117,11 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
s->in_flight_bytes += bytes;
- task->offset = offset;
- task->bytes = bytes;
+ *task = (BlockCopyTask) {
+ .s = s,
+ .offset = offset,
+ .bytes = bytes,
+ };
qemu_co_queue_init(&task->wait_queue);
QLIST_INSERT_HEAD(&s->tasks, task, list);
@@ -131,8 +135,7 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
* wake up all tasks waiting for us (may be some of them are not intersecting
* with shrunk task)
*/
-static void coroutine_fn block_copy_task_shrink(BlockCopyState *s,
- BlockCopyTask *task,
+static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
int64_t new_bytes)
{
if (new_bytes == task->bytes) {
@@ -141,20 +144,19 @@ static void coroutine_fn block_copy_task_shrink(BlockCopyState *s,
assert(new_bytes > 0 && new_bytes < task->bytes);
- s->in_flight_bytes -= task->bytes - new_bytes;
- bdrv_set_dirty_bitmap(s->copy_bitmap,
+ task->s->in_flight_bytes -= task->bytes - new_bytes;
+ bdrv_set_dirty_bitmap(task->s->copy_bitmap,
task->offset + new_bytes, task->bytes - new_bytes);
task->bytes = new_bytes;
qemu_co_queue_restart_all(&task->wait_queue);
}
-static void coroutine_fn block_copy_task_end(BlockCopyState *s,
- BlockCopyTask *task, int ret)
+static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
- s->in_flight_bytes -= task->bytes;
+ task->s->in_flight_bytes -= task->bytes;
if (ret < 0) {
- bdrv_set_dirty_bitmap(s->copy_bitmap, task->offset, task->bytes);
+ bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
}
QLIST_REMOVE(task, list);
qemu_co_queue_restart_all(&task->wait_queue);
@@ -502,9 +504,9 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
assert(ret >= 0); /* never fail */
cur_bytes = MIN(cur_bytes, status_bytes);
- block_copy_task_shrink(s, task, cur_bytes);
+ block_copy_task_shrink(task, cur_bytes);
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
- block_copy_task_end(s, task, 0);
+ block_copy_task_end(task, 0);
progress_set_remaining(s->progress,
bdrv_get_dirty_count(s->copy_bitmap) +
s->in_flight_bytes);
@@ -520,7 +522,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
error_is_read);
co_put_to_shres(s->mem, cur_bytes);
- block_copy_task_end(s, task, ret);
+ block_copy_task_end(task, ret);
if (ret < 0) {
return ret;
}
--
2.21.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v3 4/5] block/block-copy: refactor task creation
2020-04-29 6:10 [PATCH v3 0/5] block-copy: use aio-task-pool Vladimir Sementsov-Ogievskiy
` (2 preceding siblings ...)
2020-04-29 6:10 ` [PATCH v3 3/5] block/block-copy: add state pointer to BlockCopyTask Vladimir Sementsov-Ogievskiy
@ 2020-04-29 6:10 ` Vladimir Sementsov-Ogievskiy
2020-04-29 11:38 ` Max Reitz
2020-04-29 6:10 ` [PATCH v3 5/5] block/block-copy: use aio-task-pool API Vladimir Sementsov-Ogievskiy
4 siblings, 1 reply; 13+ messages in thread
From: Vladimir Sementsov-Ogievskiy @ 2020-04-29 6:10 UTC (permalink / raw)
To: qemu-block; +Cc: kwolf, den, vsementsov, qemu-devel, mreitz
Instead of just relying on the comment "Called only on full-dirty
region" in block_copy_task_create(), let's move the initial dirty-area
search directly into block_copy_task_create(). Let's also use the
efficient bdrv_dirty_bitmap_next_dirty_area() instead of looping through
all non-dirty clusters.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
block/block-copy.c | 78 ++++++++++++++++++++++++++--------------------
1 file changed, 44 insertions(+), 34 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index 35ff9cc3ef..5cf032c4d8 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -32,6 +32,11 @@ typedef struct BlockCopyTask {
CoQueue wait_queue; /* coroutines blocked on this task */
} BlockCopyTask;
+static int64_t task_end(BlockCopyTask *task)
+{
+ return task->offset + task->bytes;
+}
+
typedef struct BlockCopyState {
/*
* BdrvChild objects are not owned or managed by block-copy. They are
@@ -106,17 +111,27 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
return true;
}
-/* Called only on full-dirty region */
+/*
+ * Search for the first dirty area in offset/bytes range and create task at
+ * the beginning of it.
+ */
static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
int64_t offset, int64_t bytes)
{
- BlockCopyTask *task = g_new(BlockCopyTask, 1);
+ if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
+ offset, offset + bytes,
+ s->copy_size, &offset, &bytes))
+ {
+ return NULL;
+ }
+ /* region is dirty, so no existent tasks possible in it */
assert(!find_conflicting_task(s, offset, bytes));
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
s->in_flight_bytes += bytes;
+ BlockCopyTask *task = g_new(BlockCopyTask, 1);
*task = (BlockCopyTask) {
.s = s,
.offset = offset,
@@ -466,6 +481,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
{
int ret = 0;
bool found_dirty = false;
+ int64_t end = offset + bytes;
/*
* block_copy() user is responsible for keeping source and target in same
@@ -479,58 +495,52 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
while (bytes) {
g_autofree BlockCopyTask *task = NULL;
- int64_t next_zero, cur_bytes, status_bytes;
+ int64_t status_bytes;
- if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
- trace_block_copy_skip(s, offset);
- offset += s->cluster_size;
- bytes -= s->cluster_size;
- continue; /* already copied */
+ task = block_copy_task_create(s, offset, bytes);
+ if (!task) {
+ /* No more dirty bits in the bitmap */
+ trace_block_copy_skip_range(s, offset, bytes);
+ break;
+ }
+ if (task->offset > offset) {
+ trace_block_copy_skip_range(s, offset, task->offset - offset);
}
found_dirty = true;
- cur_bytes = MIN(bytes, s->copy_size);
-
- next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset,
- cur_bytes);
- if (next_zero >= 0) {
- assert(next_zero > offset); /* offset is dirty */
- assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
- cur_bytes = next_zero - offset;
- }
- task = block_copy_task_create(s, offset, cur_bytes);
-
- ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
+ ret = block_copy_block_status(s, task->offset, task->bytes,
+ &status_bytes);
assert(ret >= 0); /* never fail */
- cur_bytes = MIN(cur_bytes, status_bytes);
- block_copy_task_shrink(task, cur_bytes);
+ if (status_bytes < task->bytes) {
+ block_copy_task_shrink(task, status_bytes);
+ }
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
block_copy_task_end(task, 0);
progress_set_remaining(s->progress,
bdrv_get_dirty_count(s->copy_bitmap) +
s->in_flight_bytes);
- trace_block_copy_skip_range(s, offset, status_bytes);
- offset += status_bytes;
- bytes -= status_bytes;
+ trace_block_copy_skip_range(s, task->offset, task->bytes);
+ offset = task_end(task);
+ bytes = end - offset;
continue;
}
- trace_block_copy_process(s, offset);
+ trace_block_copy_process(s, task->offset);
- co_get_from_shres(s->mem, cur_bytes);
- ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
- error_is_read);
- co_put_to_shres(s->mem, cur_bytes);
+ co_get_from_shres(s->mem, task->bytes);
+ ret = block_copy_do_copy(s, task->offset, task->bytes,
+ ret & BDRV_BLOCK_ZERO, error_is_read);
+ co_put_to_shres(s->mem, task->bytes);
block_copy_task_end(task, ret);
if (ret < 0) {
return ret;
}
- progress_work_done(s->progress, cur_bytes);
- s->progress_bytes_callback(cur_bytes, s->progress_opaque);
- offset += cur_bytes;
- bytes -= cur_bytes;
+ progress_work_done(s->progress, task->bytes);
+ s->progress_bytes_callback(task->bytes, s->progress_opaque);
+ offset = task_end(task);
+ bytes = end - offset;
}
return found_dirty;
--
2.21.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v3 5/5] block/block-copy: use aio-task-pool API
2020-04-29 6:10 [PATCH v3 0/5] block-copy: use aio-task-pool Vladimir Sementsov-Ogievskiy
` (3 preceding siblings ...)
2020-04-29 6:10 ` [PATCH v3 4/5] block/block-copy: refactor task creation Vladimir Sementsov-Ogievskiy
@ 2020-04-29 6:10 ` Vladimir Sementsov-Ogievskiy
2020-04-29 11:55 ` Max Reitz
4 siblings, 1 reply; 13+ messages in thread
From: Vladimir Sementsov-Ogievskiy @ 2020-04-29 6:10 UTC (permalink / raw)
To: qemu-block; +Cc: kwolf, den, vsementsov, qemu-devel, mreitz
Run block_copy iterations in parallel in aio tasks.
Changes:
- BlockCopyTask becomes an aio task structure. Add a zeroes field to
pass it to block_copy_do_copy
- add a call state - the state of one call of block_copy(), shared
between parallel tasks. For now it is used only to keep information
about the first error: whether it is a read error or not.
- convert block_copy_dirty_clusters to an aio-task loop.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
block/block-copy.c | 104 +++++++++++++++++++++++++++++++++++++++------
1 file changed, 91 insertions(+), 13 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index 5cf032c4d8..f5ef91f292 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -19,15 +19,29 @@
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
+#include "qemu/coroutine.h"
+#include "block/aio_task.h"
#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
+#define BLOCK_COPY_MAX_WORKERS 64
+
+static coroutine_fn int block_copy_task_entry(AioTask *task);
+
+typedef struct BlockCopyCallState {
+ bool failed;
+ bool error_is_read;
+} BlockCopyCallState;
typedef struct BlockCopyTask {
+ AioTask task;
+
BlockCopyState *s;
+ BlockCopyCallState *call_state;
int64_t offset;
int64_t bytes;
+ bool zeroes;
QLIST_ENTRY(BlockCopyTask) list;
CoQueue wait_queue; /* coroutines blocked on this task */
} BlockCopyTask;
@@ -116,6 +130,7 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
* the beginning of it.
*/
static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
+ BlockCopyCallState *call_state,
int64_t offset, int64_t bytes)
{
if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
@@ -133,7 +148,9 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
BlockCopyTask *task = g_new(BlockCopyTask, 1);
*task = (BlockCopyTask) {
+ .task.func = block_copy_task_entry,
.s = s,
+ .call_state = call_state,
.offset = offset,
.bytes = bytes,
};
@@ -261,6 +278,30 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
s->progress = pm;
}
+/* Takes ownership on @task */
+static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
+ BlockCopyTask *task)
+{
+ if (!pool) {
+ int ret = task->task.func(&task->task);
+
+ g_free(task);
+ return ret;
+ }
+
+ aio_task_pool_wait_slot(pool);
+ if (aio_task_pool_status(pool) < 0) {
+ co_put_to_shres(task->s->mem, task->bytes);
+ block_copy_task_end(task, -EAGAIN);
+ g_free(task);
+ return aio_task_pool_status(pool);
+ }
+
+ aio_task_pool_start_task(pool, &task->task);
+
+ return 0;
+}
+
/*
* block_copy_do_copy
*
@@ -364,6 +405,27 @@ out:
return ret;
}
+static coroutine_fn int block_copy_task_entry(AioTask *task)
+{
+ BlockCopyTask *t = container_of(task, BlockCopyTask, task);
+ bool error_is_read;
+ int ret;
+
+ ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
+ &error_is_read);
+ if (ret < 0 && !t->call_state->failed) {
+ t->call_state->failed = true;
+ t->call_state->error_is_read = error_is_read;
+ } else {
+ progress_work_done(t->s->progress, t->bytes);
+ t->s->progress_bytes_callback(t->bytes, t->s->progress_opaque);
+ }
+ co_put_to_shres(t->s->mem, t->bytes);
+ block_copy_task_end(t, ret);
+
+ return ret;
+}
+
static int block_copy_block_status(BlockCopyState *s, int64_t offset,
int64_t bytes, int64_t *pnum)
{
@@ -482,6 +544,8 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
int ret = 0;
bool found_dirty = false;
int64_t end = offset + bytes;
+ AioTaskPool *aio = NULL;
+ BlockCopyCallState call_state = {false, false};
/*
* block_copy() user is responsible for keeping source and target in same
@@ -493,11 +557,11 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
- while (bytes) {
- g_autofree BlockCopyTask *task = NULL;
+ while (bytes && aio_task_pool_status(aio) == 0) {
+ BlockCopyTask *task;
int64_t status_bytes;
- task = block_copy_task_create(s, offset, bytes);
+ task = block_copy_task_create(s, &call_state, offset, bytes);
if (!task) {
/* No more dirty bits in the bitmap */
trace_block_copy_skip_range(s, offset, bytes);
@@ -517,6 +581,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
}
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
block_copy_task_end(task, 0);
+ g_free(task);
progress_set_remaining(s->progress,
bdrv_get_dirty_count(s->copy_bitmap) +
s->in_flight_bytes);
@@ -525,25 +590,38 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
bytes = end - offset;
continue;
}
+ task->zeroes = ret & BDRV_BLOCK_ZERO;
trace_block_copy_process(s, task->offset);
co_get_from_shres(s->mem, task->bytes);
- ret = block_copy_do_copy(s, task->offset, task->bytes,
- ret & BDRV_BLOCK_ZERO, error_is_read);
- co_put_to_shres(s->mem, task->bytes);
- block_copy_task_end(task, ret);
- if (ret < 0) {
- return ret;
- }
- progress_work_done(s->progress, task->bytes);
- s->progress_bytes_callback(task->bytes, s->progress_opaque);
offset = task_end(task);
bytes = end - offset;
+
+ if (!aio && bytes) {
+ aio = aio_task_pool_new(BLOCK_COPY_MAX_WORKERS);
+ }
+
+ ret = block_copy_task_run(aio, task);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+
+out:
+ if (aio) {
+ aio_task_pool_wait_all(aio);
+ if (ret == 0) {
+ ret = aio_task_pool_status(aio);
+ }
+ g_free(aio);
+ }
+ if (error_is_read && ret < 0) {
+ *error_is_read = call_state.error_is_read;
}
- return found_dirty;
+ return ret < 0 ? ret : found_dirty;
}
/*
--
2.21.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH v3 1/5] block/block-copy: rename in-flight requests to tasks
2020-04-29 6:10 ` [PATCH v3 1/5] block/block-copy: rename in-flight requests to tasks Vladimir Sementsov-Ogievskiy
@ 2020-04-29 10:57 ` Max Reitz
0 siblings, 0 replies; 13+ messages in thread
From: Max Reitz @ 2020-04-29 10:57 UTC (permalink / raw)
To: Vladimir Sementsov-Ogievskiy, qemu-block; +Cc: kwolf, den, qemu-devel
[-- Attachment #1.1: Type: text/plain, Size: 463 bytes --]
On 29.04.20 08:10, Vladimir Sementsov-Ogievskiy wrote:
> We are going to use aio-task-pool API and extend in-flight request
> structure to be a successor of AioTask, so rename things appropriately.
>
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> ---
> block/block-copy.c | 98 +++++++++++++++++++++++-----------------------
> 1 file changed, 48 insertions(+), 50 deletions(-)
Reviewed-by: Max Reitz <mreitz@redhat.com>
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v3 4/5] block/block-copy: refactor task creation
2020-04-29 6:10 ` [PATCH v3 4/5] block/block-copy: refactor task creation Vladimir Sementsov-Ogievskiy
@ 2020-04-29 11:38 ` Max Reitz
2020-04-29 11:54 ` Vladimir Sementsov-Ogievskiy
0 siblings, 1 reply; 13+ messages in thread
From: Max Reitz @ 2020-04-29 11:38 UTC (permalink / raw)
To: Vladimir Sementsov-Ogievskiy, qemu-block; +Cc: kwolf, den, qemu-devel
[-- Attachment #1.1: Type: text/plain, Size: 2042 bytes --]
On 29.04.20 08:10, Vladimir Sementsov-Ogievskiy wrote:
> Instead of just relying on the comment "Called only on full-dirty
> region" in block_copy_task_create() let's move initial dirty area
> search directly to block_copy_task_create(). Let's also use effective
> bdrv_dirty_bitmap_next_dirty_area instead of looping through all
> non-dirty clusters.
>
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> ---
> block/block-copy.c | 78 ++++++++++++++++++++++++++--------------------
> 1 file changed, 44 insertions(+), 34 deletions(-)
>
> diff --git a/block/block-copy.c b/block/block-copy.c
> index 35ff9cc3ef..5cf032c4d8 100644
> --- a/block/block-copy.c
> +++ b/block/block-copy.c
[...]
> @@ -106,17 +111,27 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
> return true;
> }
>
> -/* Called only on full-dirty region */
> +/*
> + * Search for the first dirty area in offset/bytes range and create task at
> + * the beginning of it.
Oh, that’s even better.
> + */
> static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
> int64_t offset, int64_t bytes)
> {
> - BlockCopyTask *task = g_new(BlockCopyTask, 1);
> + if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
> + offset, offset + bytes,
> + s->copy_size, &offset, &bytes))
> + {
> + return NULL;
> + }
>
> + /* region is dirty, so no existent tasks possible in it */
> assert(!find_conflicting_task(s, offset, bytes));
>
> bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
> s->in_flight_bytes += bytes;
>
> + BlockCopyTask *task = g_new(BlockCopyTask, 1);
This should be declared at the top of the function.
With that fixed:
Reviewed-by: Max Reitz <mreitz@redhat.com>
> *task = (BlockCopyTask) {
> .s = s,
> .offset = offset,
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v3 4/5] block/block-copy: refactor task creation
2020-04-29 11:38 ` Max Reitz
@ 2020-04-29 11:54 ` Vladimir Sementsov-Ogievskiy
2020-04-29 11:56 ` Max Reitz
0 siblings, 1 reply; 13+ messages in thread
From: Vladimir Sementsov-Ogievskiy @ 2020-04-29 11:54 UTC (permalink / raw)
To: Max Reitz, qemu-block; +Cc: kwolf, den, qemu-devel
29.04.2020 14:38, Max Reitz wrote:
> On 29.04.20 08:10, Vladimir Sementsov-Ogievskiy wrote:
>> Instead of just relying on the comment "Called only on full-dirty
>> region" in block_copy_task_create() let's move initial dirty area
>> search directly to block_copy_task_create(). Let's also use effective
>> bdrv_dirty_bitmap_next_dirty_area instead of looping through all
>> non-dirty clusters.
>>
>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>> ---
>> block/block-copy.c | 78 ++++++++++++++++++++++++++--------------------
>> 1 file changed, 44 insertions(+), 34 deletions(-)
>>
>> diff --git a/block/block-copy.c b/block/block-copy.c
>> index 35ff9cc3ef..5cf032c4d8 100644
>> --- a/block/block-copy.c
>> +++ b/block/block-copy.c
>
> [...]
>
>> @@ -106,17 +111,27 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
>> return true;
>> }
>>
>> -/* Called only on full-dirty region */
>> +/*
>> + * Search for the first dirty area in offset/bytes range and create task at
>> + * the beginning of it.
>
> Oh, that’s even better.
>
>> + */
>> static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
>> int64_t offset, int64_t bytes)
>> {
>> - BlockCopyTask *task = g_new(BlockCopyTask, 1);
>> + if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
>> + offset, offset + bytes,
>> + s->copy_size, &offset, &bytes))
>> + {
>> + return NULL;
>> + }
>>
>> + /* region is dirty, so no existent tasks possible in it */
>> assert(!find_conflicting_task(s, offset, bytes));
>>
>> bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
>> s->in_flight_bytes += bytes;
>>
>> + BlockCopyTask *task = g_new(BlockCopyTask, 1);
>
> This should be declared at the top of the function.
>
I just thought, why not try another style? Are you against it? The requirement to declare variables at the start of a block is obsolete, isn't it?
>
> Reviewed-by: Max Reitz <mreitz@redhat.com>
>
>> *task = (BlockCopyTask) {
>> .s = s,
>> .offset = offset,
>
--
Best regards,
Vladimir
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v3 5/5] block/block-copy: use aio-task-pool API
2020-04-29 6:10 ` [PATCH v3 5/5] block/block-copy: use aio-task-pool API Vladimir Sementsov-Ogievskiy
@ 2020-04-29 11:55 ` Max Reitz
2020-04-29 12:11 ` Vladimir Sementsov-Ogievskiy
0 siblings, 1 reply; 13+ messages in thread
From: Max Reitz @ 2020-04-29 11:55 UTC (permalink / raw)
To: Vladimir Sementsov-Ogievskiy, qemu-block; +Cc: kwolf, den, qemu-devel
[-- Attachment #1.1: Type: text/plain, Size: 2448 bytes --]
On 29.04.20 08:10, Vladimir Sementsov-Ogievskiy wrote:
> Run block_copy iterations in parallel in aio tasks.
>
> Changes:
> - BlockCopyTask becomes aio task structure. Add zeroes field to pass
> it to block_copy_do_copy
> - add call state - it's a state of one call of block_copy(), shared
> between parallel tasks. For now used only to keep information about
> first error: is it read or not.
> - convert block_copy_dirty_clusters to aio-task loop.
>
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> ---
> block/block-copy.c | 104 +++++++++++++++++++++++++++++++++++++++------
> 1 file changed, 91 insertions(+), 13 deletions(-)
>
> diff --git a/block/block-copy.c b/block/block-copy.c
> index 5cf032c4d8..f5ef91f292 100644
> --- a/block/block-copy.c
> +++ b/block/block-copy.c
[...]
> @@ -261,6 +278,30 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
> s->progress = pm;
> }
>
> +/* Takes ownership on @task */
Still *of
> +static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
> + BlockCopyTask *task)
> +{
> + if (!pool) {
> + int ret = task->task.func(&task->task);
> +
> + g_free(task);
> + return ret;
> + }
> +
> + aio_task_pool_wait_slot(pool);
> + if (aio_task_pool_status(pool) < 0) {
> + co_put_to_shres(task->s->mem, task->bytes);
> + block_copy_task_end(task, -EAGAIN);
It looks like you may have missed my nit picks on v2 regarding this
patch, so I’m going to ask again whether -ECANCELED might be better here
(even though it still doesn’t really matter).
> + g_free(task);
> + return aio_task_pool_status(pool);
And whether it may be better to return a constant like -ECANCELED here,
because how a previous task failed shouldn’t really concern this task
(or its error code).
> + }
> +
> + aio_task_pool_start_task(pool, &task->task);
> +
> + return 0;
> +}
> +
> /*
> * block_copy_do_copy
> *
[...]
> @@ -525,25 +590,38 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
[...]
> +out:
> + if (aio) {
> + aio_task_pool_wait_all(aio);
> + if (ret == 0) {
> + ret = aio_task_pool_status(aio);
> + }
> + g_free(aio);
I’d still prefer aio_task_pool_free().
Max
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v3 4/5] block/block-copy: refactor task creation
2020-04-29 11:54 ` Vladimir Sementsov-Ogievskiy
@ 2020-04-29 11:56 ` Max Reitz
2020-04-29 12:04 ` Vladimir Sementsov-Ogievskiy
0 siblings, 1 reply; 13+ messages in thread
From: Max Reitz @ 2020-04-29 11:56 UTC (permalink / raw)
To: Vladimir Sementsov-Ogievskiy, qemu-block; +Cc: kwolf, den, qemu-devel
[-- Attachment #1.1: Type: text/plain, Size: 2838 bytes --]
On 29.04.20 13:54, Vladimir Sementsov-Ogievskiy wrote:
> 29.04.2020 14:38, Max Reitz wrote:
>> On 29.04.20 08:10, Vladimir Sementsov-Ogievskiy wrote:
>>> Instead of just relying on the comment "Called only on full-dirty
>>> region" in block_copy_task_create() let's move initial dirty area
>>> search directly to block_copy_task_create(). Let's also use effective
>>> bdrv_dirty_bitmap_next_dirty_area instead of looping through all
>>> non-dirty clusters.
>>>
>>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>>> ---
>>> block/block-copy.c | 78 ++++++++++++++++++++++++++--------------------
>>> 1 file changed, 44 insertions(+), 34 deletions(-)
>>>
>>> diff --git a/block/block-copy.c b/block/block-copy.c
>>> index 35ff9cc3ef..5cf032c4d8 100644
>>> --- a/block/block-copy.c
>>> +++ b/block/block-copy.c
>>
>> [...]
>>
>>> @@ -106,17 +111,27 @@ static bool coroutine_fn
>>> block_copy_wait_one(BlockCopyState *s, int64_t offset,
>>> return true;
>>> }
>>> -/* Called only on full-dirty region */
>>> +/*
>>> + * Search for the first dirty area in offset/bytes range and create
>>> task at
>>> + * the beginning of it.
>>
>> Oh, that’s even better.
>>
>>> + */
>>> static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
>>> int64_t offset,
>>> int64_t bytes)
>>> {
>>> - BlockCopyTask *task = g_new(BlockCopyTask, 1);
>>> + if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
>>> + offset, offset + bytes,
>>> + s->copy_size, &offset,
>>> &bytes))
>>> + {
>>> + return NULL;
>>> + }
>>> + /* region is dirty, so no existent tasks possible in it */
>>> assert(!find_conflicting_task(s, offset, bytes));
>>> bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
>>> s->in_flight_bytes += bytes;
>>> + BlockCopyTask *task = g_new(BlockCopyTask, 1);
>>
>> This should be declared at the top of the function.
>>
>
> I just thought, why not to try another style? Are you against?
> Requirement to declare variables at start of block is obsolete, isn't it?
Oh, it absolutely is and personally I’m absolutely not against it, but
CODING_STYLE says:
> Mixed declarations (interleaving statements and declarations within
> blocks) are generally not allowed; declarations should be at the beginning
> of blocks.
Max
>> Reviewed-by: Max Reitz <mreitz@redhat.com>
>>
>>> *task = (BlockCopyTask) {
>>> .s = s,
>>> .offset = offset,
>>
>
>
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v3 4/5] block/block-copy: refactor task creation
2020-04-29 11:56 ` Max Reitz
@ 2020-04-29 12:04 ` Vladimir Sementsov-Ogievskiy
0 siblings, 0 replies; 13+ messages in thread
From: Vladimir Sementsov-Ogievskiy @ 2020-04-29 12:04 UTC (permalink / raw)
To: Max Reitz, qemu-block; +Cc: kwolf, den, qemu-devel
29.04.2020 14:56, Max Reitz wrote:
> On 29.04.20 13:54, Vladimir Sementsov-Ogievskiy wrote:
>> 29.04.2020 14:38, Max Reitz wrote:
>>> On 29.04.20 08:10, Vladimir Sementsov-Ogievskiy wrote:
>>>> Instead of just relying on the comment "Called only on full-dirty
>>>> region" in block_copy_task_create() let's move initial dirty area
>>>> search directly to block_copy_task_create(). Let's also use effective
>>>> bdrv_dirty_bitmap_next_dirty_area instead of looping through all
>>>> non-dirty clusters.
>>>>
>>>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>>>> ---
>>>> block/block-copy.c | 78 ++++++++++++++++++++++++++--------------------
>>>> 1 file changed, 44 insertions(+), 34 deletions(-)
>>>>
>>>> diff --git a/block/block-copy.c b/block/block-copy.c
>>>> index 35ff9cc3ef..5cf032c4d8 100644
>>>> --- a/block/block-copy.c
>>>> +++ b/block/block-copy.c
>>>
>>> [...]
>>>
>>>> @@ -106,17 +111,27 @@ static bool coroutine_fn
>>>> block_copy_wait_one(BlockCopyState *s, int64_t offset,
>>>> return true;
>>>> }
>>>> -/* Called only on full-dirty region */
>>>> +/*
>>>> + * Search for the first dirty area in offset/bytes range and create
>>>> task at
>>>> + * the beginning of it.
>>>
>>> Oh, that’s even better.
>>>
>>>> + */
>>>> static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
>>>> int64_t offset,
>>>> int64_t bytes)
>>>> {
>>>> - BlockCopyTask *task = g_new(BlockCopyTask, 1);
>>>> + if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
>>>> + offset, offset + bytes,
>>>> + s->copy_size, &offset,
>>>> &bytes))
>>>> + {
>>>> + return NULL;
>>>> + }
>>>> + /* region is dirty, so no existent tasks possible in it */
>>>> assert(!find_conflicting_task(s, offset, bytes));
>>>> bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
>>>> s->in_flight_bytes += bytes;
>>>> + BlockCopyTask *task = g_new(BlockCopyTask, 1);
>>>
>>> This should be declared at the top of the function.
>>>
>>
>> I just thought, why not to try another style? Are you against?
>> Requirement to declare variables at start of block is obsolete, isn't it?
>
> Oh, it absolutely is and personally I’m absolutely not against it, but
> CODING_STYLE says:
>
>> Mixed declarations (interleaving statements and declarations within
>> blocks) are generally not allowed; declarations should be at the beginning
>> of blocks.
>
Oh, I missed that (or forgot). Let's fix it? :) Not in this series, of course. OK, I'll fix the patch.
--
Best regards,
Vladimir
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v3 5/5] block/block-copy: use aio-task-pool API
2020-04-29 11:55 ` Max Reitz
@ 2020-04-29 12:11 ` Vladimir Sementsov-Ogievskiy
0 siblings, 0 replies; 13+ messages in thread
From: Vladimir Sementsov-Ogievskiy @ 2020-04-29 12:11 UTC (permalink / raw)
To: Max Reitz, qemu-block; +Cc: kwolf, den, qemu-devel
29.04.2020 14:55, Max Reitz wrote:
> On 29.04.20 08:10, Vladimir Sementsov-Ogievskiy wrote:
>> Run block_copy iterations in parallel in aio tasks.
>>
>> Changes:
>> - BlockCopyTask becomes aio task structure. Add zeroes field to pass
>> it to block_copy_do_copy
>> - add call state - it's a state of one call of block_copy(), shared
>> between parallel tasks. For now used only to keep information about
>> first error: is it read or not.
>> - convert block_copy_dirty_clusters to aio-task loop.
>>
>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>> ---
>> block/block-copy.c | 104 +++++++++++++++++++++++++++++++++++++++------
>> 1 file changed, 91 insertions(+), 13 deletions(-)
>>
>> diff --git a/block/block-copy.c b/block/block-copy.c
>> index 5cf032c4d8..f5ef91f292 100644
>> --- a/block/block-copy.c
>> +++ b/block/block-copy.c
>
> [...]
>
>> @@ -261,6 +278,30 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
>> s->progress = pm;
>> }
>>
>> +/* Takes ownership on @task */
>
> Still *of
Ohhh, very sorry about that, I really forgot to update the patch :(
>
>> +static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
>> + BlockCopyTask *task)
>> +{
>> + if (!pool) {
>> + int ret = task->task.func(&task->task);
>> +
>> + g_free(task);
>> + return ret;
>> + }
>> +
>> + aio_task_pool_wait_slot(pool);
>> + if (aio_task_pool_status(pool) < 0) {
>> + co_put_to_shres(task->s->mem, task->bytes);
>> + block_copy_task_end(task, -EAGAIN);
>
> It looks like you may have missed my nit picks on v2 regarding this
> patch, so I’m going to ask again whether -ECANCELED might be better here
> (even though it still doesn’t really matter).
Hmm yes, that sounds better in this context. And I don't see any specific usage of it, and I don't remember why I chose EAGAIN :) Let's use ECANCELED.
>
>> + g_free(task);
>> + return aio_task_pool_status(pool);
>
> And whether it may be better to return a constant like -ECANCELED here,
> because how a previous task failed shouldn’t really concern this task
> (or its error code).
Looks correct, will change.
>
>> + }
>> +
>> + aio_task_pool_start_task(pool, &task->task);
>> +
>> + return 0;
>> +}
>> +
>> /*
>> * block_copy_do_copy
>> *
>
> [...]
>
>> @@ -525,25 +590,38 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
>
> [...]
>
>> +out:
>> + if (aio) {
>> + aio_task_pool_wait_all(aio);
>> + if (ret == 0) {
>> + ret = aio_task_pool_status(aio);
>> + }
>> + g_free(aio);
>
> I’d still prefer aio_task_pool_free().
>
a thousand apologies :(
--
Best regards,
Vladimir
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2020-04-29 12:12 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-04-29 6:10 [PATCH v3 0/5] block-copy: use aio-task-pool Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 1/5] block/block-copy: rename in-flight requests to tasks Vladimir Sementsov-Ogievskiy
2020-04-29 10:57 ` Max Reitz
2020-04-29 6:10 ` [PATCH v3 2/5] block/block-copy: alloc task on each iteration Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 3/5] block/block-copy: add state pointer to BlockCopyTask Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 4/5] block/block-copy: refactor task creation Vladimir Sementsov-Ogievskiy
2020-04-29 11:38 ` Max Reitz
2020-04-29 11:54 ` Vladimir Sementsov-Ogievskiy
2020-04-29 11:56 ` Max Reitz
2020-04-29 12:04 ` Vladimir Sementsov-Ogievskiy
2020-04-29 6:10 ` [PATCH v3 5/5] block/block-copy: use aio-task-pool API Vladimir Sementsov-Ogievskiy
2020-04-29 11:55 ` Max Reitz
2020-04-29 12:11 ` Vladimir Sementsov-Ogievskiy
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.