From: Bingsong Si <owen.si@ucloud.cn>
To: qemu-devel@nongnu.org
Subject: [PATCH] ram: add support for dirty page tracking
Date: Fri,  3 Jul 2020 17:28:53 +0800	[thread overview]
Message-ID: <20200703092853.1448582-1-owen.si@ucloud.cn> (raw)

In production, a VM with intensive memory activity may fail to migrate
because the memory in the VM changes faster than the network interface
can transfer it, and we want to identify such VMs before starting a
migration.

1. Start dirty tracking:
virsh qemu-monitor-command <domain> --hmp dirty_track

2. After some time, stop dirty tracking:
virsh qemu-monitor-command <domain> --hmp dirty_track_stop
Dirty rate: 607 pages/s
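
As a rough illustration (assuming 4 KiB target pages and, purely as an
example, a 1 Gbps migration link), the rate reported above works out to:

  607 pages/s * 4096 bytes/page = ~2.4 MiB/s of newly dirtied memory

which is far below the ~119 MiB/s such a link can carry, so this guest
should migrate without trouble. A guest dirtying memory faster than the
link throughput is unlikely to converge without throttling.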

Signed-off-by: Bingsong Si <owen.si@ucloud.cn>
---
 hmp-commands.hx       | 26 ++++++++++++++
 include/monitor/hmp.h |  2 ++
 migration/migration.c |  5 +++
 migration/ram.c       | 65 +++++++++++++++++++++++++++++++++
 migration/ram.h       |  5 +++
 migration/savevm.c    | 83 +++++++++++++++++++++++++++++++++++++++++++
 migration/savevm.h    |  2 ++
 7 files changed, 188 insertions(+)

diff --git a/hmp-commands.hx b/hmp-commands.hx
index 60f395c276..05a688286b 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -1819,6 +1819,32 @@ SRST
   Set QOM property *property* of object at location *path* to value *value*
 ERST
 
+    {
+        .name       = "dirty_track",
+        .args_type  = "",
+        .params     = "",
+        .help       = "track dirty pages rate",
+        .cmd        = hmp_dirty_track,
+    },
+
+SRST
+``dirty_track``
+  Start tracking the dirty page rate.
+ERST
+
+    {
+        .name       = "dirty_track_stop",
+        .args_type  = "",
+        .params     = "",
+        .help       = "stop current dirty pages track",
+        .cmd        = hmp_dirty_track_stop,
+    },
+
+SRST
+``dirty_track_stop``
+  Stop dirty page tracking and print the measured dirty page rate.
+ERST
+
     {
         .name       = "info",
         .args_type  = "item:s?",
diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h
index c986cfd28b..c139fe8758 100644
--- a/include/monitor/hmp.h
+++ b/include/monitor/hmp.h
@@ -130,5 +130,7 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict);
 void hmp_info_vm_generation_id(Monitor *mon, const QDict *qdict);
 void hmp_info_memory_size_summary(Monitor *mon, const QDict *qdict);
 void hmp_info_sev(Monitor *mon, const QDict *qdict);
+void hmp_dirty_track(Monitor *mon, const QDict *qdict);
+void hmp_dirty_track_stop(Monitor *mon, const QDict *qdict);
 
 #endif
diff --git a/migration/migration.c b/migration/migration.c
index 481a590f72..5550afafe6 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1964,6 +1964,11 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
 {
     Error *local_err = NULL;
 
+    if (dirty_track_is_running()) {
+        error_setg(errp, "There is a dirty tracking process in progress");
+        return false;
+    }
+
     if (resume) {
         if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
             error_setg(errp, "Cannot resume if there is no "
diff --git a/migration/ram.c b/migration/ram.c
index 069b6e30bc..03a5e44617 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3780,6 +3780,71 @@ static int ram_resume_prepare(MigrationState *s, void *opaque)
     return 0;
 }
 
+void dirty_track_init(void)
+{
+    RAMBlock *block;
+
+    if (ram_bytes_total()) {
+        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
+
+            block->bmap = bitmap_new(pages);
+            bitmap_set(block->bmap, 0, pages);
+        }
+    }
+    ram_state = g_new0(RAMState, 1);
+    ram_state->migration_dirty_pages = 0;
+    memory_global_dirty_log_start();
+}
+
+uint64_t dirty_track_dirty_pages(void)
+{
+    return ram_state->migration_dirty_pages;
+}
+
+void dirty_track_sync(void)
+{
+    RAMBlock *block = NULL;
+    unsigned long offset = 0;
+
+    memory_global_dirty_log_sync();
+    rcu_read_lock();
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+        ramblock_sync_dirty_bitmap(ram_state, block);
+    }
+    rcu_read_unlock();
+
+    rcu_read_lock();
+    block = QLIST_FIRST_RCU(&ram_list.blocks);
+
+    while (block) {
+        offset = migration_bitmap_find_dirty(ram_state, block, offset);
+
+        if (offset << TARGET_PAGE_BITS >= block->used_length) {
+            offset = 0;
+            block = QLIST_NEXT_RCU(block, next);
+        } else {
+            test_and_clear_bit(offset, block->bmap);
+        }
+    }
+
+    rcu_read_unlock();
+}
+
+void dirty_track_cleanup(void)
+{
+    RAMBlock *block;
+
+    memory_global_dirty_log_stop();
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+        g_free(block->bmap);
+        block->bmap = NULL;
+    }
+
+    g_free(ram_state);
+    ram_state = NULL;
+}
+
 static SaveVMHandlers savevm_ram_handlers = {
     .save_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
diff --git a/migration/ram.h b/migration/ram.h
index 2eeaacfa13..104c48285c 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -69,4 +69,9 @@ void colo_flush_ram_cache(void);
 void colo_release_ram_cache(void);
 void colo_incoming_start_dirty_log(void);
 
+void dirty_track_init(void);
+uint64_t dirty_track_dirty_pages(void);
+void dirty_track_sync(void);
+void dirty_track_cleanup(void);
+
 #endif
diff --git a/migration/savevm.c b/migration/savevm.c
index b979ea6e7f..0be70e6528 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -63,6 +63,8 @@
 #include "migration/colo.h"
 #include "qemu/bitmap.h"
 #include "net/announce.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp.h"
 
 const unsigned int postcopy_ram_discard_version = 0;
 
@@ -171,6 +173,15 @@ static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
     return qemu_fopen_ops(bs, &bdrv_read_ops);
 }
 
+static struct DirtyTrackState {
+    QemuThread thread;
+    int dirty_pages_rate;
+    bool quit;
+} current_dirty_track_state = {
+    .thread = { 0 },
+    .dirty_pages_rate = 0,
+    .quit = false,
+};
 
 /* QEMUFile timer support.
  * Not in qemu-file.c to not add qemu-timer.c as dependency to qemu-file.c
@@ -2747,6 +2758,78 @@ int save_snapshot(const char *name, Error **errp)
     return ret;
 }
 
+static void *dirty_track_thread(void *opaque)
+{
+    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+    struct DirtyTrackState *s = opaque;
+    int64_t current_time;
+    uint64_t time_spent;
+
+    for (;;) {
+        dirty_track_sync();
+        if (s->quit) {
+            current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+            time_spent = current_time - initial_time;
+
+            if (time_spent) {
+                s->dirty_pages_rate = dirty_track_dirty_pages() * 1000 /
+                    time_spent;
+            }
+            break;
+        }
+        usleep(1000 * 100);
+    }
+    return NULL;
+}
+
+bool dirty_track_is_running(void)
+{
+    return !!current_dirty_track_state.thread.thread;
+}
+
+void hmp_dirty_track(Monitor *mon, const QDict *qdict)
+{
+    MigrationState *s = migrate_get_current();
+
+    if (migration_is_running(s->state)) {
+        error_report(QERR_MIGRATION_ACTIVE);
+        return;
+    }
+
+    if (runstate_check(RUN_STATE_INMIGRATE)) {
+        error_report("Guest is waiting for an incoming migration");
+        return;
+    }
+
+    if (dirty_track_is_running()) {
+        error_report("There is a dirty tracking process in progress");
+        return;
+    }
+
+    dirty_track_init();
+    qemu_thread_create(&current_dirty_track_state.thread, "dirty tracking",
+                       dirty_track_thread, &current_dirty_track_state,
+                       QEMU_THREAD_JOINABLE);
+}
+
+void hmp_dirty_track_stop(Monitor *mon, const QDict *qdict)
+{
+    if (current_dirty_track_state.thread.thread == 0) {
+        error_report("There is no dirty tracking process in progress");
+        return;
+    }
+
+    current_dirty_track_state.quit = true;
+    qemu_thread_join(&current_dirty_track_state.thread);
+    monitor_printf(mon, "Dirty rate: %d pages/s\n",
+                   current_dirty_track_state.dirty_pages_rate);
+
+    dirty_track_cleanup();
+    current_dirty_track_state.thread.thread = 0;
+    current_dirty_track_state.dirty_pages_rate = 0;
+    current_dirty_track_state.quit = false;
+}
+
 void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
                                 Error **errp)
 {
diff --git a/migration/savevm.h b/migration/savevm.h
index ba64a7e271..216b9b7396 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -65,4 +65,6 @@ void qemu_loadvm_state_cleanup(void);
 int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis);
 int qemu_load_device_state(QEMUFile *f);
 
+bool dirty_track_is_running(void);
+
 #endif
-- 
2.18.4


