From: "Michael S. Tsirkin" <mst@redhat.com>
To: kvm@vger.kernel.org
Subject: [PATCH 01/20] exec: memory notifiers
Date: Thu, 4 Feb 2010 17:27:19 +0200	[thread overview]
Message-ID: <20100204152719.GB8461@redhat.com> (raw)
In-Reply-To: <cover.1265297173.git.mst@redhat.com>

This adds notifiers for physical memory changes: a set of callbacks that
vhost can register so it is told about guest memory mapping changes and
can update the kernel accordingly.  Down the road, the kvm code can be
switched over to these notifiers as well, instead of being called
directly from exec.c as is done now.
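
For illustration only (not part of this patch): a minimal sketch of how a
client such as vhost might hook into the new API, using the
CPUPhysMemoryClient interface added to cpu-common.h below.  The example_*
names and the callback bodies are placeholders.

    #include "cpu-common.h"

    static void example_set_memory(CPUPhysMemoryClient *client,
                                   target_phys_addr_t start_addr,
                                   ram_addr_t size,
                                   ram_addr_t phys_offset)
    {
        /* React to a guest physical memory mapping change,
         * e.g. update an in-kernel translation table. */
    }

    static int example_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
    {
        /* Fetch dirty page information for [start_addr, end_addr]. */
        return 0;
    }

    static int example_migration_log(CPUPhysMemoryClient *client, int enable)
    {
        /* Start or stop dirty logging for migration. */
        return 0;
    }

    static CPUPhysMemoryClient example_client = {
        .set_memory        = example_set_memory,
        .sync_dirty_bitmap = example_sync_dirty_bitmap,
        .migration_log     = example_migration_log,
    };

    static void example_init(void)
    {
        /* Registration replays all existing mappings through set_memory
         * (see phys_page_for_each below). */
        cpu_register_phys_memory_client(&example_client);
    }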

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 cpu-common.h |   19 ++++++++++
 exec.c       |  114 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 130 insertions(+), 3 deletions(-)

diff --git a/cpu-common.h b/cpu-common.h
index 5e59564..326513d 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -8,6 +8,7 @@
 #endif
 
 #include "bswap.h"
+#include "qemu-queue.h"
 
 /* address in the RAM (different from a physical address) */
 typedef unsigned long ram_addr_t;
@@ -62,6 +63,24 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
 void cpu_unregister_map_client(void *cookie);
 
+struct CPUPhysMemoryClient;
+typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
+struct CPUPhysMemoryClient {
+    void (*set_memory)(struct CPUPhysMemoryClient *client,
+                       target_phys_addr_t start_addr,
+                       ram_addr_t size,
+                       ram_addr_t phys_offset);
+    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
+                             target_phys_addr_t start_addr,
+                             target_phys_addr_t end_addr);
+    int (*migration_log)(struct CPUPhysMemoryClient *client,
+                         int enable);
+    QLIST_ENTRY(CPUPhysMemoryClient) list;
+};
+
+void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
+void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
+
 uint32_t ldub_phys(target_phys_addr_t addr);
 uint32_t lduw_phys(target_phys_addr_t addr);
 uint32_t ldl_phys(target_phys_addr_t addr);
diff --git a/exec.c b/exec.c
index ade09cb..fb3ce3f 100644
--- a/exec.c
+++ b/exec.c
@@ -1642,6 +1642,101 @@ const CPULogItem cpu_log_items[] = {
     { 0, NULL, NULL },
 };
 
+#ifndef CONFIG_USER_ONLY
+static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
+    = QLIST_HEAD_INITIALIZER(memory_client_list);
+
+static void cpu_notify_set_memory(target_phys_addr_t start_addr,
+				  ram_addr_t size,
+				  ram_addr_t phys_offset)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        client->set_memory(client, start_addr, size, phys_offset);
+    }
+}
+
+static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
+					target_phys_addr_t end)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        int r = client->sync_dirty_bitmap(client, start, end);
+        if (r < 0)
+            return r;
+    }
+    return 0;
+}
+
+static int cpu_notify_migration_log(int enable)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        int r = client->migration_log(client, enable);
+        if (r < 0)
+            return r;
+    }
+    return 0;
+}
+
+static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
+                                         CPUPhysMemoryClient *client)
+{
+    PhysPageDesc *pd;
+    int l1, l2;
+
+    for (l1 = 0; l1 < L1_SIZE; ++l1) {
+        pd = phys_map[l1];
+        if (!pd) {
+            continue;
+        }
+        for (l2 = 0; l2 < L2_SIZE; ++l2) {
+            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
+                continue;
+            }
+            client->set_memory(client, pd[l2].region_offset,
+                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
+        }
+    }
+}
+
+static void phys_page_for_each(CPUPhysMemoryClient *client)
+{
+#if TARGET_PHYS_ADDR_SPACE_BITS > 32
+
+#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
+#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
+#endif
+    void **phys_map = (void **)l1_phys_map;
+    int l1;
+    if (!l1_phys_map) {
+        return;
+    }
+    for (l1 = 0; l1 < L1_SIZE; ++l1) {
+        if (phys_map[l1]) {
+            phys_page_for_each_in_l1_map(phys_map[l1], client);
+        }
+    }
+#else
+    if (!l1_phys_map) {
+        return;
+    }
+    phys_page_for_each_in_l1_map(l1_phys_map, client);
+#endif
+}
+
+void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
+{
+    QLIST_INSERT_HEAD(&memory_client_list, client, list);
+    phys_page_for_each(client);
+}
+
+void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
+{
+    QLIST_REMOVE(client, list);
+}
+#endif
+
 static int cmp1(const char *s1, int n, const char *s2)
 {
     if (strlen(s2) != n)
@@ -1901,10 +1996,16 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
 
 int cpu_physical_memory_set_dirty_tracking(int enable)
 {
+    int ret = 0;
+    in_migration = enable;
     if (kvm_enabled()) {
-        return kvm_set_migration_log(enable);
+        ret = kvm_set_migration_log(enable);
     }
-    return 0;
+    if (ret < 0) {
+        return ret;
+    }
+    ret = cpu_notify_migration_log(!!enable);
+    return ret;
 }
 
 int cpu_physical_memory_get_dirty_tracking(void)
@@ -1917,8 +2018,13 @@ int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
 {
     int ret = 0;
 
-    if (kvm_enabled())
+    if (kvm_enabled()) {
         ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+    }
+    if (ret < 0) {
+        return ret;
+    }
+    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
     return ret;
 }
 
@@ -2333,6 +2439,8 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
     if (kvm_enabled())
         kvm_set_phys_mem(start_addr, size, phys_offset);
 
+    cpu_notify_set_memory(start_addr, size, phys_offset);
+
     if (phys_offset == IO_MEM_UNASSIGNED) {
         region_offset = start_addr;
     }
-- 
1.6.6.144.g5c3af


Thread overview: 20+ messages
     [not found] <cover.1265297173.git.mst@redhat.com>
2010-02-04 15:27 ` Michael S. Tsirkin [this message]
2010-02-04 15:27 ` [PATCH 02/20] kvm: move kvm_set_phys_mem around Michael S. Tsirkin
2010-02-04 15:27 ` [PATCH 03/20] kvm: move kvm to use memory notifiers Michael S. Tsirkin
2010-02-04 15:27 ` [PATCH 04/20] qemu-kvm: fixup after merging " Michael S. Tsirkin
2010-02-04 15:27 ` [PATCH 05/20] kvm: add API to set ioeventfd Michael S. Tsirkin
2010-02-04 15:27 ` [PATCH 06/20] notifier: event notifier implementation Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 07/20] virtio: add notifier support Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 08/20] virtio: add APIs for queue fields Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 09/20] virtio: add status change callback Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 10/20] virtio: move typedef to qemu-common Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 11/20] virtio-pci: fill in notifier support Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 12/20] tap: add interface to get device fd Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 13/20] vhost: vhost net support Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 14/20] tap: add vhost/vhostfd options Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 15/20] tap: add API to retrieve vhost net header Michael S. Tsirkin
2010-02-04 15:28 ` [PATCH 16/20] virtio-net: vhost net support Michael S. Tsirkin
2010-02-04 15:29 ` [PATCH 17/20] qemu-kvm: add vhost.h header Michael S. Tsirkin
2010-02-04 15:29 ` [PATCH 18/20] kvm: irqfd support Michael S. Tsirkin
2010-02-04 15:29 ` [PATCH 19/20] msix: add mask/unmask notifiers Michael S. Tsirkin
2010-02-04 15:29 ` [PATCH 20/20] virtio-pci: irqfd support Michael S. Tsirkin
