qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>
Subject: [PATCH v6 20/51] i386/xen: handle VCPUOP_register_vcpu_info
Date: Tue, 10 Jan 2023 12:20:11 +0000	[thread overview]
Message-ID: <20230110122042.1562155-21-dwmw2@infradead.org> (raw)
In-Reply-To: <20230110122042.1562155-1-dwmw2@infradead.org>

From: Joao Martins <joao.m.martins@oracle.com>

Handle the hypercall to set a per-vCPU vcpu_info, and also wire up the
default vcpu_info area in the shared_info page for the first 32 vCPUs.

To avoid deadlock within KVM a vCPU thread must set its *own* vcpu_info
rather than it being set from the context in which the hypercall is
invoked.

Add the vcpu_info (and default) GPA to the vmstate_x86_cpu for migration,
and restore it in kvm_arch_put_registers() appropriately.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 target/i386/cpu.h            |   2 +
 target/i386/kvm/kvm.c        |  17 ++++
 target/i386/kvm/trace-events |   1 +
 target/i386/kvm/xen-emu.c    | 152 ++++++++++++++++++++++++++++++++++-
 target/i386/kvm/xen-emu.h    |   2 +
 target/i386/machine.c        |  19 +++++
 6 files changed, 190 insertions(+), 3 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c6c57baed5..109b2e5669 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1788,6 +1788,8 @@ typedef struct CPUArchState {
 #endif
 #if defined(CONFIG_KVM)
     struct kvm_nested_state *nested_state;
+    uint64_t xen_vcpu_info_gpa;
+    uint64_t xen_vcpu_info_default_gpa;
 #endif
 #if defined(CONFIG_HVF)
     HVFX86LazyFlags hvf_lflags;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 7cbfbed492..fa08cb6574 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -4734,6 +4734,15 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
         kvm_arch_set_tsc_khz(cpu);
     }
 
+#ifdef CONFIG_XEN_EMU
+    if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) {
+        ret = kvm_put_xen_state(cpu);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+#endif
+
     ret = kvm_getput_regs(x86_cpu, 1);
     if (ret < 0) {
         return ret;
@@ -4833,6 +4842,14 @@ int kvm_arch_get_registers(CPUState *cs)
     if (ret < 0) {
         goto out;
     }
+#ifdef CONFIG_XEN_EMU
+    if (xen_mode == XEN_EMULATE) {
+        ret = kvm_get_xen_state(cs);
+        if (ret < 0) {
+            goto out;
+        }
+    }
+#endif
     ret = 0;
  out:
     cpu_sync_bndcs_hflags(&cpu->env);
diff --git a/target/i386/kvm/trace-events b/target/i386/kvm/trace-events
index 0a47c26e80..14e54dfca5 100644
--- a/target/i386/kvm/trace-events
+++ b/target/i386/kvm/trace-events
@@ -9,3 +9,4 @@ kvm_x86_update_msi_routes(int num) "Updated %d MSI routes"
 # xen-emu.c
 kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d cpl %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIx64
 kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64
+kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index e95a2aee35..dc0dbffa40 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -118,6 +118,8 @@ int kvm_xen_init(KVMState *s, uint32_t hypercall_msr)
 
 int kvm_xen_init_vcpu(CPUState *cs)
 {
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
     int err;
 
     /*
@@ -141,6 +143,9 @@ int kvm_xen_init_vcpu(CPUState *cs)
         }
     }
 
+    env->xen_vcpu_info_gpa = INVALID_GPA;
+    env->xen_vcpu_info_default_gpa = INVALID_GPA;
+
     return 0;
 }
 
@@ -186,10 +191,58 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
     return true;
 }
 
+static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
+{
+    struct kvm_xen_vcpu_attr xhsi;
+
+    xhsi.type = type;
+    xhsi.u.gpa = gpa;
+
+    trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa);
+
+    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
+}
+
+static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_info_default_gpa = data.host_ulong;
+
+    /* Changing the default does nothing if a vcpu_info was explicitly set. */
+    if (env->xen_vcpu_info_gpa == INVALID_GPA) {
+        kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+                              env->xen_vcpu_info_default_gpa);
+    }
+}
+
+static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_info_gpa = data.host_ulong;
+
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+                          env->xen_vcpu_info_gpa);
+}
+
+static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_info_gpa = INVALID_GPA;
+    env->xen_vcpu_info_default_gpa = INVALID_GPA;
+
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
+}
+
 static int xen_set_shared_info(uint64_t gfn)
 {
     uint64_t gpa = gfn << TARGET_PAGE_BITS;
-    int err;
+    int i, err;
 
     /*
      * The xen_overlay device tells KVM about it too, since it had to
@@ -204,6 +257,15 @@ static int xen_set_shared_info(uint64_t gfn)
 
     trace_kvm_xen_set_shared_info(gfn);
 
+    for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) {
+        CPUState *cpu = qemu_get_cpu(i);
+        if (cpu) {
+            async_run_on_cpu(cpu, do_set_vcpu_info_default_gpa,
+                             RUN_ON_CPU_HOST_ULONG(gpa));
+        }
+        gpa += sizeof(vcpu_info_t);
+    }
+
     return err;
 }
 
@@ -361,15 +423,43 @@ static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
     }
 }
 
+static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
+                                     uint64_t arg)
+{
+    struct vcpu_register_vcpu_info rvi;
+    uint64_t gpa;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(rvi) == 16);
+    qemu_build_assert(sizeof(struct vcpu_info) == 64);
+
+    if (!target) {
+        return -ENOENT;
+    }
+
+    if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) {
+        return -EFAULT;
+    }
+
+    if (rvi.offset > TARGET_PAGE_SIZE - sizeof(struct vcpu_info)) {
+        return -EINVAL;
+    }
+
+    gpa = ((rvi.mfn << TARGET_PAGE_BITS) + rvi.offset);
+    async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
+    return 0;
+}
+
 static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                   int cmd, int vcpu_id, uint64_t arg)
 {
+    CPUState *dest = qemu_get_cpu(vcpu_id);
+    CPUState *cs = CPU(cpu);
     int err;
 
     switch (cmd) {
     case VCPUOP_register_vcpu_info:
-        /* no vcpu info placement for now */
-        err = -ENOSYS;
+        err = vcpuop_register_vcpu_info(cs, dest, arg);
         break;
 
     default:
@@ -382,8 +472,13 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
 
 static int kvm_xen_soft_reset(void)
 {
+    CPUState *cpu;
     int err;
 
+    CPU_FOREACH(cpu) {
+        async_run_on_cpu(cpu, do_vcpu_soft_reset, RUN_ON_CPU_NULL);
+    }
+
     err = xen_overlay_map_shinfo_page(INVALID_GFN);
     if (err) {
         return err;
@@ -529,3 +624,54 @@ int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
                             exit->u.hcall.result);
     return 0;
 }
+
+int kvm_put_xen_state(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    uint64_t gpa;
+    int ret;
+
+    gpa = env->xen_vcpu_info_gpa;
+    if (gpa == INVALID_GPA) {
+        gpa = env->xen_vcpu_info_default_gpa;
+    }
+
+    if (gpa != INVALID_GPA) {
+        ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+int kvm_get_xen_state(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    uint64_t gpa;
+
+    /*
+     * The kernel does not mark vcpu_info as dirty when it delivers interrupts
+     * to it. It's up to userspace to *assume* that any page shared thus is
+     * always considered dirty. The shared_info page is different since it's
+     * an overlay and migrated separately anyway.
+     */
+    gpa = env->xen_vcpu_info_gpa;
+    if (gpa == INVALID_GPA) {
+        gpa = env->xen_vcpu_info_default_gpa;
+    }
+    if (gpa != INVALID_GPA) {
+        MemoryRegionSection mrs = memory_region_find(get_system_memory(),
+                                                     gpa,
+                                                     sizeof(struct vcpu_info));
+        if (mrs.mr && mrs.size >= sizeof(struct vcpu_info)) {
+            memory_region_set_dirty(mrs.mr, mrs.offset_within_region,
+                                    sizeof(struct vcpu_info));
+        }
+    }
+
+    return 0;
+}
diff --git a/target/i386/kvm/xen-emu.h b/target/i386/kvm/xen-emu.h
index 21faf6bf38..452605699a 100644
--- a/target/i386/kvm/xen-emu.h
+++ b/target/i386/kvm/xen-emu.h
@@ -26,5 +26,7 @@
 int kvm_xen_init(KVMState *s, uint32_t hypercall_msr);
 int kvm_xen_init_vcpu(CPUState *cs);
 int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit);
+int kvm_put_xen_state(CPUState *cs);
+int kvm_get_xen_state(CPUState *cs);
 
 #endif /* QEMU_I386_KVM_XEN_EMU_H */
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 310b125235..1215e616c8 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -6,8 +6,10 @@
 #include "kvm/hyperv.h"
 #include "hw/i386/x86.h"
 #include "kvm/kvm_i386.h"
+#include "hw/xen/xen.h"
 
 #include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
 #include "sysemu/tcg.h"
 
 #include "qemu/error-report.h"
@@ -1257,6 +1259,22 @@ static const VMStateDescription vmstate_nested_state = {
     }
 };
 
+static bool xen_vcpu_needed(void *opaque)
+{
+    return (xen_mode == XEN_EMULATE);
+}
+
+static const VMStateDescription vmstate_xen_vcpu = {
+    .name = "cpu/xen_vcpu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = xen_vcpu_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU),
+        VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
 #endif
 
 static bool mcg_ext_ctl_needed(void *opaque)
@@ -1716,6 +1734,7 @@ const VMStateDescription vmstate_x86_cpu = {
 #endif
 #ifdef CONFIG_KVM
         &vmstate_nested_state,
+        &vmstate_xen_vcpu,
 #endif
         &vmstate_msr_tsx_ctrl,
         &vmstate_msr_intel_sgx,
-- 
2.35.3



  parent reply	other threads:[~2023-01-10 12:47 UTC|newest]

Thread overview: 94+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-01-10 12:19 [PATCH v6 00/51] Xen support under KVM David Woodhouse
2023-01-10 12:19 ` [PATCH v6 01/51] include: import Xen public headers to include/standard-headers/ David Woodhouse
2023-01-10 12:19 ` [PATCH v6 02/51] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2023-01-10 12:19 ` [PATCH v6 03/51] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2023-01-10 12:19 ` [PATCH v6 04/51] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2023-01-10 12:19 ` [PATCH v6 05/51] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2023-01-10 12:19 ` [PATCH v6 06/51] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2023-01-10 12:19 ` [PATCH v6 07/51] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2023-01-10 12:19 ` [PATCH v6 08/51] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2023-01-16 16:20   ` Paul Durrant
2023-01-16 17:56     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 09/51] i386/xen: handle guest hypercalls David Woodhouse
2023-01-16 16:24   ` Paul Durrant
2023-01-16 17:57     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 10/51] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2023-01-10 12:20 ` [PATCH v6 11/51] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2023-01-16 16:27   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 12/51] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2023-01-16 16:36   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 13/51] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-01-16 16:57   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 14/51] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2023-01-16 17:17   ` Paul Durrant
2023-01-16 19:45     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 15/51] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2023-01-16 17:20   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 16/51] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2023-01-16 17:28   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 17/51] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2023-01-16 17:36   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 18/51] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2023-01-16 17:39   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 19/51] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2023-01-16 17:40   ` Paul Durrant
2023-01-10 12:20 ` David Woodhouse [this message]
2023-01-16 17:46   ` [PATCH v6 20/51] i386/xen: handle VCPUOP_register_vcpu_info Paul Durrant
2023-01-10 12:20 ` [PATCH v6 21/51] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2023-01-16 17:53   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 22/51] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2023-01-16 17:56   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 23/51] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2023-01-16 17:59   ` Paul Durrant
2023-01-16 19:54     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 24/51] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2023-01-10 12:20 ` [PATCH v6 25/51] i386/xen: implement HVMOP_set_param David Woodhouse
2023-01-16 18:00   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 26/51] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2023-01-10 12:20 ` [PATCH v6 27/51] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-01-10 12:20 ` [PATCH v6 28/51] hw/xen: Implement EVTCHNOP_status David Woodhouse
2023-01-10 12:20 ` [PATCH v6 29/51] hw/xen: Implement EVTCHNOP_close David Woodhouse
2023-01-10 12:20 ` [PATCH v6 30/51] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2023-01-10 12:20 ` [PATCH v6 31/51] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2023-01-10 12:20 ` [PATCH v6 32/51] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2023-01-10 12:20 ` [PATCH v6 33/51] hw/xen: Implement EVTCHNOP_send David Woodhouse
2023-01-10 12:20 ` [PATCH v6 34/51] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2023-01-10 12:20 ` [PATCH v6 35/51] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2023-01-10 12:20 ` [PATCH v6 36/51] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2023-01-10 12:20 ` [PATCH v6 37/51] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2023-01-10 12:20 ` [PATCH v6 38/51] i386/xen: add monitor commands to test event injection David Woodhouse
2023-01-11 14:28   ` Dr. David Alan Gilbert
2023-01-11 14:57     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 39/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2023-01-10 12:20 ` [PATCH v6 40/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2023-01-10 12:20 ` [PATCH v6 41/51] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2023-01-10 12:20 ` [PATCH v6 42/51] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2023-01-10 12:20 ` [PATCH v6 43/51] hw/xen: Support mapping grant frames David Woodhouse
2023-01-10 12:20 ` [PATCH v6 44/51] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2023-01-10 12:20 ` [PATCH v6 45/51] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2023-01-10 12:20 ` [PATCH v6 46/51] i386/xen: handle PV timer hypercalls David Woodhouse
2023-01-10 12:20 ` [PATCH v6 47/51] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2023-01-10 12:20 ` [PATCH v6 48/51] i386/xen: handle HVMOP_get_param David Woodhouse
2023-01-10 12:20 ` [PATCH v6 49/51] hw/xen: Add backend implementation of interdomain event channel support David Woodhouse
2023-01-10 12:20 ` [PATCH v6 50/51] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-01-10 12:20 ` [PATCH v6 51/51] hw/xen: Add basic ring handling to xenstore David Woodhouse
2023-01-10 12:37 ` [RFC PATCH v1 00/15] Xen PV backend support for KVM/Xen guests David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 01/15] hw/xen: Add evtchn operations to allow redirection to internal emulation David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 02/15] hw/xen: Add emulated evtchn ops David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 03/15] hw/xen: Add gnttab operations to allow redirection to internal emulation David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 04/15] hw/xen: Pass grant ref to gnttab unmap David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 05/15] hw/xen: Add foreignmem operations to allow redirection to internal emulation David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 06/15] hw/xen: Add xenstore " David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 07/15] hw/xen: Move xenstore_store_pv_console_info to xen_console.c David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 08/15] hw/xen: Use XEN_PAGE_SIZE in PV backend drivers David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 09/15] hw/xen: Rename xen_common.h to xen_native.h David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 10/15] hw/xen: Build PV backend drivers for XENFV_MACHINE David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 11/15] hw/xen: Map guest XENSTORE_PFN grant in emulated Xenstore David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 12/15] hw/xen: Add backend implementation of grant table operations David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 13/15] hw/xen: Implement soft reset for emulated gnttab David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 14/15] hw/xen: Remove old version of Xen headers David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 15/15] i386/xen: Initialize XenBus and legacy backends from pc_init1() David Woodhouse
2023-01-10 15:43   ` [RFC PATCH v1 00/15] Xen PV backend support for KVM/Xen guests Joao Martins
2023-01-10 15:47     ` Joao Martins
2023-01-10 16:52     ` David Woodhouse
2023-01-10 17:26       ` Joao Martins

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230110122042.1562155-21-dwmw2@infradead.org \
    --to=dwmw2@infradead.org \
    --cc=alex.bennee@linaro.org \
    --cc=ankur.a.arora@oracle.com \
    --cc=cfontana@suse.de \
    --cc=dgilbert@redhat.com \
    --cc=joao.m.martins@oracle.com \
    --cc=julien@xen.org \
    --cc=paul@xen.org \
    --cc=pbonzini@redhat.com \
    --cc=philmd@linaro.org \
    --cc=qemu-devel@nongnu.org \
    --cc=quintela@redhat.com \
    --cc=thuth@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).