From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
	armbru@redhat.com
Subject: [PATCH v7 49/51] hw/xen: Add backend implementation of interdomain event channel support
Date: Mon, 16 Jan 2023 21:58:03 +0000
Message-ID: <20230116215805.1123514-50-dwmw2@infradead.org>
In-Reply-To: <20230116215805.1123514-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

This provides the QEMU side of interdomain event channels, allowing events
to be sent to and from the guest.

The API mirrors that of libxenevtchn, and in time both this emulated
implementation and the real Xen one will be exposed through ops structures
so that the PV backend drivers can use whichever is appropriate.
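
As a rough illustration only (not part of this patch; all names below are
hypothetical), that indirection might end up looking something like the
sketch here, with one instance backed by libxenevtchn and another by the
emulation added in this series:

    /* Hypothetical sketch; the real ops structures come in a later series. */
    struct evtchn_backend_ops {
        struct xenevtchn_handle *(*open)(void);
        int (*bind_interdomain)(struct xenevtchn_handle *xc, uint32_t domid,
                                evtchn_port_t guest_port);
        int (*unbind)(struct xenevtchn_handle *xc, evtchn_port_t port);
        int (*close)(struct xenevtchn_handle *xc);
        int (*get_fd)(struct xenevtchn_handle *xc);
        int (*notify)(struct xenevtchn_handle *xc, evtchn_port_t port);
        int (*unmask)(struct xenevtchn_handle *xc, evtchn_port_t port);
        int (*pending)(struct xenevtchn_handle *xc);
    };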

For now, this implementation can be used directly by our XenStore, which
will exist in emulated mode only.
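
For reference, here is a minimal, hedged sketch of how a backend such as
the emulated XenStore might drive this API (the example_* names, error
handling and ring processing are illustrative only, not taken from this
patch; it assumes hw/i386/kvm/xen_evtchn.h, hw/xen/xen.h for xen_domid,
and qemu/main-loop.h for qemu_set_fd_handler()):

    static struct xenevtchn_handle *xeh;

    /* Runs in the main loop when the guest notifies our side of the channel */
    static void example_evtchn_event(void *opaque)
    {
        int port = xen_be_evtchn_pending(xeh);

        if (port <= 0) {
            return;
        }
        xen_be_evtchn_unmask(xeh, port);
        /* ... process the shared ring here ... */
        xen_be_evtchn_notify(xeh, port);    /* kick the guest in return */
    }

    static int example_connect(evtchn_port_t guest_port)
    {
        xeh = xen_be_evtchn_open();
        if (!xeh) {
            return -errno;
        }
        if (xen_be_evtchn_bind_interdomain(xeh, xen_domid, guest_port) < 0) {
            xen_be_evtchn_close(xeh);
            return -1;
        }
        qemu_set_fd_handler(xen_be_evtchn_fd(xeh), example_evtchn_event,
                            NULL, NULL);
        return 0;
    }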

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/i386/kvm/xen_evtchn.c | 340 ++++++++++++++++++++++++++++++++++++++-
 hw/i386/kvm/xen_evtchn.h |  19 +++
 2 files changed, 352 insertions(+), 7 deletions(-)

diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 3340cbb109..dc58843cf2 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -37,6 +37,7 @@
 #include "sysemu/kvm.h"
 #include "sysemu/kvm_xen.h"
 #include <linux/kvm.h>
+#include <sys/eventfd.h>
 
 #include "standard-headers/xen/memory.h"
 #include "standard-headers/xen/hvm/params.h"
@@ -87,6 +88,13 @@ struct compat_shared_info {
 
 #define COMPAT_EVTCHN_2L_NR_CHANNELS            1024
 
+/* Local private implementation of struct xenevtchn_handle */
+struct xenevtchn_handle {
+    evtchn_port_t be_port;
+    evtchn_port_t guest_port; /* Or zero for unbound */
+    int fd;
+};
+
 /*
  * For unbound/interdomain ports there are only two possible remote
  * domains; self and QEMU. Use a single high bit in type_val for that,
@@ -110,6 +118,8 @@ struct XenEvtchnState {
     uint32_t nr_ports;
     XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS];
     qemu_irq gsis[GSI_NUM_PINS];
+
+    struct xenevtchn_handle *be_handles[EVTCHN_2L_NR_CHANNELS];
 };
 
 struct XenEvtchnState *xen_evtchn_singleton;
@@ -117,6 +127,18 @@ struct XenEvtchnState *xen_evtchn_singleton;
 /* Top bits of callback_param are the type (HVM_PARAM_CALLBACK_TYPE_xxx) */
 #define CALLBACK_VIA_TYPE_SHIFT 56
 
+static void unbind_backend_ports(XenEvtchnState *s);
+
+static int xen_evtchn_pre_load(void *opaque)
+{
+    XenEvtchnState *s = opaque;
+
+    /* Unbind all the backend-side ports; they need to rebind */
+    unbind_backend_ports(s);
+
+    return 0;
+}
+
 static int xen_evtchn_post_load(void *opaque, int version_id)
 {
     XenEvtchnState *s = opaque;
@@ -150,6 +172,7 @@ static const VMStateDescription xen_evtchn_vmstate = {
     .version_id = 1,
     .minimum_version_id = 1,
     .needed = xen_evtchn_is_needed,
+    .pre_load = xen_evtchn_pre_load,
     .post_load = xen_evtchn_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_UINT64(callback_param, XenEvtchnState),
@@ -416,6 +439,20 @@ static int assign_kernel_port(uint16_t type, evtchn_port_t port,
     return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
 }
 
+static int assign_kernel_eventfd(uint16_t type, evtchn_port_t port, int fd)
+{
+    struct kvm_xen_hvm_attr ha;
+
+    ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+    ha.u.evtchn.send_port = port;
+    ha.u.evtchn.type = type;
+    ha.u.evtchn.flags = 0;
+    ha.u.evtchn.deliver.eventfd.port = 0;
+    ha.u.evtchn.deliver.eventfd.fd = fd;
+
+    return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
+}
+
 static bool valid_port(evtchn_port_t port)
 {
     if (!port) {
@@ -434,6 +471,32 @@ static bool valid_vcpu(uint32_t vcpu)
     return !!qemu_get_cpu(vcpu);
 }
 
+static void unbind_backend_ports(XenEvtchnState *s)
+{
+    XenEvtchnPort *p;
+    int i;
+
+    for (i = 1; i < s->nr_ports; i++) {
+        p = &s->port_table[i];
+        if (p->type == EVTCHNSTAT_interdomain &&
+            (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) ) {
+            evtchn_port_t be_port = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
+
+            if (s->be_handles[be_port]) {
+                /* This part will be overwritten on the load anyway. */
+                p->type = EVTCHNSTAT_unbound;
+                p->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
+
+                /* Leave the backend port open and unbound too. */
+                if (kvm_xen_has_cap(EVTCHN_SEND)) {
+                    deassign_kernel_port(i);
+                }
+                s->be_handles[be_port]->guest_port = 0;
+            }
+        }
+    }
+}
+
 int xen_evtchn_status_op(struct evtchn_status *status)
 {
     XenEvtchnState *s = xen_evtchn_singleton;
@@ -869,7 +932,14 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port)
 
     case EVTCHNSTAT_interdomain:
         if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
-            /* Not yet implemented. This can't happen! */
+            uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
+            struct xenevtchn_handle *xc = s->be_handles[be_port];
+            if (xc) {
+                if (kvm_xen_has_cap(EVTCHN_SEND)) {
+                    deassign_kernel_port(port);
+                }
+                xc->guest_port = 0;
+            }
         } else {
             /* Loopback interdomain */
             XenEvtchnPort *rp = &s->port_table[p->type_val];
@@ -1101,8 +1171,27 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain)
     }
 
     if (interdomain->remote_dom == DOMID_QEMU) {
-        /* We haven't hooked up QEMU's PV drivers to this yet */
-        ret = -ENOSYS;
+        struct xenevtchn_handle *xc = s->be_handles[interdomain->remote_port];
+        XenEvtchnPort *lp = &s->port_table[interdomain->local_port];
+
+        if (!xc) {
+            ret = -ENOENT;
+            goto out_free_port;
+        }
+
+        if (xc->guest_port) {
+            ret = -EBUSY;
+            goto out_free_port;
+        }
+
+        assert(xc->be_port == interdomain->remote_port);
+        xc->guest_port = interdomain->local_port;
+        if (kvm_xen_has_cap(EVTCHN_SEND)) {
+            assign_kernel_eventfd(lp->type, xc->guest_port, xc->fd);
+        }
+        lp->type = EVTCHNSTAT_interdomain;
+        lp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU | interdomain->remote_port;
+        ret = 0;
     } else {
         /* Loopback */
         XenEvtchnPort *rp = &s->port_table[interdomain->remote_port];
@@ -1120,6 +1209,7 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain)
         }
     }
 
+ out_free_port:
     if (ret) {
         free_port(s, interdomain->local_port);
     }
@@ -1184,11 +1274,16 @@ int xen_evtchn_send_op(struct evtchn_send *send)
         if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
             /*
              * This is an event from the guest to qemu itself, which is
-             * serving as the driver domain. Not yet implemented; it will
-             * be hooked up to the qemu implementation of xenstore,
-             * console, PV net/block drivers etc.
+             * serving as the driver domain.
              */
-            ret = -ENOSYS;
+            uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
+            struct xenevtchn_handle *xc = s->be_handles[be_port];
+            if (xc) {
+                eventfd_write(xc->fd, 1);
+                ret = 0;
+            } else {
+                ret = -ENOENT;
+            }
         } else {
             /* Loopback interdomain ports; just a complex IPI */
             set_port_pending(s, p->type_val);
@@ -1244,6 +1339,237 @@ int xen_evtchn_set_port(uint16_t port)
     return ret;
 }
 
+struct xenevtchn_handle *xen_be_evtchn_open(void)
+{
+    struct xenevtchn_handle *xc = g_new0(struct xenevtchn_handle, 1);
+
+    xc->fd = eventfd(0, EFD_CLOEXEC);
+    if (xc->fd < 0) {
+        free(xc);
+        return NULL;
+    }
+
+    return xc;
+}
+
+static int find_be_port(XenEvtchnState *s, struct xenevtchn_handle *xc)
+{
+    int i;
+
+    for (i = 1; i < EVTCHN_2L_NR_CHANNELS; i++) {
+        if (!s->be_handles[i]) {
+            s->be_handles[i] = xc;
+            xc->be_port = i;
+            return i;
+        }
+    }
+    return 0;
+}
+
+int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
+                                   evtchn_port_t guest_port)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    XenEvtchnPort *gp;
+    uint16_t be_port = 0;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!xc) {
+        return -EFAULT;
+    }
+
+    if (domid != xen_domid) {
+        return -ESRCH;
+    }
+
+    if (!valid_port(guest_port)) {
+        return -EINVAL;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    /* The guest has to have an unbound port waiting for us to bind */
+    gp = &s->port_table[guest_port];
+
+    switch (gp->type) {
+    case EVTCHNSTAT_interdomain:
+        /* Allow rebinding after migration, preserve port # if possible */
+        be_port = gp->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
+        assert(be_port != 0);
+        if (!s->be_handles[be_port]) {
+            s->be_handles[be_port] = xc;
+            xc->guest_port = guest_port;
+            ret = xc->be_port = be_port;
+            if (kvm_xen_has_cap(EVTCHN_SEND)) {
+                assign_kernel_eventfd(gp->type, guest_port, xc->fd);
+            }
+            break;
+        }
+        /* fall through */
+
+    case EVTCHNSTAT_unbound:
+        be_port = find_be_port(s, xc);
+        if (!be_port) {
+            ret = -ENOSPC;
+            goto out;
+        }
+
+        gp->type = EVTCHNSTAT_interdomain;
+        gp->type_val = be_port | PORT_INFO_TYPEVAL_REMOTE_QEMU;
+        xc->guest_port = guest_port;
+        if (kvm_xen_has_cap(EVTCHN_SEND)) {
+            assign_kernel_eventfd(gp->type, guest_port, xc->fd);
+        }
+        ret = be_port;
+        break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+ out:
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
+
+int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!xc) {
+        return -EFAULT;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    if (port && port != xc->be_port) {
+        ret = -EINVAL;
+        goto out;
+    }
+
+    if (xc->guest_port) {
+        XenEvtchnPort *gp = &s->port_table[xc->guest_port];
+
+        /* This should never *not* be true */
+        if (gp->type == EVTCHNSTAT_interdomain) {
+            gp->type = EVTCHNSTAT_unbound;
+            gp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
+        }
+
+        if (kvm_xen_has_cap(EVTCHN_SEND)) {
+            deassign_kernel_port(xc->guest_port);
+        }
+        xc->guest_port = 0;
+    }
+
+    s->be_handles[xc->be_port] = NULL;
+    xc->be_port = 0;
+    ret = 0;
+ out:
+    qemu_mutex_unlock(&s->port_lock);
+    return ret;
+}
+
+int xen_be_evtchn_close(struct xenevtchn_handle *xc)
+{
+    if (!xc) {
+        return -EFAULT;
+    }
+
+    xen_be_evtchn_unbind(xc, 0);
+
+    close(xc->fd);
+    free(xc);
+    return 0;
+}
+
+int xen_be_evtchn_fd(struct xenevtchn_handle *xc)
+{
+    if (!xc) {
+        return -1;
+    }
+    return xc->fd;
+}
+
+int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!xc) {
+        return -EFAULT;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    if (xc->guest_port) {
+        set_port_pending(s, xc->guest_port);
+        ret = 0;
+    } else {
+        ret = -ENOTCONN;
+    }
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
+
+int xen_be_evtchn_pending(struct xenevtchn_handle *xc)
+{
+    uint64_t val;
+
+    if (!xc) {
+        return -EFAULT;
+    }
+
+    if (!xc->be_port) {
+        return 0;
+    }
+
+    if (eventfd_read(xc->fd, &val)) {
+        return -errno;
+    }
+
+    return val ? xc->be_port : 0;
+}
+
+int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port)
+{
+    if (!xc) {
+        return -EFAULT;
+    }
+
+    if (xc->be_port != port) {
+        return -EINVAL;
+    }
+
+    /*
+     * We don't actually do anything to unmask it; the event was already
+     * consumed in xen_be_evtchn_pending().
+     */
+    return 0;
+}
+
+int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc)
+{
+    return xc->guest_port;
+}
+
 static const char *type_names[] = {
     "closed",
     "unbound",
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index d85b45067b..b7b6f4e592 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -14,6 +14,8 @@
 
 #include "hw/sysbus.h"
 
+typedef uint32_t evtchn_port_t;
+
 void xen_evtchn_create(void);
 int xen_evtchn_soft_reset(void);
 int xen_evtchn_set_callback_param(uint64_t param);
@@ -22,6 +24,23 @@ void xen_evtchn_set_callback_level(int level);
 
 int xen_evtchn_set_port(uint16_t port);
 
+/*
+ * These functions mirror the libxenevtchn library API, providing the QEMU
+ * backend side of "interdomain" event channels.
+ */
+struct xenevtchn_handle;
+struct xenevtchn_handle *xen_be_evtchn_open(void);
+int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
+                                   evtchn_port_t guest_port);
+int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port);
+int xen_be_evtchn_close(struct xenevtchn_handle *xc);
+int xen_be_evtchn_fd(struct xenevtchn_handle *xc);
+int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port);
+int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port);
+int xen_be_evtchn_pending(struct xenevtchn_handle *xc);
+/* Apart from this which is a local addition */
+int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc);
+
 void hmp_xen_event_inject(Monitor *mon, const QDict *qdict);
 void hmp_xen_event_list(Monitor *mon, const QDict *qdict);
 
-- 
2.39.0



Thread overview: 97+ messages
2023-01-16 21:57 [PATCH v7 00/51] Xen support under KVM David Woodhouse
2023-01-16 21:57 ` [PATCH v7 01/51] include: import Xen public headers to include/standard-headers/ David Woodhouse
2023-01-19 13:02   ` Alex Bennée
2023-01-19 15:06     ` David Woodhouse
2023-01-16 21:57 ` [PATCH v7 02/51] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2023-01-16 21:57 ` [PATCH v7 03/51] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2023-01-16 21:57 ` [PATCH v7 04/51] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2023-01-16 21:57 ` [PATCH v7 05/51] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2023-01-16 21:57 ` [PATCH v7 06/51] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2023-01-16 21:57 ` [PATCH v7 07/51] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2023-01-16 21:57 ` [PATCH v7 08/51] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2023-01-17  9:44   ` Paul Durrant
2023-01-16 21:57 ` [PATCH v7 09/51] i386/xen: handle guest hypercalls David Woodhouse
2023-01-16 21:57 ` [PATCH v7 10/51] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2023-01-16 21:57 ` [PATCH v7 11/51] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2023-01-16 21:57 ` [PATCH v7 12/51] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2023-01-16 21:57 ` [PATCH v7 13/51] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-01-16 21:57 ` [PATCH v7 14/51] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2023-01-17  9:47   ` Paul Durrant
2023-01-16 21:57 ` [PATCH v7 15/51] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2023-01-16 21:57 ` [PATCH v7 16/51] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2023-01-16 21:57 ` [PATCH v7 17/51] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2023-01-16 21:57 ` [PATCH v7 18/51] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2023-01-16 21:57 ` [PATCH v7 19/51] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2023-01-16 21:57 ` [PATCH v7 20/51] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2023-01-16 21:57 ` [PATCH v7 21/51] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2023-01-16 21:57 ` [PATCH v7 22/51] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2023-01-16 21:57 ` [PATCH v7 23/51] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2023-01-17  9:53   ` Paul Durrant
2023-01-17  9:59     ` David Woodhouse
2023-01-16 21:57 ` [PATCH v7 24/51] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2023-01-16 21:57 ` [PATCH v7 25/51] i386/xen: implement HVMOP_set_param David Woodhouse
2023-01-16 21:57 ` [PATCH v7 26/51] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2023-01-17 10:00   ` Paul Durrant
2023-01-17 10:23     ` David Woodhouse
2023-01-17 10:56       ` Paul Durrant
2023-01-17 11:02         ` David Woodhouse
2023-01-17 11:06           ` Paul Durrant
2023-01-17 11:24             ` David Woodhouse
2023-01-17 11:53               ` Paul Durrant
2023-01-17 12:12                 ` David Woodhouse
2023-01-17 13:01                 ` [PATCH v7.1 " David Woodhouse
2023-01-16 21:57 ` [PATCH v7 27/51] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-01-17 11:11   ` Paul Durrant
2023-01-17 12:31     ` David Woodhouse
2023-01-17 13:11       ` Paul Durrant
2023-01-17 12:01   ` Paul Durrant
2023-01-16 21:57 ` [PATCH v7 28/51] hw/xen: Implement EVTCHNOP_status David Woodhouse
2023-01-16 21:57 ` [PATCH v7 29/51] hw/xen: Implement EVTCHNOP_close David Woodhouse
2023-01-16 21:57 ` [PATCH v7 30/51] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2023-01-16 21:57 ` [PATCH v7 31/51] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2023-01-16 21:57 ` [PATCH v7 32/51] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2023-01-16 21:57 ` [PATCH v7 33/51] hw/xen: Implement EVTCHNOP_send David Woodhouse
2023-01-16 21:57 ` [PATCH v7 34/51] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2023-01-16 21:57 ` [PATCH v7 35/51] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2023-01-16 21:57 ` [PATCH v7 36/51] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2023-01-16 21:57 ` [PATCH v7 37/51] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2023-01-16 21:57 ` [PATCH v7 38/51] i386/xen: add monitor commands to test event injection David Woodhouse
2023-01-17 10:08   ` Markus Armbruster
2023-01-17 10:41     ` David Woodhouse
2023-01-17 11:31     ` David Woodhouse
2023-01-19 11:01     ` David Woodhouse
2023-01-16 21:57 ` [PATCH v7 39/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2023-01-16 21:57 ` [PATCH v7 40/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2023-01-16 21:57 ` [PATCH v7 41/51] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2023-01-16 21:57 ` [PATCH v7 42/51] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2023-01-16 21:57 ` [PATCH v7 43/51] hw/xen: Support mapping grant frames David Woodhouse
2023-01-16 21:57 ` [PATCH v7 44/51] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2023-01-16 21:57 ` [PATCH v7 45/51] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2023-01-16 21:58 ` [PATCH v7 46/51] i386/xen: handle PV timer hypercalls David Woodhouse
2023-01-16 21:58 ` [PATCH v7 47/51] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2023-01-16 21:58 ` [PATCH v7 48/51] i386/xen: handle HVMOP_get_param David Woodhouse
2023-01-16 21:58 ` David Woodhouse [this message]
2023-01-16 21:58 ` [PATCH v7 50/51] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-01-16 21:58 ` [PATCH v7 51/51] hw/xen: Add basic ring handling to xenstore David Woodhouse
2023-01-16 22:19 ` [RFC PATCH v7bis 00/19] Emulated Xen PV backend and PIRQ support David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 01/19] hw/xen: Add evtchn operations to allow redirection to internal emulation David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 02/19] hw/xen: Add emulated evtchn ops David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 03/19] hw/xen: Add gnttab operations to allow redirection to internal emulation David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 04/19] hw/xen: Pass grant ref to gnttab unmap David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 05/19] hw/xen: Add foreignmem operations to allow redirection to internal emulation David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 06/19] hw/xen: Add xenstore " David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 07/19] hw/xen: Move xenstore_store_pv_console_info to xen_console.c David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 08/19] hw/xen: Use XEN_PAGE_SIZE in PV backend drivers David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 09/19] hw/xen: Rename xen_common.h to xen_native.h David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 10/19] hw/xen: Build PV backend drivers for XENFV_MACHINE David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 11/19] hw/xen: Map guest XENSTORE_PFN grant in emulated Xenstore David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 12/19] hw/xen: Add backend implementation of grant table operations David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 13/19] hw/xen: Implement soft reset for emulated gnttab David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 14/19] hw/xen: Remove old version of Xen headers David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 15/19] i386/xen: Initialize XenBus and legacy backends from pc_init1() David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 16/19] i386/xen: Implement HYPERVISOR_physdev_op David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 17/19] hw/xen: Implement emulated PIRQ hypercall support David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 18/19] hw/xen: Support GSI mapping to PIRQ David Woodhouse
2023-01-16 22:19   ` [RFC PATCH v7bis 19/19] hw/xen: Support MSI " David Woodhouse
2023-01-17 16:01 ` [PATCH v7 52/51] hw/xen: Automatically add xen-platform PCI device for emulated Xen guests David Woodhouse
2023-01-17 16:02 ` [PATCH v7 53/51] i386/xen: Document Xen HVM emulation David Woodhouse
