qemu-devel.nongnu.org archive mirror
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
To: David Woodhouse <dwmw2@infradead.org>
Cc: qemu-devel@nongnu.org, "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>
Subject: Re: [RFC PATCH v5 50/52] hw/xen: Add backend implementation of interdomain event channel support
Date: Wed, 4 Jan 2023 11:52:18 +0000	[thread overview]
Message-ID: <Y7VocVvuu9hYyFed@work-vm> (raw)
In-Reply-To: <20221230121235.1282915-51-dwmw2@infradead.org>

* David Woodhouse (dwmw2@infradead.org) wrote:
> From: David Woodhouse <dwmw@amazon.co.uk>
> 
> This provides the QEMU side of interdomain event channels, allowing events
> to be sent to/from the guest.
> 
> The API mirrors libxenevtchn, and in time both this and the real Xen one
> will be available through ops structures so that the PV backend drivers
> can use the correct one as appropriate.
> 
> For now, this implementation can be used directly by our XenStore, which
> will be used in emulated mode only.
> 
> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
> ---
>  hw/i386/kvm/xen_evtchn.c | 342 +++++++++++++++++++++++++++++++++++++--
>  hw/i386/kvm/xen_evtchn.h |  20 +++
>  2 files changed, 353 insertions(+), 9 deletions(-)
> 
> diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
> index 34c5199421..c0f6ef9dff 100644
> --- a/hw/i386/kvm/xen_evtchn.c
> +++ b/hw/i386/kvm/xen_evtchn.c
> @@ -35,6 +35,7 @@
>  #include "sysemu/kvm.h"
>  #include "sysemu/kvm_xen.h"
>  #include <linux/kvm.h>
> +#include <sys/eventfd.h>
>  
>  #include "standard-headers/xen/memory.h"
>  #include "standard-headers/xen/hvm/params.h"
> @@ -85,6 +86,13 @@ struct compat_shared_info {
>  
>  #define COMPAT_EVTCHN_2L_NR_CHANNELS            1024
>  
> +/* Local private implementation of struct xenevtchn_handle */
> +struct xenevtchn_handle {
> +    evtchn_port_t be_port;
> +    evtchn_port_t guest_port; /* Or zero for unbound */
> +    int fd;
> +};
> +
>  /*
>   * For unbound/interdomain ports there are only two possible remote
>   * domains; self and QEMU. Use a single high bit in type_val for that,
> @@ -93,8 +101,6 @@ struct compat_shared_info {
>  #define PORT_INFO_TYPEVAL_REMOTE_QEMU           0x8000
>  #define PORT_INFO_TYPEVAL_REMOTE_PORT_MASK      0x7FFF
>  
> -#define DOMID_QEMU      0
> -
>  struct XenEvtchnState {
>      /*< private >*/
>      SysBusDevice busdev;
> @@ -108,6 +114,8 @@ struct XenEvtchnState {
>      uint32_t nr_ports;
>      XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS];
>      qemu_irq gsis[GSI_NUM_PINS];
> +
> +    struct xenevtchn_handle *be_handles[EVTCHN_2L_NR_CHANNELS];
>  };
>  
>  struct XenEvtchnState *xen_evtchn_singleton;
> @@ -115,6 +123,18 @@ struct XenEvtchnState *xen_evtchn_singleton;
>  /* Top bits of callback_param are the type (HVM_PARAM_CALLBACK_TYPE_xxx) */
>  #define CALLBACK_VIA_TYPE_SHIFT 56
>  
> +static void unbind_backend_ports(XenEvtchnState *s);
> +
> +static int xen_evtchn_pre_load(void *opaque)
> +{
> +    XenEvtchnState *s = opaque;
> +
> +    /* Unbind all the backend-side ports; they need to rebind */
> +    unbind_backend_ports(s);
> +
> +    return 0;
> +}
> +
>  static int xen_evtchn_post_load(void *opaque, int version_id)
>  {
>      XenEvtchnState *s = opaque;
> @@ -148,6 +168,7 @@ static const VMStateDescription xen_evtchn_vmstate = {
>      .version_id = 1,
>      .minimum_version_id = 1,
>      .needed = xen_evtchn_is_needed,
> +    .pre_load = xen_evtchn_pre_load,
>      .post_load = xen_evtchn_post_load,
>      .fields = (VMStateField[]) {
>          VMSTATE_UINT64(callback_param, XenEvtchnState),
> @@ -362,6 +383,20 @@ static int assign_kernel_port(uint16_t type, evtchn_port_t port,
>      return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
>  }
>  
> +static int assign_kernel_eventfd(uint16_t type, evtchn_port_t port, int fd)
> +{
> +    struct kvm_xen_hvm_attr ha;
> +
> +    ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
> +    ha.u.evtchn.send_port = port;
> +    ha.u.evtchn.type = type;
> +    ha.u.evtchn.flags = 0;
> +    ha.u.evtchn.deliver.eventfd.port = 0;
> +    ha.u.evtchn.deliver.eventfd.fd = fd;
> +
> +    return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
> +}
> +
>  static bool valid_port(evtchn_port_t port)
>  {
>      if (!port) {
> @@ -380,6 +415,32 @@ static bool valid_vcpu(uint32_t vcpu)
>      return !!qemu_get_cpu(vcpu);
>  }
>  
> +static void unbind_backend_ports(XenEvtchnState *s)
> +{
> +    XenEvtchnPort *p;
> +    int i;
> +
> +    for (i = 1; i <= s->nr_ports; i++) {
> +        p = &s->port_table[i];
> +        if (p->type == EVTCHNSTAT_interdomain &&
> +            (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) ) {
> +            evtchn_port_t be_port = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
> +
> +            if (s->be_handles[be_port]) {
> +                /* This part will be overwritten on the load anyway. */
> +                p->type = EVTCHNSTAT_unbound;
> +                p->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
> +
> +                /* Leave the backend port open and unbound too. */
> +                if (kvm_xen_has_cap(EVTCHN_SEND)) {
> +                    deassign_kernel_port(i);
> +                }
> +                s->be_handles[be_port]->guest_port = 0;
> +            }
> +        }
> +    }
> +}
> +
>  int xen_evtchn_status_op(struct evtchn_status *status)
>  {
>      XenEvtchnState *s = xen_evtchn_singleton;
> @@ -815,7 +876,14 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port)
>  
>      case EVTCHNSTAT_interdomain:
>          if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
> -            /* Not yet implemented. This can't happen! */
> +            uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
> +            struct xenevtchn_handle *xc = s->be_handles[be_port];
> +            if (xc) {
> +                if (kvm_xen_has_cap(EVTCHN_SEND)) {
> +                    deassign_kernel_port(port);
> +                }
> +                xc->guest_port = 0;
> +            }
>          } else {
>              /* Loopback interdomain */
>              XenEvtchnPort *rp = &s->port_table[p->type_val];
> @@ -1047,8 +1115,27 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain)
>      }
>  
>      if (interdomain->remote_dom == DOMID_QEMU) {
> -        /* We haven't hooked up QEMU's PV drivers to this yet */
> -        ret = -ENOSYS;
> +        struct xenevtchn_handle *xc = s->be_handles[interdomain->remote_port];
> +        XenEvtchnPort *lp = &s->port_table[interdomain->local_port];
> +
> +        if (!xc) {
> +            ret = -ENOENT;
> +            goto out_free_port;
> +        }
> +
> +        if (xc->guest_port) {
> +            ret = -EBUSY;
> +            goto out_free_port;
> +        }
> +
> +        assert(xc->be_port == interdomain->remote_port);
> +        xc->guest_port = interdomain->local_port;
> +        if (kvm_xen_has_cap(EVTCHN_SEND)) {
> +            assign_kernel_eventfd(lp->type, xc->guest_port, xc->fd);
> +        }
> +        lp->type = EVTCHNSTAT_interdomain;
> +        lp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU | interdomain->remote_port;
> +        ret = 0;
>      } else {
>          /* Loopback */
>          XenEvtchnPort *rp = &s->port_table[interdomain->remote_port];
> @@ -1066,6 +1153,7 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain)
>          }
>      }
>  
> + out_free_port:
>      if (ret) {
>          free_port(s, interdomain->local_port);
>      }
> @@ -1130,11 +1218,16 @@ int xen_evtchn_send_op(struct evtchn_send *send)
>          if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
>              /*
>               * This is an event from the guest to qemu itself, which is
> -             * serving as the driver domain. Not yet implemented; it will
> -             * be hooked up to the qemu implementation of xenstore,
> -             * console, PV net/block drivers etc.
> +             * serving as the driver domain.
>               */
> -            ret = -ENOSYS;
> +            uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
> +            struct xenevtchn_handle *xc = s->be_handles[be_port];
> +            if (xc) {
> +                eventfd_write(xc->fd, 1);
> +                ret = 0;
> +            } else {
> +                ret = -ENOENT;
> +            }
>          } else {
>              /* Loopback interdomain ports; just a complex IPI */
>              set_port_pending(s, p->type_val);
> @@ -1190,6 +1283,237 @@ int xen_evtchn_set_port(uint16_t port)
>      return ret;
>  }
>  
> +struct xenevtchn_handle *xen_be_evtchn_open(void *logger, unsigned int flags)
> +{
> +    struct xenevtchn_handle *xc = g_new0(struct xenevtchn_handle, 1);
> +
> +    xc->fd = eventfd(0, EFD_CLOEXEC);
> +    if (xc->fd < 0) {
> +        free(xc);
> +        return NULL;
> +    }
> +
> +    return xc;
> +}
> +
> +static int find_be_port(XenEvtchnState *s, struct xenevtchn_handle *xc)
> +{
> +    int i;
> +
> +    for (i = 1; valid_port(i); i++) {
> +        if (!s->be_handles[i]) {
> +            s->be_handles[i] = xc;
> +            xc->be_port = i;
> +            return i;
> +        }
> +    }
> +    return 0;
> +}
> +
> +int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
> +                                   evtchn_port_t guest_port)
> +{
> +    XenEvtchnState *s = xen_evtchn_singleton;
> +    XenEvtchnPort *gp;
> +    uint16_t be_port = 0;
> +    int ret;
> +
> +    if (!s) {
> +        return -ENOTSUP;
> +    }
> +
> +    if (!xc) {
> +        return -EFAULT;
> +    }
> +
> +    if (domid != xen_domid) {
> +        return -ESRCH;
> +    }
> +
> +    if (!valid_port(guest_port)) {
> +        return -EINVAL;
> +    }
> +
> +    qemu_mutex_lock(&s->port_lock);

You might consider using:
  QEMU_LOCK_GUARD(&s->port_lock);
it automatically drops the lock at the end of the scope, which avoids the
cleanup label and the goto.
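
For instance, the whole of xen_be_evtchn_bind_interdomain() could end up
looking something like the below. Only a sketch, not even compile-tested,
and it assumes qemu/lockable.h is (or gets) included:

int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
                                   evtchn_port_t guest_port)
{
    XenEvtchnState *s = xen_evtchn_singleton;
    XenEvtchnPort *gp;
    uint16_t be_port = 0;

    if (!s) {
        return -ENOTSUP;
    }

    if (!xc) {
        return -EFAULT;
    }

    if (domid != xen_domid) {
        return -ESRCH;
    }

    if (!valid_port(guest_port)) {
        return -EINVAL;
    }

    QEMU_LOCK_GUARD(&s->port_lock);

    /* The guest has to have an unbound port waiting for us to bind */
    gp = &s->port_table[guest_port];

    switch (gp->type) {
    case EVTCHNSTAT_interdomain:
        /* Allow rebinding after migration, preserve port # if possible */
        be_port = gp->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
        assert(be_port != 0);
        if (!s->be_handles[be_port]) {
            s->be_handles[be_port] = xc;
            xc->guest_port = guest_port;
            xc->be_port = be_port;
            if (kvm_xen_has_cap(EVTCHN_SEND)) {
                assign_kernel_eventfd(gp->type, guest_port, xc->fd);
            }
            /* The guard drops port_lock on return */
            return be_port;
        }
        /* fall through */

    case EVTCHNSTAT_unbound:
        be_port = find_be_port(s, xc);
        if (!be_port) {
            return -ENOSPC;
        }

        gp->type = EVTCHNSTAT_interdomain;
        gp->type_val = be_port | PORT_INFO_TYPEVAL_REMOTE_QEMU;
        xc->guest_port = guest_port;
        if (kvm_xen_has_cap(EVTCHN_SEND)) {
            assign_kernel_eventfd(gp->type, guest_port, xc->fd);
        }
        return be_port;

    default:
        return -EINVAL;
    }
}

The same pattern would work for xen_be_evtchn_unbind() too.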

> +
> +    /* The guest has to have an unbound port waiting for us to bind */
> +    gp = &s->port_table[guest_port];
> +
> +    switch (gp->type) {
> +    case EVTCHNSTAT_interdomain:
> +        /* Allow rebinding after migration, preserve port # if possible */
> +        be_port = gp->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
> +        assert(be_port != 0);
> +        if (!s->be_handles[be_port]) {
> +            s->be_handles[be_port] = xc;
> +            xc->guest_port = guest_port;
> +            ret = xc->be_port = be_port;
> +            if (kvm_xen_has_cap(EVTCHN_SEND)) {
> +                assign_kernel_eventfd(gp->type, guest_port, xc->fd);
> +            }
> +            break;
> +        }
> +        /* fall through */
> +
> +    case EVTCHNSTAT_unbound:
> +        be_port = find_be_port(s, xc);
> +        if (!be_port) {
> +            ret = -ENOSPC;
> +            goto out;
> +        }
> +
> +        gp->type = EVTCHNSTAT_interdomain;
> +        gp->type_val = be_port | PORT_INFO_TYPEVAL_REMOTE_QEMU;
> +        xc->guest_port = guest_port;
> +        if (kvm_xen_has_cap(EVTCHN_SEND)) {
> +            assign_kernel_eventfd(gp->type, guest_port, xc->fd);
> +        }
> +        ret = be_port;
> +        break;
> +
> +    default:
> +        ret = -EINVAL;
> +        break;
> +    }
> +
> + out:
> +    qemu_mutex_unlock(&s->port_lock);
> +
> +    return ret;
> +}
> +
> +int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port)
> +{
> +    XenEvtchnState *s = xen_evtchn_singleton;
> +    int ret;
> +
> +    if (!s) {
> +        return -ENOTSUP;
> +    }
> +
> +    if (!xc) {
> +        return -EFAULT;
> +    }
> +
> +    qemu_mutex_lock(&s->port_lock);
> +
> +    if (port && port != xc->be_port) {
> +        ret = -EINVAL;
> +        goto out;
> +    }
> +
> +    if (xc->guest_port) {
> +        XenEvtchnPort *gp = &s->port_table[xc->guest_port];
> +
> +        /* This should never *not* be true */
> +        if (gp->type == EVTCHNSTAT_interdomain) {
> +            gp->type = EVTCHNSTAT_unbound;
> +            gp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
> +        }
> +
> +        if (kvm_xen_has_cap(EVTCHN_SEND)) {
> +            deassign_kernel_port(xc->guest_port);
> +        }
> +        xc->guest_port = 0;
> +    }
> +
> +    s->be_handles[xc->be_port] = NULL;
> +    xc->be_port = 0;
> +    ret = 0;
> + out:
> +    qemu_mutex_unlock(&s->port_lock);
> +    return ret;
> +}
> +
> +int xen_be_evtchn_close(struct xenevtchn_handle *xc)
> +{
> +    if (!xc) {
> +        return -EFAULT;
> +    }
> +
> +    xen_be_evtchn_unbind(xc, 0);
> +
> +    close(xc->fd);
> +    free(xc);
> +    return 0;
> +}
> +
> +int xen_be_evtchn_fd(struct xenevtchn_handle *xc)
> +{
> +    if (!xc) {
> +        return -1;
> +    }
> +    return xc->fd;
> +}
> +
> +int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port)
> +{
> +    XenEvtchnState *s = xen_evtchn_singleton;
> +    int ret;
> +
> +    if (!s) {
> +        return -ENOTSUP;
> +    }
> +
> +    if (!xc) {
> +        return -EFAULT;
> +    }
> +
> +    qemu_mutex_lock(&s->port_lock);

Used here, it would remove the need for 'ret' as well, since you can just
return directly from within the locked region.
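
e.g. (again only an untested sketch):

int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port)
{
    XenEvtchnState *s = xen_evtchn_singleton;

    if (!s) {
        return -ENOTSUP;
    }

    if (!xc) {
        return -EFAULT;
    }

    QEMU_LOCK_GUARD(&s->port_lock);

    if (xc->guest_port) {
        set_port_pending(s, xc->guest_port);
        return 0;
    }

    /* The lock is released automatically when the guard goes out of scope */
    return -ENOTCONN;
}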

(Note it's only a suggestion).

Dave

> +    if (xc->guest_port) {
> +        set_port_pending(s, xc->guest_port);
> +        ret = 0;
> +    } else {
> +        ret = -ENOTCONN;
> +    }
> +
> +    qemu_mutex_unlock(&s->port_lock);
> +
> +    return ret;
> +}
> +
> +int xen_be_evtchn_pending(struct xenevtchn_handle *xc)
> +{
> +    uint64_t val;
> +
> +    if (!xc) {
> +        return -EFAULT;
> +    }
> +
> +    if (!xc->be_port) {
> +        return 0;
> +    }
> +
> +    if (eventfd_read(xc->fd, &val)) {
> +        return -errno;
> +    }
> +
> +    return val ? xc->be_port : 0;
> +}
> +
> +int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port)
> +{
> +    if (!xc) {
> +        return -EFAULT;
> +    }
> +
> +    if (xc->be_port != port) {
> +        return -EINVAL;
> +    }
> +
> +    /*
> +     * We don't actually do anything to unmask it; the event was already
> +     * consumed in xen_be_evtchn_pending().
> +     */
> +    return 0;
> +}
> +
> +int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc)
> +{
> +    return xc->guest_port;
> +}
> +
>  static const char *type_names[] = {
>      "closed",
>      "unbound",
> diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
> index eb6ba16f72..029496eaa9 100644
> --- a/hw/i386/kvm/xen_evtchn.h
> +++ b/hw/i386/kvm/xen_evtchn.h
> @@ -14,6 +14,9 @@
>  
>  #include "hw/sysbus.h"
>  
> +typedef uint32_t evtchn_port_t;
> +#define DOMID_QEMU      0
> +
>  void xen_evtchn_create(void);
>  int xen_evtchn_soft_reset(void);
>  int xen_evtchn_set_callback_param(uint64_t param);
> @@ -22,6 +25,23 @@ void xen_evtchn_set_callback_level(int level);
>  
>  int xen_evtchn_set_port(uint16_t port);
>  
> +/*
> + * These functions mirror the libxenevtchn library API, providing the QEMU
> + * backend side of "interdomain" event channels.
> + */
> +struct xenevtchn_handle;
> +struct xenevtchn_handle *xen_be_evtchn_open(void *logger, unsigned int flags);
> +int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
> +                                   evtchn_port_t guest_port);
> +int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port);
> +int xen_be_evtchn_close(struct xenevtchn_handle *xc);
> +int xen_be_evtchn_fd(struct xenevtchn_handle *xc);
> +int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port);
> +int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port);
> +int xen_be_evtchn_pending(struct xenevtchn_handle *xc);
> +/* Apart from this which is a local addition */
> +int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc);
> +
>  void hmp_xen_event_list(Monitor *mon, const QDict *qdict);
>  void hmp_xen_event_inject(Monitor *mon, const QDict *qdict);
>  
> -- 
> 2.35.3
> 
> 
-- 
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK



Thread overview: 67+ messages
2022-12-30 12:11 [RFC PATCH v5 00/52] Xen support under KVM David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 01/52] include: import Xen public headers to include/standard-headers/ David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 02/52] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 03/52] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 04/52] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 05/52] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 06/52] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 07/52] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 08/52] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 09/52] hw/xen_backend: refactor xen_be_init() David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 10/52] i386/xen: handle guest hypercalls David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 11/52] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 12/52] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 13/52] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 14/52] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-01-03 17:54   ` Dr. David Alan Gilbert
2022-12-30 12:11 ` [RFC PATCH v5 15/52] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 16/52] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 17/52] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 18/52] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 19/52] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 20/52] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 21/52] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2023-01-03 18:13   ` Dr. David Alan Gilbert
2022-12-30 12:12 ` [RFC PATCH v5 22/52] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 23/52] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 24/52] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 25/52] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 26/52] i386/xen: implement HVMOP_set_param David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 27/52] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 28/52] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-01-09 21:18   ` David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 29/52] hw/xen: Implement EVTCHNOP_status David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 30/52] hw/xen: Implement EVTCHNOP_close David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 31/52] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 32/52] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 33/52] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 34/52] hw/xen: Implement EVTCHNOP_send David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 35/52] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 36/52] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 37/52] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 38/52] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 39/52] i386/xen: add monitor commands to test event injection David Woodhouse
2023-01-04 12:48   ` Dr. David Alan Gilbert
2023-01-05 19:42     ` David Woodhouse
2023-01-05 20:09       ` Dr. David Alan Gilbert
2023-01-09 17:24         ` David Woodhouse
2023-01-09 18:51           ` Dr. David Alan Gilbert
2023-01-09 19:49             ` David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 40/52] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 41/52] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 42/52] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 43/52] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 44/52] hw/xen: Support mapping grant frames David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 45/52] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 46/52] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 47/52] i386/xen: handle PV timer hypercalls David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 48/52] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 49/52] i386/xen: handle HVMOP_get_param David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 50/52] hw/xen: Add backend implementation of interdomain event channel support David Woodhouse
2023-01-04 11:22   ` Dr. David Alan Gilbert
2023-01-04 14:33     ` David Woodhouse
2023-01-04 11:52   ` Dr. David Alan Gilbert [this message]
2022-12-30 12:12 ` [RFC PATCH v5 51/52] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-01-04 12:01   ` Dr. David Alan Gilbert
2023-01-04 14:35     ` David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 52/52] hw/xen: Add basic ring handling to xenstore David Woodhouse
