qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: David Woodhouse <dwmw2@infradead.org>
To: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: qemu-devel@nongnu.org, "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>
Subject: Re: [RFC PATCH v5 50/52] hw/xen: Add backend implementation of interdomain event channel support
Date: Wed, 04 Jan 2023 14:33:22 +0000	[thread overview]
Message-ID: <03D229F7-12B7-4AFE-9013-8717D039EF12@infradead.org> (raw)
In-Reply-To: <Y7VhWyGCCNz0+k+H@work-vm>



On 4 January 2023 11:22:03 GMT, "Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote:
>* David Woodhouse (dwmw2@infradead.org) wrote:
>> From: David Woodhouse <dwmw@amazon.co.uk>
>> 
>> This provides the QEMU side of interdomain event channels, allowing events
>> to be sent to/from the guest.
>> 
>> The API mirrors libxenevtchn, and in time both this and the real Xen one
>> will be available through ops structures so that the PV backend drivers
>> can use the correct one as appropriate.
>> 
>> For now, this implementation can be used directly by our XenStore which
>> will be for emulated mode only.
>> 
>> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
>> ---
>>  hw/i386/kvm/xen_evtchn.c | 342 +++++++++++++++++++++++++++++++++++++--
>>  hw/i386/kvm/xen_evtchn.h |  20 +++
>>  2 files changed, 353 insertions(+), 9 deletions(-)
>> 
>> diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
>> index 34c5199421..c0f6ef9dff 100644
>> --- a/hw/i386/kvm/xen_evtchn.c
>> +++ b/hw/i386/kvm/xen_evtchn.c
>> @@ -35,6 +35,7 @@
>>  #include "sysemu/kvm.h"
>>  #include "sysemu/kvm_xen.h"
>>  #include <linux/kvm.h>
>> +#include <sys/eventfd.h>
>>  
>>  #include "standard-headers/xen/memory.h"
>>  #include "standard-headers/xen/hvm/params.h"
>> @@ -85,6 +86,13 @@ struct compat_shared_info {
>>  
>>  #define COMPAT_EVTCHN_2L_NR_CHANNELS            1024
>>  
>> +/* Local private implementation of struct xenevtchn_handle */
>> +struct xenevtchn_handle {
>> +    evtchn_port_t be_port;
>> +    evtchn_port_t guest_port; /* Or zero for unbound */
>> +    int fd;
>> +};
>> +
>>  /*
>>   * For unbound/interdomain ports there are only two possible remote
>>   * domains; self and QEMU. Use a single high bit in type_val for that,
>> @@ -93,8 +101,6 @@ struct compat_shared_info {
>>  #define PORT_INFO_TYPEVAL_REMOTE_QEMU           0x8000
>>  #define PORT_INFO_TYPEVAL_REMOTE_PORT_MASK      0x7FFF
>>  
>> -#define DOMID_QEMU      0
>> -
>>  struct XenEvtchnState {
>>      /*< private >*/
>>      SysBusDevice busdev;
>> @@ -108,6 +114,8 @@ struct XenEvtchnState {
>>      uint32_t nr_ports;
>>      XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS];
>>      qemu_irq gsis[GSI_NUM_PINS];
>> +
>> +    struct xenevtchn_handle *be_handles[EVTCHN_2L_NR_CHANNELS];
>>  };
>>  
>>  struct XenEvtchnState *xen_evtchn_singleton;
>> @@ -115,6 +123,18 @@ struct XenEvtchnState *xen_evtchn_singleton;
>>  /* Top bits of callback_param are the type (HVM_PARAM_CALLBACK_TYPE_xxx) */
>>  #define CALLBACK_VIA_TYPE_SHIFT 56
>>  
>> +static void unbind_backend_ports(XenEvtchnState *s);
>> +
>> +static int xen_evtchn_pre_load(void *opaque)
>> +{
>> +    XenEvtchnState *s = opaque;
>> +
>> +    /* Unbind all the backend-side ports; they need to rebind */
>> +    unbind_backend_ports(s);
>> +
>> +    return 0;
>> +}
>> +
>>  static int xen_evtchn_post_load(void *opaque, int version_id)
>>  {
>>      XenEvtchnState *s = opaque;
>> @@ -148,6 +168,7 @@ static const VMStateDescription xen_evtchn_vmstate = {
>>      .version_id = 1,
>>      .minimum_version_id = 1,
>>      .needed = xen_evtchn_is_needed,
>> +    .pre_load = xen_evtchn_pre_load,
>>      .post_load = xen_evtchn_post_load,
>>      .fields = (VMStateField[]) {
>>          VMSTATE_UINT64(callback_param, XenEvtchnState),
>> @@ -362,6 +383,20 @@ static int assign_kernel_port(uint16_t type, evtchn_port_t port,
>>      return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
>>  }
>>  
>> +static int assign_kernel_eventfd(uint16_t type, evtchn_port_t port, int fd)
>> +{
>> +    struct kvm_xen_hvm_attr ha;
>> +
>> +    ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
>> +    ha.u.evtchn.send_port = port;
>> +    ha.u.evtchn.type = type;
>> +    ha.u.evtchn.flags = 0;
>> +    ha.u.evtchn.deliver.eventfd.port = 0;
>> +    ha.u.evtchn.deliver.eventfd.fd = fd;
>> +
>> +    return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
>> +}
>> +
>>  static bool valid_port(evtchn_port_t port)
>>  {
>>      if (!port) {
>> @@ -380,6 +415,32 @@ static bool valid_vcpu(uint32_t vcpu)
>>      return !!qemu_get_cpu(vcpu);
>>  }
>>  
>> +static void unbind_backend_ports(XenEvtchnState *s)
>> +{
>> +    XenEvtchnPort *p;
>> +    int i;
>> +
>> +    for (i = 1; i <= s->nr_ports; i++) {
>> +        p = &s->port_table[i];
>> +        if (p->type == EVTCHNSTAT_interdomain &&
>> +            (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) ) {
>> +            evtchn_port_t be_port = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
>> +
>> +            if (s->be_handles[be_port]) {
>> +                /* This part will be overwritten on the load anyway. */
>> +                p->type = EVTCHNSTAT_unbound;
>> +                p->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
>> +
>> +                /* Leave the backend port open and unbound too. */
>> +                if (kvm_xen_has_cap(EVTCHN_SEND)) {
>> +                    deassign_kernel_port(i);
>> +                }
>> +                s->be_handles[be_port]->guest_port = 0;
>> +            }
>> +        }
>> +    }
>> +}
>> +
>>  int xen_evtchn_status_op(struct evtchn_status *status)
>>  {
>>      XenEvtchnState *s = xen_evtchn_singleton;
>> @@ -815,7 +876,14 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port)
>>  
>>      case EVTCHNSTAT_interdomain:
>>          if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
>> -            /* Not yet implemented. This can't happen! */
>> +            uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
>> +            struct xenevtchn_handle *xc = s->be_handles[be_port];
>> +            if (xc) {
>> +                if (kvm_xen_has_cap(EVTCHN_SEND)) {
>> +                    deassign_kernel_port(port);
>> +                }
>> +                xc->guest_port = 0;
>> +            }
>>          } else {
>>              /* Loopback interdomain */
>>              XenEvtchnPort *rp = &s->port_table[p->type_val];
>> @@ -1047,8 +1115,27 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain)
>>      }
>>  
>>      if (interdomain->remote_dom == DOMID_QEMU) {
>> -        /* We haven't hooked up QEMU's PV drivers to this yet */
>> -        ret = -ENOSYS;
>> +        struct xenevtchn_handle *xc = s->be_handles[interdomain->remote_port];
>> +        XenEvtchnPort *lp = &s->port_table[interdomain->local_port];
>> +
>> +        if (!xc) {
>> +            ret = -ENOENT;
>> +            goto out_free_port;
>> +        }
>> +
>> +        if (xc->guest_port) {
>> +            ret = -EBUSY;
>> +            goto out_free_port;
>> +        }
>> +
>> +        assert(xc->be_port == interdomain->remote_port);
>> +        xc->guest_port = interdomain->local_port;
>> +        if (kvm_xen_has_cap(EVTCHN_SEND)) {
>> +            assign_kernel_eventfd(lp->type, xc->guest_port, xc->fd);
>> +        }
>> +        lp->type = EVTCHNSTAT_interdomain;
>> +        lp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU | interdomain->remote_port;
>> +        ret = 0;
>>      } else {
>>          /* Loopback */
>>          XenEvtchnPort *rp = &s->port_table[interdomain->remote_port];
>> @@ -1066,6 +1153,7 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain)
>>          }
>>      }
>>  
>> + out_free_port:
>>      if (ret) {
>>          free_port(s, interdomain->local_port);
>>      }
>> @@ -1130,11 +1218,16 @@ int xen_evtchn_send_op(struct evtchn_send *send)
>>          if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
>>              /*
>>               * This is an event from the guest to qemu itself, which is
>> -             * serving as the driver domain. Not yet implemented; it will
>> -             * be hooked up to the qemu implementation of xenstore,
>> -             * console, PV net/block drivers etc.
>> +             * serving as the driver domain.
>>               */
>> -            ret = -ENOSYS;
>> +            uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
>> +            struct xenevtchn_handle *xc = s->be_handles[be_port];
>> +            if (xc) {
>> +                eventfd_write(xc->fd, 1);
>> +                ret = 0;
>> +            } else {
>> +                ret = -ENOENT;
>> +            }
>>          } else {
>>              /* Loopback interdomain ports; just a complex IPI */
>>              set_port_pending(s, p->type_val);
>> @@ -1190,6 +1283,237 @@ int xen_evtchn_set_port(uint16_t port)
>>      return ret;
>>  }
>>  
>> +struct xenevtchn_handle *xen_be_evtchn_open(void *logger, unsigned int flags)
>> +{
>> +    struct xenevtchn_handle *xc = g_new0(struct xenevtchn_handle, 1);
>> +
>> +    xc->fd = eventfd(0, EFD_CLOEXEC);
>> +    if (xc->fd < 0) {
>> +        free(xc);
>> +        return NULL;
>> +    }
>> +
>> +    return xc;
>> +}
>> +
>> +static int find_be_port(XenEvtchnState *s, struct xenevtchn_handle *xc)
>> +{
>> +    int i;
>> +
>> +    for (i = 25; valid_port(i); i++) {
>
>Magic 25 number ?

Ah, yes, I kept meaning to remove that. Was just for testing that I had remote vs. local port correct by ensuring the numbers were actually different. Really will remember to kill it and start the qemu-side numbering from 1 next time round!


  reply	other threads:[~2023-01-04 14:34 UTC|newest]

Thread overview: 67+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-12-30 12:11 [RFC PATCH v5 00/52] Xen support under KVM David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 01/52] include: import Xen public headers to include/standard-headers/ David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 02/52] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 03/52] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 04/52] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 05/52] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 06/52] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 07/52] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 08/52] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 09/52] hw/xen_backend: refactor xen_be_init() David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 10/52] i386/xen: handle guest hypercalls David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 11/52] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 12/52] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 13/52] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 14/52] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-01-03 17:54   ` Dr. David Alan Gilbert
2022-12-30 12:11 ` [RFC PATCH v5 15/52] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 16/52] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 17/52] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 18/52] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 19/52] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 20/52] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 21/52] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2023-01-03 18:13   ` Dr. David Alan Gilbert
2022-12-30 12:12 ` [RFC PATCH v5 22/52] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 23/52] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 24/52] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 25/52] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 26/52] i386/xen: implement HVMOP_set_param David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 27/52] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 28/52] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-01-09 21:18   ` David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 29/52] hw/xen: Implement EVTCHNOP_status David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 30/52] hw/xen: Implement EVTCHNOP_close David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 31/52] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 32/52] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 33/52] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 34/52] hw/xen: Implement EVTCHNOP_send David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 35/52] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 36/52] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 37/52] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 38/52] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 39/52] i386/xen: add monitor commands to test event injection David Woodhouse
2023-01-04 12:48   ` Dr. David Alan Gilbert
2023-01-05 19:42     ` David Woodhouse
2023-01-05 20:09       ` Dr. David Alan Gilbert
2023-01-09 17:24         ` David Woodhouse
2023-01-09 18:51           ` Dr. David Alan Gilbert
2023-01-09 19:49             ` David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 40/52] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 41/52] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 42/52] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 43/52] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 44/52] hw/xen: Support mapping grant frames David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 45/52] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 46/52] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 47/52] i386/xen: handle PV timer hypercalls David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 48/52] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 49/52] i386/xen: handle HVMOP_get_param David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 50/52] hw/xen: Add backend implementation of interdomain event channel support David Woodhouse
2023-01-04 11:22   ` Dr. David Alan Gilbert
2023-01-04 14:33     ` David Woodhouse [this message]
2023-01-04 11:52   ` Dr. David Alan Gilbert
2022-12-30 12:12 ` [RFC PATCH v5 51/52] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-01-04 12:01   ` Dr. David Alan Gilbert
2023-01-04 14:35     ` David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 52/52] hw/xen: Add basic ring handling to xenstore David Woodhouse

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=03D229F7-12B7-4AFE-9013-8717D039EF12@infradead.org \
    --to=dwmw2@infradead.org \
    --cc=alex.bennee@linaro.org \
    --cc=ankur.a.arora@oracle.com \
    --cc=cfontana@suse.de \
    --cc=dgilbert@redhat.com \
    --cc=joao.m.martins@oracle.com \
    --cc=julien@xen.org \
    --cc=paul@xen.org \
    --cc=pbonzini@redhat.com \
    --cc=philmd@linaro.org \
    --cc=qemu-devel@nongnu.org \
    --cc=quintela@redhat.com \
    --cc=thuth@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).