qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>
Subject: [RFC PATCH v5 40/52] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback
Date: Fri, 30 Dec 2022 12:12:23 +0000	[thread overview]
Message-ID: <20221230121235.1282915-41-dwmw2@infradead.org> (raw)
In-Reply-To: <20221230121235.1282915-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

The GSI callback (and later PCI_INTX) is a level-triggered interrupt. It
is asserted when an event channel is delivered to vCPU0, and is supposed
to be cleared when the vcpu_info->evtchn_upcall_pending field for vCPU0
is cleared again.

Thankfully, Xen does *not* assert the GSI if the guest sets its own
evtchn_upcall_pending field; we only need to assert the GSI when we
have delivered an event for ourselves. So that's the easy part.

However, we *do* need to poll for the evtchn_upcall_pending flag being
cleared. In an ideal world we would poll that when the EOI happens on
the PIC/IOAPIC. That's how it works in the kernel with the VFIO eventfd
pairs — one is used to trigger the interrupt, and the other works in the
other direction to 'resample' on EOI, and trigger the first eventfd
again if the line is still active.

However, QEMU doesn't seem to do that. Even VFIO level-triggered interrupts
seem to be supported by temporarily unmapping the device's BARs from the
guest when an interrupt happens, then trapping *all* MMIO to the device
and sending the 'resample' event on *every* MMIO access until the IRQ
is cleared! Maybe in future we'll plumb the 'resample' concept through
QEMU's irq framework but for now we'll do what Xen itself does: just
check the flag on every vmexit if the upcall GSI is known to be
asserted.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/i386/kvm/xen_evtchn.c  | 45 ++++++++++++++++++++++++++++++++++++++
 hw/i386/kvm/xen_evtchn.h  |  4 ++++
 hw/i386/pc.c              |  6 +++++
 target/i386/cpu.h         |  1 +
 target/i386/kvm/kvm.c     | 13 +++++++++++
 target/i386/kvm/xen-emu.c | 46 ++++++++++++++++++++++++++++++---------
 target/i386/kvm/xen-emu.h |  1 +
 7 files changed, 106 insertions(+), 10 deletions(-)

diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 87136334f5..29f5e60172 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -24,6 +24,8 @@
 
 #include "hw/sysbus.h"
 #include "hw/xen/xen.h"
+#include "hw/i386/x86.h"
+#include "hw/irq.h"
 
 #include "xen_evtchn.h"
 #include "xen_overlay.h"
@@ -102,6 +104,7 @@ struct XenEvtchnState {
     QemuMutex port_lock;
     uint32_t nr_ports;
     XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS];
+    qemu_irq gsis[GSI_NUM_PINS];
 };
 
 struct XenEvtchnState *xen_evtchn_singleton;
@@ -170,9 +173,29 @@ void xen_evtchn_create(void)
 {
     XenEvtchnState *s = XEN_EVTCHN(sysbus_create_simple(TYPE_XEN_EVTCHN,
                                                         -1, NULL));
+    int i;
+
     xen_evtchn_singleton = s;
 
     qemu_mutex_init(&s->port_lock);
+
+    for (i = 0; i < GSI_NUM_PINS; i++) {
+        sysbus_init_irq(SYS_BUS_DEVICE(s), &s->gsis[i]);
+    }
+}
+
+void xen_evtchn_connect_gsis(qemu_irq *system_gsis)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int i;
+
+    if (!s) {
+        return;
+    }
+
+    for (i = 0; i < GSI_NUM_PINS; i++) {
+        sysbus_connect_irq(SYS_BUS_DEVICE(s), i, system_gsis[i]);
+    }
 }
 
 static void xen_evtchn_register_types(void)
@@ -182,6 +205,23 @@ static void xen_evtchn_register_types(void)
 
 type_init(xen_evtchn_register_types)
 
+void xen_evtchn_set_callback_level(int level)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+
+    if (s) {
+        uint32_t param = (uint32_t)s->callback_param;
+
+        switch (s->callback_param >> CALLBACK_VIA_TYPE_SHIFT) {
+        case HVM_PARAM_CALLBACK_TYPE_GSI:
+            if (param < GSI_NUM_PINS) {
+                qemu_set_irq(s->gsis[param], level);
+            }
+            break;
+        }
+    }
+}
+
 int xen_evtchn_set_callback_param(uint64_t param)
 {
     XenEvtchnState *s = xen_evtchn_singleton;
@@ -207,6 +247,11 @@ int xen_evtchn_set_callback_param(uint64_t param)
         }
         break;
     }
+
+    case HVM_PARAM_CALLBACK_TYPE_GSI:
+        ret = 0;
+        break;
+
     default:
         ret = -ENOSYS;
         break;
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index 1de53daa52..86f154e359 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -12,9 +12,13 @@
 #ifndef QEMU_XEN_EVTCHN_H
 #define QEMU_XEN_EVTCHN_H
 
+#include "hw/sysbus.h"
+
 void xen_evtchn_create(void);
 int xen_evtchn_soft_reset(void);
 int xen_evtchn_set_callback_param(uint64_t param);
+void xen_evtchn_connect_gsis(qemu_irq *system_gsis);
+void xen_evtchn_set_callback_level(int level);
 
 void hmp_xen_event_list(Monitor *mon, const QDict *qdict);
 void hmp_xen_event_inject(Monitor *mon, const QDict *qdict);
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index c1328a779d..1c4941de8f 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1303,6 +1303,12 @@ void pc_basic_device_init(struct PCMachineState *pcms,
     }
     *rtc_state = mc146818_rtc_init(isa_bus, 2000, rtc_irq);
 
+#ifdef CONFIG_XEN_EMU
+    if (xen_mode == XEN_EMULATE) {
+        xen_evtchn_connect_gsis(gsi);
+    }
+#endif
+
     qemu_register_boot_set(pc_boot_set, *rtc_state);
 
     if (!xen_enabled() &&
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 000ed2fed9..144b1c3038 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1795,6 +1795,7 @@ typedef struct CPUArchState {
     uint64_t xen_vcpu_time_info_gpa;
     uint64_t xen_vcpu_runstate_gpa;
     uint8_t xen_vcpu_callback_vector;
+    bool xen_callback_asserted;
     uint16_t xen_virq[XEN_NR_VIRQS];
     uint64_t xen_singleshot_timer_ns;
 #endif
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 52d69e87e7..2488773e1e 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -5415,6 +5415,19 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
     char str[256];
     KVMState *state;
 
+#ifdef CONFIG_XEN_EMU
+    /*
+     * If the callback is asserted as a GSI (or PCI INTx) then check if
+     * vcpu_info->evtchn_upcall_pending has been cleared, and deassert
+     * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC
+     * EOI and only resample then, exactly how the VFIO eventfd pairs
+     * are designed to work for level triggered interrupts.
+     */
+    if (cpu->env.xen_callback_asserted) {
+        kvm_xen_maybe_deassert_callback(cs);
+    }
+#endif
+
     switch (run->exit_reason) {
     case KVM_EXIT_HLT:
         DPRINTF("handle_hlt\n");
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index e4104b67c3..9a8eb0f1f9 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -285,18 +285,11 @@ static void *gpa_to_hva(uint64_t gpa)
                                              mrs.offset_within_region);
 }
 
-void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id)
+static void *vcpu_info_hva_from_cs(CPUState *cs)
 {
-    CPUState *cs = qemu_get_cpu(vcpu_id);
-    CPUX86State *env;
-    uint64_t gpa;
-
-    if (!cs) {
-        return NULL;
-    }
-    env = &X86_CPU(cs)->env;
+    CPUX86State *env = &X86_CPU(cs)->env;
+    uint64_t gpa = env->xen_vcpu_info_gpa;
 
-    gpa = env->xen_vcpu_info_gpa;
     if (gpa == INVALID_GPA) {
         gpa = env->xen_vcpu_info_default_gpa;
     }
@@ -307,6 +300,31 @@ void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id)
     return gpa_to_hva(gpa);
 }
 
+void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id)
+{
+    CPUState *cs = qemu_get_cpu(vcpu_id);
+
+    if (!cs) {
+            return NULL;
+    }
+
+    return vcpu_info_hva_from_cs(cs);
+}
+
+void kvm_xen_maybe_deassert_callback(CPUState *cs)
+{
+    struct vcpu_info *vi = vcpu_info_hva_from_cs(cs);
+    if (!vi) {
+            return;
+    }
+
+    /* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */
+    if (!vi->evtchn_upcall_pending) {
+        X86_CPU(cs)->env.xen_callback_asserted = false;
+        xen_evtchn_set_callback_level(0);
+    }
+}
+
 void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type)
 {
     CPUState *cs = qemu_get_cpu(vcpu_id);
@@ -339,6 +357,14 @@ void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type)
          */
         qemu_cpu_kick(cs);
         break;
+
+    case HVM_PARAM_CALLBACK_TYPE_GSI:
+    case HVM_PARAM_CALLBACK_TYPE_PCI_INTX:
+        if (vcpu_id == 0) {
+            xen_evtchn_set_callback_level(1);
+            X86_CPU(cs)->env.xen_callback_asserted = true;
+        }
+        break;
     }
 }
 
diff --git a/target/i386/kvm/xen-emu.h b/target/i386/kvm/xen-emu.h
index 452605699a..fe85e0b195 100644
--- a/target/i386/kvm/xen-emu.h
+++ b/target/i386/kvm/xen-emu.h
@@ -28,5 +28,6 @@ int kvm_xen_init_vcpu(CPUState *cs);
 int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit);
 int kvm_put_xen_state(CPUState *cs);
 int kvm_get_xen_state(CPUState *cs);
+void kvm_xen_maybe_deassert_callback(CPUState *cs);
 
 #endif /* QEMU_I386_KVM_XEN_EMU_H */
-- 
2.35.3



  parent reply	other threads:[~2022-12-30 12:30 UTC|newest]

Thread overview: 67+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-12-30 12:11 [RFC PATCH v5 00/52] Xen support under KVM David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 01/52] include: import Xen public headers to include/standard-headers/ David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 02/52] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 03/52] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 04/52] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 05/52] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 06/52] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 07/52] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 08/52] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 09/52] hw/xen_backend: refactor xen_be_init() David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 10/52] i386/xen: handle guest hypercalls David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 11/52] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 12/52] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 13/52] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 14/52] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-01-03 17:54   ` Dr. David Alan Gilbert
2022-12-30 12:11 ` [RFC PATCH v5 15/52] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2022-12-30 12:11 ` [RFC PATCH v5 16/52] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 17/52] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 18/52] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 19/52] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 20/52] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 21/52] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2023-01-03 18:13   ` Dr. David Alan Gilbert
2022-12-30 12:12 ` [RFC PATCH v5 22/52] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 23/52] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 24/52] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 25/52] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 26/52] i386/xen: implement HVMOP_set_param David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 27/52] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 28/52] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-01-09 21:18   ` David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 29/52] hw/xen: Implement EVTCHNOP_status David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 30/52] hw/xen: Implement EVTCHNOP_close David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 31/52] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 32/52] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 33/52] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 34/52] hw/xen: Implement EVTCHNOP_send David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 35/52] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 36/52] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 37/52] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 38/52] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 39/52] i386/xen: add monitor commands to test event injection David Woodhouse
2023-01-04 12:48   ` Dr. David Alan Gilbert
2023-01-05 19:42     ` David Woodhouse
2023-01-05 20:09       ` Dr. David Alan Gilbert
2023-01-09 17:24         ` David Woodhouse
2023-01-09 18:51           ` Dr. David Alan Gilbert
2023-01-09 19:49             ` David Woodhouse
2022-12-30 12:12 ` David Woodhouse [this message]
2022-12-30 12:12 ` [RFC PATCH v5 41/52] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 42/52] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 43/52] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 44/52] hw/xen: Support mapping grant frames David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 45/52] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 46/52] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 47/52] i386/xen: handle PV timer hypercalls David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 48/52] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 49/52] i386/xen: handle HVMOP_get_param David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 50/52] hw/xen: Add backend implementation of interdomain event channel support David Woodhouse
2023-01-04 11:22   ` Dr. David Alan Gilbert
2023-01-04 14:33     ` David Woodhouse
2023-01-04 11:52   ` Dr. David Alan Gilbert
2022-12-30 12:12 ` [RFC PATCH v5 51/52] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-01-04 12:01   ` Dr. David Alan Gilbert
2023-01-04 14:35     ` David Woodhouse
2022-12-30 12:12 ` [RFC PATCH v5 52/52] hw/xen: Add basic ring handling to xenstore David Woodhouse

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221230121235.1282915-41-dwmw2@infradead.org \
    --to=dwmw2@infradead.org \
    --cc=alex.bennee@linaro.org \
    --cc=ankur.a.arora@oracle.com \
    --cc=cfontana@suse.de \
    --cc=dgilbert@redhat.com \
    --cc=joao.m.martins@oracle.com \
    --cc=julien@xen.org \
    --cc=paul@xen.org \
    --cc=pbonzini@redhat.com \
    --cc=philmd@linaro.org \
    --cc=qemu-devel@nongnu.org \
    --cc=quintela@redhat.com \
    --cc=thuth@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).