From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
"Paul Durrant" <paul@xen.org>,
"Joao Martins" <joao.m.martins@oracle.com>,
"Ankur Arora" <ankur.a.arora@oracle.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Thomas Huth" <thuth@redhat.com>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Juan Quintela" <quintela@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
"Claudio Fontana" <cfontana@suse.de>
Subject: [RFC PATCH v4 31/47] hw/xen: Implement EVTCHNOP_unmask
Date: Wed, 21 Dec 2022 01:06:07 +0000 [thread overview]
Message-ID: <20221221010623.1000191-32-dwmw2@infradead.org> (raw)
In-Reply-To: <20221221010623.1000191-1-dwmw2@infradead.org>
From: David Woodhouse <dwmw@amazon.co.uk>
This finally comes with a mechanism for actually injecting events into
the guest vCPU, with all the atomic-test-and-set that's involved in
setting the bit in the shinfo, then the index in the vcpu_info, and
injecting either the lapic vector as MSI, or letting KVM inject the
bare vector.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
hw/i386/kvm/xen_evtchn.c | 220 ++++++++++++++++++++++++++++++++++++++
hw/i386/kvm/xen_evtchn.h | 2 +
target/i386/kvm/xen-emu.c | 12 +++
3 files changed, 234 insertions(+)
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 3ccf72d78b..a3ad2ddb55 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -21,6 +21,7 @@
#include "hw/sysbus.h"
#include "hw/xen/xen.h"
+
#include "xen_evtchn.h"
#include "xen_overlay.h"
@@ -40,6 +41,41 @@ typedef struct XenEvtchnPort {
uint16_t type_val; /* pirq# / virq# / remote port according to type */
} XenEvtchnPort;
+/* 32-bit compatibility definitions, also used natively in 32-bit build */
+
+/*
+ * These mirror the guest-visible layouts from Xen's public headers for
+ * 32-bit guests; field order, widths and padding are ABI and must not
+ * be changed.
+ */
+struct compat_arch_vcpu_info {
+ unsigned int cr2;
+ unsigned int pad[5];
+};
+
+struct compat_vcpu_info {
+ uint8_t evtchn_upcall_pending;
+ uint8_t evtchn_upcall_mask;
+ uint16_t pad;
+ uint32_t evtchn_pending_sel; /* one bit per word of evtchn_pending[] */
+ struct compat_arch_vcpu_info arch;
+ /* Native vcpu_time_info reused here; presumably its layout is
+ * identical in the 32-bit ABI — TODO confirm against Xen headers. */
+ struct vcpu_time_info time;
+}; /* 64 bytes (x86) */
+
+struct compat_arch_shared_info {
+ unsigned int max_pfn;
+ unsigned int pfn_to_mfn_frame_list_list;
+ unsigned int nmi_reason;
+ unsigned int p2m_cr3;
+ unsigned int p2m_vaddr;
+ unsigned int p2m_generation;
+ uint32_t wc_sec_hi;
+};
+
+/* 32-bit layout of the shared info page: 32 words of 32 bits each,
+ * i.e. COMPAT_EVTCHN_2L_NR_CHANNELS (1024) event channels. */
+struct compat_shared_info {
+ struct compat_vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
+ uint32_t evtchn_pending[32];
+ uint32_t evtchn_mask[32];
+ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
+ uint32_t wc_sec;
+ uint32_t wc_nsec;
+ struct compat_arch_shared_info arch;
+};
+
#define COMPAT_EVTCHN_2L_NR_CHANNELS 1024
/*
@@ -183,6 +219,22 @@ int xen_evtchn_set_callback_param(uint64_t param)
return ret;
}
+/*
+ * Deliver the event-channel upcall to @vcpu using whichever callback
+ * mechanism the guest configured (the type lives in the upper bits of
+ * the HVM_PARAM_CALLBACK_IRQ parameter cached in s->callback_param).
+ */
+static void inject_callback(XenEvtchnState *s, uint32_t vcpu)
+{
+ int type = s->callback_param >> CALLBACK_VIA_TYPE_SHIFT;
+
+ /* If vector-based delivery through KVM handled it, we're done. */
+ if (kvm_xen_inject_vcpu_callback_vector(vcpu, type)) {
+ return;
+ }
+
+ /* GSI or PCI_INTX delivery is only for events on vCPU 0 */
+ if (vcpu) {
+ return;
+ }
+
+ /* ... and isn't implemented yet. */
+}
+
static bool valid_port(evtchn_port_t port)
{
if (!port) {
@@ -253,6 +305,152 @@ int xen_evtchn_status_op(struct evtchn_status *status)
return 0;
}
+/*
+ * Never thought I'd hear myself say this, but C++ templates would be
+ * kind of nice here.
+ *
+ * template<class T> static int do_unmask_port(T *shinfo, ...);
+ */
+/*
+ * Unmask @port in the 64-bit ("long mode") guest layout, raising the
+ * upcall to the target vCPU if the event was pending and not already
+ * signalled.  Returns 0 on success, -EINVAL for an out-of-range port.
+ *
+ * If @do_unmask is false this is a pseudo-unmask (used for affinity
+ * changes): the mask bit is left untouched, and delivery only happens
+ * if the port is currently unmasked.
+ */
+static int do_unmask_port_lm(XenEvtchnState *s, evtchn_port_t port,
+ bool do_unmask, struct shared_info *shinfo,
+ struct vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ /*
+ * Not a typo: in the 2-level ABI evtchn_pending_sel has one bit per
+ * word of evtchn_pending[], so the number of words equals the word
+ * width (bits_per_word words of bits_per_word bits each).
+ */
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ if (do_unmask) {
+ /*
+ * If this is a true unmask operation, clear the mask bit. If
+ * it was already unmasked, we have nothing further to do.
+ */
+ if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
+ return 0;
+ }
+ } else {
+ /*
+ * This is a pseudo-unmask for affinity changes. We don't
+ * change the mask bit, and if it's *masked* we have nothing
+ * else to do.
+ */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+ }
+
+ /* If the event was not pending, we're done. */
+ /* (fetch_or with 0 is used as an atomic read of the word.) */
+ if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ /* Reuse 'mask' for the selector bit covering word 'idx'. */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ /* If it was already set the guest hasn't acked yet; don't re-inject. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ inject_callback(s, s->port_table[port].vcpu);
+
+ return 0;
+}
+
+/*
+ * 32-bit ("compat") twin of do_unmask_port_lm(), operating on the
+ * uint32_t word size of the compat shared-info/vcpu-info layouts.
+ * Kept as a separate copy because C has no way to share the body
+ * across the two struct types (see the "C++ templates" lament above).
+ * Semantics and return values are identical to the long-mode version.
+ */
+static int do_unmask_port_compat(XenEvtchnState *s, evtchn_port_t port,
+ bool do_unmask,
+ struct compat_shared_info *shinfo,
+ struct compat_vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ /* 32 words of 32 bits: word count == word width, as in the LM case. */
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ if (do_unmask) {
+ /*
+ * If this is a true unmask operation, clear the mask bit. If
+ * it was already unmasked, we have nothing further to do.
+ */
+ if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
+ return 0;
+ }
+ } else {
+ /*
+ * This is a pseudo-unmask for affinity changes. We don't
+ * change the mask bit, and if it's *masked* we have nothing
+ * else to do.
+ */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+ }
+
+ /* If the event was not pending, we're done. */
+ /* (fetch_or with 0 is used as an atomic read of the word.) */
+ if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ /* Reuse 'mask' for the selector bit covering word 'idx' (idx < 32). */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ /* If it was already set the guest hasn't acked yet; don't re-inject. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ inject_callback(s, s->port_table[port].vcpu);
+
+ return 0;
+}
+
+/*
+ * Common (pseudo-)unmask entry point: validate the port, locate the
+ * shared-info page and the target vCPU's vcpu_info, then dispatch to
+ * the layout-specific helper depending on whether the guest runs in
+ * long mode.  Called with s->port_lock held (see xen_evtchn_unmask_op).
+ *
+ * Returns 0 on success, -EINVAL for a closed port or missing
+ * vcpu_info, -ENOTSUP if the shared-info page isn't mapped yet.
+ */
+static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
+{
+ void *vcpu_info, *shinfo;
+
+ if (s->port_table[port].type == EVTCHNSTAT_closed) {
+ return -EINVAL;
+ }
+
+ shinfo = xen_overlay_get_shinfo_ptr();
+ if (!shinfo) {
+ return -ENOTSUP;
+ }
+
+ vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
+ if (!vcpu_info) {
+ return -EINVAL;
+ }
+
+ if (xen_is_long_mode()) {
+ return do_unmask_port_lm(s, port, do_unmask, shinfo, vcpu_info);
+ } else {
+ return do_unmask_port_compat(s, port, do_unmask, shinfo, vcpu_info);
+ }
+}
+
static void free_port(XenEvtchnState *s, evtchn_port_t port)
{
s->port_table[port].type = EVTCHNSTAT_closed;
@@ -303,3 +501,25 @@ int xen_evtchn_close_op(struct evtchn_close *close)
return ret;
}
+
+/*
+ * Handle EVTCHNOP_unmask from the guest: validate the port number and
+ * perform a true unmask (do_unmask = true) under the port lock.
+ * Returns 0 on success or a negative errno value.
+ */
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!valid_port(unmask->port)) {
+ return -EINVAL;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ ret = unmask_port(s, unmask->port, true);
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index cb3924941a..69c6b0d743 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -17,7 +17,9 @@ int xen_evtchn_set_callback_param(uint64_t param);
struct evtchn_status;
struct evtchn_close;
+struct evtchn_unmask;
int xen_evtchn_status_op(struct evtchn_status *status);
int xen_evtchn_close_op(struct evtchn_close *close);
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
#endif /* QEMU_XEN_EVTCHN_H */
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index a81dbf5b63..c218696fe4 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -802,6 +802,18 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
err = xen_evtchn_close_op(&close);
break;
}
+ case EVTCHNOP_unmask: {
+ struct evtchn_unmask unmask;
+
+ qemu_build_assert(sizeof(unmask) == 4);
+ if (kvm_copy_from_gva(cs, arg, &unmask, sizeof(unmask))) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = xen_evtchn_unmask_op(&unmask);
+ break;
+ }
default:
return false;
}
--
2.35.3
next prev parent reply other threads:[~2022-12-21 1:20 UTC|newest]
Thread overview: 51+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-21 1:05 [RFC PATCH v4 01/47] Xen HVM support under KVM David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 01/47] include: import Xen public headers to include/standard-headers/ David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 02/47] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 03/47] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2022-12-21 17:54 ` Paul Durrant
2022-12-21 1:05 ` [RFC PATCH v4 04/47] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 05/47] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 06/47] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2022-12-21 18:00 ` Paul Durrant
2022-12-21 1:05 ` [RFC PATCH v4 07/47] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 08/47] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 09/47] hw/xen_backend: refactor xen_be_init() David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 10/47] i386/xen: handle guest hypercalls David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 11/47] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2022-12-21 18:03 ` Paul Durrant
2022-12-21 1:05 ` [RFC PATCH v4 12/47] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 13/47] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 14/47] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 15/47] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 16/47] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 17/47] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 18/47] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 19/47] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 20/47] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 21/47] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 22/47] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2022-12-21 1:05 ` [RFC PATCH v4 23/47] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 24/47] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 25/47] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 26/47] i386/xen: implement HVMOP_set_param David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 27/47] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 28/47] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 29/47] hw/xen: Implement EVTCHNOP_status David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 30/47] hw/xen: Implement EVTCHNOP_close David Woodhouse
2022-12-21 1:06 ` David Woodhouse [this message]
2022-12-21 1:06 ` [RFC PATCH v4 32/47] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 33/47] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 34/47] hw/xen: Implement EVTCHNOP_send David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 35/47] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 36/47] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 37/47] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 38/47] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 39/47] i386/xen: add monitor commands to test event injection David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 40/47] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 41/47] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 42/47] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 43/47] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 44/47] hw/xen: Support mapping grant frames David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 45/47] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 46/47] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2022-12-21 1:06 ` [RFC PATCH v4 47/47] i386/xen: handle PV timer hypercalls David Woodhouse
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221221010623.1000191-32-dwmw2@infradead.org \
--to=dwmw2@infradead.org \
--cc=alex.bennee@linaro.org \
--cc=ankur.a.arora@oracle.com \
--cc=cfontana@suse.de \
--cc=dgilbert@redhat.com \
--cc=joao.m.martins@oracle.com \
--cc=paul@xen.org \
--cc=pbonzini@redhat.com \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=thuth@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).