From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
	armbru@redhat.com
Subject: [PATCH v7 30/51] hw/xen: Implement EVTCHNOP_unmask
Date: Mon, 16 Jan 2023 21:57:44 +0000
Message-ID: <20230116215805.1123514-31-dwmw2@infradead.org>
In-Reply-To: <20230116215805.1123514-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

This finally adds a mechanism for actually injecting events into the
guest vCPU, with all the atomic test-and-set that is involved: setting
the per-port pending bit in the shared info page, then the corresponding
selector bit in the vcpu_info, and finally delivering the callback by
either injecting the LAPIC vector as MSI or letting KVM inject the bare
vector.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/i386/kvm/xen_evtchn.c  | 175 ++++++++++++++++++++++++++++++++++++++
 hw/i386/kvm/xen_evtchn.h  |   2 +
 target/i386/kvm/xen-emu.c |  12 +++
 3 files changed, 189 insertions(+)
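
For reviewers unfamiliar with the guest side of this: the guest reaches
the new handler with something like the following (illustrative only,
using the Linux guest's hypercall wrapper; the port_to_unmask variable
is a placeholder for a port the guest previously bound):

    struct evtchn_unmask unmask = {
        .port = port_to_unmask,
    };
    ret = HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);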

diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index a74d95d024..0e5b33e417 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -217,6 +217,13 @@ int xen_evtchn_set_callback_param(uint64_t param)
     return ret;
 }
 
+static void inject_callback(XenEvtchnState *s, uint32_t vcpu)
+{
+    int type = s->callback_param >> CALLBACK_VIA_TYPE_SHIFT;
+
+    kvm_xen_inject_vcpu_callback_vector(vcpu, type);
+}
+
 static bool valid_port(evtchn_port_t port)
 {
     if (!port) {
@@ -287,6 +294,152 @@ int xen_evtchn_status_op(struct evtchn_status *status)
     return 0;
 }
 
+/*
+ * Never thought I'd hear myself say this, but C++ templates would be
+ * kind of nice here.
+ *
+ * template<class T> static int do_unmask_port(T *shinfo, ...);
+ */
+static int do_unmask_port_lm(XenEvtchnState *s, evtchn_port_t port,
+                             bool do_unmask, struct shared_info *shinfo,
+                             struct vcpu_info *vcpu_info)
+{
+    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+    typeof(shinfo->evtchn_pending[0]) mask;
+    int idx = port / bits_per_word;
+    int offset = port % bits_per_word;
+
+    mask = 1UL << offset;
+
+    if (idx >= bits_per_word) {
+        return -EINVAL;
+    }
+
+    if (do_unmask) {
+        /*
+         * If this is a true unmask operation, clear the mask bit. If
+         * it was already unmasked, we have nothing further to do.
+         */
+        if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
+            return 0;
+        }
+    } else {
+        /*
+         * This is a pseudo-unmask for affinity changes. We don't
+         * change the mask bit, and if it's *masked* we have nothing
+         * else to do.
+         */
+        if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+            return 0;
+        }
+    }
+
+    /* If the event was not pending, we're done. */
+    if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+        return 0;
+    }
+
+    /* Now on to the vcpu_info evtchn_pending_sel index... */
+    mask = 1UL << idx;
+
+    /* If a port in this word was already pending for this vCPU, all done. */
+    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+        return 0;
+    }
+
+    /* Set evtchn_upcall_pending for this vCPU */
+    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+        return 0;
+    }
+
+    inject_callback(s, s->port_table[port].vcpu);
+
+    return 0;
+}
+
+static int do_unmask_port_compat(XenEvtchnState *s, evtchn_port_t port,
+                                 bool do_unmask,
+                                 struct compat_shared_info *shinfo,
+                                 struct compat_vcpu_info *vcpu_info)
+{
+    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+    typeof(shinfo->evtchn_pending[0]) mask;
+    int idx = port / bits_per_word;
+    int offset = port % bits_per_word;
+
+    mask = 1UL << offset;
+
+    if (idx >= bits_per_word) {
+        return -EINVAL;
+    }
+
+    if (do_unmask) {
+        /*
+         * If this is a true unmask operation, clear the mask bit. If
+         * it was already unmasked, we have nothing further to do.
+         */
+        if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
+            return 0;
+        }
+    } else {
+        /*
+         * This is a pseudo-unmask for affinity changes. We don't
+         * change the mask bit, and if it's *masked* we have nothing
+         * else to do.
+         */
+        if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+            return 0;
+        }
+    }
+
+    /* If the event was not pending, we're done. */
+    if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+        return 0;
+    }
+
+    /* Now on to the vcpu_info evtchn_pending_sel index... */
+    mask = 1UL << idx;
+
+    /* If a port in this word was already pending for this vCPU, all done. */
+    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+        return 0;
+    }
+
+    /* Set evtchn_upcall_pending for this vCPU */
+    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+        return 0;
+    }
+
+    inject_callback(s, s->port_table[port].vcpu);
+
+    return 0;
+}
+
+static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
+{
+    void *vcpu_info, *shinfo;
+
+    if (s->port_table[port].type == EVTCHNSTAT_closed) {
+        return -EINVAL;
+    }
+
+    shinfo = xen_overlay_get_shinfo_ptr();
+    if (!shinfo) {
+        return -ENOTSUP;
+    }
+
+    vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
+    if (!vcpu_info) {
+        return -EINVAL;
+    }
+
+    if (xen_is_long_mode()) {
+        return do_unmask_port_lm(s, port, do_unmask, shinfo, vcpu_info);
+    } else {
+        return do_unmask_port_compat(s, port, do_unmask, shinfo, vcpu_info);
+    }
+}
+
 static int clear_port_pending(XenEvtchnState *s, evtchn_port_t port)
 {
     void *p = xen_overlay_get_shinfo_ptr();
@@ -371,3 +524,25 @@ int xen_evtchn_close_op(struct evtchn_close *close)
 
     return ret;
 }
+
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!valid_port(unmask->port)) {
+        return -EINVAL;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    ret = unmask_port(s, unmask->port, true);
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index cb3924941a..69c6b0d743 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -17,7 +17,9 @@ int xen_evtchn_set_callback_param(uint64_t param);
 
 struct evtchn_status;
 struct evtchn_close;
+struct evtchn_unmask;
 int xen_evtchn_status_op(struct evtchn_status *status);
 int xen_evtchn_close_op(struct evtchn_close *close);
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
 
 #endif /* QEMU_XEN_EVTCHN_H */
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index 6ff527b33f..c04bcda5ac 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -806,6 +806,18 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
         err = xen_evtchn_close_op(&close);
         break;
     }
+    case EVTCHNOP_unmask: {
+        struct evtchn_unmask unmask;
+
+        qemu_build_assert(sizeof(unmask) == 4);
+        if (kvm_copy_from_gva(cs, arg, &unmask, sizeof(unmask))) {
+            err = -EFAULT;
+            break;
+        }
+
+        err = xen_evtchn_unmask_op(&unmask);
+        break;
+    }
     default:
         return false;
     }
-- 
2.39.0


