From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>
Subject: [RFC PATCH v2 19/22] i386/xen: implement HVMOP_set_evtchn_upcall_vector
Date: Fri,  9 Dec 2022 09:56:09 +0000	[thread overview]
Message-ID: <20221209095612.689243-20-dwmw2@infradead.org> (raw)
In-Reply-To: <20221209095612.689243-1-dwmw2@infradead.org>

From: Ankur Arora <ankur.a.arora@oracle.com>

The HVMOP_set_evtchn_upcall_vector hypercall sets the per-vCPU upcall
vector, which is delivered through the local APIC just like an MSI (and
which the guest acknowledges with an EOI).

This takes precedence over the system-wide delivery method set by the
HVMOP_set_param hypercall with HVM_PARAM_CALLBACK_IRQ. It's used by
Windows and Xen (PV shim) guests but normally not by Linux.
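
For illustration, the guest side looks roughly like this (a minimal
sketch based on the struct xen_hvm_evtchn_upcall_vector definition in
Xen's public hvm_op.h; the HYPERVISOR_hvm_op wrapper, vcpu_id and the
choice of vector 0xe9 are placeholders):

    struct xen_hvm_evtchn_upcall_vector up = {
        .vcpu   = vcpu_id,  /* Xen vCPU index, not an APIC ID */
        .vector = 0xe9,     /* any free vector; must be >= 0x10 */
    };
    int rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &up);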

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
[dwmw2: Rework for upstream kernel changes and split from HVMOP_set_param]
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 target/i386/cpu.h        |  1 +
 target/i386/kvm/kvm.c    |  7 ++++
 target/i386/machine.c    |  4 ++-
 target/i386/trace-events |  1 +
 target/i386/xen.c        | 69 +++++++++++++++++++++++++++++++++++++---
 target/i386/xen.h        |  1 +
 6 files changed, 77 insertions(+), 6 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index bf44a87ddb..938a1b9c8b 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1792,6 +1792,7 @@ typedef struct CPUArchState {
     uint64_t xen_vcpu_info_default_gpa;
     uint64_t xen_vcpu_time_info_gpa;
     uint64_t xen_vcpu_runstate_gpa;
+    uint8_t xen_vcpu_callback_vector;
 #endif
 #if defined(CONFIG_HVF)
     HVFX86LazyFlags hvf_lflags;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index a5e67a3119..dc1b3fc502 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -4762,6 +4762,13 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
                 return ret;
             }
         }
+
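+        /* Restore the per-vCPU upcall vector in KVM, e.g. after migration */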
+        if (x86_cpu->env.xen_vcpu_callback_vector) {
+            ret = kvm_xen_set_vcpu_callback_vector(cpu);
+            if (ret < 0) {
+                return ret;
+            }
+        }
     }
 #endif
 
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 6a510e5cbd..09e13cf716 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1265,7 +1265,8 @@ static bool xen_vcpu_needed(void *opaque)
     return (env->xen_vcpu_info_gpa != UINT64_MAX ||
             env->xen_vcpu_info_default_gpa != UINT64_MAX ||
             env->xen_vcpu_time_info_gpa != UINT64_MAX ||
-            env->xen_vcpu_runstate_gpa != UINT64_MAX);
+            env->xen_vcpu_runstate_gpa != UINT64_MAX ||
+            env->xen_vcpu_callback_vector != 0);
 }
 
 static const VMStateDescription vmstate_xen_vcpu = {
@@ -1278,6 +1279,7 @@ static const VMStateDescription vmstate_xen_vcpu = {
         VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
         VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
         VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU),
+        VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU),
         VMSTATE_END_OF_LIST()
     }
 };
diff --git a/target/i386/trace-events b/target/i386/trace-events
index 7118640697..58e28f1a19 100644
--- a/target/i386/trace-events
+++ b/target/i386/trace-events
@@ -16,3 +16,4 @@ kvm_sev_attestation_report(const char *mnonce, const char *data) "mnonce %s data
 kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d cpl %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIx64
 kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64
 kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64
+kvm_xen_set_vcpu_callback(int cpu, int vector) "callback vcpu %d vector %d"
diff --git a/target/i386/xen.c b/target/i386/xen.c
index 97032049e6..2583c00a6b 100644
--- a/target/i386/xen.c
+++ b/target/i386/xen.c
@@ -19,6 +19,7 @@
 #include "standard-headers/xen/version.h"
 #include "standard-headers/xen/memory.h"
 #include "standard-headers/xen/hvm/hvm_op.h"
+#include "standard-headers/xen/hvm/params.h"
 #include "standard-headers/xen/vcpu.h"
 
 static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa,
@@ -127,7 +128,8 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
             fi.submap |= 1 << XENFEAT_writable_page_tables |
                          1 << XENFEAT_writable_descriptor_tables |
                          1 << XENFEAT_auto_translated_physmap |
-                         1 << XENFEAT_supervisor_mode_kernel;
+                         1 << XENFEAT_supervisor_mode_kernel |
+                         1 << XENFEAT_hvm_callback_vector;
         }
 
         err = kvm_copy_to_gva(CPU(cpu), arg, &fi, sizeof(fi));
@@ -154,6 +156,29 @@ int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
     return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
 }
 
+int kvm_xen_set_vcpu_callback_vector(CPUState *cs)
+{
+    uint8_t vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
+    struct kvm_xen_vcpu_attr xva;
+
+    xva.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR;
+    xva.u.vector = vector;
+
+    trace_kvm_xen_set_vcpu_callback(cs->cpu_index, vector);
+
+    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xva);
+}
+
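+/* Called on the target vCPU via async_run_on_cpu() */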
+static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_callback_vector = data.host_int;
+
+    kvm_xen_set_vcpu_callback_vector(cs);
+}
+
 static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -262,17 +287,51 @@ static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit,
     return true;
 }
 
-static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit,
+static int kvm_xen_hcall_evtchn_upcall_vector(struct kvm_xen_exit *exit,
+                                              X86CPU *cpu, uint64_t arg)
+{
+    struct xen_hvm_evtchn_upcall_vector *up;
+    CPUState *target_cs;
+    int vector;
+
+    up = gva_to_hva(CPU(cpu), arg);
+    if (!up) {
+        return -EFAULT;
+    }
+
+    vector = up->vector;
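+    /* Vectors below 0x10 are not valid for APIC delivery */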
+    if (vector < 0x10) {
+        return -EINVAL;
+    }
+
+    target_cs = qemu_get_cpu(up->vcpu);
+    if (!target_cs) {
+        return -EINVAL;
+    }
+
+    async_run_on_cpu(target_cs, do_set_vcpu_callback_vector,
+                     RUN_ON_CPU_HOST_INT(vector));
+    return 0;
+}
+
+static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                  int cmd, uint64_t arg)
 {
+    int ret = -ENOSYS;
     switch (cmd) {
+    case HVMOP_set_evtchn_upcall_vector:
+            ret = kvm_xen_hcall_evtchn_upcall_vector(exit, cpu,
+                                                     exit->u.hcall.params[0]);
+            break;
     case HVMOP_pagetable_dying:
-            exit->u.hcall.result = -ENOSYS;
-            return true;
+            ret = -ENOSYS;
+            break;
 
     default:
             return false;
     }
+
+    exit->u.hcall.result = ret;
+    return true;
 }
 
 static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
@@ -377,7 +436,7 @@ static bool __kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
                                      exit->u.hcall.params[1],
                                      exit->u.hcall.params[2]);
     case __HYPERVISOR_hvm_op:
-        return kvm_xen_hcall_hvm_op(exit, exit->u.hcall.params[0],
+        return kvm_xen_hcall_hvm_op(exit, cpu, exit->u.hcall.params[0],
                                     exit->u.hcall.params[1]);
     case __HYPERVISOR_memory_op:
         return kvm_xen_hcall_memory_op(exit, exit->u.hcall.params[0],
diff --git a/target/i386/xen.h b/target/i386/xen.h
index 53573e07f8..07b133ae29 100644
--- a/target/i386/xen.h
+++ b/target/i386/xen.h
@@ -25,5 +25,6 @@
 int kvm_xen_init(KVMState *s, uint32_t xen_version);
 int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit);
 int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa);
+int kvm_xen_set_vcpu_callback_vector(CPUState *cs);
 
 #endif /* QEMU_I386_XEN_H */
-- 
2.35.3



Thread overview (78+ messages):
2022-12-09  9:55 [RFC PATCH v2 00/22] Xen HVM support under KVM David Woodhouse
2022-12-09  9:55 ` [RFC PATCH v2 01/22] include: import xen public headers David Woodhouse
2022-12-12  9:17   ` Paul Durrant
2022-12-09  9:55 ` [RFC PATCH v2 02/22] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2022-12-12  9:19   ` Paul Durrant
2022-12-12 17:07   ` Paolo Bonzini
2022-12-12 22:22     ` David Woodhouse
2022-12-13  0:39       ` Paolo Bonzini
2022-12-13  0:59         ` David Woodhouse
2022-12-13 22:32           ` Paolo Bonzini
2022-12-16  8:40             ` David Woodhouse
2022-12-09  9:55 ` [RFC PATCH v2 03/22] i386/xen: Add xen-version machine property and init KVM Xen support David Woodhouse
2022-12-12 12:48   ` Paul Durrant
2022-12-12 17:30   ` Paolo Bonzini
2022-12-12 17:55     ` Paul Durrant
2022-12-13  0:13     ` David Woodhouse
2023-01-17 13:49     ` David Woodhouse
2022-12-09  9:55 ` [RFC PATCH v2 04/22] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2022-12-12 13:13   ` Paul Durrant
2022-12-13  9:47     ` David Woodhouse
2022-12-09  9:55 ` [RFC PATCH v2 05/22] xen-platform-pci: allow its creation with XEN_EMULATE mode David Woodhouse
2022-12-12 13:24   ` Paul Durrant
2022-12-12 22:07     ` David Woodhouse
2022-12-09  9:55 ` [RFC PATCH v2 06/22] hw/xen_backend: refactor xen_be_init() David Woodhouse
2022-12-12 13:27   ` Paul Durrant
2022-12-09  9:55 ` [RFC PATCH v2 07/22] pc_piix: handle XEN_EMULATE backend init David Woodhouse
2022-12-12 13:47   ` Paul Durrant
2022-12-12 14:50     ` David Woodhouse
2022-12-09  9:55 ` [RFC PATCH v2 08/22] xen_platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2022-12-12 13:52   ` Paul Durrant
2022-12-09  9:55 ` [RFC PATCH v2 09/22] pc_piix: allow xenfv machine with XEN_EMULATE David Woodhouse
2022-12-12 14:05   ` Paul Durrant
2022-12-09  9:56 ` [RFC PATCH v2 10/22] i386/xen: handle guest hypercalls David Woodhouse
2022-12-12 14:11   ` Paul Durrant
2022-12-12 14:17     ` David Woodhouse
2022-12-12 17:07   ` Paolo Bonzini
2022-12-09  9:56 ` [RFC PATCH v2 11/22] i386/xen: implement HYPERCALL_xen_version David Woodhouse
2022-12-12 14:17   ` Paul Durrant
2022-12-13  0:06     ` David Woodhouse
2022-12-09  9:56 ` [RFC PATCH v2 12/22] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2022-12-12 14:29   ` Paul Durrant
2022-12-12 17:14   ` Paolo Bonzini
2022-12-09  9:56 ` [RFC PATCH v2 13/22] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2022-12-12 14:38   ` Paul Durrant
2022-12-13  0:08     ` David Woodhouse
2022-12-09  9:56 ` [RFC PATCH v2 14/22] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2022-12-12 14:41   ` Paul Durrant
2022-12-09  9:56 ` [RFC PATCH v2 15/22] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2022-12-12 14:51   ` Paul Durrant
2022-12-13  0:10     ` David Woodhouse
2022-12-09  9:56 ` [RFC PATCH v2 16/22] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2022-12-12 14:58   ` Paul Durrant
2022-12-13  0:13     ` David Woodhouse
2022-12-14 10:28       ` Paul Durrant
2022-12-14 11:04         ` David Woodhouse
2022-12-09  9:56 ` [RFC PATCH v2 17/22] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2022-12-12 15:34   ` Paul Durrant
2022-12-09  9:56 ` [RFC PATCH v2 18/22] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2022-12-12 15:38   ` Paul Durrant
2022-12-09  9:56 ` David Woodhouse [this message]
2022-12-12 15:52   ` [RFC PATCH v2 19/22] i386/xen: implement HVMOP_set_evtchn_upcall_vector Paul Durrant
2022-12-09  9:56 ` [RFC PATCH v2 20/22] i386/xen: HVMOP_set_param / HVM_PARAM_CALLBACK_IRQ David Woodhouse
2022-12-12 16:16   ` Paul Durrant
2022-12-12 16:26     ` David Woodhouse
2022-12-12 16:39       ` Paul Durrant
2022-12-15 20:54         ` David Woodhouse
2022-12-20 13:56           ` Paul Durrant
2022-12-20 16:27             ` David Woodhouse
2022-12-20 17:25               ` Paul Durrant
2022-12-20 17:29                 ` David Woodhouse
2022-12-28 10:45                   ` David Woodhouse
2022-12-21  1:41     ` David Woodhouse
2022-12-21  9:37       ` Paul Durrant
2022-12-21 12:16         ` David Woodhouse
2022-12-09  9:56 ` [RFC PATCH v2 21/22] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2022-12-12 16:23   ` Paul Durrant
2022-12-09  9:56 ` [RFC PATCH v2 22/22] i386/xen: implement HYPERVISOR_sched_op David Woodhouse
2022-12-12 16:37   ` Paul Durrant
