From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
"Ian Jackson" <iwj@xenproject.org>,
"Juergen Gross" <jgross@suse.com>
Subject: [PATCH v2 1/6] x86/HVM: wire up multicalls
Date: Tue, 22 Jun 2021 17:17:51 +0200 [thread overview]
Message-ID: <a96ff7d7-f594-4b86-e9fa-6b1a99edc992@suse.com> (raw)
In-Reply-To: <6c532607-c2a3-d0ab-e4e5-428f85f4a045@suse.com>
To be able to use them from, in particular, the tool stack, they need to
be supported for all guest types. Note that xc_resource_op() already
does, so would not work without this on PVH Dom0.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Begrudgingly acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Ian Jackson <iwj@xenproject.org>
--- a/xen/arch/x86/hvm/hypercall.c
+++ b/xen/arch/x86/hvm/hypercall.c
@@ -26,6 +26,7 @@
#include <asm/hvm/emulate.h>
#include <asm/hvm/support.h>
#include <asm/hvm/viridian.h>
+#include <asm/multicall.h>
#include <public/hvm/hvm_op.h>
#include <public/hvm/params.h>
@@ -125,6 +126,7 @@ static const struct {
hypercall_fn_t *native, *compat;
} hvm_hypercall_table[] = {
HVM_CALL(memory_op),
+ COMPAT_CALL(multicall),
#ifdef CONFIG_GRANT_TABLE
HVM_CALL(grant_table_op),
#endif
@@ -334,6 +336,39 @@ int hvm_hypercall(struct cpu_user_regs *
return curr->hcall_preempted ? HVM_HCALL_preempted : HVM_HCALL_completed;
}
+enum mc_disposition hvm_do_multicall_call(struct mc_state *state)
+{
+ struct vcpu *curr = current;
+ hypercall_fn_t *func = NULL;
+
+ if ( hvm_guest_x86_mode(curr) == 8 )
+ {
+ struct multicall_entry *call = &state->call;
+
+ if ( call->op < ARRAY_SIZE(hvm_hypercall_table) )
+ func = array_access_nospec(hvm_hypercall_table, call->op).native;
+ if ( func )
+ call->result = func(call->args[0], call->args[1], call->args[2],
+ call->args[3], call->args[4], call->args[5]);
+ else
+ call->result = -ENOSYS;
+ }
+ else
+ {
+ struct compat_multicall_entry *call = &state->compat_call;
+
+ if ( call->op < ARRAY_SIZE(hvm_hypercall_table) )
+ func = array_access_nospec(hvm_hypercall_table, call->op).compat;
+ if ( func )
+ call->result = func(call->args[0], call->args[1], call->args[2],
+ call->args[3], call->args[4], call->args[5]);
+ else
+ call->result = -ENOSYS;
+ }
+
+ return !hvm_get_cpl(curr) ? mc_continue : mc_preempt;
+}
+
/*
* Local variables:
* mode: C
--- a/xen/arch/x86/hypercall.c
+++ b/xen/arch/x86/hypercall.c
@@ -20,6 +20,7 @@
*/
#include <xen/hypercall.h>
+#include <asm/multicall.h>
#ifdef CONFIG_COMPAT
#define ARGS(x, n) \
@@ -273,13 +274,18 @@ int hypercall_xlat_continuation(unsigned
return rc;
}
-#ifndef CONFIG_PV
-/* Stub for arch_do_multicall_call */
-enum mc_disposition arch_do_multicall_call(struct mc_state *mc)
+enum mc_disposition arch_do_multicall_call(struct mc_state *state)
{
+ const struct domain *currd = current->domain;
+
+ if ( is_pv_domain(currd) )
+ return pv_do_multicall_call(state);
+
+ if ( is_hvm_domain(currd) )
+ return hvm_do_multicall_call(state);
+
return mc_exit;
}
-#endif
/*
* Local variables:
--- a/xen/arch/x86/pv/hypercall.c
+++ b/xen/arch/x86/pv/hypercall.c
@@ -23,6 +23,7 @@
#include <xen/hypercall.h>
#include <xen/nospec.h>
#include <xen/trace.h>
+#include <asm/multicall.h>
#include <irq_vectors.h>
#ifdef CONFIG_PV32
@@ -245,7 +246,7 @@ void pv_hypercall(struct cpu_user_regs *
perfc_incr(hypercalls);
}
-enum mc_disposition arch_do_multicall_call(struct mc_state *state)
+enum mc_disposition pv_do_multicall_call(struct mc_state *state)
{
struct vcpu *curr = current;
unsigned long op;
--- /dev/null
+++ b/xen/include/asm-x86/multicall.h
@@ -0,0 +1,12 @@
+/******************************************************************************
+ * asm-x86/multicall.h
+ */
+
+#ifndef __ASM_X86_MULTICALL_H__
+#define __ASM_X86_MULTICALL_H__
+
+#include <xen/multicall.h>
+
+typeof(arch_do_multicall_call) pv_do_multicall_call, hvm_do_multicall_call;
+
+#endif /* __ASM_X86_MULTICALL_H__ */
next prev parent reply other threads:[~2021-06-22 15:18 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-06-22 15:16 [PATCH v2 0/6] allow xc_domain_maximum_gpfn() to observe full GFN value Jan Beulich
2021-06-22 15:17 ` Jan Beulich [this message]
2021-06-22 15:18 ` [PATCH v2 2/6] libxencall: osdep_hypercall() should return long Jan Beulich
2021-06-22 15:18 ` [PATCH v2 3/6] libxencall: introduce variant of xencall2() returning long Jan Beulich
2021-06-22 18:22 ` Andrew Cooper
2021-06-22 15:19 ` [PATCH v2 4/6] libxc: use multicall for memory-op on Linux (and Solaris) Jan Beulich
2021-06-22 19:35 ` Andrew Cooper
2021-06-22 15:19 ` [PATCH v2 5/6] libxencall: drop bogus mentioning of xencall6() Jan Beulich
2021-06-22 18:25 ` Andrew Cooper
2021-06-23 6:18 ` Jan Beulich
2021-06-22 15:20 ` [PATCH v2 6/6] libxc: make xc_domain_maximum_gpfn() endianness-agnostic Jan Beulich
2021-06-22 18:33 ` Andrew Cooper
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=a96ff7d7-f594-4b86-e9fa-6b1a99edc992@suse.com \
--to=jbeulich@suse.com \
--cc=andrew.cooper3@citrix.com \
--cc=iwj@xenproject.org \
--cc=jgross@suse.com \
--cc=roger.pau@citrix.com \
--cc=wl@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).