* [PATCH 00/11] hvmctl hypercall
@ 2016-06-20 12:39 Jan Beulich
  2016-06-20 12:52 ` [PATCH 01/11] public / x86: introduce " Jan Beulich
                   ` (11 more replies)
  0 siblings, 12 replies; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:39 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra

A long while back, separating all control-type operations (intended
for use only by the control domain or device model) out of the current
hvmop hypercall was discussed. This series aims at finally making this
a reality (at once allowing the associated XSM checking to be
streamlined).

01: public / x86: introduce hvmctl hypercall
02: convert HVMOP_set_pci_intx_level
03: convert HVMOP_set_isa_irq_level
04: convert HVMOP_set_pci_link_route
05: convert HVMOP_track_dirty_vram
06: convert HVMOP_modified_memory
07: convert HVMOP_set_mem_type
08: convert HVMOP_inject_trap
09: convert HVMOP_inject_msi
10: convert HVMOP_*ioreq_server*
11: x86/HVM: serialize trap injecting producer and consumer

Signed-off-by: Jan Beulich <jbeulich@suse.com>



* [PATCH 01/11] public / x86: introduce hvmctl hypercall
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
@ 2016-06-20 12:52 ` Jan Beulich
  2016-06-21 10:14   ` Wei Liu
  2016-06-23 14:55   ` Andrew Cooper
  2016-06-20 12:53 ` [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level Jan Beulich
                   ` (10 subsequent siblings)
  11 siblings, 2 replies; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:52 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra

... as a means to replace all HVMOP_* which a domain can't issue on
itself (i.e. those intended for use only by the control domain or
device model).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -2,6 +2,7 @@ subdir-y += svm
 subdir-y += vmx
 
 obj-y += asid.o
+obj-y += control.o
 obj-y += emulate.o
 obj-y += event.o
 obj-y += hpet.o
--- /dev/null
+++ b/xen/arch/x86/hvm/control.c
@@ -0,0 +1,96 @@
+/*
+ * control.c: Hardware virtual machine control operations.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/hypercall.h>
+#include <xen/guest_access.h>
+#include <xen/sched.h>
+#include <xsm/xsm.h>
+
+/*
+ * Note that this value is effectively part of the ABI, even if we don't need
+ * to make it a formal part of it.  Hence this value may only be changed if
+ * accompanied by a suitable interface version increase.
+ */
+#define HVMCTL_iter_shift 8
+#define HVMCTL_iter_mask  ((1U << HVMCTL_iter_shift) - 1)
+#define HVMCTL_iter_max   (1U << (16 + HVMCTL_iter_shift))
+
+long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xen_hvmctl_t) u_hvmctl)
+{
+    xen_hvmctl_t op;
+    struct domain *d;
+    unsigned int iter;
+    int rc;
+
+    BUILD_BUG_ON(sizeof(op.u) > sizeof(op.u.pad));
+
+    if ( copy_from_guest(&op, u_hvmctl, 1) )
+        return -EFAULT;
+
+    if ( op.interface_version != XEN_HVMCTL_INTERFACE_VERSION )
+        return -EACCES;
+
+    rc = rcu_lock_remote_domain_by_id(op.domain, &d);
+    if ( rc )
+        return rc;
+
+    if ( !has_hvm_container_domain(d) )
+    {
+        rcu_unlock_domain(d);
+        return -EINVAL;
+    }
+
+    rc = xsm_hvm_control(XSM_DM_PRIV, d, op.cmd);
+    if ( rc )
+    {
+        rcu_unlock_domain(d);
+        return rc;
+    }
+
+    iter = op.opaque << HVMCTL_iter_shift;
+
+    switch ( op.cmd )
+    {
+    default:
+        rc = -EOPNOTSUPP;
+        break;
+    }
+
+    rcu_unlock_domain(d);
+
+    if ( rc == -ERESTART )
+    {
+        ASSERT(!(iter & HVMCTL_iter_mask));
+        op.opaque = iter >> HVMCTL_iter_shift;
+        if ( unlikely(copy_field_to_guest(u_hvmctl, &op, opaque)) )
+            rc = -EFAULT;
+        else
+            rc = hypercall_create_continuation(__HYPERVISOR_hvmctl, "h",
+                                               u_hvmctl);
+    }
+
+    return rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4113,6 +4113,7 @@ static const struct {
     COMPAT_CALL(platform_op),
     COMPAT_CALL(mmuext_op),
     HYPERCALL(xenpmu_op),
+    HYPERCALL(hvmctl),
     HYPERCALL(arch_1)
 };
 
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -469,6 +469,7 @@ ENTRY(compat_hypercall_table)
         .quad do_tmem_op
         .quad do_ni_hypercall           /* reserved for XenClient */
         .quad do_xenpmu_op              /* 40 */
+        .quad do_hvmctl
         .rept __HYPERVISOR_arch_0-((.-compat_hypercall_table)/8)
         .quad compat_ni_hypercall
         .endr
@@ -520,6 +521,7 @@ ENTRY(compat_hypercall_args_table)
         .byte 1 /* do_tmem_op               */
         .byte 0 /* reserved for XenClient   */
         .byte 2 /* do_xenpmu_op             */  /* 40 */
+        .byte 1 /* do_hvmctl                */
         .rept __HYPERVISOR_arch_0-(.-compat_hypercall_args_table)
         .byte 0 /* compat_ni_hypercall      */
         .endr
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -791,6 +791,7 @@ ENTRY(hypercall_table)
         .quad do_tmem_op
         .quad do_ni_hypercall       /* reserved for XenClient */
         .quad do_xenpmu_op          /* 40 */
+        .quad do_hvmctl
         .rept __HYPERVISOR_arch_0-((.-hypercall_table)/8)
         .quad do_ni_hypercall
         .endr
@@ -842,6 +843,7 @@ ENTRY(hypercall_args_table)
         .byte 1 /* do_tmem_op           */
         .byte 0 /* reserved for XenClient */
         .byte 2 /* do_xenpmu_op         */  /* 40 */
+        .byte 1 /* do_hvmctl            */
         .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
         .byte 0 /* do_ni_hypercall      */
         .endr
--- a/xen/include/Makefile
+++ b/xen/include/Makefile
@@ -93,7 +93,7 @@ all: headers.chk headers++.chk
 
 PUBLIC_HEADERS := $(filter-out public/arch-% public/dom0_ops.h, $(wildcard public/*.h public/*/*.h) $(public-y))
 
-PUBLIC_ANSI_HEADERS := $(filter-out public/%ctl.h public/xsm/% public/%hvm/save.h, $(PUBLIC_HEADERS))
+PUBLIC_ANSI_HEADERS := $(filter-out public/%ctl.h public/hvm/control.h public/xsm/% public/%hvm/save.h,$(PUBLIC_HEADERS))
 
 headers.chk: $(PUBLIC_ANSI_HEADERS) Makefile
 	for i in $(filter %.h,$^); do \
--- /dev/null
+++ b/xen/include/public/hvm/control.h
@@ -0,0 +1,54 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_HVM_CONTROL_H__
+#define __XEN_PUBLIC_HVM_CONTROL_H__
+
+#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
+#error "HVM control operations are intended for use by control tools only"
+#endif
+
+#include "../xen.h"
+
+#define XEN_HVMCTL_INTERFACE_VERSION 0x00000001
+
+struct xen_hvmctl {
+    uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
+    domid_t domain;
+    uint16_t cmd;
+    uint16_t opaque;               /* Must be zero on initial invocation. */
+    union {
+        uint8_t pad[120];
+    } u;
+};
+typedef struct xen_hvmctl xen_hvmctl_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvmctl_t);
+
+#endif /* __XEN_PUBLIC_HVM_CONTROL_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -115,6 +115,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
 #define __HYPERVISOR_tmem_op              38
 #define __HYPERVISOR_xc_reserved_op       39 /* reserved for XenClient */
 #define __HYPERVISOR_xenpmu_op            40
+#define __HYPERVISOR_hvmctl               41
 
 /* Architecture-specific hypercall definitions. */
 #define __HYPERVISOR_arch_0               48
--- a/xen/include/xen/hypercall.h
+++ b/xen/include/xen/hypercall.h
@@ -15,6 +15,7 @@
 #include <public/tmem.h>
 #include <public/version.h>
 #include <public/pmu.h>
+#include <public/hvm/control.h>
 #include <asm/hypercall.h>
 #include <xsm/xsm.h>
 
@@ -46,6 +47,10 @@ arch_do_sysctl(
     XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl);
 
 extern long
+do_hvmctl(
+    XEN_GUEST_HANDLE_PARAM(xen_hvmctl_t) u_hvmctl);
+
+extern long
 do_platform_op(
     XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op);
 
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1183,6 +1183,20 @@ static int flask_hvm_param(struct domain
     return current_has_perm(d, SECCLASS_HVM, perm);
 }
 
+static int flask_hvm_control(struct domain *d, unsigned long op)
+{
+    u32 perm;
+
+    switch ( op )
+    {
+    default:
+        perm = HVM__HVMCTL;
+        break;
+    }
+
+    return current_has_perm(d, SECCLASS_HVM, perm);
+}
+
 static int flask_hvm_param_nested(struct domain *d)
 {
     return current_has_perm(d, SECCLASS_HVM, HVM__NESTED);
@@ -1745,7 +1759,7 @@ static struct xsm_operations flask_ops =
     .page_offline = flask_page_offline,
     .tmem_op = flask_tmem_op,
     .hvm_param = flask_hvm_param,
-    .hvm_control = flask_hvm_param,
+    .hvm_control = flask_hvm_control,
     .hvm_param_nested = flask_hvm_param_nested,
     .hvm_param_altp2mhvm = flask_hvm_param_altp2mhvm,
     .hvm_altp2mhvm_op = flask_hvm_altp2mhvm_op,
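
For reference, a minimal standalone sketch of the continuation encoding
used by do_hvmctl() above: the guest-visible 16-bit "opaque" field
carries bits 8..23 of the internal iteration index, so sub-ops may only
be preempted at multiples of 1 << HVMCTL_iter_shift items.  The
HVMCTL_iter_* names are taken from the patch; the surrounding main() is
illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HVMCTL_iter_shift 8
#define HVMCTL_iter_mask  ((1U << HVMCTL_iter_shift) - 1)
#define HVMCTL_iter_max   (1U << (16 + HVMCTL_iter_shift))

int main(void)
{
    uint16_t opaque = 0;    /* must be zero on the initial invocation */
    unsigned int iter = (unsigned int)opaque << HVMCTL_iter_shift;

    /* Suppose a sub-op got preempted after processing 768 items ... */
    iter += 768;
    assert(!(iter & HVMCTL_iter_mask));  /* ... a multiple of 256 ... */
    assert(iter < HVMCTL_iter_max);      /* ... within the 24-bit range, */

    /* ... since only then does it survive the round trip through the
     * 16-bit "opaque" field written back to the guest. */
    opaque = iter >> HVMCTL_iter_shift;
    printf("continuation resumes at item %u\n",
           (unsigned int)opaque << HVMCTL_iter_shift);

    return 0;
}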




* [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
  2016-06-20 12:52 ` [PATCH 01/11] public / x86: introduce " Jan Beulich
@ 2016-06-20 12:53 ` Jan Beulich
  2016-06-20 14:32   ` Daniel De Graaf
  2016-06-21 10:14   ` Wei Liu
  2016-06-20 12:53 ` [PATCH 03/11] hvmctl: convert HVMOP_set_isa_irq_level Jan Beulich
                   ` (9 subsequent siblings)
  11 siblings, 2 replies; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:53 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra

Note that this adds validation of the "domain" interface structure
field, which previously got ignored.

Note further that this retains the hvmop interface definitions as those
had (wrongly) been exposed to non-tool-stack consumers (albeit the
operation wouldn't have succeeded when requested by a domain for
itself).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
TBD: xen/xsm/flask/policy/access_vectors says "also needs hvmctl", but
     I don't see how this has been done so far. With the change here,
     doing two checks in flask_hvm_control() (the generic one always
     and a specific one if needed) would of course be simple, but it's
     unclear how subsequently added sub-ops (which don't have a similar
     remark) should then be dealt with.
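
     For concreteness, that "two checks" variant could look like the
     sketch below (illustrative only, not part of this patch; it
     modifies flask_hvm_control() as introduced in patch 01):

static int flask_hvm_control(struct domain *d, unsigned long op)
{
    u32 perm = 0;
    int rc = current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);

    if ( rc )
        return rc;

    switch ( op )
    {
    case XEN_HVMCTL_set_pci_intx_level:
        perm = HVM__PCILEVEL;
        break;
    }

    return perm ? current_has_perm(d, SECCLASS_HVM, perm) : 0;
}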

--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -473,30 +473,14 @@ int xc_hvm_set_pci_intx_level(
     uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
     unsigned int level)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_pci_intx_level, arg);
-    int rc;
+    DECLARE_HVMCTL(set_pci_intx_level, dom,
+                   .domain = domain,
+                   .bus    = bus,
+                   .device = device,
+                   .intx   = intx,
+                   .level  = level);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_pci_intx_level hypercall");
-        return -1;
-    }
-
-    arg->domid  = dom;
-    arg->domain = domain;
-    arg->bus    = bus;
-    arg->device = device;
-    arg->intx   = intx;
-    arg->level  = level;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_pci_intx_level,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_set_isa_irq_level(
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -34,6 +34,8 @@
 #define XC_INTERNAL_COMPAT_MAP_FOREIGN_API
 #include "xenctrl.h"
 
+#include <xen/hvm/control.h>
+
 #include <xencall.h>
 #include <xenforeignmemory.h>
 
@@ -61,6 +63,13 @@ struct iovec {
 
 #define DECLARE_DOMCTL struct xen_domctl domctl
 #define DECLARE_SYSCTL struct xen_sysctl sysctl
+#define DECLARE_HVMCTL(op, dom, init...) \
+    struct xen_hvmctl hvmctl = { \
+        .interface_version = XEN_HVMCTL_INTERFACE_VERSION, \
+        .domain = (dom), \
+        .cmd = XEN_HVMCTL_##op, \
+        .u.op = { init } \
+    }
 #define DECLARE_PHYSDEV_OP struct physdev_op physdev_op
 #define DECLARE_FLASK_OP struct xen_flask_op op
 #define DECLARE_PLATFORM_OP struct xen_platform_op platform_op
@@ -311,6 +320,31 @@ static inline int do_sysctl(xc_interface
     return ret;
 }
 
+static inline int do_hvmctl(xc_interface *xch, struct xen_hvmctl *hvmctl)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BOUNCE(hvmctl, sizeof(*hvmctl), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, hvmctl) )
+    {
+        PERROR("Could not bounce buffer for hvmctl hypercall");
+        return -1;
+    }
+
+    ret = xencall1(xch->xcall, __HYPERVISOR_hvmctl,
+                   HYPERCALL_BUFFER_AS_ARG(hvmctl));
+    if ( ret < 0 )
+    {
+        if ( errno == EACCES )
+            DPRINTF("hvmctl operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+    }
+
+    xc_hypercall_bounce_post(xch, hvmctl);
+
+    return ret;
+}
+
 static inline int do_platform_op(xc_interface *xch,
                                  struct xen_platform_op *platform_op)
 {
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -19,6 +19,30 @@
 #include <xen/sched.h>
 #include <xsm/xsm.h>
 
+static int set_pci_intx_level(struct domain *d,
+                              const struct xen_hvm_set_pci_intx_level *op)
+{
+    if ( op->domain || op->bus || (op->device > 31) || (op->intx > 3) )
+        return -EINVAL;
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    switch ( op->level )
+    {
+    case 0:
+        hvm_pci_intx_deassert(d, op->device, op->intx);
+        break;
+    case 1:
+        hvm_pci_intx_assert(d, op->device, op->intx);
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
 /*
  * Note that this value is effectively part of the ABI, even if we don't need
  * to make it a formal part of it.  Hence this value may only be changed if
@@ -64,6 +88,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
 
     switch ( op.cmd )
     {
+    case XEN_HVMCTL_set_pci_intx_level:
+        rc = set_pci_intx_level(d, &op.u.set_pci_intx_level);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4303,50 +4303,6 @@ void hvm_hypercall_page_initialise(struc
     hvm_funcs.init_hypercall_page(d, hypercall_page);
 }
 
-static int hvmop_set_pci_intx_level(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_pci_intx_level_t) uop)
-{
-    struct xen_hvm_set_pci_intx_level op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
-        return -EINVAL;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_set_pci_intx_level(XSM_DM_PRIV, d);
-    if ( rc )
-        goto out;
-
-    rc = 0;
-    switch ( op.level )
-    {
-    case 0:
-        hvm_pci_intx_deassert(d, op.device, op.intx);
-        break;
-    case 1:
-        hvm_pci_intx_assert(d, op.device, op.intx);
-        break;
-    default:
-        rc = -EINVAL;
-        break;
-    }
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
 {
     struct domain *d = v->domain;
@@ -5408,11 +5364,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
             guest_handle_cast(arg, xen_hvm_param_t));
         break;
 
-    case HVMOP_set_pci_intx_level:
-        rc = hvmop_set_pci_intx_level(
-            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
-        break;
-
     case HVMOP_set_isa_irq_level:
         rc = hvmop_set_isa_irq_level(
             guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -29,12 +29,23 @@
 
 #define XEN_HVMCTL_INTERFACE_VERSION 0x00000001
 
+/* XEN_HVMCTL_set_pci_intx_level */
+/* Set the logical level of one of a domain's PCI INTx wires. */
+struct xen_hvm_set_pci_intx_level {
+    /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
+    uint8_t domain, bus, device, intx;
+    /* Assertion level (0 = unasserted, 1 = asserted). */
+    uint8_t level;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
     uint16_t cmd;
+#define XEN_HVMCTL_set_pci_intx_level            1
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
+        struct xen_hvm_set_pci_intx_level set_pci_intx_level;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -38,6 +38,8 @@ struct xen_hvm_param {
 typedef struct xen_hvm_param xen_hvm_param_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
 
+#if __XEN_INTERFACE_VERSION__ < 0x00040800
+
 /* Set the logical level of one of a domain's PCI INTx wires. */
 #define HVMOP_set_pci_intx_level  2
 struct xen_hvm_set_pci_intx_level {
@@ -51,6 +53,8 @@ struct xen_hvm_set_pci_intx_level {
 typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
 
+#endif
+
 /* Set the logical level of one of a domain's ISA IRQ wires. */
 #define HVMOP_set_isa_irq_level   3
 struct xen_hvm_set_isa_irq_level {
--- a/xen/include/public/xen-compat.h
+++ b/xen/include/public/xen-compat.h
@@ -27,7 +27,7 @@
 #ifndef __XEN_PUBLIC_XEN_COMPAT_H__
 #define __XEN_PUBLIC_XEN_COMPAT_H__
 
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040700
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040800
 
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 /* Xen is built with matching headers and implements the latest interface. */
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_set_pci_intx_level(XSM_DEFAULT_ARG struct domain *d)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_hvm_set_isa_irq_level(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -173,7 +173,6 @@ struct xsm_operations {
 #ifdef CONFIG_X86
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
-    int (*hvm_set_pci_intx_level) (struct domain *d);
     int (*hvm_set_isa_irq_level) (struct domain *d);
     int (*hvm_set_pci_link_route) (struct domain *d);
     int (*hvm_inject_msi) (struct domain *d);
@@ -646,11 +645,6 @@ static inline int xsm_shadow_control (xs
     return xsm_ops->shadow_control(d, op);
 }
 
-static inline int xsm_hvm_set_pci_intx_level (xsm_default_t def, struct domain *d)
-{
-    return xsm_ops->hvm_set_pci_intx_level(d);
-}
-
 static inline int xsm_hvm_set_isa_irq_level (xsm_default_t def, struct domain *d)
 {
     return xsm_ops->hvm_set_isa_irq_level(d);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_set_pci_intx_level);
     set_to_dummy_if_null(ops, hvm_set_isa_irq_level);
     set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -27,6 +27,7 @@
 #include <public/physdev.h>
 #include <public/platform.h>
 #include <public/version.h>
+#include <public/hvm/control.h>
 
 #include <public/xsm/flask_op.h>
 
@@ -1189,6 +1190,9 @@ static int flask_hvm_control(struct doma
 
     switch ( op )
     {
+    case XEN_HVMCTL_set_pci_intx_level:
+        perm = HVM__PCILEVEL;
+        break;
     default:
         perm = HVM__HVMCTL;
         break;
@@ -1513,11 +1517,6 @@ static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_set_pci_intx_level(struct domain *d)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__PCILEVEL);
-}
-
 static int flask_hvm_set_isa_irq_level(struct domain *d)
 {
     return current_has_perm(d, SECCLASS_HVM, HVM__IRQLEVEL);
@@ -1806,7 +1805,6 @@ static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_set_pci_intx_level = flask_hvm_set_pci_intx_level,
     .hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
     .hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
     .hvm_inject_msi = flask_hvm_inject_msi,
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -261,7 +261,7 @@ class hvm
     setparam
 # HVMOP_get_param
     getparam
-# HVMOP_set_pci_intx_level (also needs hvmctl)
+# XEN_HVMCTL_set_pci_intx_level (also needs hvmctl)
     pcilevel
 # HVMOP_set_isa_irq_level
     irqlevel
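
For reference, the DECLARE_HVMCTL() invocation in
xc_hvm_set_pci_intx_level() above expands to roughly the following
(mechanically derived from the macro added to xc_private.h; shown only
for illustration):

    struct xen_hvmctl hvmctl = {
        .interface_version = XEN_HVMCTL_INTERFACE_VERSION,
        .domain = (dom),
        .cmd = XEN_HVMCTL_set_pci_intx_level,
        .u.set_pci_intx_level = {
            .domain = domain,
            .bus    = bus,
            .device = device,
            .intx   = intx,
            .level  = level,
        },
    };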




* [PATCH 03/11] hvmctl: convert HVMOP_set_isa_irq_level
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
  2016-06-20 12:52 ` [PATCH 01/11] public / x86: introduce " Jan Beulich
  2016-06-20 12:53 ` [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level Jan Beulich
@ 2016-06-20 12:53 ` Jan Beulich
  2016-06-21 10:14   ` Wei Liu
  2016-06-20 12:54 ` [PATCH 04/11] hvmctl: convert HVMOP_set_pci_link_route Jan Beulich
                   ` (8 subsequent siblings)
  11 siblings, 1 reply; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:53 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra

Note that this retains the hvmop interface definitions as those had
(wrongly) been exposed to non-tool-stack consumers (albeit the
operation wouldn't have succeeded when requested by a domain for
itself).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -488,27 +488,11 @@ int xc_hvm_set_isa_irq_level(
     uint8_t isa_irq,
     unsigned int level)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_isa_irq_level, arg);
-    int rc;
+    DECLARE_HVMCTL(set_isa_irq_level, dom,
+                   .isa_irq = isa_irq,
+                   .level   = level);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_isa_irq_level hypercall");
-        return -1;
-    }
-
-    arg->domid   = dom;
-    arg->isa_irq = isa_irq;
-    arg->level   = level;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_isa_irq_level,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_set_pci_link_route(
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -43,6 +43,30 @@ static int set_pci_intx_level(struct dom
     return 0;
 }
 
+static int set_isa_irq_level(struct domain *d,
+                             const struct xen_hvm_set_isa_irq_level *op)
+{
+    if ( op->isa_irq > 15 )
+        return -EINVAL;
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    switch ( op->level )
+    {
+    case 0:
+        hvm_isa_irq_deassert(d, op->isa_irq);
+        break;
+    case 1:
+        hvm_isa_irq_assert(d, op->isa_irq);
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
 /*
  * Note that this value is effectively part of the ABI, even if we don't need
  * to make it a formal part of it.  Hence this value may only be changed if
@@ -92,6 +116,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = set_pci_intx_level(d, &op.u.set_pci_intx_level);
         break;
 
+    case XEN_HVMCTL_set_isa_irq_level:
+        rc = set_isa_irq_level(d, &op.u.set_isa_irq_level);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4446,50 +4446,6 @@ static void hvm_s3_resume(struct domain
     }
 }
 
-static int hvmop_set_isa_irq_level(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_isa_irq_level_t) uop)
-{
-    struct xen_hvm_set_isa_irq_level op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    if ( op.isa_irq > 15 )
-        return -EINVAL;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_set_isa_irq_level(XSM_DM_PRIV, d);
-    if ( rc )
-        goto out;
-
-    rc = 0;
-    switch ( op.level )
-    {
-    case 0:
-        hvm_isa_irq_deassert(d, op.isa_irq);
-        break;
-    case 1:
-        hvm_isa_irq_assert(d, op.isa_irq);
-        break;
-    default:
-        rc = -EINVAL;
-        break;
-    }
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_set_pci_link_route(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_set_pci_link_route_t) uop)
 {
@@ -5364,11 +5320,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
             guest_handle_cast(arg, xen_hvm_param_t));
         break;
 
-    case HVMOP_set_isa_irq_level:
-        rc = hvmop_set_isa_irq_level(
-            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
-        break;
-
     case HVMOP_inject_msi:
         rc = hvmop_inject_msi(
             guest_handle_cast(arg, xen_hvm_inject_msi_t));
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -38,14 +38,25 @@ struct xen_hvm_set_pci_intx_level {
     uint8_t level;
 };
 
+/* XEN_HVMCTL_set_isa_irq_level */
+/* Set the logical level of one of a domain's ISA IRQ wires. */
+struct xen_hvm_set_isa_irq_level {
+    /* ISA device identification, by ISA IRQ (0-15). */
+    uint8_t  isa_irq;
+    /* Assertion level (0 = unasserted, 1 = asserted). */
+    uint8_t  level;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
     uint16_t cmd;
 #define XEN_HVMCTL_set_pci_intx_level            1
+#define XEN_HVMCTL_set_isa_irq_level             2
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
+        struct xen_hvm_set_isa_irq_level set_isa_irq_level;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -53,8 +53,6 @@ struct xen_hvm_set_pci_intx_level {
 typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
 
-#endif
-
 /* Set the logical level of one of a domain's ISA IRQ wires. */
 #define HVMOP_set_isa_irq_level   3
 struct xen_hvm_set_isa_irq_level {
@@ -68,6 +66,8 @@ struct xen_hvm_set_isa_irq_level {
 typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
 
+#endif
+
 #define HVMOP_set_pci_link_route  4
 struct xen_hvm_set_pci_link_route {
     /* Domain to be updated. */
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_set_isa_irq_level(XSM_DEFAULT_ARG struct domain *d)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_hvm_set_pci_link_route(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -173,7 +173,6 @@ struct xsm_operations {
 #ifdef CONFIG_X86
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
-    int (*hvm_set_isa_irq_level) (struct domain *d);
     int (*hvm_set_pci_link_route) (struct domain *d);
     int (*hvm_inject_msi) (struct domain *d);
     int (*hvm_ioreq_server) (struct domain *d, int op);
@@ -645,11 +644,6 @@ static inline int xsm_shadow_control (xs
     return xsm_ops->shadow_control(d, op);
 }
 
-static inline int xsm_hvm_set_isa_irq_level (xsm_default_t def, struct domain *d)
-{
-    return xsm_ops->hvm_set_isa_irq_level(d);
-}
-
 static inline int xsm_hvm_set_pci_link_route (xsm_default_t def, struct domain *d)
 {
     return xsm_ops->hvm_set_pci_link_route(d);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_set_isa_irq_level);
     set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
     set_to_dummy_if_null(ops, hvm_ioreq_server);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1193,6 +1193,9 @@ static int flask_hvm_control(struct doma
     case XEN_HVMCTL_set_pci_intx_level:
         perm = HVM__PCILEVEL;
         break;
+    case XEN_HVMCTL_set_isa_irq_level:
+        perm = HVM__IRQLEVEL;
+        break;
     default:
         perm = HVM__HVMCTL;
         break;
@@ -1517,11 +1520,6 @@ static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_set_isa_irq_level(struct domain *d)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__IRQLEVEL);
-}
-
 static int flask_hvm_set_pci_link_route(struct domain *d)
 {
     return current_has_perm(d, SECCLASS_HVM, HVM__PCIROUTE);
@@ -1805,7 +1803,6 @@ static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
     .hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
     .hvm_inject_msi = flask_hvm_inject_msi,
     .hvm_ioreq_server = flask_hvm_ioreq_server,
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -263,7 +263,7 @@ class hvm
     getparam
 # XEN_HVMCTL_set_pci_intx_level (also needs hvmctl)
     pcilevel
-# HVMOP_set_isa_irq_level
+# XEN_HVMCTL_set_isa_irq_level
     irqlevel
 # HVMOP_set_pci_link_route
     pciroute
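
For illustration, a hypothetical device-model style caller pulsing an
ISA IRQ through the converted wrapper (assuming the existing libxc
signature taking an xc_interface handle and a domid):

    /* Assert, then deassert, ISA IRQ 4 of domain 'domid'. */
    xc_hvm_set_isa_irq_level(xch, domid, 4, 1);
    xc_hvm_set_isa_irq_level(xch, domid, 4, 0);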




^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH 04/11] hvmctl: convert HVMOP_set_pci_link_route
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
                   ` (2 preceding siblings ...)
  2016-06-20 12:53 ` [PATCH 03/11] hvmctl: convert HVMOP_set_isa_irq_level Jan Beulich
@ 2016-06-20 12:54 ` Jan Beulich
  2016-06-21 10:14   ` Wei Liu
  2016-06-20 12:54 ` [PATCH 05/11] hvmctl: convert HVMOP_track_dirty_vram Jan Beulich
                   ` (7 subsequent siblings)
  11 siblings, 1 reply; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:54 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra

[-- Attachment #1: Type: text/plain, Size: 8522 bytes --]

Note that this retains the hvmop interface definitions, as those had
(wrongly) been exposed to non-tool-stack consumers (albeit the
operation wouldn't have succeeded when requested by a domain for
itself).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
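
As a minimal tool-stack sketch of the converted wrapper (the helper name,
domain ID and routing values below are made up; error handling is trimmed):

    #include <stdio.h>
    #include <xenctrl.h>

    /* Hypothetical helper: route PCI link 1 of domain 'dom' to ISA IRQ 10. */
    static int route_link(domid_t dom)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc = -1;

        if ( xch )
        {
            /* link is 0-3; isa_irq is 1-15, or 0 to disable the link. */
            rc = xc_hvm_set_pci_link_route(xch, dom, 1, 10);
            if ( rc )
                fprintf(stderr, "set_pci_link_route failed: %d\n", rc);
            xc_interface_close(xch);
        }

        return rc;
    }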

--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -498,27 +498,11 @@ int xc_hvm_set_isa_irq_level(
 int xc_hvm_set_pci_link_route(
     xc_interface *xch, domid_t dom, uint8_t link, uint8_t isa_irq)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_pci_link_route, arg);
-    int rc;
+    DECLARE_HVMCTL(set_pci_link_route, dom,
+                   .link    = link,
+                   .isa_irq = isa_irq);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_pci_link_route hypercall");
-        return -1;
-    }
-
-    arg->domid   = dom;
-    arg->link    = link;
-    arg->isa_irq = isa_irq;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_pci_link_route,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_inject_msi(
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -120,6 +120,11 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = set_isa_irq_level(d, &op.u.set_isa_irq_level);
         break;
 
+    case XEN_HVMCTL_set_pci_link_route:
+        rc = hvm_set_pci_link_route(d, op.u.set_pci_link_route.link,
+                                    op.u.set_pci_link_route.isa_irq);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4446,39 +4446,6 @@ static void hvm_s3_resume(struct domain
     }
 }
 
-static int hvmop_set_pci_link_route(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_pci_link_route_t) uop)
-{
-    struct xen_hvm_set_pci_link_route op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    if ( (op.link > 3) || (op.isa_irq > 15) )
-        return -EINVAL;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_set_pci_link_route(XSM_DM_PRIV, d);
-    if ( rc )
-        goto out;
-
-    rc = 0;
-    hvm_set_pci_link_route(d, op.link, op.isa_irq);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_inject_msi(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_inject_msi_t) uop)
 {
@@ -5325,11 +5292,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
             guest_handle_cast(arg, xen_hvm_inject_msi_t));
         break;
 
-    case HVMOP_set_pci_link_route:
-        rc = hvmop_set_pci_link_route(
-            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
-        break;
-
     case HVMOP_flush_tlbs:
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
         break;
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -229,13 +229,17 @@ void hvm_assert_evtchn_irq(struct vcpu *
         hvm_set_callback_irq_level(v);
 }
 
-void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
+int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
     u8 old_isa_irq;
     int i;
 
-    ASSERT((link <= 3) && (isa_irq <= 15));
+    if ( link > 3 || isa_irq > 15 )
+        return -EINVAL;
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
 
     spin_lock(&d->arch.hvm_domain.irq_lock);
 
@@ -273,6 +277,8 @@ void hvm_set_pci_link_route(struct domai
 
     dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
             d->domain_id, link, old_isa_irq, isa_irq);
+
+    return 0;
 }
 
 int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -47,16 +47,26 @@ struct xen_hvm_set_isa_irq_level {
     uint8_t  level;
 };
 
+/* XEN_HVMCTL_set_pci_link_route */
+struct xen_hvm_set_pci_link_route {
+    /* PCI link identifier (0-3). */
+    uint8_t  link;
+    /* ISA IRQ (1-15), or 0 (disable link). */
+    uint8_t  isa_irq;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
     uint16_t cmd;
 #define XEN_HVMCTL_set_pci_intx_level            1
 #define XEN_HVMCTL_set_isa_irq_level             2
+#define XEN_HVMCTL_set_pci_link_route            3
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
         struct xen_hvm_set_isa_irq_level set_isa_irq_level;
+        struct xen_hvm_set_pci_link_route set_pci_link_route;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -66,8 +66,6 @@ struct xen_hvm_set_isa_irq_level {
 typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
 
-#endif
-
 #define HVMOP_set_pci_link_route  4
 struct xen_hvm_set_pci_link_route {
     /* Domain to be updated. */
@@ -80,6 +78,8 @@ struct xen_hvm_set_pci_link_route {
 typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
 
+#endif
+
 /* Flushes all VCPU TLBs: @arg must be NULL. */
 #define HVMOP_flush_tlbs          5
 
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -122,7 +122,7 @@ void hvm_isa_irq_assert(
 void hvm_isa_irq_deassert(
     struct domain *d, unsigned int isa_irq);
 
-void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
+int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
 
 int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data);
 
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_set_pci_link_route(XSM_DEFAULT_ARG struct domain *d)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
     set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1196,6 +1196,9 @@ static int flask_hvm_control(struct doma
     case XEN_HVMCTL_set_isa_irq_level:
         perm = HVM__IRQLEVEL;
         break;
+    case XEN_HVMCTL_set_pci_link_route:
+        perm = HVM__PCIROUTE;
+        break;
     default:
         perm = HVM__HVMCTL;
         break;
@@ -1520,11 +1523,6 @@ static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_set_pci_link_route(struct domain *d)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__PCIROUTE);
-}
-
 static int flask_hvm_inject_msi(struct domain *d)
 {
     return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
@@ -1803,7 +1801,6 @@ static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
     .hvm_inject_msi = flask_hvm_inject_msi,
     .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -265,7 +265,7 @@ class hvm
     pcilevel
 # XEN_HVMCTL_set_isa_irq_level
     irqlevel
-# HVMOP_set_pci_link_route
+# XEN_HVMCTL_set_pci_link_route
     pciroute
     bind_irq
 # XEN_DOMCTL_pin_mem_cacheattr




^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH 05/11] hvmctl: convert HVMOP_track_dirty_vram
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
                   ` (3 preceding siblings ...)
  2016-06-20 12:54 ` [PATCH 04/11] hvmctl: convert HVMOP_set_pci_link_route Jan Beulich
@ 2016-06-20 12:54 ` Jan Beulich
  2016-06-21 10:14   ` Wei Liu
  2016-06-20 12:55 ` [PATCH 06/11] hvmctl: convert HVMOP_modified_memory Jan Beulich
                   ` (6 subsequent siblings)
  11 siblings, 1 reply; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:54 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra

[-- Attachment #1: Type: text/plain, Size: 8076 bytes --]

Also limiting "nr" at the libxc level to 32 bits (the high 32 bits of
the previous 64-bit parameter were ignored so far anyway).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
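
A rough consumer-side sketch (page count, GFN and function name are
invented): the caller's bitmap must provide (nr + 7) / 8 bytes, matching
the bounce size in the hunk below.

    #include <stdio.h>
    #include <string.h>
    #include <xenctrl.h>

    #define VRAM_PAGES 2048   /* hypothetical framebuffer size: 8MB */

    /* Fetch the set of VRAM pages dirtied since the previous call. */
    static void poll_dirty_vram(xc_interface *xch, domid_t dom,
                                uint64_t first_gfn)
    {
        unsigned long bitmap[(VRAM_PAGES + 7) / 8 / sizeof(unsigned long)];

        memset(bitmap, 0, sizeof(bitmap));
        if ( xc_hvm_track_dirty_vram(xch, dom, first_gfn, VRAM_PAGES,
                                     bitmap) < 0 )
            fprintf(stderr, "track_dirty_vram failed\n");
    }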

--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1613,7 +1613,7 @@ int xc_hvm_inject_msi(
  */
 int xc_hvm_track_dirty_vram(
     xc_interface *xch, domid_t dom,
-    uint64_t first_pfn, uint64_t nr,
+    uint64_t first_gfn, uint32_t nr,
     unsigned long *bitmap);
 
 /*
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -533,33 +533,27 @@ int xc_hvm_inject_msi(
 
 int xc_hvm_track_dirty_vram(
     xc_interface *xch, domid_t dom,
-    uint64_t first_pfn, uint64_t nr,
+    uint64_t first_gfn, uint32_t nr,
     unsigned long *dirty_bitmap)
 {
+    DECLARE_HVMCTL(track_dirty_vram, dom,
+                   .first_gfn = first_gfn,
+                   .nr        = nr);
     DECLARE_HYPERCALL_BOUNCE(dirty_bitmap, (nr+7) / 8, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_track_dirty_vram, arg);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL || xc_hypercall_bounce_pre(xch, dirty_bitmap) )
+    if ( xc_hypercall_bounce_pre(xch, dirty_bitmap) )
     {
         PERROR("Could not bounce memory for xc_hvm_track_dirty_vram hypercall");
-        rc = -1;
-        goto out;
+        return -1;
     }
 
-    arg->domid     = dom;
-    arg->first_pfn = first_pfn;
-    arg->nr        = nr;
-    set_xen_guest_handle(arg->dirty_bitmap, dirty_bitmap);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_track_dirty_vram,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    set_xen_guest_handle(hvmctl.u.track_dirty_vram.dirty_bitmap, dirty_bitmap);
+
+    rc = do_hvmctl(xch, &hvmctl);
 
-out:
-    xc_hypercall_buffer_free(xch, arg);
     xc_hypercall_bounce_post(xch, dirty_bitmap);
+
     return rc;
 }
 
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -17,6 +17,8 @@
 #include <xen/hypercall.h>
 #include <xen/guest_access.h>
 #include <xen/sched.h>
+#include <asm/hap.h>
+#include <asm/shadow.h>
 #include <xsm/xsm.h>
 
 static int set_pci_intx_level(struct domain *d,
@@ -67,6 +69,27 @@ static int set_isa_irq_level(struct doma
     return 0;
 }
 
+static int track_dirty_vram(struct domain *d,
+                            const struct xen_hvm_track_dirty_vram *op)
+{
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    if ( op->rsvd || op->nr > (GB(1) >> PAGE_SHIFT) )
+        return -EINVAL;
+
+    if ( d->is_dying )
+        return -ESRCH;
+
+    if ( !d->max_vcpus || !d->vcpu[0] )
+        return -EINVAL;
+
+    return shadow_mode_enabled(d)
+           ? shadow_track_dirty_vram(d, op->first_gfn, op->nr,
+                                     op->dirty_bitmap)
+           : hap_track_dirty_vram(d, op->first_gfn, op->nr, op->dirty_bitmap);
+}
+
 /*
  * Note that this value is effectively part of the ABI, even if we don't need
  * to make it a formal part of it.  Hence this value may only be changed if
@@ -125,6 +148,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
                                     op.u.set_pci_link_route.isa_irq);
         break;
 
+    case XEN_HVMCTL_track_dirty_vram:
+        rc = track_dirty_vram(d, &op.u.track_dirty_vram);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5296,47 +5296,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
         break;
 
-    case HVMOP_track_dirty_vram:
-    {
-        struct xen_hvm_track_dirty_vram a;
-        struct domain *d;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto tdv_fail;
-
-        if ( a.nr > GB(1) >> PAGE_SHIFT )
-            goto tdv_fail;
-
-        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
-        if ( rc )
-            goto tdv_fail;
-
-        rc = -ESRCH;
-        if ( d->is_dying )
-            goto tdv_fail;
-
-        rc = -EINVAL;
-        if ( d->vcpu == NULL || d->vcpu[0] == NULL )
-            goto tdv_fail;
-
-        if ( shadow_mode_enabled(d) )
-            rc = shadow_track_dirty_vram(d, a.first_pfn, a.nr, a.dirty_bitmap);
-        else
-            rc = hap_track_dirty_vram(d, a.first_pfn, a.nr, a.dirty_bitmap);
-
-    tdv_fail:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_modified_memory:
     {
         struct xen_hvm_modified_memory a;
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -55,6 +55,18 @@ struct xen_hvm_set_pci_link_route {
     uint8_t  isa_irq;
 };
 
+/* XEN_HVMCTL_track_dirty_vram */
+struct xen_hvm_track_dirty_vram {
+    /* Number of pages to track. */
+    uint32_t nr;
+    uint32_t rsvd;
+    /* First GFN to track. */
+    uint64_aligned_t first_gfn;
+    /* OUT variable. */
+    /* Dirty bitmap buffer. */
+    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -62,11 +74,13 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_set_pci_intx_level            1
 #define XEN_HVMCTL_set_isa_irq_level             2
 #define XEN_HVMCTL_set_pci_link_route            3
+#define XEN_HVMCTL_track_dirty_vram              4
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
         struct xen_hvm_set_isa_irq_level set_isa_irq_level;
         struct xen_hvm_set_pci_link_route set_pci_link_route;
+        struct xen_hvm_track_dirty_vram track_dirty_vram;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -98,22 +98,6 @@ typedef enum {
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
-/* Track dirty VRAM. */
-#define HVMOP_track_dirty_vram    6
-struct xen_hvm_track_dirty_vram {
-    /* Domain to be tracked. */
-    domid_t  domid;
-    /* Number of pages to track. */
-    uint32_t nr;
-    /* First pfn to track. */
-    uint64_aligned_t first_pfn;
-    /* OUT variable. */
-    /* Dirty bitmap buffer. */
-    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
-};
-typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
-
 /* Notify that some pages got modified by the Device Model. */
 #define HVMOP_modified_memory    7
 struct xen_hvm_modified_memory {
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1174,9 +1174,6 @@ static int flask_hvm_param(struct domain
     case HVMOP_get_param:
         perm = HVM__GETPARAM;
         break;
-    case HVMOP_track_dirty_vram:
-        perm = HVM__TRACKDIRTYVRAM;
-        break;
     default:
         perm = HVM__HVMCTL;
     }
@@ -1199,6 +1196,9 @@ static int flask_hvm_control(struct doma
     case XEN_HVMCTL_set_pci_link_route:
         perm = HVM__PCIROUTE;
         break;
+    case XEN_HVMCTL_track_dirty_vram:
+        perm = HVM__TRACKDIRTYVRAM;
+        break;
     default:
         perm = HVM__HVMCTL;
         break;
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -270,7 +270,7 @@ class hvm
     bind_irq
 # XEN_DOMCTL_pin_mem_cacheattr
     cacheattr
-# HVMOP_track_dirty_vram
+# XEN_HVMCTL_track_dirty_vram
     trackdirtyvram
 # HVMOP_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
 # HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,




^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH 06/11] hvmctl: convert HVMOP_modified_memory
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
                   ` (4 preceding siblings ...)
  2016-06-20 12:54 ` [PATCH 05/11] hvmctl: convert HVMOP_track_dirty_vram Jan Beulich
@ 2016-06-20 12:55 ` Jan Beulich
  2016-06-21 10:14   ` Wei Liu
  2016-06-20 12:56 ` [PATCH 07/11] hvmctl: convert HVMOP_set_mem_type Jan Beulich
                   ` (5 subsequent siblings)
  11 siblings, 1 reply; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:55 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra

[-- Attachment #1: Type: text/plain, Size: 8737 bytes --]

Also limiting "nr" at the libxc level to 32 bits (the high 32 bits of
the previous 64-bit parameter were ignored so far anyway).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
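
Caller-side this stays a one-liner; a hedged fragment (GFN and count made
up, xch and dom assumed to come from the surrounding tool-stack context):

    /* Tell Xen that 16 pages starting at GFN 0x1000 were written by the DM. */
    if ( xc_hvm_modified_memory(xch, dom, 0x1000, 16) < 0 )
        fprintf(stderr, "modified_memory failed\n");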

--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1620,7 +1620,7 @@ int xc_hvm_track_dirty_vram(
  * Notify that some pages got modified by the Device Model
  */
 int xc_hvm_modified_memory(
-    xc_interface *xch, domid_t dom, uint64_t first_pfn, uint64_t nr);
+    xc_interface *xch, domid_t dom, uint64_t first_gfn, uint32_t nr);
 
 /*
  * Set a range of memory to a specific type.
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -558,29 +558,13 @@ int xc_hvm_track_dirty_vram(
 }
 
 int xc_hvm_modified_memory(
-    xc_interface *xch, domid_t dom, uint64_t first_pfn, uint64_t nr)
+    xc_interface *xch, domid_t dom, uint64_t first_gfn, uint32_t nr)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_modified_memory, arg);
-    int rc;
+    DECLARE_HVMCTL(modified_memory, dom,
+                   .first_gfn = first_gfn,
+                   .nr        = nr);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_modified_memory hypercall");
-        return -1;
-    }
-
-    arg->domid     = dom;
-    arg->first_pfn = first_pfn;
-    arg->nr        = nr;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_modified_memory,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_set_mem_type(
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -14,6 +14,7 @@
  * this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <xen/event.h>
 #include <xen/hypercall.h>
 #include <xen/guest_access.h>
 #include <xen/sched.h>
@@ -99,6 +100,48 @@ static int track_dirty_vram(struct domai
 #define HVMCTL_iter_mask  ((1U << HVMCTL_iter_shift) - 1)
 #define HVMCTL_iter_max   (1U << (16 + HVMCTL_iter_shift))
 
+static int modified_memory(struct domain *d,
+                           const struct xen_hvm_modified_memory *op,
+                           unsigned int *iter)
+{
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    if ( op->rsvd || op->nr > HVMCTL_iter_max || op->nr < *iter ||
+         ((op->first_gfn + op->nr - 1) < op->first_gfn) ||
+         ((op->first_gfn + op->nr - 1) > domain_get_maximum_gpfn(d)) )
+        return -EINVAL;
+
+    if ( !paging_mode_log_dirty(d) )
+        return 0;
+
+    while ( op->nr > *iter )
+    {
+        unsigned long gfn = op->first_gfn + *iter;
+        struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_UNSHARE);
+
+        if ( page )
+        {
+            unsigned long mfn = page_to_mfn(page);
+
+            paging_mark_dirty(d, mfn);
+            /*
+             * These are most probably not page tables any more;
+             * don't take a long time and don't die either.
+             */
+            sh_remove_shadows(d, _mfn(mfn), 1, 0);
+            put_page(page);
+        }
+
+        /* Check for continuation if it's not the last iteration. */
+        if ( op->nr > ++*iter && !(*iter & HVMCTL_iter_mask) &&
+             hypercall_preempt_check() )
+            return -ERESTART;
+    }
+
+    return 0;
+}
+
 long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xen_hvmctl_t) u_hvmctl)
 {
     xen_hvmctl_t op;
@@ -152,6 +195,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = track_dirty_vram(d, &op.u.track_dirty_vram);
         break;
 
+    case XEN_HVMCTL_modified_memory:
+        rc = modified_memory(d, &op.u.modified_memory, &iter);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5233,7 +5233,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
     default:
         mask = ~0UL;
         break;
-    case HVMOP_modified_memory:
     case HVMOP_set_mem_type:
         mask = HVMOP_op_mask;
         break;
@@ -5296,65 +5295,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
         break;
 
-    case HVMOP_modified_memory:
-    {
-        struct xen_hvm_modified_memory a;
-        struct domain *d;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto modmem_fail;
-
-        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
-        if ( rc )
-            goto modmem_fail;
-
-        rc = -EINVAL;
-        if ( a.nr < start_iter ||
-             ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
-             ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
-            goto modmem_fail;
-
-        rc = 0;
-        if ( !paging_mode_log_dirty(d) )
-            goto modmem_fail;
-
-        while ( a.nr > start_iter )
-        {
-            unsigned long pfn = a.first_pfn + start_iter;
-            struct page_info *page;
-
-            page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
-            if ( page )
-            {
-                paging_mark_dirty(d, page_to_mfn(page));
-                /* These are most probably not page tables any more */
-                /* don't take a long time and don't die either */
-                sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
-                put_page(page);
-            }
-
-            /* Check for continuation if it's not the last interation */
-            if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
-                 hypercall_preempt_check() )
-            {
-                rc = -ERESTART;
-                break;
-            }
-        }
-
-    modmem_fail:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_get_mem_type:
     {
         struct xen_hvm_get_mem_type a;
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -67,6 +67,16 @@ struct xen_hvm_track_dirty_vram {
     XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
 };
 
+/* XEN_HVMCTL_modified_memory */
+/* Notify that some pages got modified by the Device Model. */
+struct xen_hvm_modified_memory {
+    /* Number of pages. */
+    uint32_t nr;
+    uint32_t rsvd;
+    /* First GFN. */
+    uint64_aligned_t first_gfn;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -75,12 +85,14 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_set_isa_irq_level             2
 #define XEN_HVMCTL_set_pci_link_route            3
 #define XEN_HVMCTL_track_dirty_vram              4
+#define XEN_HVMCTL_modified_memory               5
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
         struct xen_hvm_set_isa_irq_level set_isa_irq_level;
         struct xen_hvm_set_pci_link_route set_pci_link_route;
         struct xen_hvm_track_dirty_vram track_dirty_vram;
+        struct xen_hvm_modified_memory modified_memory;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -98,19 +98,6 @@ typedef enum {
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
-/* Notify that some pages got modified by the Device Model. */
-#define HVMOP_modified_memory    7
-struct xen_hvm_modified_memory {
-    /* Domain to be updated. */
-    domid_t  domid;
-    /* Number of pages. */
-    uint32_t nr;
-    /* First pfn. */
-    uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
-
 #define HVMOP_set_mem_type    8
 /* Notify that a region of memory is to be treated in a specific way. */
 struct xen_hvm_set_mem_type {
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -272,7 +272,7 @@ class hvm
     cacheattr
 # XEN_HVMCTL_track_dirty_vram
     trackdirtyvram
-# HVMOP_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
+# XEN_HVMCTL_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
 # HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
 # HVMOP_inject_trap
     hvmctl



[-- Attachment #2: hvmctl-05.patch --]
[-- Type: text/plain, Size: 8774 bytes --]

hvmctl: convert HVMOP_modified_memory

Also limiting "nr" at the libxc level to 32 bits (the high 32 bits of
the previous 64-bit parameter got ignore so far).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1620,7 +1620,7 @@ int xc_hvm_track_dirty_vram(
  * Notify that some pages got modified by the Device Model
  */
 int xc_hvm_modified_memory(
-    xc_interface *xch, domid_t dom, uint64_t first_pfn, uint64_t nr);
+    xc_interface *xch, domid_t dom, uint64_t first_gfn, uint32_t nr);
 
 /*
  * Set a range of memory to a specific type.
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -558,29 +558,13 @@ int xc_hvm_track_dirty_vram(
 }
 
 int xc_hvm_modified_memory(
-    xc_interface *xch, domid_t dom, uint64_t first_pfn, uint64_t nr)
+    xc_interface *xch, domid_t dom, uint64_t first_gfn, uint32_t nr)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_modified_memory, arg);
-    int rc;
+    DECLARE_HVMCTL(modified_memory, dom,
+                   .first_gfn = first_gfn,
+                   .nr        = nr);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_modified_memory hypercall");
-        return -1;
-    }
-
-    arg->domid     = dom;
-    arg->first_pfn = first_pfn;
-    arg->nr        = nr;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_modified_memory,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_set_mem_type(
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -14,6 +14,7 @@
  * this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <xen/event.h>
 #include <xen/hypercall.h>
 #include <xen/guest_access.h>
 #include <xen/sched.h>
@@ -99,6 +100,48 @@ static int track_dirty_vram(struct domai
 #define HVMCTL_iter_mask  ((1U << HVMCTL_iter_shift) - 1)
 #define HVMCTL_iter_max   (1U << (16 + HVMCTL_iter_shift))
 
+static int modified_memory(struct domain *d,
+                           const struct xen_hvm_modified_memory *op,
+                           unsigned int *iter)
+{
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    if ( op->rsvd || op->nr > HVMCTL_iter_max || op->nr < *iter ||
+         ((op->first_gfn + op->nr - 1) < op->first_gfn) ||
+         ((op->first_gfn + op->nr - 1) > domain_get_maximum_gpfn(d)) )
+        return -EINVAL;
+
+    if ( !paging_mode_log_dirty(d) )
+        return 0;
+
+    while ( op->nr > *iter )
+    {
+        unsigned long gfn = op->first_gfn + *iter;
+        struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_UNSHARE);
+
+        if ( page )
+        {
+            unsigned long mfn = page_to_mfn(page);
+
+            paging_mark_dirty(d, mfn);
+            /*
+             * These are most probably not page tables any more
+             * don't take a long time and don't die either.
+             */
+            sh_remove_shadows(d, _mfn(mfn), 1, 0);
+            put_page(page);
+        }
+
+        /* Check for continuation if it's not the last interation. */
+        if ( op->nr > ++*iter && !(*iter & HVMCTL_iter_mask) &&
+             hypercall_preempt_check() )
+            return -ERESTART;
+    }
+
+    return 0;
+}
+
 long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xen_hvmctl_t) u_hvmctl)
 {
     xen_hvmctl_t op;
@@ -152,6 +195,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = track_dirty_vram(d, &op.u.track_dirty_vram);
         break;
 
+    case XEN_HVMCTL_modified_memory:
+        rc = modified_memory(d, &op.u.modified_memory, &iter);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5233,7 +5233,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
     default:
         mask = ~0UL;
         break;
-    case HVMOP_modified_memory:
     case HVMOP_set_mem_type:
         mask = HVMOP_op_mask;
         break;
@@ -5296,65 +5295,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
         break;
 
-    case HVMOP_modified_memory:
-    {
-        struct xen_hvm_modified_memory a;
-        struct domain *d;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto modmem_fail;
-
-        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
-        if ( rc )
-            goto modmem_fail;
-
-        rc = -EINVAL;
-        if ( a.nr < start_iter ||
-             ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
-             ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
-            goto modmem_fail;
-
-        rc = 0;
-        if ( !paging_mode_log_dirty(d) )
-            goto modmem_fail;
-
-        while ( a.nr > start_iter )
-        {
-            unsigned long pfn = a.first_pfn + start_iter;
-            struct page_info *page;
-
-            page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
-            if ( page )
-            {
-                paging_mark_dirty(d, page_to_mfn(page));
-                /* These are most probably not page tables any more */
-                /* don't take a long time and don't die either */
-                sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
-                put_page(page);
-            }
-
-            /* Check for continuation if it's not the last interation */
-            if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
-                 hypercall_preempt_check() )
-            {
-                rc = -ERESTART;
-                break;
-            }
-        }
-
-    modmem_fail:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_get_mem_type:
     {
         struct xen_hvm_get_mem_type a;
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -67,6 +67,16 @@ struct xen_hvm_track_dirty_vram {
     XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
 };
 
+/* XEN_HVMCTL_modified_memory */
+/* Notify that some pages got modified by the Device Model. */
+struct xen_hvm_modified_memory {
+    /* Number of pages. */
+    uint32_t nr;
+    uint32_t rsvd;
+    /* First GFN. */
+    uint64_aligned_t first_gfn;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -75,12 +85,14 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_set_isa_irq_level             2
 #define XEN_HVMCTL_set_pci_link_route            3
 #define XEN_HVMCTL_track_dirty_vram              4
+#define XEN_HVMCTL_modified_memory               5
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
         struct xen_hvm_set_isa_irq_level set_isa_irq_level;
         struct xen_hvm_set_pci_link_route set_pci_link_route;
         struct xen_hvm_track_dirty_vram track_dirty_vram;
+        struct xen_hvm_modified_memory modified_memory;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -98,19 +98,6 @@ typedef enum {
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
-/* Notify that some pages got modified by the Device Model. */
-#define HVMOP_modified_memory    7
-struct xen_hvm_modified_memory {
-    /* Domain to be updated. */
-    domid_t  domid;
-    /* Number of pages. */
-    uint32_t nr;
-    /* First pfn. */
-    uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
-
 #define HVMOP_set_mem_type    8
 /* Notify that a region of memory is to be treated in a specific way. */
 struct xen_hvm_set_mem_type {
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -272,7 +272,7 @@ class hvm
     cacheattr
 # XEN_HVMCTL_track_dirty_vram
     trackdirtyvram
-# HVMOP_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
+# XEN_HVMCTL_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
 # HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
 # HVMOP_inject_trap
     hvmctl
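
From the device model's side the conversion is invisible apart from the
narrower page count. A minimal caller sketch, assuming the reworked
libxc prototype with a 32-bit "nr" (the continuation through the hvmctl
"opaque" field is handled entirely inside do_hvmctl(), so the caller
never sees -ERESTART):

    /* Mark a range the emulator just wrote as dirty so that log-dirty
     * based migration picks it up.  Hypothetical wrapper; error
     * handling kept minimal. */
    #include <stdio.h>
    #include <xenctrl.h>

    static int mark_modified(xc_interface *xch, domid_t dom,
                             uint64_t first_gfn, uint32_t nr)
    {
        int rc = xc_hvm_modified_memory(xch, dom, first_gfn, nr);

        if ( rc < 0 )
            fprintf(stderr, "modified_memory failed: %d\n", rc);

        return rc;
    }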


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH 07/11] hvmctl: convert HVMOP_set_mem_type
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
                   ` (5 preceding siblings ...)
  2016-06-20 12:55 ` [PATCH 06/11] hvmctl: convert HVMOP_modified_memory Jan Beulich
@ 2016-06-20 12:56 ` Jan Beulich
  2016-06-21 10:14   ` Wei Liu
  2016-06-20 12:56 ` [PATCH 08/11] hvmctl: convert HVMOP_inject_trap Jan Beulich
                   ` (4 subsequent siblings)
  11 siblings, 1 reply; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:56 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra


This allows elimination of the (ab)use of the high operation number
bits for encoding continuations.

Also limit "nr" at the libxc level to 32 bits (the high 32 bits of
the previous 64-bit parameter have been ignored so far).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
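
The "(ab)use" in question is visible in the do_hvm_op() hunk below;
condensed, the old scheme multiplexed the restart point into the op
number itself:

    /* Old hvmop continuation encoding (removed below): the real op
     * sits in the low 8 bits, the restart point in the bits above. */
    start_iter = op & ~HVMOP_op_mask;       /* HVMOP_op_mask == 0xff */
    op &= HVMOP_op_mask;
    /* ... on preemption, re-encode both into the guest-visible arg: */
    rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
                                       op | start_iter, arg);

Under hvmctl the restart point instead round-trips through the
interface structure's "opaque" field, so operation numbers stay plain
values.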

--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1627,7 +1627,8 @@ int xc_hvm_modified_memory(
  * Allowed types are HVMMEM_ram_rw, HVMMEM_ram_ro, HVMMEM_mmio_dm
  */
 int xc_hvm_set_mem_type(
-    xc_interface *xch, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr);
+    xc_interface *xch, domid_t dom, hvmmem_type_t memtype,
+    uint64_t first_gfn, uint32_t nr);
 
 /*
  * Injects a hardware/software CPU trap, to take effect the next time the HVM 
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -568,30 +568,15 @@ int xc_hvm_modified_memory(
 }
 
 int xc_hvm_set_mem_type(
-    xc_interface *xch, domid_t dom, hvmmem_type_t mem_type, uint64_t first_pfn, uint64_t nr)
+    xc_interface *xch, domid_t dom, hvmmem_type_t mem_type,
+    uint64_t first_gfn, uint32_t nr)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_mem_type, arg);
-    int rc;
+    DECLARE_HVMCTL(set_mem_type, dom,
+                   .hvmmem_type = mem_type,
+                   .first_gfn   = first_gfn,
+                   .nr          = nr);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_mem_type hypercall");
-        return -1;
-    }
-
-    arg->domid        = dom;
-    arg->hvmmem_type  = mem_type;
-    arg->first_pfn    = first_pfn;
-    arg->nr           = nr;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_mem_type,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_inject_trap(
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -142,6 +142,68 @@ static int modified_memory(struct domain
     return 0;
 }
 
+static int set_mem_type(struct domain *d,
+                        const struct xen_hvm_set_mem_type *op,
+                        unsigned int *iter)
+{
+    /* Interface types to internal p2m types. */
+    static const p2m_type_t memtype[] = {
+        [HVMMEM_ram_rw]  = p2m_ram_rw,
+        [HVMMEM_ram_ro]  = p2m_ram_ro,
+        [HVMMEM_mmio_dm] = p2m_mmio_dm,
+        [HVMMEM_unused]  = p2m_invalid
+    };
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    if ( op->rsvd || op->nr > HVMCTL_iter_max || op->nr < *iter ||
+         ((op->first_gfn + op->nr - 1) < op->first_gfn) ||
+         ((op->first_gfn + op->nr - 1) > domain_get_maximum_gpfn(d)) )
+        return -EINVAL;
+
+    if ( op->hvmmem_type >= ARRAY_SIZE(memtype) ||
+         unlikely(op->hvmmem_type == HVMMEM_unused) )
+        return -EINVAL;
+
+    while ( op->nr > *iter )
+    {
+        unsigned long gfn = op->first_gfn + *iter;
+        p2m_type_t t;
+        int rc;
+
+        get_gfn_unshare(d, gfn, &t);
+
+        if ( p2m_is_paging(t) )
+        {
+            put_gfn(d, gfn);
+            p2m_mem_paging_populate(d, gfn);
+            return -EAGAIN;
+        }
+
+        if ( p2m_is_shared(t) )
+            rc = -EAGAIN;
+        else if ( !p2m_is_ram(t) &&
+                  (!p2m_is_hole(t) || op->hvmmem_type != HVMMEM_mmio_dm) &&
+                  (t != p2m_mmio_write_dm || op->hvmmem_type != HVMMEM_ram_rw) )
+            rc = -EINVAL;
+        else
+            rc = p2m_change_type_one(d, gfn, t, memtype[op->hvmmem_type]);
+
+        put_gfn(d, gfn);
+
+        if ( rc )
+            return rc;
+
+        /* Check for continuation if it's not the last iteration. */
+        if ( op->nr > ++*iter && !(*iter & HVMCTL_iter_mask) &&
+             hypercall_preempt_check() )
+            return -ERESTART;
+    }
+
+    return 0;
+}
+
 long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xen_hvmctl_t) u_hvmctl)
 {
     xen_hvmctl_t op;
@@ -199,6 +261,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = modified_memory(d, &op.u.modified_memory, &iter);
         break;
 
+    case XEN_HVMCTL_set_mem_type:
+        rc = set_mem_type(d, &op.u.set_mem_type, &iter);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5215,31 +5215,11 @@ static int do_altp2m_op(
     return rc;
 }
 
-/*
- * Note that this value is effectively part of the ABI, even if we don't need
- * to make it a formal part of it: A guest suspended for migration in the
- * middle of a continuation would fail to work if resumed on a hypervisor
- * using a different value.
- */
-#define HVMOP_op_mask 0xff
-
 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
-    unsigned long start_iter, mask;
     long rc = 0;
 
-    switch ( op & HVMOP_op_mask )
-    {
-    default:
-        mask = ~0UL;
-        break;
-    case HVMOP_set_mem_type:
-        mask = HVMOP_op_mask;
-        break;
-    }
-
-    start_iter = op & ~mask;
-    switch ( op &= mask )
+    switch ( op )
     {
     case HVMOP_create_ioreq_server:
         rc = hvmop_create_ioreq_server(
@@ -5339,92 +5319,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
         break;
     }
 
-    case HVMOP_set_mem_type:
-    {
-        struct xen_hvm_set_mem_type a;
-        struct domain *d;
-        
-        /* Interface types to internal p2m types */
-        static const p2m_type_t memtype[] = {
-            [HVMMEM_ram_rw]  = p2m_ram_rw,
-            [HVMMEM_ram_ro]  = p2m_ram_ro,
-            [HVMMEM_mmio_dm] = p2m_mmio_dm,
-            [HVMMEM_unused] = p2m_invalid
-        };
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto setmemtype_fail;
-
-        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
-        if ( rc )
-            goto setmemtype_fail;
-
-        rc = -EINVAL;
-        if ( a.nr < start_iter ||
-             ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
-             ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
-            goto setmemtype_fail;
-            
-        if ( a.hvmmem_type >= ARRAY_SIZE(memtype) ||
-             unlikely(a.hvmmem_type == HVMMEM_unused) )
-            goto setmemtype_fail;
-
-        while ( a.nr > start_iter )
-        {
-            unsigned long pfn = a.first_pfn + start_iter;
-            p2m_type_t t;
-
-            get_gfn_unshare(d, pfn, &t);
-            if ( p2m_is_paging(t) )
-            {
-                put_gfn(d, pfn);
-                p2m_mem_paging_populate(d, pfn);
-                rc = -EAGAIN;
-                goto setmemtype_fail;
-            }
-            if ( p2m_is_shared(t) )
-            {
-                put_gfn(d, pfn);
-                rc = -EAGAIN;
-                goto setmemtype_fail;
-            }
-            if ( !p2m_is_ram(t) &&
-                 (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) &&
-                 (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) )
-            {
-                put_gfn(d, pfn);
-                goto setmemtype_fail;
-            }
-
-            rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
-            put_gfn(d, pfn);
-            if ( rc )
-                goto setmemtype_fail;
-
-            /* Check for continuation if it's not the last interation */
-            if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
-                 hypercall_preempt_check() )
-            {
-                rc = -ERESTART;
-                goto setmemtype_fail;
-            }
-        }
-
-        rc = 0;
-
-    setmemtype_fail:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_pagetable_dying:
     {
         struct xen_hvm_pagetable_dying a;
@@ -5533,13 +5427,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
     }
     }
 
-    if ( rc == -ERESTART )
-    {
-        ASSERT(!(start_iter & mask));
-        rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
-                                           op | start_iter, arg);
-    }
-
     return rc;
 }
 
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -77,6 +77,18 @@ struct xen_hvm_modified_memory {
     uint64_aligned_t first_gfn;
 };
 
+/* XEN_HVMCTL_set_mem_type */
+/* Notify that a region of memory is to be treated in a specific way. */
+struct xen_hvm_set_mem_type {
+    /* Memory type. */
+    uint16_t hvmmem_type;
+    uint16_t rsvd;
+    /* Number of pages. */
+    uint32_t nr;
+    /* First GFN. */
+    uint64_aligned_t first_gfn;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -86,6 +98,7 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_set_pci_link_route            3
 #define XEN_HVMCTL_track_dirty_vram              4
 #define XEN_HVMCTL_modified_memory               5
+#define XEN_HVMCTL_set_mem_type                  6
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -93,6 +106,7 @@ struct xen_hvmctl {
         struct xen_hvm_set_pci_link_route set_pci_link_route;
         struct xen_hvm_track_dirty_vram track_dirty_vram;
         struct xen_hvm_modified_memory modified_memory;
+        struct xen_hvm_set_mem_type set_mem_type;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -95,26 +95,6 @@ typedef enum {
 #endif
 } hvmmem_type_t;
 
-/* Following tools-only interfaces may change in future. */
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-#define HVMOP_set_mem_type    8
-/* Notify that a region of memory is to be treated in a specific way. */
-struct xen_hvm_set_mem_type {
-    /* Domain to be updated. */
-    domid_t domid;
-    /* Memory type */
-    uint16_t hvmmem_type;
-    /* Number of pages. */
-    uint32_t nr;
-    /* First pfn. */
-    uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
-
 /* Hint from PV drivers for pagetable destruction. */
 #define HVMOP_pagetable_dying        9
 struct xen_hvm_pagetable_dying {
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -272,7 +272,7 @@ class hvm
     cacheattr
 # XEN_HVMCTL_track_dirty_vram
     trackdirtyvram
-# XEN_HVMCTL_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
+# XEN_HVMCTL_modified_memory, HVMOP_get_mem_type, XEN_HVMCTL_set_mem_type,
 # HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
 # HVMOP_inject_trap
     hvmctl
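
The libxc side relies on the DECLARE_HVMCTL() helper introduced earlier
in the series (not visible in this patch). Reconstructed from its call
sites, its shape is roughly the following - a sketch, not the
authoritative definition:

    /* Builds an on-stack xen_hvmctl_t: interface version, target
     * domain, sub-op number derived from the name, plus designated
     * initializers for the matching union member (field names beyond
     * those in the public header excerpt are assumptions). */
    #define DECLARE_HVMCTL(name, dom, ...)                         \
        xen_hvmctl_t hvmctl = {                                    \
            .interface_version = XEN_HVMCTL_INTERFACE_VERSION,     \
            .domain = (dom),                                       \
            .cmd = XEN_HVMCTL_##name,                              \
            .u.name = { __VA_ARGS__ }                              \
        }

With that, each converted wrapper collapses to structure setup plus a
single do_hvmctl() call, as in xc_hvm_set_mem_type() above.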




^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH 08/11] hvmctl: convert HVMOP_inject_trap
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
                   ` (6 preceding siblings ...)
  2016-06-20 12:56 ` [PATCH 07/11] hvmctl: convert HVMOP_set_mem_type Jan Beulich
@ 2016-06-20 12:56 ` Jan Beulich
  2016-06-21 10:14   ` Wei Liu
  2016-06-20 12:57 ` [PATCH 09/11] hvmctl: convert HVMOP_inject_msi Jan Beulich
                   ` (3 subsequent siblings)
  11 siblings, 1 reply; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:56 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra


Signed-off-by: Jan Beulich <jbeulich@suse.com>
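
The XEN_HVMCTL_TRAP_* values keep the old HVMOP_TRAP_* numbering, so
callers only need the spelling change seen in xen-access below. For
illustration, injecting a write page fault could look like this
(hypothetical values; vector 14 is #PF, and hardware exceptions take
insn_len 0 since no instruction is consumed):

    uint64_t fault_gva = 0xdeadbee000ULL;    /* made-up faulting VA */
    int rc = xc_hvm_inject_trap(xch, dom, vcpu,
                                14,                      /* vector */
                                XEN_HVMCTL_TRAP_hw_exc,  /* type */
                                0x02,   /* error code: write fault */
                                0,                       /* insn_len */
                                fault_gva);              /* cr2 */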

--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -584,31 +584,15 @@ int xc_hvm_inject_trap(
     uint32_t type, uint32_t error_code, uint32_t insn_len,
     uint64_t cr2)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_inject_trap, arg);
-    int rc;
+    DECLARE_HVMCTL(inject_trap, dom,
+                   .vcpuid     = vcpu,
+                   .type       = type,
+                   .vector     = vector,
+                   .insn_len   = insn_len,
+                   .error_code = error_code,
+                   .cr2        = cr2);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_inject_trap hypercall");
-        return -1;
-    }
-
-    arg->domid       = dom;
-    arg->vcpuid      = vcpu;
-    arg->vector      = vector;
-    arg->type        = type;
-    arg->error_code  = error_code;
-    arg->insn_len    = insn_len;
-    arg->cr2         = cr2;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_inject_trap,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_livepatch_upload(xc_interface *xch,
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -41,6 +41,7 @@
 #include <xenctrl.h>
 #include <xenevtchn.h>
 #include <xen/vm_event.h>
+#include <xen/hvm/control.h>
 
 #if defined(__arm__) || defined(__aarch64__)
 #include <xen/arch-arm.h>
@@ -643,7 +644,7 @@ int main(int argc, char *argv[])
                 /* Reinject */
                 rc = xc_hvm_inject_trap(
                     xch, domain_id, req.vcpu_id, 3,
-                    HVMOP_TRAP_sw_exc, -1, 0, 0);
+                    XEN_HVMCTL_TRAP_sw_exc, -1, 0, 0);
                 if (rc < 0)
                 {
                     ERROR("Error %d injecting breakpoint\n", rc);
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -91,6 +91,32 @@ static int track_dirty_vram(struct domai
            : hap_track_dirty_vram(d, op->first_gfn, op->nr, op->dirty_bitmap);
 }
 
+static int inject_trap(struct domain *d,
+                       const struct xen_hvm_inject_trap *op)
+{
+    struct vcpu *v;
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    if ( op->rsvd8 || op->rsvd32 )
+        return -EINVAL;
+
+    if ( op->vcpuid >= d->max_vcpus || (v = d->vcpu[op->vcpuid]) == NULL )
+        return -ENOENT;
+
+    if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
+        return -EBUSY;
+
+    v->arch.hvm_vcpu.inject_trap.vector     = op->vector;
+    v->arch.hvm_vcpu.inject_trap.type       = op->type;
+    v->arch.hvm_vcpu.inject_trap.error_code = op->error_code;
+    v->arch.hvm_vcpu.inject_trap.insn_len   = op->insn_len;
+    v->arch.hvm_vcpu.inject_trap.cr2        = op->cr2;
+
+    return 0;
+}
+
 /*
  * Note that this value is effectively part of the ABI, even if we don't need
  * to make it a formal part of it.  Hence this value may only be changed if
@@ -265,6 +291,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = set_mem_type(d, &op.u.set_mem_type, &iter);
         break;
 
+    case XEN_HVMCTL_inject_trap:
+        rc = inject_trap(d, &op.u.inject_trap);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5366,48 +5366,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
         break;
     }
 
-    case HVMOP_inject_trap: 
-    {
-        xen_hvm_inject_trap_t tr;
-        struct domain *d;
-        struct vcpu *v;
-
-        if ( copy_from_guest(&tr, arg, 1 ) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(tr.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto injtrap_fail;
-
-        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
-        if ( rc )
-            goto injtrap_fail;
-
-        rc = -ENOENT;
-        if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
-            goto injtrap_fail;
-        
-        if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
-            rc = -EBUSY;
-        else 
-        {
-            v->arch.hvm_vcpu.inject_trap.vector = tr.vector;
-            v->arch.hvm_vcpu.inject_trap.type = tr.type;
-            v->arch.hvm_vcpu.inject_trap.error_code = tr.error_code;
-            v->arch.hvm_vcpu.inject_trap.insn_len = tr.insn_len;
-            v->arch.hvm_vcpu.inject_trap.cr2 = tr.cr2;
-            rc = 0;
-        }
-
-    injtrap_fail:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_guest_request_vm_event:
         if ( guest_handle_is_null(arg) )
             vm_event_monitor_guest_request();
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -89,6 +89,37 @@ struct xen_hvm_set_mem_type {
     uint64_aligned_t first_gfn;
 };
 
+/* XEN_HVMCTL_inject_trap */
+/*
+ * Inject a trap into a VCPU; it will be taken the next time the
+ * VCPU gets scheduled. Note that the caller should know enough of
+ * the CPU state before injecting to know what effect injecting
+ * the trap will have.
+ */
+struct xen_hvm_inject_trap {
+    /* VCPU */
+    uint32_t vcpuid;
+    /* Trap type (XEN_HVMCTL_TRAP_*). */
+    uint8_t type;
+/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_*. */
+#define XEN_HVMCTL_TRAP_ext_int    0 /* external interrupt */
+#define XEN_HVMCTL_TRAP_nmi        2 /* NMI */
+#define XEN_HVMCTL_TRAP_hw_exc     3 /* hardware exception */
+#define XEN_HVMCTL_TRAP_sw_int     4 /* software interrupt (CD nn) */
+#define XEN_HVMCTL_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
+#define XEN_HVMCTL_TRAP_sw_exc     6 /* INT3 (CC), INTO (CE) */
+    /* Vector number. */
+    uint8_t vector;
+    /* Instruction length. */
+    uint8_t insn_len;
+    uint8_t rsvd8;
+    /* Error code, or ~0u to skip. */
+    uint32_t error_code;
+    uint32_t rsvd32;
+    /* CR2 for page faults. */
+    uint64_aligned_t cr2;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -99,6 +130,7 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_track_dirty_vram              4
 #define XEN_HVMCTL_modified_memory               5
 #define XEN_HVMCTL_set_mem_type                  6
+#define XEN_HVMCTL_inject_trap                   7
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -107,6 +139,7 @@ struct xen_hvmctl {
         struct xen_hvm_track_dirty_vram track_dirty_vram;
         struct xen_hvm_modified_memory modified_memory;
         struct xen_hvm_set_mem_type set_mem_type;
+        struct xen_hvm_inject_trap inject_trap;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -123,49 +123,6 @@ struct xen_hvm_xentrace {
 typedef struct xen_hvm_xentrace xen_hvm_xentrace_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
 
-/* Following tools-only interfaces may change in future. */
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-/* Deprecated by XENMEM_access_op_set_access */
-#define HVMOP_set_mem_access        12
-
-/* Deprecated by XENMEM_access_op_get_access */
-#define HVMOP_get_mem_access        13
-
-#define HVMOP_inject_trap            14
-/* Inject a trap into a VCPU, which will get taken up on the next
- * scheduling of it. Note that the caller should know enough of the
- * state of the CPU before injecting, to know what the effect of
- * injecting the trap will be.
- */
-struct xen_hvm_inject_trap {
-    /* Domain to be queried. */
-    domid_t domid;
-    /* VCPU */
-    uint32_t vcpuid;
-    /* Vector number */
-    uint32_t vector;
-    /* Trap type (HVMOP_TRAP_*) */
-    uint32_t type;
-/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
-# define HVMOP_TRAP_ext_int    0 /* external interrupt */
-# define HVMOP_TRAP_nmi        2 /* nmi */
-# define HVMOP_TRAP_hw_exc     3 /* hardware exception */
-# define HVMOP_TRAP_sw_int     4 /* software interrupt (CD nn) */
-# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
-# define HVMOP_TRAP_sw_exc     6 /* INT3 (CC), INTO (CE) */
-    /* Error code, or ~0u to skip */
-    uint32_t error_code;
-    /* Intruction length */
-    uint32_t insn_len;
-    /* CR2 for page faults */
-    uint64_aligned_t cr2;
-};
-typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
-
 #define HVMOP_get_mem_type    15
 /* Return hvmmem_type_t for the specified pfn. */
 struct xen_hvm_get_mem_type {
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -273,8 +273,7 @@ class hvm
 # XEN_HVMCTL_track_dirty_vram
     trackdirtyvram
 # XEN_HVMCTL_modified_memory, HVMOP_get_mem_type, XEN_HVMCTL_set_mem_type,
-# HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
-# HVMOP_inject_trap
+# HVMOP_pagetable_dying, XEN_HVMCTL_inject_trap
     hvmctl
 # XEN_DOMCTL_mem_sharing_op and XENMEM_sharing_op_{share,add_physmap} with:
 #  source = the domain making the hypercall
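
Two conventions above are worth noting. The explicit rsvd8/rsvd32
padding is checked for being zero so those bits can be given meaning
later without an interface version bump, and the vector field doubles
as the "slot occupied" flag for the injection state:

    /* Producer side, mirroring inject_trap() above: vector == -1
     * means no injection is pending on this vCPU. */
    if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
        return -EBUSY;                 /* previous trap undelivered */
    v->arch.hvm_vcpu.inject_trap.vector = op->vector;  /* claim slot */

The consumer (hvm_do_resume()) delivers the pending event and resets
the vector back to -1 when the vCPU is next scheduled.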




^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH 09/11] hvmctl: convert HVMOP_inject_msi
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
                   ` (7 preceding siblings ...)
  2016-06-20 12:56 ` [PATCH 08/11] hvmctl: convert HVMOP_inject_trap Jan Beulich
@ 2016-06-20 12:57 ` Jan Beulich
  2016-06-21 10:14   ` Wei Liu
  2016-06-20 12:57 ` [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server* Jan Beulich
                   ` (2 subsequent siblings)
  11 siblings, 1 reply; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:57 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra


Signed-off-by: Jan Beulich <jbeulich@suse.com>
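
The addr/data pair follows the architectural x86 MSI layout, so a
caller builds it exactly as a guest would program an MSI-capable
device. An illustrative fixed-delivery, edge-triggered interrupt:

    /* Vector 0x40 to the vCPU with APIC ID 1 (x86 MSI encoding:
     * destination ID in address bits 19:12, vector in data bits 7:0;
     * the values here are made up for the example). */
    uint64_t addr = 0xfee00000ULL | (1u << 12);
    uint32_t data = 0x0040;
    int rc = xc_hvm_inject_msi(xch, dom, addr, data);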

--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -508,27 +508,11 @@ int xc_hvm_set_pci_link_route(
 int xc_hvm_inject_msi(
     xc_interface *xch, domid_t dom, uint64_t addr, uint32_t data)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_inject_msi, arg);
-    int rc;
+    DECLARE_HVMCTL(inject_msi, dom,
+                   .data  = data,
+                   .addr  = addr);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_inject_msi hypercall");
-        return -1;
-    }
-
-    arg->domid = dom;
-    arg->addr  = addr;
-    arg->data  = data;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_inject_msi,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_track_dirty_vram(
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -295,6 +295,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = inject_trap(d, &op.u.inject_trap);
         break;
 
+    case XEN_HVMCTL_inject_msi:
+        rc = hvm_inject_msi(d, op.u.inject_msi.addr, op.u.inject_msi.data);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4446,35 +4446,6 @@ static void hvm_s3_resume(struct domain
     }
 }
 
-static int hvmop_inject_msi(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_inject_msi_t) uop)
-{
-    struct xen_hvm_inject_msi op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_inject_msi(XSM_DM_PRIV, d);
-    if ( rc )
-        goto out;
-
-    rc = hvm_inject_msi(d, op.addr, op.data);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_flush_tlb_all(void)
 {
     struct domain *d = current->domain;
@@ -5266,11 +5237,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
             guest_handle_cast(arg, xen_hvm_param_t));
         break;
 
-    case HVMOP_inject_msi:
-        rc = hvmop_inject_msi(
-            guest_handle_cast(arg, xen_hvm_inject_msi_t));
-        break;
-
     case HVMOP_flush_tlbs:
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
         break;
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -292,6 +292,9 @@ int hvm_inject_msi(struct domain *d, uin
         >> MSI_DATA_TRIGGER_SHIFT;
     uint8_t vector = data & MSI_DATA_VECTOR_MASK;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     if ( !vector )
     {
         int pirq = ((addr >> 32) & 0xffffff00) | dest;
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -120,6 +120,16 @@ struct xen_hvm_inject_trap {
     uint64_aligned_t cr2;
 };
 
+/* XEN_HVMCTL_inject_msi */
+/* MSI injection for emulated devices. */
+struct xen_hvm_inject_msi {
+    /* Message data. */
+    uint32_t  data;
+    uint32_t  rsvd;
+    /* Message address (x86: 0xFEExxxxx). */
+    uint64_t  addr;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -131,6 +141,7 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_modified_memory               5
 #define XEN_HVMCTL_set_mem_type                  6
 #define XEN_HVMCTL_inject_trap                   7
+#define XEN_HVMCTL_inject_msi                    8
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -140,6 +151,7 @@ struct xen_hvmctl {
         struct xen_hvm_modified_memory modified_memory;
         struct xen_hvm_set_mem_type set_mem_type;
         struct xen_hvm_inject_trap inject_trap;
+        struct xen_hvm_inject_msi inject_msi;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -140,19 +140,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
-/* MSI injection for emulated devices */
-#define HVMOP_inject_msi         16
-struct xen_hvm_inject_msi {
-    /* Domain to be injected */
-    domid_t   domid;
-    /* Data -- lower 32 bits */
-    uint32_t  data;
-    /* Address (0xfeexxxxx) */
-    uint64_t  addr;
-};
-typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
-
 /*
  * IOREQ Servers
  *
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -174,7 +174,6 @@ struct xsm_operations {
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
     int (*hvm_set_pci_link_route) (struct domain *d);
-    int (*hvm_inject_msi) (struct domain *d);
     int (*hvm_ioreq_server) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
@@ -649,11 +648,6 @@ static inline int xsm_hvm_set_pci_link_r
     return xsm_ops->hvm_set_pci_link_route(d);
 }
 
-static inline int xsm_hvm_inject_msi (xsm_default_t def, struct domain *d)
-{
-    return xsm_ops->hvm_inject_msi(d);
-}
-
 static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
 {
     return xsm_ops->hvm_ioreq_server(d, op);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_inject_msi);
     set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1199,6 +1199,9 @@ static int flask_hvm_control(struct doma
     case XEN_HVMCTL_track_dirty_vram:
         perm = HVM__TRACKDIRTYVRAM;
         break;
+    case XEN_HVMCTL_inject_msi:
+        perm = HVM__SEND_IRQ;
+        break;
     default:
         perm = HVM__HVMCTL;
         break;
@@ -1523,11 +1526,6 @@ static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_inject_msi(struct domain *d)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
-}
-
 static int flask_hvm_ioreq_server(struct domain *d, int op)
 {
     return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
@@ -1801,7 +1799,6 @@ static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_inject_msi = flask_hvm_inject_msi,
     .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
     .apic = flask_apic,
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -281,7 +281,7 @@ class hvm
     mem_sharing
 # XEN_DOMCTL_audit_p2m
     audit_p2m
-# HVMOP_inject_msi
+# XEN_HVMCTL_inject_msi
     send_irq
 # checked in XENMEM_sharing_op_{share,add_physmap} with:
 #  source = domain whose memory is being shared
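
Note the XSM refinement: MSI injection keeps its dedicated send_irq
permission, now checked from the common flask_hvm_control() switch
instead of a separate hook, so existing policies retain their
granularity. A policy fragment granting a device-model domain that
right might read (illustrative type names):

    # Hypothetical FLASK policy: let dm_dom_t use the generic hvmctl
    # operations and inject MSIs into hvm_dom_t guests.
    allow dm_dom_t hvm_dom_t:hvm { hvmctl send_irq };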



[-- Attachment #2: hvmctl-08.patch --]
[-- Type: text/plain, Size: 8162 bytes --]

hvmctl: convert HVMOP_inject_msi

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -508,27 +508,11 @@ int xc_hvm_set_pci_link_route(
 int xc_hvm_inject_msi(
     xc_interface *xch, domid_t dom, uint64_t addr, uint32_t data)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_inject_msi, arg);
-    int rc;
+    DECLARE_HVMCTL(inject_msi, dom,
+                   .data  = data,
+                   .addr  = addr);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_inject_msi hypercall");
-        return -1;
-    }
-
-    arg->domid = dom;
-    arg->addr  = addr;
-    arg->data  = data;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_inject_msi,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_track_dirty_vram(
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -295,6 +295,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = inject_trap(d, &op.u.inject_trap);
         break;
 
+    case XEN_HVMCTL_inject_msi:
+        rc = hvm_inject_msi(d, op.u.inject_msi.addr, op.u.inject_msi.data);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4446,35 +4446,6 @@ static void hvm_s3_resume(struct domain
     }
 }
 
-static int hvmop_inject_msi(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_inject_msi_t) uop)
-{
-    struct xen_hvm_inject_msi op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_inject_msi(XSM_DM_PRIV, d);
-    if ( rc )
-        goto out;
-
-    rc = hvm_inject_msi(d, op.addr, op.data);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_flush_tlb_all(void)
 {
     struct domain *d = current->domain;
@@ -5266,11 +5237,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
             guest_handle_cast(arg, xen_hvm_param_t));
         break;
 
-    case HVMOP_inject_msi:
-        rc = hvmop_inject_msi(
-            guest_handle_cast(arg, xen_hvm_inject_msi_t));
-        break;
-
     case HVMOP_flush_tlbs:
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
         break;
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -292,6 +292,9 @@ int hvm_inject_msi(struct domain *d, uin
         >> MSI_DATA_TRIGGER_SHIFT;
     uint8_t vector = data & MSI_DATA_VECTOR_MASK;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     if ( !vector )
     {
         int pirq = ((addr >> 32) & 0xffffff00) | dest;
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -120,6 +120,16 @@ struct xen_hvm_inject_trap {
     uint64_aligned_t cr2;
 };
 
+/* XEN_HVMCTL_inject_msi */
+/* MSI injection for emulated devices. */
+struct xen_hvm_inject_msi {
+    /* Message data. */
+    uint32_t  data;
+    uint32_t  rsvd;
+    /* Message address (x86: 0xFEExxxxx). */
+    uint64_t  addr;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -131,6 +141,7 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_modified_memory               5
 #define XEN_HVMCTL_set_mem_type                  6
 #define XEN_HVMCTL_inject_trap                   7
+#define XEN_HVMCTL_inject_msi                    8
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -140,6 +151,7 @@ struct xen_hvmctl {
         struct xen_hvm_modified_memory modified_memory;
         struct xen_hvm_set_mem_type set_mem_type;
         struct xen_hvm_inject_trap inject_trap;
+        struct xen_hvm_inject_msi inject_msi;
         uint8_t pad[120];
     } u;
 };
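
(The fixed pad[120] makes the union size part of the tools ABI; an
illustrative compile-time guard, not part of the patch, could be:

    /* Hypothetical check that no union member outgrows the padding. */
    _Static_assert(sizeof(((struct xen_hvmctl *)0)->u) == 120,
                   "xen_hvmctl union must stay at 120 bytes");

so accidental growth of a member would fail the build instead of
silently changing the ABI.)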
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -140,19 +140,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
-/* MSI injection for emulated devices */
-#define HVMOP_inject_msi         16
-struct xen_hvm_inject_msi {
-    /* Domain to be injected */
-    domid_t   domid;
-    /* Data -- lower 32 bits */
-    uint32_t  data;
-    /* Address (0xfeexxxxx) */
-    uint64_t  addr;
-};
-typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
-
 /*
  * IOREQ Servers
  *
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -174,7 +174,6 @@ struct xsm_operations {
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
     int (*hvm_set_pci_link_route) (struct domain *d);
-    int (*hvm_inject_msi) (struct domain *d);
     int (*hvm_ioreq_server) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
@@ -649,11 +648,6 @@ static inline int xsm_hvm_set_pci_link_r
     return xsm_ops->hvm_set_pci_link_route(d);
 }
 
-static inline int xsm_hvm_inject_msi (xsm_default_t def, struct domain *d)
-{
-    return xsm_ops->hvm_inject_msi(d);
-}
-
 static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
 {
     return xsm_ops->hvm_ioreq_server(d, op);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_inject_msi);
     set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1199,6 +1199,9 @@ static int flask_hvm_control(struct doma
     case XEN_HVMCTL_track_dirty_vram:
         perm = HVM__TRACKDIRTYVRAM;
         break;
+    case XEN_HVMCTL_inject_msi:
+        perm = HVM__SEND_IRQ;
+        break;
     default:
         perm = HVM__HVMCTL;
         break;
@@ -1523,11 +1526,6 @@ static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_inject_msi(struct domain *d)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
-}
-
 static int flask_hvm_ioreq_server(struct domain *d, int op)
 {
     return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
@@ -1801,7 +1799,6 @@ static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_inject_msi = flask_hvm_inject_msi,
     .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
     .apic = flask_apic,
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -281,7 +281,7 @@ class hvm
     mem_sharing
 # XEN_DOMCTL_audit_p2m
     audit_p2m
-# HVMOP_inject_msi
+# XEN_HVMCTL_inject_msi
     send_irq
 # checked in XENMEM_sharing_op_{share,add_physmap} with:
 #  source = domain whose memory is being shared

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server*
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
                   ` (8 preceding siblings ...)
  2016-06-20 12:57 ` [PATCH 09/11] hvmctl: convert HVMOP_inject_msi Jan Beulich
@ 2016-06-20 12:57 ` Jan Beulich
  2016-06-21 10:14   ` Wei Liu
  2016-06-21 12:44   ` Paul Durrant
  2016-06-20 12:58 ` [PATCH 11/11] x86/HVM: serialize trap injecting producer and consumer Jan Beulich
  2016-06-23 15:15 ` [PATCH 00/11] hvmctl hypercall Andrew Cooper
  11 siblings, 2 replies; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:57 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, dgdegra

[-- Attachment #1: Type: text/plain, Size: 46357 bytes --]

Note that we can't adjust HVM_IOREQSRV_BUFIOREQ_* to properly obey
name space rules, as these constants are in use by callers of the libxc
interface.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
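
For illustration, the converted wrapper keeps its existing signature,
so a caller (sketch only; error handling elided) still looks like:

    ioservid_t id;

    /* Request a buffered ioreq ring with atomic pointer updates. */
    if ( xc_hvm_create_ioreq_server(xch, dom,
                                    HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                    &id) )
        perror("create ioreq server");

which is why the HVM_IOREQSRV_BUFIOREQ_* names must stay as they are.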

--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -41,6 +41,7 @@
 #include <xen/sched.h>
 #include <xen/memory.h>
 #include <xen/grant_table.h>
+#include <xen/hvm/control.h>
 #include <xen/hvm/params.h>
 #include <xen/xsm/flask_op.h>
 #include <xen/tmem.h>
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1416,23 +1416,14 @@ int xc_hvm_create_ioreq_server(xc_interf
                                int handle_bufioreq,
                                ioservid_t *id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
+    DECLARE_HVMCTL(create_ioreq_server, domid,
+                   .handle_bufioreq = handle_bufioreq);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->handle_bufioreq = handle_bufioreq;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_create_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    rc = do_hvmctl(xch, &hvmctl);
 
-    *id = arg->id;
+    *id = hvmctl.u.create_ioreq_server.id;
 
-    xc_hypercall_buffer_free(xch, arg);
     return rc;
 }
 
@@ -1443,84 +1434,52 @@ int xc_hvm_get_ioreq_server_info(xc_inte
                                  xen_pfn_t *bufioreq_pfn,
                                  evtchn_port_t *bufioreq_port)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
+    DECLARE_HVMCTL(get_ioreq_server_info, domid,
+                   .id = id);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_get_ioreq_server_info,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    rc = do_hvmctl(xch, &hvmctl);
     if ( rc != 0 )
-        goto done;
+        return rc;
 
     if ( ioreq_pfn )
-        *ioreq_pfn = arg->ioreq_pfn;
+        *ioreq_pfn = hvmctl.u.get_ioreq_server_info.ioreq_pfn;
 
     if ( bufioreq_pfn )
-        *bufioreq_pfn = arg->bufioreq_pfn;
+        *bufioreq_pfn = hvmctl.u.get_ioreq_server_info.bufioreq_pfn;
 
     if ( bufioreq_port )
-        *bufioreq_port = arg->bufioreq_port;
+        *bufioreq_port = hvmctl.u.get_ioreq_server_info.bufioreq_port;
 
-done:
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return 0;
 }
 
 int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t domid,
                                         ioservid_t id, int is_mmio,
                                         uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
+                   .id = id,
+                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
+                                   : XEN_HVMCTL_IO_RANGE_PORT,
+                   .start = start,
+                   .end = end);
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
                                             ioservid_t id, int is_mmio,
                                             uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
+                   .id = id,
+                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
+                                   : XEN_HVMCTL_IO_RANGE_PORT,
+                   .start = start,
+                   .end = end);
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1528,37 +1487,23 @@ int xc_hvm_map_pcidev_to_ioreq_server(xc
                                       uint8_t bus, uint8_t device,
                                       uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    /*
+     * The underlying hypercall will deal with ranges of PCI SBDF
+     * but, for simplicity, the API only uses singletons.
+     */
+    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
+    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
+                   .id = id,
+                   .type = XEN_HVMCTL_IO_RANGE_PCI,
+                   .start = sbdf,
+                   .end = sbdf);
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-
-    /*
-     * The underlying hypercall will deal with ranges of PCI SBDF
-     * but, for simplicity, the API only uses singletons.
-     */
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1566,54 +1511,29 @@ int xc_hvm_unmap_pcidev_from_ioreq_serve
                                           uint8_t bus, uint8_t device,
                                           uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
+    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
+                   .id = id,
+                   .type = XEN_HVMCTL_IO_RANGE_PCI,
+                   .start = sbdf,
+                   .end = sbdf);
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_destroy_ioreq_server(xc_interface *xch,
                                 domid_t domid,
                                 ioservid_t id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(destroy_ioreq_server, domid,
+                   .id = id);
 
-    arg->domid = domid;
-    arg->id = id;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_destroy_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_set_ioreq_server_state(xc_interface *xch,
@@ -1621,23 +1541,11 @@ int xc_hvm_set_ioreq_server_state(xc_int
                                   ioservid_t id,
                                   int enabled)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(set_ioreq_server_state, domid,
+                   .id = id,
+                   .enabled = !!enabled);
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->enabled = !!enabled;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_ioreq_server_state,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_domain_setdebugging(xc_interface *xch,
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -34,8 +34,6 @@
 #define XC_INTERNAL_COMPAT_MAP_FOREIGN_API
 #include "xenctrl.h"
 
-#include <xen/hvm/control.h>
-
 #include <xencall.h>
 #include <xenforeignmemory.h>
 
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -20,6 +20,7 @@
 #include <xen/sched.h>
 #include <asm/hap.h>
 #include <asm/shadow.h>
+#include <asm/hvm/ioreq.h>
 #include <xsm/xsm.h>
 
 static int set_pci_intx_level(struct domain *d,
@@ -299,6 +300,50 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = hvm_inject_msi(d, op.u.inject_msi.addr, op.u.inject_msi.data);
         break;
 
+    case XEN_HVMCTL_create_ioreq_server:
+        rc = -EINVAL;
+        if ( op.u.create_ioreq_server.rsvd )
+            break;
+        rc = hvm_create_ioreq_server(d, current->domain->domain_id, 0,
+                                     op.u.create_ioreq_server.handle_bufioreq,
+                                     &op.u.create_ioreq_server.id);
+        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
+                                            u.create_ioreq_server.id) )
+            rc = -EFAULT;
+        break;
+
+    case XEN_HVMCTL_get_ioreq_server_info:
+        rc = -EINVAL;
+        if ( op.u.get_ioreq_server_info.rsvd )
+            break;
+        rc = hvm_get_ioreq_server_info(d, &op.u.get_ioreq_server_info);
+        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
+                                            u.get_ioreq_server_info) )
+            rc = -EFAULT;
+        break;
+
+    case XEN_HVMCTL_map_io_range_to_ioreq_server:
+        rc = hvm_map_io_range_to_ioreq_server(
+                 d, &op.u.map_io_range_to_ioreq_server);
+        break;
+
+    case XEN_HVMCTL_unmap_io_range_from_ioreq_server:
+        rc = hvm_unmap_io_range_from_ioreq_server(
+                 d, &op.u.unmap_io_range_from_ioreq_server);
+        break;
+
+    case XEN_HVMCTL_destroy_ioreq_server:
+        rc = hvm_destroy_ioreq_server(d, op.u.destroy_ioreq_server.id);
+        break;
+
+    case XEN_HVMCTL_set_ioreq_server_state:
+        rc = -EINVAL;
+        if ( op.u.set_ioreq_server_state.rsvd )
+            break;
+        rc = hvm_set_ioreq_server_state(d, op.u.set_ioreq_server_state.id,
+                                        !!op.u.set_ioreq_server_state.enabled);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4487,195 +4487,6 @@ static int hvmop_flush_tlb_all(void)
     return 0;
 }
 
-static int hvmop_create_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
-{
-    struct domain *curr_d = current->domain;
-    xen_hvm_create_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_create_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
-                                 op.handle_bufioreq, &op.id);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_get_ioreq_server_info(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
-{
-    xen_hvm_get_ioreq_server_info_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_get_ioreq_server_info);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_get_ioreq_server_info(d, op.id,
-                                   &op.ioreq_pfn,
-                                   &op.bufioreq_pfn, 
-                                   &op.bufioreq_port);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_map_io_range_to_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_map_io_range_to_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.type,
-                                          op.start, op.end);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_unmap_io_range_from_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_unmap_io_range_from_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.type,
-                                              op.start, op.end);
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_set_ioreq_server_state(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
-{
-    xen_hvm_set_ioreq_server_state_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_destroy_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
-{
-    xen_hvm_destroy_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_destroy_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_destroy_ioreq_server(d, op.id);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_set_evtchn_upcall_vector(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
 {
@@ -5192,36 +5003,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
 
     switch ( op )
     {
-    case HVMOP_create_ioreq_server:
-        rc = hvmop_create_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
-        break;
-    
-    case HVMOP_get_ioreq_server_info:
-        rc = hvmop_get_ioreq_server_info(
-            guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
-        break;
-    
-    case HVMOP_map_io_range_to_ioreq_server:
-        rc = hvmop_map_io_range_to_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-    
-    case HVMOP_unmap_io_range_from_ioreq_server:
-        rc = hvmop_unmap_io_range_from_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-
-    case HVMOP_set_ioreq_server_state:
-        rc = hvmop_set_ioreq_server_state(
-            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
-        break;
-    
-    case HVMOP_destroy_ioreq_server:
-        rc = hvmop_destroy_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
-        break;
-    
     case HVMOP_set_evtchn_upcall_vector:
         rc = hvmop_set_evtchn_upcall_vector(
             guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -513,9 +513,9 @@ static int hvm_ioreq_server_alloc_ranges
         char *name;
 
         rc = asprintf(&name, "ioreq_server %d %s", s->id,
-                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
-                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
-                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
+                      (i == XEN_HVMCTL_IO_RANGE_PORT) ? "port" :
+                      (i == XEN_HVMCTL_IO_RANGE_MEMORY) ? "memory" :
+                      (i == XEN_HVMCTL_IO_RANGE_PCI) ? "pci" :
                       "");
         if ( rc )
             goto fail;
@@ -686,7 +686,8 @@ int hvm_create_ioreq_server(struct domai
     struct hvm_ioreq_server *s;
     int rc;
 
-    if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
+    if ( !is_hvm_domain(d) ||
+         bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
         return -EINVAL;
 
     rc = -ENOMEM;
@@ -738,6 +739,9 @@ int hvm_destroy_ioreq_server(struct doma
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -772,14 +776,15 @@ int hvm_destroy_ioreq_server(struct doma
     return rc;
 }
 
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
-                              evtchn_port_t *bufioreq_port)
+int hvm_get_ioreq_server_info(struct domain *d,
+                              struct xen_hvm_get_ioreq_server_info *info)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -790,15 +795,15 @@ int hvm_get_ioreq_server_info(struct dom
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id != id )
+        if ( s->id != info->id )
             continue;
 
-        *ioreq_pfn = s->ioreq.gmfn;
+        info->ioreq_pfn = s->ioreq.gmfn;
 
         if ( s->bufioreq.va != NULL )
         {
-            *bufioreq_pfn = s->bufioreq.gmfn;
-            *bufioreq_port = s->bufioreq_evtchn;
+            info->bufioreq_pfn = s->bufioreq.gmfn;
+            info->bufioreq_port = s->bufioreq_evtchn;
         }
 
         rc = 0;
@@ -810,13 +815,15 @@ int hvm_get_ioreq_server_info(struct dom
     return rc;
 }
 
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
-                                     uint32_t type, uint64_t start,
-                                     uint64_t end)
+int hvm_map_io_range_to_ioreq_server(struct domain *d,
+                                     const struct xen_hvm_io_range *ior)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( ior->rsvd || !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -827,16 +834,16 @@ int hvm_map_io_range_to_ioreq_server(str
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
+        if ( s->id == ior->id )
         {
             struct rangeset *r;
 
-            switch ( type )
+            switch ( ior->type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
-                r = s->range[type];
+            case XEN_HVMCTL_IO_RANGE_PORT:
+            case XEN_HVMCTL_IO_RANGE_MEMORY:
+            case XEN_HVMCTL_IO_RANGE_PCI:
+                r = s->range[ior->type];
                 break;
 
             default:
@@ -849,10 +856,10 @@ int hvm_map_io_range_to_ioreq_server(str
                 break;
 
             rc = -EEXIST;
-            if ( rangeset_overlaps_range(r, start, end) )
+            if ( rangeset_overlaps_range(r, ior->start, ior->end) )
                 break;
 
-            rc = rangeset_add_range(r, start, end);
+            rc = rangeset_add_range(r, ior->start, ior->end);
             break;
         }
     }
@@ -862,13 +869,15 @@ int hvm_map_io_range_to_ioreq_server(str
     return rc;
 }
 
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
-                                         uint32_t type, uint64_t start,
-                                         uint64_t end)
+int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
+                                         const struct xen_hvm_io_range *ior)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( ior->rsvd || !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -879,16 +888,16 @@ int hvm_unmap_io_range_from_ioreq_server
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
+        if ( s->id == ior->id )
         {
             struct rangeset *r;
 
-            switch ( type )
+            switch ( ior->type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
-                r = s->range[type];
+            case XEN_HVMCTL_IO_RANGE_PORT:
+            case XEN_HVMCTL_IO_RANGE_MEMORY:
+            case XEN_HVMCTL_IO_RANGE_PCI:
+                r = s->range[ior->type];
                 break;
 
             default:
@@ -901,10 +910,10 @@ int hvm_unmap_io_range_from_ioreq_server
                 break;
 
             rc = -ENOENT;
-            if ( !rangeset_contains_range(r, start, end) )
+            if ( !rangeset_contains_range(r, ior->start, ior->end) )
                 break;
 
-            rc = rangeset_remove_range(r, start, end);
+            rc = rangeset_remove_range(r, ior->start, ior->end);
             break;
         }
     }
@@ -920,6 +929,9 @@ int hvm_set_ioreq_server_state(struct do
     struct list_head *entry;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -1128,12 +1140,12 @@ struct hvm_ioreq_server *hvm_select_iore
 
         /* PCI config data cycle */
 
-        sbdf = HVMOP_PCI_SBDF(0,
-                              PCI_BUS(CF8_BDF(cf8)),
-                              PCI_SLOT(CF8_BDF(cf8)),
-                              PCI_FUNC(CF8_BDF(cf8)));
+        sbdf = XEN_HVMCTL_PCI_SBDF(0,
+                                   PCI_BUS(CF8_BDF(cf8)),
+                                   PCI_SLOT(CF8_BDF(cf8)),
+                                   PCI_FUNC(CF8_BDF(cf8)));
 
-        type = HVMOP_IO_RANGE_PCI;
+        type = XEN_HVMCTL_IO_RANGE_PCI;
         addr = ((uint64_t)sbdf << 32) |
                CF8_ADDR_LO(cf8) |
                (p->addr & 3);
@@ -1152,7 +1164,7 @@ struct hvm_ioreq_server *hvm_select_iore
     else
     {
         type = (p->type == IOREQ_TYPE_PIO) ?
-                HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
+                XEN_HVMCTL_IO_RANGE_PORT : XEN_HVMCTL_IO_RANGE_MEMORY;
         addr = p->addr;
     }
 
@@ -1174,19 +1186,19 @@ struct hvm_ioreq_server *hvm_select_iore
         {
             unsigned long end;
 
-        case HVMOP_IO_RANGE_PORT:
+        case XEN_HVMCTL_IO_RANGE_PORT:
             end = addr + p->size - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_MEMORY:
+        case XEN_HVMCTL_IO_RANGE_MEMORY:
             end = addr + (p->size * p->count) - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_PCI:
+        case XEN_HVMCTL_IO_RANGE_PCI:
             if ( rangeset_contains_singleton(r, addr >> 32) )
             {
                 p->type = IOREQ_TYPE_PCI_CONFIG;
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -30,6 +30,7 @@
 #include <asm/hvm/vmx/vmcs.h>
 #include <asm/hvm/svm/vmcb.h>
 #include <public/grant_table.h>
+#include <public/hvm/control.h>
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
 #include <public/hvm/hvm_op.h>
@@ -47,7 +48,7 @@ struct hvm_ioreq_vcpu {
     bool_t           pending;
 };
 
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (XEN_HVMCTL_IO_RANGE_PCI + 1)
 #define MAX_NR_IO_RANGES  256
 
 struct hvm_ioreq_server {
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -19,6 +19,8 @@
 #ifndef __ASM_X86_HVM_IOREQ_H__
 #define __ASM_X86_HVM_IOREQ_H__
 
+#include <public/hvm/control.h>
+
 bool_t hvm_io_pending(struct vcpu *v);
 bool_t handle_hvm_io_completion(struct vcpu *v);
 bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
@@ -27,16 +29,12 @@ int hvm_create_ioreq_server(struct domai
                             bool_t is_default, int bufioreq_handling,
                             ioservid_t *id);
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
-                              evtchn_port_t *bufioreq_port);
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
-                                     uint32_t type, uint64_t start,
-                                     uint64_t end);
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
-                                         uint32_t type, uint64_t start,
-                                         uint64_t end);
+int hvm_get_ioreq_server_info(struct domain *d,
+                              struct xen_hvm_get_ioreq_server_info *info);
+int hvm_map_io_range_to_ioreq_server(struct domain *d,
+                                     const struct xen_hvm_io_range *r);
+int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
+                                         const struct xen_hvm_io_range *r);
 int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
                                bool_t enabled);
 
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -26,6 +26,7 @@
 #endif
 
 #include "../xen.h"
+#include "../event_channel.h"
 
 #define XEN_HVMCTL_INTERFACE_VERSION 0x00000001
 
@@ -130,6 +131,131 @@ struct xen_hvm_inject_msi {
     uint64_t  addr;
 };
 
+/*
+ * IOREQ Servers
+ *
+ * The interface between an I/O emulator and Xen is called an IOREQ Server.
+ * A domain supports a single 'legacy' IOREQ Server which is instantiated if
+ * parameter...
+ *
+ * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
+ * ioreq structures), or...
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
+ * ioreq ring), or...
+ * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
+ * to request buffered I/O emulation).
+ *
+ * The following hypercalls facilitate the creation of IOREQ Servers for
+ * 'secondary' emulators which are invoked to implement port I/O, memory, or
+ * PCI config space ranges which they explicitly register.
+ */
+
+typedef uint16_t ioservid_t;
+
+/*
+ * XEN_HVMCTL_create_ioreq_server: Instantiate a new IOREQ Server for a
+ *                                 secondary emulator servicing domain
+ *                                 <domid>.
+ *
+ * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
+ * the buffered ioreq ring will not be allocated and hence all emulation
+ * requests to this server will be synchronous.
+ */
+struct xen_hvm_create_ioreq_server {
+#define HVM_IOREQSRV_BUFIOREQ_OFF    0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
+    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
+    uint8_t rsvd;            /* IN - must be zero */
+    ioservid_t id;           /* OUT - server id */
+};
+
+/*
+ * XEN_HVMCTL_get_ioreq_server_info: Get all the information necessary to
+ *                                   access IOREQ Server <id>.
+ *
+ * The emulator needs to map the synchronous ioreq structures and buffered
+ * ioreq ring (if it exists) that Xen uses to request emulation. These are
+ * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * respectively. In addition, if the IOREQ Server is handling buffered
+ * emulation requests, the emulator needs to bind to event channel
+ * <bufioreq_port> to listen for them. (The event channels used for
+ * synchronous emulation requests are specified in the per-CPU ioreq
+ * structures in <ioreq_pfn>).
+ * If the IOREQ Server is not handling buffered emulation requests then the
+ * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ */
+struct xen_hvm_get_ioreq_server_info {
+    ioservid_t id;                 /* IN - server id */
+    uint16_t rsvd;                 /* IN - must be zero */
+    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
+    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
+    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
+};
+
+/*
+ * XEN_HVMCTL_map_io_range_to_ioreq_server: Register an I/O range of domain
+ *                                          <domid> for emulation by the
+ *                                          client of IOREQ Server <id>
+ * XEN_HVMCTL_unmap_io_range_from_ioreq_server: Deregister an I/O range of
+ *                                              <domid> for emulation by the
+ *                                              client of IOREQ Server <id>
+ *
+ * There are three types of I/O that can be emulated: port I/O, memory accesses
+ * and PCI config space accesses. The <type> field denotes which type of range
+ * the <start> and <end> (inclusive) fields are specifying.
+ * PCI config space ranges are specified by segment/bus/device/function values
+ * which should be encoded using the XEN_HVMCTL_PCI_SBDF helper macro below.
+ *
+ * NOTE: unless an emulation request falls entirely within a range mapped
+ * by a secondary emulator, it will not be passed to that emulator.
+ */
+struct xen_hvm_io_range {
+    ioservid_t id;               /* IN - server id */
+    uint16_t type;               /* IN - type of range */
+    uint32_t rsvd;               /* IN - must be zero */
+#define XEN_HVMCTL_IO_RANGE_PORT   0 /* I/O port range */
+#define XEN_HVMCTL_IO_RANGE_MEMORY 1 /* MMIO range */
+#define XEN_HVMCTL_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
+    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
+};
+
+#define XEN_HVMCTL_PCI_SBDF(s, b, d, f) \
+	((((s) & 0xffff) << 16) | \
+	 (((b) & 0xff) << 8) | \
+	 (((d) & 0x1f) << 3) | \
+	 ((f) & 0x07))
+
+/*
+ * XEN_HVMCTL_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing
+ *                                  domain <domid>.
+ *
+ * Any registered I/O ranges will be automatically deregistered.
+ */
+struct xen_hvm_destroy_ioreq_server {
+    ioservid_t id; /* IN - server id */
+};
+
+/*
+ * XEN_HVMCTL_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
+ *                                    servicing domain <domid>.
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is in
+ * the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
+ * XEN_HVMCTL_get_ioreq_server_info) are not meaningful until the IOREQ Server
+ * is in the enabled state.
+ */
+struct xen_hvm_set_ioreq_server_state {
+    ioservid_t id;   /* IN - server id */
+    uint8_t enabled; /* IN - enabled? */
+    uint8_t rsvd;    /* IN - must be zero */
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -142,6 +268,12 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_set_mem_type                  6
 #define XEN_HVMCTL_inject_trap                   7
 #define XEN_HVMCTL_inject_msi                    8
+#define XEN_HVMCTL_create_ioreq_server           9
+#define XEN_HVMCTL_get_ioreq_server_info        10
+#define XEN_HVMCTL_map_io_range_to_ioreq_server 11
+#define XEN_HVMCTL_unmap_io_range_from_ioreq_server 12
+#define XEN_HVMCTL_destroy_ioreq_server         13
+#define XEN_HVMCTL_set_ioreq_server_state       14
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -152,6 +284,12 @@ struct xen_hvmctl {
         struct xen_hvm_set_mem_type set_mem_type;
         struct xen_hvm_inject_trap inject_trap;
         struct xen_hvm_inject_msi inject_msi;
+        struct xen_hvm_create_ioreq_server create_ioreq_server;
+        struct xen_hvm_get_ioreq_server_info get_ioreq_server_info;
+        struct xen_hvm_io_range map_io_range_to_ioreq_server;
+        struct xen_hvm_io_range unmap_io_range_from_ioreq_server;
+        struct xen_hvm_destroy_ioreq_server destroy_ioreq_server;
+        struct xen_hvm_set_ioreq_server_state set_ioreq_server_state;
         uint8_t pad[120];
     } u;
 };
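
To make the SBDF encoding concrete, a worked example (illustrative
only):

    /* 0000:03:1f.7 -> (0 << 16) | (0x03 << 8) | (0x1f << 3) | 0x7 */
    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(0, 0x03, 0x1f, 0x7); /* 0x3ff */

The same value is used for both <start> and <end>, since libxc only
registers singleton PCI ranges.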
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -25,7 +25,6 @@
 
 #include "../xen.h"
 #include "../trace.h"
-#include "../event_channel.h"
 
 /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
 #define HVMOP_set_param           0
@@ -137,152 +136,6 @@ struct xen_hvm_get_mem_type {
 typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
 
-/* Following tools-only interfaces may change in future. */
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-/*
- * IOREQ Servers
- *
- * The interface between an I/O emulator an Xen is called an IOREQ Server.
- * A domain supports a single 'legacy' IOREQ Server which is instantiated if
- * parameter...
- *
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
- * ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
- * ioreq ring), or...
- * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
- * to request buffered I/O emulation).
- * 
- * The following hypercalls facilitate the creation of IOREQ Servers for
- * 'secondary' emulators which are invoked to implement port I/O, memory, or
- * PCI config space ranges which they explicitly register.
- */
-
-typedef uint16_t ioservid_t;
-
-/*
- * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
- *                            emulator servicing domain <domid>.
- *
- * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
- * the buffered ioreq ring will not be allocated and hence all emulation
- * requestes to this server will be synchronous.
- */
-#define HVMOP_create_ioreq_server 17
-struct xen_hvm_create_ioreq_server {
-    domid_t domid;           /* IN - domain to be serviced */
-#define HVM_IOREQSRV_BUFIOREQ_OFF    0
-#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
-/*
- * Use this when read_pointer gets updated atomically and
- * the pointer pair gets read atomically:
- */
-#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
-    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
-    ioservid_t id;           /* OUT - server id */
-};
-typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
-
-/*
- * HVMOP_get_ioreq_server_info: Get all the information necessary to access
- *                              IOREQ Server <id>. 
- *
- * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
- * respectively. In addition, if the IOREQ Server is handling buffered
- * emulation requests, the emulator needs to bind to event channel
- * <bufioreq_port> to listen for them. (The event channels used for
- * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
- * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
- */
-#define HVMOP_get_ioreq_server_info 18
-struct xen_hvm_get_ioreq_server_info {
-    domid_t domid;                 /* IN - domain to be serviced */
-    ioservid_t id;                 /* IN - server id */
-    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
-    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
-    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
-};
-typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
-
-/*
- * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
- *                                   for emulation by the client of IOREQ
- *                                   Server <id>
- * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
- *                                       for emulation by the client of IOREQ
- *                                       Server <id>
- *
- * There are three types of I/O that can be emulated: port I/O, memory accesses
- * and PCI config space accesses. The <type> field denotes which type of range
- * the <start> and <end> (inclusive) fields are specifying.
- * PCI config space ranges are specified by segment/bus/device/function values
- * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
- *
- * NOTE: unless an emulation request falls entirely within a range mapped
- * by a secondary emulator, it will not be passed to that emulator.
- */
-#define HVMOP_map_io_range_to_ioreq_server 19
-#define HVMOP_unmap_io_range_from_ioreq_server 20
-struct xen_hvm_io_range {
-    domid_t domid;               /* IN - domain to be serviced */
-    ioservid_t id;               /* IN - server id */
-    uint32_t type;               /* IN - type of range */
-# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
-# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
-# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
-    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
-};
-typedef struct xen_hvm_io_range xen_hvm_io_range_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
-
-#define HVMOP_PCI_SBDF(s,b,d,f)                 \
-	((((s) & 0xffff) << 16) |                   \
-	 (((b) & 0xff) << 8) |                      \
-	 (((d) & 0x1f) << 3) |                      \
-	 ((f) & 0x07))
-
-/*
- * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
- *                             <domid>.
- *
- * Any registered I/O ranges will be automatically deregistered.
- */
-#define HVMOP_destroy_ioreq_server 21
-struct xen_hvm_destroy_ioreq_server {
-    domid_t domid; /* IN - domain to be serviced */
-    ioservid_t id; /* IN - server id */
-};
-typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
-
-/*
- * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
- *                               domain <domid>.
- *
- * The IOREQ Server will not be passed any emulation requests until it is in the
- * enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
- * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
- * the enabled state.
- */
-#define HVMOP_set_ioreq_server_state 22
-struct xen_hvm_set_ioreq_server_state {
-    domid_t domid;   /* IN - domain to be serviced */
-    ioservid_t id;   /* IN - server id */
-    uint8_t enabled; /* IN - enabled? */    
-};
-typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
-
 #if defined(__i386__) || defined(__x86_64__)
 
 /*
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -174,7 +174,6 @@ struct xsm_operations {
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
     int (*hvm_set_pci_link_route) (struct domain *d);
-    int (*hvm_ioreq_server) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
     int (*memtype) (uint32_t access);
@@ -648,11 +647,6 @@ static inline int xsm_hvm_set_pci_link_r
     return xsm_ops->hvm_set_pci_link_route(d);
 }
 
-static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
-{
-    return xsm_ops->hvm_ioreq_server(d, op);
-}
-
 static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
 {
     return xsm_ops->mem_sharing_op(d, cd, op);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
     set_to_dummy_if_null(ops, machine_memory_map);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1526,11 +1526,6 @@ static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_ioreq_server(struct domain *d, int op)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
-}
-
 static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
 {
     int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
@@ -1799,7 +1794,6 @@ static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
     .apic = flask_apic,
     .machine_memory_map = flask_machine_memory_map,
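
Taken together, a secondary emulator's bring-up over the converted
libxc calls looks roughly like this (a sketch assuming valid xch/dom;
error handling and teardown elided):

    ioservid_t id;
    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_port;

    xc_hvm_create_ioreq_server(xch, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                               &id);
    xc_hvm_get_ioreq_server_info(xch, dom, id, &ioreq_pfn,
                                 &bufioreq_pfn, &bufioreq_port);
    /* Claim an example MMIO page and PCI device before going live. */
    xc_hvm_map_io_range_to_ioreq_server(xch, dom, id, 1 /* MMIO */,
                                        0xfe000000, 0xfe000fff);
    xc_hvm_map_pcidev_to_ioreq_server(xch, dom, id, 0, 0, 2, 0);
    xc_hvm_set_ioreq_server_state(xch, dom, id, 1);

The addresses and the PCI SBDF (0000:00:02.0) are placeholders, not
values taken from the patch.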



[-- Attachment #2: hvmctl-09.patch --]
[-- Type: text/plain, Size: 46393 bytes --]

hvmctl: convert HVMOP_*ioreq_server*

Note that we can't adjust HVM_IOREQSRV_BUFIOREQ_* to properly obey
name space rules, as these constants are in use by callers of the libxc
interface.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -41,6 +41,7 @@
 #include <xen/sched.h>
 #include <xen/memory.h>
 #include <xen/grant_table.h>
+#include <xen/hvm/control.h>
 #include <xen/hvm/params.h>
 #include <xen/xsm/flask_op.h>
 #include <xen/tmem.h>
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1416,23 +1416,14 @@ int xc_hvm_create_ioreq_server(xc_interf
                                int handle_bufioreq,
                                ioservid_t *id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
+    DECLARE_HVMCTL(create_ioreq_server, domid,
+                   .handle_bufioreq = handle_bufioreq);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->handle_bufioreq = handle_bufioreq;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_create_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    rc = do_hvmctl(xch, &hvmctl);
 
-    *id = arg->id;
+    *id = hvmctl.u.create_ioreq_server.id;
 
-    xc_hypercall_buffer_free(xch, arg);
     return rc;
 }
 
@@ -1443,84 +1434,52 @@ int xc_hvm_get_ioreq_server_info(xc_inte
                                  xen_pfn_t *bufioreq_pfn,
                                  evtchn_port_t *bufioreq_port)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
+    DECLARE_HVMCTL(get_ioreq_server_info, domid,
+                   .id = id);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_get_ioreq_server_info,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    rc = do_hvmctl(xch, &hvmctl);
     if ( rc != 0 )
-        goto done;
+        return rc;
 
     if ( ioreq_pfn )
-        *ioreq_pfn = arg->ioreq_pfn;
+        *ioreq_pfn = hvmctl.u.get_ioreq_server_info.ioreq_pfn;
 
     if ( bufioreq_pfn )
-        *bufioreq_pfn = arg->bufioreq_pfn;
+        *bufioreq_pfn = hvmctl.u.get_ioreq_server_info.bufioreq_pfn;
 
     if ( bufioreq_port )
-        *bufioreq_port = arg->bufioreq_port;
+        *bufioreq_port = hvmctl.u.get_ioreq_server_info.bufioreq_port;
 
-done:
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return 0;
 }
 
 int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t domid,
                                         ioservid_t id, int is_mmio,
                                         uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
+                   .id = id,
+                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
+                                   : XEN_HVMCTL_IO_RANGE_PORT,
+                   .start = start,
+                   .end = end);
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
                                             ioservid_t id, int is_mmio,
                                             uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
+                   .id = id,
+                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
+                                   : XEN_HVMCTL_IO_RANGE_PORT,
+                   .start = start,
+                   .end = end);
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1528,37 +1487,23 @@ int xc_hvm_map_pcidev_to_ioreq_server(xc
                                       uint8_t bus, uint8_t device,
                                       uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    /*
+     * The underlying hypercall will deal with ranges of PCI SBDF
+     * but, for simplicity, the API only uses singletons.
+     */
+    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
+    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
+                   .id = id,
+                   .type = XEN_HVMCTL_IO_RANGE_PCI,
+                   .start = sbdf,
+                   .end = sbdf);
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-
-    /*
-     * The underlying hypercall will deal with ranges of PCI SBDF
-     * but, for simplicity, the API only uses singletons.
-     */
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1566,54 +1511,29 @@ int xc_hvm_unmap_pcidev_from_ioreq_serve
                                           uint8_t bus, uint8_t device,
                                           uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
+    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
+                   .id = id,
+                   .type = XEN_HVMCTL_IO_RANGE_PCI,
+                   .start = sbdf,
+                   .end = sbdf);
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_destroy_ioreq_server(xc_interface *xch,
                                 domid_t domid,
                                 ioservid_t id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(destroy_ioreq_server, domid,
+                   .id = id);
 
-    arg->domid = domid;
-    arg->id = id;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_destroy_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_set_ioreq_server_state(xc_interface *xch,
@@ -1621,23 +1541,11 @@ int xc_hvm_set_ioreq_server_state(xc_int
                                   ioservid_t id,
                                   int enabled)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    DECLARE_HVMCTL(set_ioreq_server_state, domid,
+                   .id = id,
+                   .enabled = !!enabled);
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->enabled = !!enabled;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_ioreq_server_state,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_domain_setdebugging(xc_interface *xch,
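
For orientation (editorial sketch, not part of the patch): a secondary
emulator would typically drive the converted wrappers above in this
order. The prototypes of xc_hvm_create_ioreq_server() and
xc_hvm_get_ioreq_server_info() are assumed from their bodies earlier in
this hunk, and the port range is purely illustrative.

#include <xenctrl.h>

static int setup_secondary_emulator(xc_interface *xch, domid_t domid)
{
    ioservid_t id;
    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_port;

    if ( xc_hvm_create_ioreq_server(xch, domid,
                                    HVM_IOREQSRV_BUFIOREQ_ATOMIC, &id) )
        return -1;

    /* The pfns handed back get mapped by the emulator; the port gets
     * bound to receive buffered-I/O notifications. */
    if ( xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                      &bufioreq_pfn, &bufioreq_port) )
        goto fail;

    /* Claim an (illustrative) port I/O range for this emulator. */
    if ( xc_hvm_map_io_range_to_ioreq_server(xch, domid, id, 0 /* !mmio */,
                                             0x3c0, 0x3df) )
        goto fail;

    /* No emulation requests are forwarded until the server is enabled. */
    if ( xc_hvm_set_ioreq_server_state(xch, domid, id, 1) )
        goto fail;

    return 0;

 fail:
    xc_hvm_destroy_ioreq_server(xch, domid, id);
    return -1;
}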
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -34,8 +34,6 @@
 #define XC_INTERNAL_COMPAT_MAP_FOREIGN_API
 #include "xenctrl.h"
 
-#include <xen/hvm/control.h>
-
 #include <xencall.h>
 #include <xenforeignmemory.h>
 
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -20,6 +20,7 @@
 #include <xen/sched.h>
 #include <asm/hap.h>
 #include <asm/shadow.h>
+#include <asm/hvm/ioreq.h>
 #include <xsm/xsm.h>
 
 static int set_pci_intx_level(struct domain *d,
@@ -299,6 +300,50 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = hvm_inject_msi(d, op.u.inject_msi.addr, op.u.inject_msi.data);
         break;
 
+    case XEN_HVMCTL_create_ioreq_server:
+        rc = -EINVAL;
+        if ( op.u.create_ioreq_server.rsvd )
+            break;
+        rc = hvm_create_ioreq_server(d, current->domain->domain_id, 0,
+                                     op.u.create_ioreq_server.handle_bufioreq,
+                                     &op.u.create_ioreq_server.id);
+        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
+                                            u.create_ioreq_server.id) )
+            rc = -EFAULT;
+        break;
+
+    case XEN_HVMCTL_get_ioreq_server_info:
+        rc = -EINVAL;
+        if ( op.u.get_ioreq_server_info.rsvd )
+            break;
+        rc = hvm_get_ioreq_server_info(d, &op.u.get_ioreq_server_info);
+        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
+                                            u.get_ioreq_server_info) )
+            rc = -EFAULT;
+        break;
+
+    case XEN_HVMCTL_map_io_range_to_ioreq_server:
+        rc = hvm_map_io_range_to_ioreq_server(
+                 d, &op.u.map_io_range_to_ioreq_server);
+        break;
+
+    case XEN_HVMCTL_unmap_io_range_from_ioreq_server:
+        rc = hvm_unmap_io_range_from_ioreq_server(
+                 d, &op.u.unmap_io_range_from_ioreq_server);
+        break;
+
+    case XEN_HVMCTL_destroy_ioreq_server:
+        rc = hvm_destroy_ioreq_server(d, op.u.destroy_ioreq_server.id);
+        break;
+
+    case XEN_HVMCTL_set_ioreq_server_state:
+        rc = -EINVAL;
+        if ( op.u.set_ioreq_server_state.rsvd )
+            break;
+        rc = hvm_set_ioreq_server_state(d, op.u.set_ioreq_server_state.id,
+                                        !!op.u.set_ioreq_server_state.enabled);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
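
For context (reconstructed, not part of this hunk): the surrounding
do_hvmctl() skeleton comes from patch 01, so each new case above relies
on a single copy-in, interface version check, domain lookup and XSM
check having already happened. Roughly, with the "cmd" field name and
the xsm_hvm_control() hook name assumed (cf. the flask discussion later
in this thread):

long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xen_hvmctl_t) u_hvmctl)
{
    struct xen_hvmctl op;
    struct domain *d;
    long rc;

    if ( copy_from_guest(&op, u_hvmctl, 1) )
        return -EFAULT;
    if ( op.interface_version != XEN_HVMCTL_INTERFACE_VERSION )
        return -EACCES;

    rc = rcu_lock_remote_domain_by_id(op.domain, &d);
    if ( rc )
        return rc;

    rc = xsm_hvm_control(XSM_DM_PRIV, d, op.cmd);
    if ( rc )
        goto out;

    switch ( op.cmd )
    {
        /* ... including the XEN_HVMCTL_*ioreq_server* cases above ... */
    }

 out:
    rcu_unlock_domain(d);
    return rc;
}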
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4487,195 +4487,6 @@ static int hvmop_flush_tlb_all(void)
     return 0;
 }
 
-static int hvmop_create_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
-{
-    struct domain *curr_d = current->domain;
-    xen_hvm_create_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_create_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
-                                 op.handle_bufioreq, &op.id);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_get_ioreq_server_info(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
-{
-    xen_hvm_get_ioreq_server_info_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_get_ioreq_server_info);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_get_ioreq_server_info(d, op.id,
-                                   &op.ioreq_pfn,
-                                   &op.bufioreq_pfn, 
-                                   &op.bufioreq_port);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_map_io_range_to_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_map_io_range_to_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.type,
-                                          op.start, op.end);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_unmap_io_range_from_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_unmap_io_range_from_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.type,
-                                              op.start, op.end);
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_set_ioreq_server_state(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
-{
-    xen_hvm_set_ioreq_server_state_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_destroy_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
-{
-    xen_hvm_destroy_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_destroy_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_destroy_ioreq_server(d, op.id);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_set_evtchn_upcall_vector(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
 {
@@ -5192,36 +5003,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
 
     switch ( op )
     {
-    case HVMOP_create_ioreq_server:
-        rc = hvmop_create_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
-        break;
-    
-    case HVMOP_get_ioreq_server_info:
-        rc = hvmop_get_ioreq_server_info(
-            guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
-        break;
-    
-    case HVMOP_map_io_range_to_ioreq_server:
-        rc = hvmop_map_io_range_to_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-    
-    case HVMOP_unmap_io_range_from_ioreq_server:
-        rc = hvmop_unmap_io_range_from_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-
-    case HVMOP_set_ioreq_server_state:
-        rc = hvmop_set_ioreq_server_state(
-            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
-        break;
-    
-    case HVMOP_destroy_ioreq_server:
-        rc = hvmop_destroy_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
-        break;
-    
     case HVMOP_set_evtchn_upcall_vector:
         rc = hvmop_set_evtchn_upcall_vector(
             guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -513,9 +513,9 @@ static int hvm_ioreq_server_alloc_ranges
         char *name;
 
         rc = asprintf(&name, "ioreq_server %d %s", s->id,
-                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
-                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
-                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
+                      (i == XEN_HVMCTL_IO_RANGE_PORT) ? "port" :
+                      (i == XEN_HVMCTL_IO_RANGE_MEMORY) ? "memory" :
+                      (i == XEN_HVMCTL_IO_RANGE_PCI) ? "pci" :
                       "");
         if ( rc )
             goto fail;
@@ -686,7 +686,8 @@ int hvm_create_ioreq_server(struct domai
     struct hvm_ioreq_server *s;
     int rc;
 
-    if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
+    if ( !is_hvm_domain(d) ||
+         bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
         return -EINVAL;
 
     rc = -ENOMEM;
@@ -738,6 +739,9 @@ int hvm_destroy_ioreq_server(struct doma
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -772,14 +776,15 @@ int hvm_destroy_ioreq_server(struct doma
     return rc;
 }
 
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
-                              evtchn_port_t *bufioreq_port)
+int hvm_get_ioreq_server_info(struct domain *d,
+                              struct xen_hvm_get_ioreq_server_info *info)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -790,15 +795,15 @@ int hvm_get_ioreq_server_info(struct dom
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id != id )
+        if ( s->id != info->id )
             continue;
 
-        *ioreq_pfn = s->ioreq.gmfn;
+        info->ioreq_pfn = s->ioreq.gmfn;
 
         if ( s->bufioreq.va != NULL )
         {
-            *bufioreq_pfn = s->bufioreq.gmfn;
-            *bufioreq_port = s->bufioreq_evtchn;
+            info->bufioreq_pfn = s->bufioreq.gmfn;
+            info->bufioreq_port = s->bufioreq_evtchn;
         }
 
         rc = 0;
@@ -810,13 +815,15 @@ int hvm_get_ioreq_server_info(struct dom
     return rc;
 }
 
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
-                                     uint32_t type, uint64_t start,
-                                     uint64_t end)
+int hvm_map_io_range_to_ioreq_server(struct domain *d,
+                                     const struct xen_hvm_io_range *ior)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( ior->rsvd || !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -827,16 +834,16 @@ int hvm_map_io_range_to_ioreq_server(str
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
+        if ( s->id == ior->id )
         {
             struct rangeset *r;
 
-            switch ( type )
+            switch ( ior->type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
-                r = s->range[type];
+            case XEN_HVMCTL_IO_RANGE_PORT:
+            case XEN_HVMCTL_IO_RANGE_MEMORY:
+            case XEN_HVMCTL_IO_RANGE_PCI:
+                r = s->range[ior->type];
                 break;
 
             default:
@@ -849,10 +856,10 @@ int hvm_map_io_range_to_ioreq_server(str
                 break;
 
             rc = -EEXIST;
-            if ( rangeset_overlaps_range(r, start, end) )
+            if ( rangeset_overlaps_range(r, ior->start, ior->end) )
                 break;
 
-            rc = rangeset_add_range(r, start, end);
+            rc = rangeset_add_range(r, ior->start, ior->end);
             break;
         }
     }
@@ -862,13 +869,15 @@ int hvm_map_io_range_to_ioreq_server(str
     return rc;
 }
 
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
-                                         uint32_t type, uint64_t start,
-                                         uint64_t end)
+int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
+                                         const struct xen_hvm_io_range *ior)
 {
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( ior->rsvd || !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -879,16 +888,16 @@ int hvm_unmap_io_range_from_ioreq_server
         if ( s == d->arch.hvm_domain.default_ioreq_server )
             continue;
 
-        if ( s->id == id )
+        if ( s->id == ior->id )
         {
             struct rangeset *r;
 
-            switch ( type )
+            switch ( ior->type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
-                r = s->range[type];
+            case XEN_HVMCTL_IO_RANGE_PORT:
+            case XEN_HVMCTL_IO_RANGE_MEMORY:
+            case XEN_HVMCTL_IO_RANGE_PCI:
+                r = s->range[ior->type];
                 break;
 
             default:
@@ -901,10 +910,10 @@ int hvm_unmap_io_range_from_ioreq_server
                 break;
 
             rc = -ENOENT;
-            if ( !rangeset_contains_range(r, start, end) )
+            if ( !rangeset_contains_range(r, ior->start, ior->end) )
                 break;
 
-            rc = rangeset_remove_range(r, start, end);
+            rc = rangeset_remove_range(r, ior->start, ior->end);
             break;
         }
     }
@@ -920,6 +929,9 @@ int hvm_set_ioreq_server_state(struct do
     struct list_head *entry;
     int rc;
 
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
@@ -1128,12 +1140,12 @@ struct hvm_ioreq_server *hvm_select_iore
 
         /* PCI config data cycle */
 
-        sbdf = HVMOP_PCI_SBDF(0,
-                              PCI_BUS(CF8_BDF(cf8)),
-                              PCI_SLOT(CF8_BDF(cf8)),
-                              PCI_FUNC(CF8_BDF(cf8)));
+        sbdf = XEN_HVMCTL_PCI_SBDF(0,
+                                   PCI_BUS(CF8_BDF(cf8)),
+                                   PCI_SLOT(CF8_BDF(cf8)),
+                                   PCI_FUNC(CF8_BDF(cf8)));
 
-        type = HVMOP_IO_RANGE_PCI;
+        type = XEN_HVMCTL_IO_RANGE_PCI;
         addr = ((uint64_t)sbdf << 32) |
                CF8_ADDR_LO(cf8) |
                (p->addr & 3);
@@ -1152,7 +1164,7 @@ struct hvm_ioreq_server *hvm_select_iore
     else
     {
         type = (p->type == IOREQ_TYPE_PIO) ?
-                HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
+                XEN_HVMCTL_IO_RANGE_PORT : XEN_HVMCTL_IO_RANGE_MEMORY;
         addr = p->addr;
     }
 
@@ -1174,19 +1186,19 @@ struct hvm_ioreq_server *hvm_select_iore
         {
             unsigned long end;
 
-        case HVMOP_IO_RANGE_PORT:
+        case XEN_HVMCTL_IO_RANGE_PORT:
             end = addr + p->size - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_MEMORY:
+        case XEN_HVMCTL_IO_RANGE_MEMORY:
             end = addr + (p->size * p->count) - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_PCI:
+        case XEN_HVMCTL_IO_RANGE_PCI:
             if ( rangeset_contains_singleton(r, addr >> 32) )
             {
                 p->type = IOREQ_TYPE_PCI_CONFIG;
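
To make the PCI matching above concrete, here is a self-contained
illustration (editorial, not part of the patch) of how a config-space
access is reduced to the 64-bit key checked against the
XEN_HVMCTL_IO_RANGE_PCI rangeset; the helper name is invented, the
macro is copied from the new public header:

#include <stdint.h>

#define XEN_HVMCTL_PCI_SBDF(s, b, d, f) \
    ((((s) & 0xffff) << 16) | \
     (((b) & 0xff) << 8) | \
     (((d) & 0x1f) << 3) | \
     ((f) & 0x07))

static uint64_t pci_cfg_key(uint32_t cf8, uint16_t port)
{
    uint16_t bdf  = (cf8 >> 8) & 0xffff;        /* mirrors CF8_BDF() */
    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(0, bdf >> 8,
                                        (bdf >> 3) & 0x1f, bdf & 7);

    /* High half selects the device, low half the register offset. */
    return ((uint64_t)sbdf << 32) | (cf8 & 0xfc) | (port & 3);
}

E.g. cf8 = 0x80001008 (bus 0, device 2, function 0, register 8) and an
access to port 0xcfd yield pci_cfg_key() == 0x0000001000000009, so an
emulator owns the access iff its PCI rangeset contains the singleton
0x10, i.e. exactly the SBDF registered via
xc_hvm_map_pcidev_to_ioreq_server().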
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -30,6 +30,7 @@
 #include <asm/hvm/vmx/vmcs.h>
 #include <asm/hvm/svm/vmcb.h>
 #include <public/grant_table.h>
+#include <public/hvm/control.h>
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
 #include <public/hvm/hvm_op.h>
@@ -47,7 +48,7 @@ struct hvm_ioreq_vcpu {
     bool_t           pending;
 };
 
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (XEN_HVMCTL_IO_RANGE_PCI + 1)
 #define MAX_NR_IO_RANGES  256
 
 struct hvm_ioreq_server {
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -19,6 +19,8 @@
 #ifndef __ASM_X86_HVM_IOREQ_H__
 #define __ASM_X86_HVM_IOREQ_H__
 
+#include <public/hvm/control.h>
+
 bool_t hvm_io_pending(struct vcpu *v);
 bool_t handle_hvm_io_completion(struct vcpu *v);
 bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
@@ -27,16 +29,12 @@ int hvm_create_ioreq_server(struct domai
                             bool_t is_default, int bufioreq_handling,
                             ioservid_t *id);
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
-                              evtchn_port_t *bufioreq_port);
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
-                                     uint32_t type, uint64_t start,
-                                     uint64_t end);
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
-                                         uint32_t type, uint64_t start,
-                                         uint64_t end);
+int hvm_get_ioreq_server_info(struct domain *d,
+                              struct xen_hvm_get_ioreq_server_info *info);
+int hvm_map_io_range_to_ioreq_server(struct domain *d,
+                                     const struct xen_hvm_io_range *r);
+int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
+                                         const struct xen_hvm_io_range *r);
 int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
                                bool_t enabled);
 
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -26,6 +26,7 @@
 #endif
 
 #include "../xen.h"
+#include "../event_channel.h"
 
 #define XEN_HVMCTL_INTERFACE_VERSION 0x00000001
 
@@ -130,6 +131,131 @@ struct xen_hvm_inject_msi {
     uint64_t  addr;
 };
 
+/*
+ * IOREQ Servers
+ *
+ * The interface between an I/O emulator and Xen is called an IOREQ Server.
+ * A domain supports a single 'legacy' IOREQ Server which is instantiated if
+ * parameter...
+ *
+ * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
+ * ioreq structures), or...
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
+ * ioreq ring), or...
+ * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
+ * to request buffered I/O emulation).
+ *
+ * The following hypercalls facilitate the creation of IOREQ Servers for
+ * 'secondary' emulators which are invoked to implement port I/O, memory, or
+ * PCI config space ranges which they explicitly register.
+ */
+
+typedef uint16_t ioservid_t;
+
+/*
+ * XEN_HVMCTL_create_ioreq_server: Instantiate a new IOREQ Server for a
+ *                                 secondary emulator servicing domain
+ *                                 <domid>.
+ *
+ * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
+ * the buffered ioreq ring will not be allocated and hence all emulation
+ * requests to this server will be synchronous.
+ */
+struct xen_hvm_create_ioreq_server {
+#define HVM_IOREQSRV_BUFIOREQ_OFF    0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
+    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
+    uint8_t rsvd;            /* IN - must be zero */
+    ioservid_t id;           /* OUT - server id */
+};
+
+/*
+ * XEN_HVMCTL_get_ioreq_server_info: Get all the information necessary to
+ *                                   access IOREQ Server <id>.
+ *
+ * The emulator needs to map the synchronous ioreq structures and buffered
+ * ioreq ring (if it exists) that Xen uses to request emulation. These are
+ * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * respectively. In addition, if the IOREQ Server is handling buffered
+ * emulation requests, the emulator needs to bind to event channel
+ * <bufioreq_port> to listen for them. (The event channels used for
+ * synchronous emulation requests are specified in the per-CPU ioreq
+ * structures in <ioreq_pfn>).
+ * If the IOREQ Server is not handling buffered emulation requests then the
+ * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ */
+struct xen_hvm_get_ioreq_server_info {
+    ioservid_t id;                 /* IN - server id */
+    uint16_t rsvd;                 /* IN - must be zero */
+    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
+    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
+    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
+};
+
+/*
+ * XEN_HVMCTL_map_io_range_to_ioreq_server: Register an I/O range of domain
+ *                                          <domid> for emulation by the
+ *                                          client of IOREQ Server <id>
+ * XEN_HVMCTL_unmap_io_range_from_ioreq_server: Deregister an I/O range of
+ *                                              <domid> for emulation by the
+ *                                              client of IOREQ Server <id>
+ *
+ * There are three types of I/O that can be emulated: port I/O, memory accesses
+ * and PCI config space accesses. The <type> field denotes which type of range
+ * the <start> and <end> (inclusive) fields are specifying.
+ * PCI config space ranges are specified by segment/bus/device/function values
+ * which should be encoded using the XEN_HVMCTL_PCI_SBDF helper macro below.
+ *
+ * NOTE: unless an emulation request falls entirely within a range mapped
+ * by a secondary emulator, it will not be passed to that emulator.
+ */
+struct xen_hvm_io_range {
+    ioservid_t id;               /* IN - server id */
+    uint16_t type;               /* IN - type of range */
+    uint32_t rsvd;               /* IN - must be zero */
+#define XEN_HVMCTL_IO_RANGE_PORT   0 /* I/O port range */
+#define XEN_HVMCTL_IO_RANGE_MEMORY 1 /* MMIO range */
+#define XEN_HVMCTL_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
+    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
+};
+
+#define XEN_HVMCTL_PCI_SBDF(s, b, d, f) \
+	((((s) & 0xffff) << 16) | \
+	 (((b) & 0xff) << 8) | \
+	 (((d) & 0x1f) << 3) | \
+	 ((f) & 0x07))
+
+/*
+ * XEN_HVMCTL_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing
+ *                                  domain <domid>.
+ *
+ * Any registered I/O ranges will be automatically deregistered.
+ */
+struct xen_hvm_destroy_ioreq_server {
+    ioservid_t id; /* IN - server id */
+};
+
+/*
+ * XEN_HVMCTL_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
+ *                                    servicing domain <domid>.
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is in
+ * the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
+ * XEN_HVMCTL_get_ioreq_server_info) are not meaningful until the IOREQ Server
+ * is in the enabled state.
+ */
+struct xen_hvm_set_ioreq_server_state {
+    ioservid_t id;   /* IN - server id */
+    uint8_t enabled; /* IN - enabled? */
+    uint8_t rsvd;    /* IN - must be zero */
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -142,6 +268,12 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_set_mem_type                  6
 #define XEN_HVMCTL_inject_trap                   7
 #define XEN_HVMCTL_inject_msi                    8
+#define XEN_HVMCTL_create_ioreq_server           9
+#define XEN_HVMCTL_get_ioreq_server_info        10
+#define XEN_HVMCTL_map_io_range_to_ioreq_server 11
+#define XEN_HVMCTL_unmap_io_range_from_ioreq_server 12
+#define XEN_HVMCTL_destroy_ioreq_server         13
+#define XEN_HVMCTL_set_ioreq_server_state       14
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -152,6 +284,12 @@ struct xen_hvmctl {
         struct xen_hvm_set_mem_type set_mem_type;
         struct xen_hvm_inject_trap inject_trap;
         struct xen_hvm_inject_msi inject_msi;
+        struct xen_hvm_create_ioreq_server create_ioreq_server;
+        struct xen_hvm_get_ioreq_server_info get_ioreq_server_info;
+        struct xen_hvm_io_range map_io_range_to_ioreq_server;
+        struct xen_hvm_io_range unmap_io_range_from_ioreq_server;
+        struct xen_hvm_destroy_ioreq_server destroy_ioreq_server;
+        struct xen_hvm_set_ioreq_server_state set_ioreq_server_state;
         uint8_t pad[120];
     } u;
 };
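
Aside (assumed reconstruction, for readability of the libxc hunks): the
DECLARE_HVMCTL() macro used throughout this patch is introduced by
patch 01 in xc_private.h; its shape is roughly the following, with the
"cmd" field name assumed:

#define DECLARE_HVMCTL(op, dom, ...)                       \
    struct xen_hvmctl hvmctl = {                           \
        .interface_version = XEN_HVMCTL_INTERFACE_VERSION, \
        .domain = (dom),                                   \
        .cmd = XEN_HVMCTL_##op,                            \
        .u.op = { __VA_ARGS__ },                           \
    }

so that e.g. DECLARE_HVMCTL(destroy_ioreq_server, domid, .id = id)
expands to a fully initialised on-stack struct xen_hvmctl, which
do_hvmctl(xch, &hvmctl) then hands to the hypervisor.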
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -25,7 +25,6 @@
 
 #include "../xen.h"
 #include "../trace.h"
-#include "../event_channel.h"
 
 /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
 #define HVMOP_set_param           0
@@ -137,152 +136,6 @@ struct xen_hvm_get_mem_type {
 typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
 
-/* Following tools-only interfaces may change in future. */
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-/*
- * IOREQ Servers
- *
- * The interface between an I/O emulator an Xen is called an IOREQ Server.
- * A domain supports a single 'legacy' IOREQ Server which is instantiated if
- * parameter...
- *
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
- * ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
- * ioreq ring), or...
- * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
- * to request buffered I/O emulation).
- * 
- * The following hypercalls facilitate the creation of IOREQ Servers for
- * 'secondary' emulators which are invoked to implement port I/O, memory, or
- * PCI config space ranges which they explicitly register.
- */
-
-typedef uint16_t ioservid_t;
-
-/*
- * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
- *                            emulator servicing domain <domid>.
- *
- * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
- * the buffered ioreq ring will not be allocated and hence all emulation
- * requestes to this server will be synchronous.
- */
-#define HVMOP_create_ioreq_server 17
-struct xen_hvm_create_ioreq_server {
-    domid_t domid;           /* IN - domain to be serviced */
-#define HVM_IOREQSRV_BUFIOREQ_OFF    0
-#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
-/*
- * Use this when read_pointer gets updated atomically and
- * the pointer pair gets read atomically:
- */
-#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
-    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
-    ioservid_t id;           /* OUT - server id */
-};
-typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
-
-/*
- * HVMOP_get_ioreq_server_info: Get all the information necessary to access
- *                              IOREQ Server <id>. 
- *
- * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
- * respectively. In addition, if the IOREQ Server is handling buffered
- * emulation requests, the emulator needs to bind to event channel
- * <bufioreq_port> to listen for them. (The event channels used for
- * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
- * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
- */
-#define HVMOP_get_ioreq_server_info 18
-struct xen_hvm_get_ioreq_server_info {
-    domid_t domid;                 /* IN - domain to be serviced */
-    ioservid_t id;                 /* IN - server id */
-    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
-    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
-    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
-};
-typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
-
-/*
- * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
- *                                   for emulation by the client of IOREQ
- *                                   Server <id>
- * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
- *                                       for emulation by the client of IOREQ
- *                                       Server <id>
- *
- * There are three types of I/O that can be emulated: port I/O, memory accesses
- * and PCI config space accesses. The <type> field denotes which type of range
- * the <start> and <end> (inclusive) fields are specifying.
- * PCI config space ranges are specified by segment/bus/device/function values
- * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
- *
- * NOTE: unless an emulation request falls entirely within a range mapped
- * by a secondary emulator, it will not be passed to that emulator.
- */
-#define HVMOP_map_io_range_to_ioreq_server 19
-#define HVMOP_unmap_io_range_from_ioreq_server 20
-struct xen_hvm_io_range {
-    domid_t domid;               /* IN - domain to be serviced */
-    ioservid_t id;               /* IN - server id */
-    uint32_t type;               /* IN - type of range */
-# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
-# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
-# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
-    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
-};
-typedef struct xen_hvm_io_range xen_hvm_io_range_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
-
-#define HVMOP_PCI_SBDF(s,b,d,f)                 \
-	((((s) & 0xffff) << 16) |                   \
-	 (((b) & 0xff) << 8) |                      \
-	 (((d) & 0x1f) << 3) |                      \
-	 ((f) & 0x07))
-
-/*
- * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
- *                             <domid>.
- *
- * Any registered I/O ranges will be automatically deregistered.
- */
-#define HVMOP_destroy_ioreq_server 21
-struct xen_hvm_destroy_ioreq_server {
-    domid_t domid; /* IN - domain to be serviced */
-    ioservid_t id; /* IN - server id */
-};
-typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
-
-/*
- * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
- *                               domain <domid>.
- *
- * The IOREQ Server will not be passed any emulation requests until it is in the
- * enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
- * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
- * the enabled state.
- */
-#define HVMOP_set_ioreq_server_state 22
-struct xen_hvm_set_ioreq_server_state {
-    domid_t domid;   /* IN - domain to be serviced */
-    ioservid_t id;   /* IN - server id */
-    uint8_t enabled; /* IN - enabled? */    
-};
-typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
-
 #if defined(__i386__) || defined(__x86_64__)
 
 /*
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -174,7 +174,6 @@ struct xsm_operations {
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
     int (*hvm_set_pci_link_route) (struct domain *d);
-    int (*hvm_ioreq_server) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
     int (*memtype) (uint32_t access);
@@ -648,11 +647,6 @@ static inline int xsm_hvm_set_pci_link_r
     return xsm_ops->hvm_set_pci_link_route(d);
 }
 
-static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
-{
-    return xsm_ops->hvm_ioreq_server(d, op);
-}
-
 static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
 {
     return xsm_ops->mem_sharing_op(d, cd, op);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
     set_to_dummy_if_null(ops, machine_memory_map);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1526,11 +1526,6 @@ static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_ioreq_server(struct domain *d, int op)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
-}
-
 static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
 {
     int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
@@ -1799,7 +1794,6 @@ static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
     .apic = flask_apic,
     .machine_memory_map = flask_machine_memory_map,
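
(Editorial note with a hedged sketch: the flask_hvm_ioreq_server() hook
removed above is subsumed by the single check at the hvmctl entry point
from patch 01, presumably along these lines:

static int flask_hvm_control(struct domain *d, unsigned int op)
{
    return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
}

i.e. the same HVM__HVMCTL vector as before, now applied once for every
sub-op instead of per ioreq-server operation.)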

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH 11/11] x86/HVM: serialize trap injecting producer and consumer
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
                   ` (9 preceding siblings ...)
  2016-06-20 12:57 ` [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server* Jan Beulich
@ 2016-06-20 12:58 ` Jan Beulich
  2016-06-23 15:14   ` Andrew Cooper
  2016-06-23 15:15 ` [PATCH 00/11] hvmctl hypercall Andrew Cooper
  11 siblings, 1 reply; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 12:58 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper

[-- Attachment #1: Type: text/plain, Size: 2390 bytes --]

Since injection works on a remote vCPU, and since there's no
enforcement of the subject vCPU being paused, there's a potential race
between the producing and consuming sides. Fix this by leveraging the
vector field as a synchronization variable.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -106,14 +106,16 @@ static int inject_trap(struct domain *d,
     if ( op->vcpuid >= d->max_vcpus || (v = d->vcpu[op->vcpuid]) == NULL )
         return -ENOENT;
 
-    if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
+    if ( cmpxchg(&v->arch.hvm_vcpu.inject_trap.vector, HVM_TRAP_VECTOR_UNSET,
+                 HVM_TRAP_VECTOR_UPDATING) != HVM_TRAP_VECTOR_UNSET )
         return -EBUSY;
 
-    v->arch.hvm_vcpu.inject_trap.vector     = op->vector;
     v->arch.hvm_vcpu.inject_trap.type       = op->type;
     v->arch.hvm_vcpu.inject_trap.error_code = op->error_code;
     v->arch.hvm_vcpu.inject_trap.insn_len   = op->insn_len;
     v->arch.hvm_vcpu.inject_trap.cr2        = op->cr2;
+    smp_wmb();
+    v->arch.hvm_vcpu.inject_trap.vector     = op->vector;
 
     return 0;
 }
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -510,10 +510,11 @@ void hvm_do_resume(struct vcpu *v)
     }
 
     /* Inject pending hw/sw trap */
-    if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
+    if ( v->arch.hvm_vcpu.inject_trap.vector >= 0 )
     {
+        smp_rmb();
         hvm_inject_trap(&v->arch.hvm_vcpu.inject_trap);
-        v->arch.hvm_vcpu.inject_trap.vector = -1;
+        v->arch.hvm_vcpu.inject_trap.vector = HVM_TRAP_VECTOR_UNSET;
     }
 }
 
@@ -1508,7 +1509,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
         (void(*)(unsigned long))hvm_assert_evtchn_irq,
         (unsigned long)v);
 
-    v->arch.hvm_vcpu.inject_trap.vector = -1;
+    v->arch.hvm_vcpu.inject_trap.vector = HVM_TRAP_VECTOR_UNSET;
 
     if ( is_pvh_domain(d) )
     {
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -78,6 +78,8 @@ enum hvm_intblk {
 #define HVM_HAP_SUPERPAGE_1GB   0x00000002
 
 struct hvm_trap {
+#define HVM_TRAP_VECTOR_UNSET    (-1)
+#define HVM_TRAP_VECTOR_UPDATING (-2)
     int16_t       vector;
     uint8_t       type;         /* X86_EVENTTYPE_* */
     uint8_t       insn_len;     /* Instruction length */
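
The handshake the patch establishes, distilled into a standalone sketch
(editorial; t and deliver() are placeholders, while the field, constant
and barrier names follow the patch):

/* Producer, running remotely (inject_trap): */
if ( cmpxchg(&t->vector, HVM_TRAP_VECTOR_UNSET,
             HVM_TRAP_VECTOR_UPDATING) != HVM_TRAP_VECTOR_UNSET )
    return -EBUSY;              /* another injection is in flight */
t->type = type;                 /* fill in the payload fields ... */
smp_wmb();                      /* ... then publish:              */
t->vector = vector;             /* real vectors are >= 0          */

/* Consumer, in hvm_do_resume() on the subject vCPU: */
if ( t->vector >= 0 )           /* skips UNSET (-1) and UPDATING (-2) */
{
    smp_rmb();                  /* order payload reads after the
                                   read of vector above */
    deliver(t);                 /* hvm_inject_trap() in the patch */
    t->vector = HVM_TRAP_VECTOR_UNSET;
}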




[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level
  2016-06-20 12:53 ` [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level Jan Beulich
@ 2016-06-20 14:32   ` Daniel De Graaf
  2016-06-20 14:48     ` Ian Jackson
  2016-06-21 10:14   ` Wei Liu
  1 sibling, 1 reply; 31+ messages in thread
From: Daniel De Graaf @ 2016-06-20 14:32 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant

On 06/20/2016 08:53 AM, Jan Beulich wrote:
> Note that this adds validation of the "domain" interface structure
> field, which previously got ignored.
>
> Note further that this retains the hvmop interface definitions as those
> had (wrongly) been exposed to non-tool stack consumers (albeit the
> operation wouldn't have succeeded when requested by a domain for
> itself).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> TBD: xen/xsm/flask/policy/access_vectors says "also needs hvmctl", but
>      I don't see how this has been done so far. With the change here,
>      doing two checks in flask_hvm_control() (the generic one always
>      and a specific one if needed) would of course be simple, but it's
>      unclear how subsequently added sub-ops should then be dealt with
>      (which don't have a similar remark).

I am not sure why that remark is there: it seems like it refers to an
overall check in the HVM operation hypercall, which does not exist.

There is no reason to have an operation protected by two different
access checks, so I think that both the previous and patched code
are correct and the "also needs hvmctl" comment should be removed.
With that, Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level
  2016-06-20 14:32   ` Daniel De Graaf
@ 2016-06-20 14:48     ` Ian Jackson
  2016-06-20 15:25       ` Jan Beulich
  0 siblings, 1 reply; 31+ messages in thread
From: Ian Jackson @ 2016-06-20 14:48 UTC (permalink / raw)
  To: Daniel De Graaf
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Tim Deegan, Paul Durrant, Jan Beulich, xen-devel

Daniel De Graaf writes ("Re: [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level"):
> On 06/20/2016 08:53 AM, Jan Beulich wrote:
> > Note that this adds validation of the "domain" interface structure
> > field, which previously got ignored.
> >
> > Note further that this retains the hvmop interface definitions as those
> > had (wrongly) been exposed to non-tool stack consumers (albeit the
> > operation wouldn't have succeeded when requested by a domain for
> > itself).
> >
> > Signed-off-by: Jan Beulich <jbeulich@suse.com>
> > ---
> > TBD: xen/xsm/flask/policy/access_vectors says "also needs hvmctl", but
> >      I don't see how this has been done so far. With the change here,
> >      doing two checks in flask_hvm_control() (the generic one always
> >      and a specific one if needed) would of course be simple, but it's
> >      unclear how subsequently added sub-ops should then be dealt with
> >      (which don't have a similar remark).
> 
> I am not sure why that remark is there: it seems like it refers to an
> overall check in the HVM operation hypercall, which does not exist.
> 
> There is no reason to have an operation protected by two different
> access checks, so I think that both the previous and patched code
> are correct and the "also needs hvmctl" comment should be removed.
> With that, Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>

This is a slight digression, but is it intended that all of these
hvmctl's are safe to expose to a deprivileged device model process in
dom0, or to a device model stub domain ?

Ian.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level
  2016-06-20 14:48     ` Ian Jackson
@ 2016-06-20 15:25       ` Jan Beulich
  0 siblings, 0 replies; 31+ messages in thread
From: Jan Beulich @ 2016-06-20 15:25 UTC (permalink / raw)
  To: Ian Jackson
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Tim Deegan, Paul Durrant, xen-devel, Daniel De Graaf

>>> On 20.06.16 at 16:48, <ian.jackson@eu.citrix.com> wrote:
> Daniel De Graaf writes ("Re: [PATCH 02/11] hvmctl: convert 
> HVMOP_set_pci_intx_level"):
>> On 06/20/2016 08:53 AM, Jan Beulich wrote:
>> > Note that this adds validation of the "domain" interface structure
>> > field, which previously got ignored.
>> >
>> > Note further that this retains the hvmop interface definitions as those
>> > had (wrongly) been exposed to non-tool stack consumers (albeit the
>> > operation wouldn't have succeeded when requested by a domain for
>> > itself).
>> >
>> > Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> > ---
>> > TBD: xen/xsm/flask/policy/access_vectors says "also needs hvmctl", but
>> >      I don't see how this has been done so far. With the change here,
>> >      doing two checks in flask_hvm_control() (the generic one always
>> >      and a specific one if needed) would of course be simple, but it's
>> >      unclear how subsequently added sub-ops should then be dealt with
>> >      (which don't have a similar remark).
>> 
>> I am not sure why that remark is there: it seems like it refers to an
>> overall check in the HVM operation hypercall, which does not exist.
>> 
>> There is no reason to have an operation protected by two different
>> access checks, so I think that both the previous and patched code
>> are correct and the "also needs hvmctl" comment should be removed.
>> With that, Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
> 
> This is a slight digression, but is it intended that all of these
> hvmctl's are safe to expose to a deprivileged device model process in
> dom0, or to a device model stub domain ?

Yes, afaict (they've been exposed the same way before).

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 01/11] public / x86: introduce hvmctl hypercall
  2016-06-20 12:52 ` [PATCH 01/11] public / x86: introduce " Jan Beulich
@ 2016-06-21 10:14   ` Wei Liu
  2016-06-23 14:55   ` Andrew Cooper
  1 sibling, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:52:41AM -0600, Jan Beulich wrote:
> ... as a means to replace all HVMOP_* which a domain can't issue on
> itself (i.e. intended for use by only the control domain or device
> model).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level
  2016-06-20 12:53 ` [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level Jan Beulich
  2016-06-20 14:32   ` Daniel De Graaf
@ 2016-06-21 10:14   ` Wei Liu
  1 sibling, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:53:23AM -0600, Jan Beulich wrote:
> Note that this adds validation of the "domain" interface structure
> field, which previously got ignored.
> 
> Note further that this retains the hvmop interface definitions as those
> had (wrongly) been exposed to non-tool stack consumers (albeit the
> operation wouldn't have succeeded when requested by a domain for
> itself).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> TBD: xen/xsm/flask/policy/access_vectors says "also needs hvmctl", but
>      I don't see how this has been done so far. With the change here,
>      doing two checks in flask_hvm_control() (the generic one always
>      and a specific one if needed) would of course be simple, but it's
>      unclear how subsequently added sub-ops should then be dealt with
>      (which don't have a similar remark).
> 
> --- a/tools/libxc/xc_misc.c
> +++ b/tools/libxc/xc_misc.c
> @@ -473,30 +473,14 @@ int xc_hvm_set_pci_intx_level(
>      uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
>      unsigned int level)
>  {
> -    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_pci_intx_level, arg);
> -    int rc;
> +    DECLARE_HVMCTL(set_pci_intx_level, dom,
> +                   .domain = domain,
> +                   .bus    = bus,
> +                   .device = device,
> +                   .intx   = intx,
> +                   .level =  level);

Minor nit: the "=" is not aligned.

For tool and hypervisor code changes, sans the XSM changes:

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 03/11] hvmctl: convert HVMOP_set_isa_irq_level
  2016-06-20 12:53 ` [PATCH 03/11] hvmctl: convert HVMOP_set_isa_irq_level Jan Beulich
@ 2016-06-21 10:14   ` Wei Liu
  0 siblings, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:53:53AM -0600, Jan Beulich wrote:
> Note that this retains the hvmop interface definitions as those had
> (wrongly) been exposed to non-tool stack consumers (albeit the
> operation wouldn't have succeeded when requested by a domain for
> itself).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 04/11] hvmctl: convert HVMOP_set_pci_link_route
  2016-06-20 12:54 ` [PATCH 04/11] hvmctl: convert HVMOP_set_pci_link_route Jan Beulich
@ 2016-06-21 10:14   ` Wei Liu
  0 siblings, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:54:24AM -0600, Jan Beulich wrote:
> Note that this retains the hvmop interface definitions as those had
> (wrongly) been exposed to non-tool stack consumers (albeit the
> operation wouldn't have succeeded when requested by a domain for
> itself).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 05/11] hvmctl: convert HVMOP_track_dirty_vram
  2016-06-20 12:54 ` [PATCH 05/11] hvmctl: convert HVMOP_track_dirty_vram Jan Beulich
@ 2016-06-21 10:14   ` Wei Liu
  0 siblings, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:54:57AM -0600, Jan Beulich wrote:
> Also limiting "nr" at the libxc level to 32 bits (the high 32 bits of
> the previous 64-bit parameter got ignored so far).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 06/11] hvmctl: convert HVMOP_modified_memory
  2016-06-20 12:55 ` [PATCH 06/11] hvmctl: convert HVMOP_modified_memory Jan Beulich
@ 2016-06-21 10:14   ` Wei Liu
  0 siblings, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:55:43AM -0600, Jan Beulich wrote:
> Also limiting "nr" at the libxc level to 32 bits (the high 32 bits of
> the previous 64-bit parameter got ignored so far).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 07/11] hvmctl: convert HVMOP_set_mem_type
  2016-06-20 12:56 ` [PATCH 07/11] hvmctl: convert HVMOP_set_mem_type Jan Beulich
@ 2016-06-21 10:14   ` Wei Liu
  0 siblings, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:56:14AM -0600, Jan Beulich wrote:
> This allows elimination of the (ab)use of the high operation number
> bits for encoding continuations.
> 
> Also limiting "nr" at the libxc level to 32 bits (the high 32 bits of
> the previous 64-bit parameter got ignored so far).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 08/11] hvmctl: convert HVMOP_inject_trap
  2016-06-20 12:56 ` [PATCH 08/11] hvmctl: convert HVMOP_inject_trap Jan Beulich
@ 2016-06-21 10:14   ` Wei Liu
  0 siblings, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:56:41AM -0600, Jan Beulich wrote:
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 09/11] hvmctl: convert HVMOP_inject_msi
  2016-06-20 12:57 ` [PATCH 09/11] hvmctl: convert HVMOP_inject_msi Jan Beulich
@ 2016-06-21 10:14   ` Wei Liu
  0 siblings, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:57:11AM -0600, Jan Beulich wrote:
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server*
  2016-06-20 12:57 ` [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server* Jan Beulich
@ 2016-06-21 10:14   ` Wei Liu
  2016-06-21 12:44   ` Paul Durrant
  1 sibling, 0 replies; 31+ messages in thread
From: Wei Liu @ 2016-06-21 10:14 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Paul Durrant, xen-devel, dgdegra

On Mon, Jun 20, 2016 at 06:57:47AM -0600, Jan Beulich wrote:
> Note that we can't adjust HVM_IOREQSRV_BUFIOREQ_* to properly obey
> name space rules, as these constants are in use by callers of the libxc
> interface.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server*
  2016-06-20 12:57 ` [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server* Jan Beulich
  2016-06-21 10:14   ` Wei Liu
@ 2016-06-21 12:44   ` Paul Durrant
  1 sibling, 0 replies; 31+ messages in thread
From: Paul Durrant @ 2016-06-21 12:44 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, Andrew Cooper, Tim (Xen.org),
	George Dunlap, Ian Jackson, dgdegra

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 20 June 2016 13:58
> To: xen-devel
> Cc: Andrew Cooper; Paul Durrant; Wei Liu; George Dunlap; Ian Jackson;
> Stefano Stabellini; dgdegra@tycho.nsa.gov; Tim (Xen.org)
> Subject: [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server*
> 
> Note that we can't adjust HVM_IOREQSRV_BUFIOREQ_* to properly obey
> name space rules, as these constants are in use by callers of the libxc
> interface.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> 
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -41,6 +41,7 @@
>  #include <xen/sched.h>
>  #include <xen/memory.h>
>  #include <xen/grant_table.h>
> +#include <xen/hvm/control.h>
>  #include <xen/hvm/params.h>
>  #include <xen/xsm/flask_op.h>
>  #include <xen/tmem.h>
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -1416,23 +1416,14 @@ int xc_hvm_create_ioreq_server(xc_interf
>                                 int handle_bufioreq,
>                                 ioservid_t *id)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
> +    DECLARE_HVMCTL(create_ioreq_server, domid,
> +                   .handle_bufioreq = handle_bufioreq);
>      int rc;
> 
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->handle_bufioreq = handle_bufioreq;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_create_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> +    rc = do_hvmctl(xch, &hvmctl);
> 
> -    *id = arg->id;
> +    *id = hvmctl.u.create_ioreq_server.id;
> 
> -    xc_hypercall_buffer_free(xch, arg);
>      return rc;
>  }
> 
> @@ -1443,84 +1434,52 @@ int xc_hvm_get_ioreq_server_info(xc_inte
>                                   xen_pfn_t *bufioreq_pfn,
>                                   evtchn_port_t *bufioreq_port)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
> +    DECLARE_HVMCTL(get_ioreq_server_info, domid,
> +                   .id = id);
>      int rc;
> 
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->id = id;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_get_ioreq_server_info,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> +    rc = do_hvmctl(xch, &hvmctl);
>      if ( rc != 0 )
> -        goto done;
> +        return rc;
> 
>      if ( ioreq_pfn )
> -        *ioreq_pfn = arg->ioreq_pfn;
> +        *ioreq_pfn = hvmctl.u.get_ioreq_server_info.ioreq_pfn;
> 
>      if ( bufioreq_pfn )
> -        *bufioreq_pfn = arg->bufioreq_pfn;
> +        *bufioreq_pfn = hvmctl.u.get_ioreq_server_info.bufioreq_pfn;
> 
>      if ( bufioreq_port )
> -        *bufioreq_port = arg->bufioreq_port;
> +        *bufioreq_port = hvmctl.u.get_ioreq_server_info.bufioreq_port;
> 
> -done:
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return 0;
>  }
> 
>  int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t domid,
>                                          ioservid_t id, int is_mmio,
>                                          uint64_t start, uint64_t end)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
> -    int rc;
> -
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
> -    arg->start = start;
> -    arg->end = end;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_map_io_range_to_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> +    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
> +                   .id = id,
> +                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
> +                                   : XEN_HVMCTL_IO_RANGE_PORT,
> +                   .start = start,
> +                   .end = end);
> 
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
>                                              ioservid_t id, int is_mmio,
>                                              uint64_t start, uint64_t end)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
> -    int rc;
> -
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> +    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
> +                   .id = id,
> +                   .type = is_mmio ? XEN_HVMCTL_IO_RANGE_MEMORY
> +                                   : XEN_HVMCTL_IO_RANGE_PORT,
> +                   .start = start,
> +                   .end = end);
> 
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
> -    arg->start = start;
> -    arg->end = end;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_unmap_io_range_from_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
> @@ -1528,37 +1487,23 @@ int xc_hvm_map_pcidev_to_ioreq_server(xc
>                                        uint8_t bus, uint8_t device,
>                                        uint8_t function)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
> -    int rc;
> +    /*
> +     * The underlying hypercall will deal with ranges of PCI SBDF
> +     * but, for simplicity, the API only uses singletons.
> +     */
> +    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
> +    DECLARE_HVMCTL(map_io_range_to_ioreq_server, domid,
> +                   .id = id,
> +                   .type = XEN_HVMCTL_IO_RANGE_PCI,
> +                   .start = sbdf,
> +                   .end = sbdf);
> 
>      if (device > 0x1f || function > 0x7) {
>          errno = EINVAL;
>          return -1;
>      }
> 
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->type = HVMOP_IO_RANGE_PCI;
> -
> -    /*
> -     * The underlying hypercall will deal with ranges of PCI SBDF
> -     * but, for simplicity, the API only uses singletons.
> -     */
> -    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
> -                                           (uint64_t)bus,
> -                                           (uint64_t)device,
> -                                           (uint64_t)function);
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_map_io_range_to_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
> @@ -1566,54 +1511,29 @@ int xc_hvm_unmap_pcidev_from_ioreq_serve
>                                            uint8_t bus, uint8_t device,
>                                            uint8_t function)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
> -    int rc;
> +    uint32_t sbdf = XEN_HVMCTL_PCI_SBDF(segment, bus, device, function);
> +    DECLARE_HVMCTL(unmap_io_range_from_ioreq_server, domid,
> +                   .id = id,
> +                   .type = XEN_HVMCTL_IO_RANGE_PCI,
> +                   .start = sbdf,
> +                   .end = sbdf);
> 
>      if (device > 0x1f || function > 0x7) {
>          errno = EINVAL;
>          return -1;
>      }
> 
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> -
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->type = HVMOP_IO_RANGE_PCI;
> -    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
> -                                           (uint64_t)bus,
> -                                           (uint64_t)device,
> -                                           (uint64_t)function);
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_unmap_io_range_from_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_destroy_ioreq_server(xc_interface *xch,
>                                  domid_t domid,
>                                  ioservid_t id)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
> -    int rc;
> -
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> +    DECLARE_HVMCTL(destroy_ioreq_server, domid,
> +                   .id = id);
> 
> -    arg->domid = domid;
> -    arg->id = id;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_destroy_ioreq_server,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_hvm_set_ioreq_server_state(xc_interface *xch,
> @@ -1621,23 +1541,11 @@ int xc_hvm_set_ioreq_server_state(xc_int
>                                    ioservid_t id,
>                                    int enabled)
>  {
> -    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
> -    int rc;
> -
> -    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
> -    if ( arg == NULL )
> -        return -1;
> +    DECLARE_HVMCTL(set_ioreq_server_state, domid,
> +                   .id = id,
> +                   .enabled = !!enabled);
> 
> -    arg->domid = domid;
> -    arg->id = id;
> -    arg->enabled = !!enabled;
> -
> -    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
> -                  HVMOP_set_ioreq_server_state,
> -                  HYPERCALL_BUFFER_AS_ARG(arg));
> -
> -    xc_hypercall_buffer_free(xch, arg);
> -    return rc;
> +    return do_hvmctl(xch, &hvmctl);
>  }
> 
>  int xc_domain_setdebugging(xc_interface *xch,
> --- a/tools/libxc/xc_private.h
> +++ b/tools/libxc/xc_private.h
> @@ -34,8 +34,6 @@
>  #define XC_INTERNAL_COMPAT_MAP_FOREIGN_API
>  #include "xenctrl.h"
> 
> -#include <xen/hvm/control.h>
> -
>  #include <xencall.h>
>  #include <xenforeignmemory.h>
> 
> --- a/xen/arch/x86/hvm/control.c
> +++ b/xen/arch/x86/hvm/control.c
> @@ -20,6 +20,7 @@
>  #include <xen/sched.h>
>  #include <asm/hap.h>
>  #include <asm/shadow.h>
> +#include <asm/hvm/ioreq.h>
>  #include <xsm/xsm.h>
> 
>  static int set_pci_intx_level(struct domain *d,
> @@ -299,6 +300,50 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
>          rc = hvm_inject_msi(d, op.u.inject_msi.addr, op.u.inject_msi.data);
>          break;
> 
> +    case XEN_HVMCTL_create_ioreq_server:
> +        rc = -EINVAL;
> +        if ( op.u.create_ioreq_server.rsvd )
> +            break;
> +        rc = hvm_create_ioreq_server(d, current->domain->domain_id, 0,
> +                                     op.u.create_ioreq_server.handle_bufioreq,
> +                                     &op.u.create_ioreq_server.id);
> +        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
> +                                            u.create_ioreq_server.id) )
> +            rc = -EFAULT;
> +        break;
> +
> +    case XEN_HVMCTL_get_ioreq_server_info:
> +        rc = -EINVAL;
> +        if ( op.u.get_ioreq_server_info.rsvd )
> +            break;
> +        rc = hvm_get_ioreq_server_info(d, &op.u.get_ioreq_server_info);
> +        if ( rc == 0 && copy_field_to_guest(u_hvmctl, &op,
> +                                            u.get_ioreq_server_info) )
> +            rc = -EFAULT;
> +        break;
> +
> +    case XEN_HVMCTL_map_io_range_to_ioreq_server:
> +        rc = hvm_map_io_range_to_ioreq_server(
> +                 d, &op.u.map_io_range_to_ioreq_server);
> +        break;
> +
> +    case XEN_HVMCTL_unmap_io_range_from_ioreq_server:
> +        rc = hvm_unmap_io_range_from_ioreq_server(
> +                 d, &op.u.unmap_io_range_from_ioreq_server);
> +        break;
> +
> +    case XEN_HVMCTL_destroy_ioreq_server:
> +        rc = hvm_destroy_ioreq_server(d, op.u.destroy_ioreq_server.id);
> +        break;
> +
> +    case XEN_HVMCTL_set_ioreq_server_state:
> +        rc = -EINVAL;
> +        if ( op.u.set_ioreq_server_state.rsvd )
> +            break;
> +        rc = hvm_set_ioreq_server_state(d, op.u.set_ioreq_server_state.id,
> +                                        !!op.u.set_ioreq_server_state.enabled);
> +        break;
> +
>      default:
>          rc = -EOPNOTSUPP;
>          break;
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -4487,195 +4487,6 @@ static int hvmop_flush_tlb_all(void)
>      return 0;
>  }
> 
> -static int hvmop_create_ioreq_server(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
> -{
> -    struct domain *curr_d = current->domain;
> -    xen_hvm_create_ioreq_server_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_create_ioreq_server);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
> -                                 op.handle_bufioreq, &op.id);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_get_ioreq_server_info(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
> -{
> -    xen_hvm_get_ioreq_server_info_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_get_ioreq_server_info);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_get_ioreq_server_info(d, op.id,
> -                                   &op.ioreq_pfn,
> -                                   &op.bufioreq_pfn,
> -                                   &op.bufioreq_port);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_map_io_range_to_ioreq_server(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
> -{
> -    xen_hvm_io_range_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_map_io_range_to_ioreq_server);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.type,
> -                                          op.start, op.end);
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_unmap_io_range_from_ioreq_server(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
> -{
> -    xen_hvm_io_range_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_unmap_io_range_from_ioreq_server);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.type,
> -                                              op.start, op.end);
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_set_ioreq_server_state(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
> -{
> -    xen_hvm_set_ioreq_server_state_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
> -static int hvmop_destroy_ioreq_server(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
> -{
> -    xen_hvm_destroy_ioreq_server_t op;
> -    struct domain *d;
> -    int rc;
> -
> -    if ( copy_from_guest(&op, uop, 1) )
> -        return -EFAULT;
> -
> -    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> -    if ( rc != 0 )
> -        return rc;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_destroy_ioreq_server);
> -    if ( rc != 0 )
> -        goto out;
> -
> -    rc = hvm_destroy_ioreq_server(d, op.id);
> -
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> -}
> -
>  static int hvmop_set_evtchn_upcall_vector(
>      XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
>  {
> @@ -5192,36 +5003,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
> 
>      switch ( op )
>      {
> -    case HVMOP_create_ioreq_server:
> -        rc = hvmop_create_ioreq_server(
> -            guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
> -        break;
> -
> -    case HVMOP_get_ioreq_server_info:
> -        rc = hvmop_get_ioreq_server_info(
> -            guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
> -        break;
> -
> -    case HVMOP_map_io_range_to_ioreq_server:
> -        rc = hvmop_map_io_range_to_ioreq_server(
> -            guest_handle_cast(arg, xen_hvm_io_range_t));
> -        break;
> -
> -    case HVMOP_unmap_io_range_from_ioreq_server:
> -        rc = hvmop_unmap_io_range_from_ioreq_server(
> -            guest_handle_cast(arg, xen_hvm_io_range_t));
> -        break;
> -
> -    case HVMOP_set_ioreq_server_state:
> -        rc = hvmop_set_ioreq_server_state(
> -            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
> -        break;
> -
> -    case HVMOP_destroy_ioreq_server:
> -        rc = hvmop_destroy_ioreq_server(
> -            guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
> -        break;
> -
>      case HVMOP_set_evtchn_upcall_vector:
>          rc = hvmop_set_evtchn_upcall_vector(
>              guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -513,9 +513,9 @@ static int hvm_ioreq_server_alloc_ranges
>          char *name;
> 
>          rc = asprintf(&name, "ioreq_server %d %s", s->id,
> -                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
> -                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
> -                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
> +                      (i == XEN_HVMCTL_IO_RANGE_PORT) ? "port" :
> +                      (i == XEN_HVMCTL_IO_RANGE_MEMORY) ? "memory" :
> +                      (i == XEN_HVMCTL_IO_RANGE_PCI) ? "pci" :
>                        "");
>          if ( rc )
>              goto fail;
> @@ -686,7 +686,8 @@ int hvm_create_ioreq_server(struct domai
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> -    if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
> +    if ( !is_hvm_domain(d) ||
> +         bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
>          return -EINVAL;
> 
>      rc = -ENOMEM;
> @@ -738,6 +739,9 @@ int hvm_destroy_ioreq_server(struct doma
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> +    if ( !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -772,14 +776,15 @@ int hvm_destroy_ioreq_server(struct doma
>      return rc;
>  }
> 
> -int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
> -                              unsigned long *ioreq_pfn,
> -                              unsigned long *bufioreq_pfn,
> -                              evtchn_port_t *bufioreq_port)
> +int hvm_get_ioreq_server_info(struct domain *d,
> +                              struct xen_hvm_get_ioreq_server_info *info)
>  {
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> +    if ( !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -790,15 +795,15 @@ int hvm_get_ioreq_server_info(struct dom
>          if ( s == d->arch.hvm_domain.default_ioreq_server )
>              continue;
> 
> -        if ( s->id != id )
> +        if ( s->id != info->id )
>              continue;
> 
> -        *ioreq_pfn = s->ioreq.gmfn;
> +        info->ioreq_pfn = s->ioreq.gmfn;
> 
>          if ( s->bufioreq.va != NULL )
>          {
> -            *bufioreq_pfn = s->bufioreq.gmfn;
> -            *bufioreq_port = s->bufioreq_evtchn;
> +            info->bufioreq_pfn = s->bufioreq.gmfn;
> +            info->bufioreq_port = s->bufioreq_evtchn;
>          }
> 
>          rc = 0;
> @@ -810,13 +815,15 @@ int hvm_get_ioreq_server_info(struct dom
>      return rc;
>  }
> 
> -int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
> -                                     uint32_t type, uint64_t start,
> -                                     uint64_t end)
> +int hvm_map_io_range_to_ioreq_server(struct domain *d,
> +                                     const struct xen_hvm_io_range *ior)
>  {
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> +    if ( ior->rsvd || !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -827,16 +834,16 @@ int hvm_map_io_range_to_ioreq_server(str
>          if ( s == d->arch.hvm_domain.default_ioreq_server )
>              continue;
> 
> -        if ( s->id == id )
> +        if ( s->id == ior->id )
>          {
>              struct rangeset *r;
> 
> -            switch ( type )
> +            switch ( ior->type )
>              {
> -            case HVMOP_IO_RANGE_PORT:
> -            case HVMOP_IO_RANGE_MEMORY:
> -            case HVMOP_IO_RANGE_PCI:
> -                r = s->range[type];
> +            case XEN_HVMCTL_IO_RANGE_PORT:
> +            case XEN_HVMCTL_IO_RANGE_MEMORY:
> +            case XEN_HVMCTL_IO_RANGE_PCI:
> +                r = s->range[ior->type];
>                  break;
> 
>              default:
> @@ -849,10 +856,10 @@ int hvm_map_io_range_to_ioreq_server(str
>                  break;
> 
>              rc = -EEXIST;
> -            if ( rangeset_overlaps_range(r, start, end) )
> +            if ( rangeset_overlaps_range(r, ior->start, ior->end) )
>                  break;
> 
> -            rc = rangeset_add_range(r, start, end);
> +            rc = rangeset_add_range(r, ior->start, ior->end);
>              break;
>          }
>      }
> @@ -862,13 +869,15 @@ int hvm_map_io_range_to_ioreq_server(str
>      return rc;
>  }
> 
> -int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
> -                                         uint32_t type, uint64_t start,
> -                                         uint64_t end)
> +int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
> +                                         const struct xen_hvm_io_range *ior)
>  {
>      struct hvm_ioreq_server *s;
>      int rc;
> 
> +    if ( ior->rsvd || !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -879,16 +888,16 @@ int hvm_unmap_io_range_from_ioreq_server
>          if ( s == d->arch.hvm_domain.default_ioreq_server )
>              continue;
> 
> -        if ( s->id == id )
> +        if ( s->id == ior->id )
>          {
>              struct rangeset *r;
> 
> -            switch ( type )
> +            switch ( ior->type )
>              {
> -            case HVMOP_IO_RANGE_PORT:
> -            case HVMOP_IO_RANGE_MEMORY:
> -            case HVMOP_IO_RANGE_PCI:
> -                r = s->range[type];
> +            case XEN_HVMCTL_IO_RANGE_PORT:
> +            case XEN_HVMCTL_IO_RANGE_MEMORY:
> +            case XEN_HVMCTL_IO_RANGE_PCI:
> +                r = s->range[ior->type];
>                  break;
> 
>              default:
> @@ -901,10 +910,10 @@ int hvm_unmap_io_range_from_ioreq_server
>                  break;
> 
>              rc = -ENOENT;
> -            if ( !rangeset_contains_range(r, start, end) )
> +            if ( !rangeset_contains_range(r, ior->start, ior->end) )
>                  break;
> 
> -            rc = rangeset_remove_range(r, start, end);
> +            rc = rangeset_remove_range(r, ior->start, ior->end);
>              break;
>          }
>      }
> @@ -920,6 +929,9 @@ int hvm_set_ioreq_server_state(struct do
>      struct list_head *entry;
>      int rc;
> 
> +    if ( !is_hvm_domain(d) )
> +        return -EINVAL;
> +
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
> 
>      rc = -ENOENT;
> @@ -1128,12 +1140,12 @@ struct hvm_ioreq_server *hvm_select_iore
> 
>          /* PCI config data cycle */
> 
> -        sbdf = HVMOP_PCI_SBDF(0,
> -                              PCI_BUS(CF8_BDF(cf8)),
> -                              PCI_SLOT(CF8_BDF(cf8)),
> -                              PCI_FUNC(CF8_BDF(cf8)));
> +        sbdf = XEN_HVMCTL_PCI_SBDF(0,
> +                                   PCI_BUS(CF8_BDF(cf8)),
> +                                   PCI_SLOT(CF8_BDF(cf8)),
> +                                   PCI_FUNC(CF8_BDF(cf8)));
> 
> -        type = HVMOP_IO_RANGE_PCI;
> +        type = XEN_HVMCTL_IO_RANGE_PCI;
>          addr = ((uint64_t)sbdf << 32) |
>                 CF8_ADDR_LO(cf8) |
>                 (p->addr & 3);
> @@ -1152,7 +1164,7 @@ struct hvm_ioreq_server *hvm_select_iore
>      else
>      {
>          type = (p->type == IOREQ_TYPE_PIO) ?
> -                HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
> +                XEN_HVMCTL_IO_RANGE_PORT : XEN_HVMCTL_IO_RANGE_MEMORY;
>          addr = p->addr;
>      }
> 
> @@ -1174,19 +1186,19 @@ struct hvm_ioreq_server *hvm_select_iore
>          {
>              unsigned long end;
> 
> -        case HVMOP_IO_RANGE_PORT:
> +        case XEN_HVMCTL_IO_RANGE_PORT:
>              end = addr + p->size - 1;
>              if ( rangeset_contains_range(r, addr, end) )
>                  return s;
> 
>              break;
> -        case HVMOP_IO_RANGE_MEMORY:
> +        case XEN_HVMCTL_IO_RANGE_MEMORY:
>              end = addr + (p->size * p->count) - 1;
>              if ( rangeset_contains_range(r, addr, end) )
>                  return s;
> 
>              break;
> -        case HVMOP_IO_RANGE_PCI:
> +        case XEN_HVMCTL_IO_RANGE_PCI:
>              if ( rangeset_contains_singleton(r, addr >> 32) )
>              {
>                  p->type = IOREQ_TYPE_PCI_CONFIG;
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -30,6 +30,7 @@
>  #include <asm/hvm/vmx/vmcs.h>
>  #include <asm/hvm/svm/vmcb.h>
>  #include <public/grant_table.h>
> +#include <public/hvm/control.h>
>  #include <public/hvm/params.h>
>  #include <public/hvm/save.h>
>  #include <public/hvm/hvm_op.h>
> @@ -47,7 +48,7 @@ struct hvm_ioreq_vcpu {
>      bool_t           pending;
>  };
> 
> -#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
> +#define NR_IO_RANGE_TYPES (XEN_HVMCTL_IO_RANGE_PCI + 1)
>  #define MAX_NR_IO_RANGES  256
> 
>  struct hvm_ioreq_server {
> --- a/xen/include/asm-x86/hvm/ioreq.h
> +++ b/xen/include/asm-x86/hvm/ioreq.h
> @@ -19,6 +19,8 @@
>  #ifndef __ASM_X86_HVM_IOREQ_H__
>  #define __ASM_X86_HVM_IOREQ_H__
> 
> +#include <public/hvm/control.h>
> +
>  bool_t hvm_io_pending(struct vcpu *v);
>  bool_t handle_hvm_io_completion(struct vcpu *v);
>  bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
> @@ -27,16 +29,12 @@ int hvm_create_ioreq_server(struct domai
>                              bool_t is_default, int bufioreq_handling,
>                              ioservid_t *id);
>  int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
> -int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
> -                              unsigned long *ioreq_pfn,
> -                              unsigned long *bufioreq_pfn,
> -                              evtchn_port_t *bufioreq_port);
> -int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
> -                                     uint32_t type, uint64_t start,
> -                                     uint64_t end);
> -int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
> -                                         uint32_t type, uint64_t start,
> -                                         uint64_t end);
> +int hvm_get_ioreq_server_info(struct domain *d,
> +                              struct xen_hvm_get_ioreq_server_info *info);
> +int hvm_map_io_range_to_ioreq_server(struct domain *d,
> +                                     const struct xen_hvm_io_range *r);
> +int hvm_unmap_io_range_from_ioreq_server(struct domain *d,
> +                                         const struct xen_hvm_io_range *r);
>  int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
>                                 bool_t enabled);
> 
> --- a/xen/include/public/hvm/control.h
> +++ b/xen/include/public/hvm/control.h
> @@ -26,6 +26,7 @@
>  #endif
> 
>  #include "../xen.h"
> +#include "../event_channel.h"
> 
>  #define XEN_HVMCTL_INTERFACE_VERSION 0x00000001
> 
> @@ -130,6 +131,131 @@ struct xen_hvm_inject_msi {
>      uint64_t  addr;
>  };
> 
> +/*
> + * IOREQ Servers
> + *
> + * The interface between an I/O emulator and Xen is called an IOREQ Server.
> + * A domain supports a single 'legacy' IOREQ Server which is instantiated if
> + * parameter...
> + *
> + * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
> + * ioreq structures), or...
> + * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
> + * ioreq ring), or...
> + * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
> + * to request buffered I/O emulation).
> + *
> + * The following hypercalls facilitate the creation of IOREQ Servers for
> + * 'secondary' emulators which are invoked to implement port I/O, memory, or
> + * PCI config space ranges which they explicitly register.
> + */
> +
> +typedef uint16_t ioservid_t;
> +
> +/*
> + * XEN_HVMCTL_create_ioreq_server: Instantiate a new IOREQ Server for a
> + *                                 secondary emulator servicing domain
> + *                                 <domid>.
> + *
> + * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
> + * the buffered ioreq ring will not be allocated and hence all emulation
> + * requests to this server will be synchronous.
> + */
> +struct xen_hvm_create_ioreq_server {
> +#define HVM_IOREQSRV_BUFIOREQ_OFF    0
> +#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
> +/*
> + * Use this when read_pointer gets updated atomically and
> + * the pointer pair gets read atomically:
> + */
> +#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
> +    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
> +    uint8_t rsvd;            /* IN - must be zero */
> +    ioservid_t id;           /* OUT - server id */
> +};
> +
> +/*
> + * XEN_HVMCTL_get_ioreq_server_info: Get all the information necessary to
> + *                                   access IOREQ Server <id>.
> + *
> + * The emulator needs to map the synchronous ioreq structures and buffered
> + * ioreq ring (if it exists) that Xen uses to request emulation. These are
> + * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
> + * respectively. In addition, if the IOREQ Server is handling buffered
> + * emulation requests, the emulator needs to bind to event channel
> + * <bufioreq_port> to listen for them. (The event channels used for
> + * synchronous emulation requests are specified in the per-CPU ioreq
> + * structures in <ioreq_pfn>).
> + * If the IOREQ Server is not handling buffered emulation requests then the
> + * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
> + */
> +struct xen_hvm_get_ioreq_server_info {
> +    ioservid_t id;                 /* IN - server id */
> +    uint16_t rsvd;                 /* IN - must be zero */
> +    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
> +    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
> +    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
> +};
> +
> +/*
> + * XEN_HVMCTL_map_io_range_to_ioreq_server: Register an I/O range of domain
> + *                                          <domid> for emulation by the
> + *                                          client of IOREQ Server <id>
> + * XEN_HVMCTL_unmap_io_range_from_ioreq_server: Deregister an I/O range of
> + *                                              <domid> for emulation by the
> + *                                              client of IOREQ Server <id>
> + *
> + * There are three types of I/O that can be emulated: port I/O, memory accesses
> + * and PCI config space accesses. The <type> field denotes which type of range
> + * the <start> and <end> (inclusive) fields are specifying.
> + * PCI config space ranges are specified by segment/bus/device/function values
> + * which should be encoded using the XEN_HVMCTL_PCI_SBDF helper macro below.
> + *
> + * NOTE: unless an emulation request falls entirely within a range mapped
> + * by a secondary emulator, it will not be passed to that emulator.
> + */
> +struct xen_hvm_io_range {
> +    ioservid_t id;               /* IN - server id */
> +    uint16_t type;               /* IN - type of range */
> +    uint32_t rsvd;               /* IN - must be zero */
> +#define XEN_HVMCTL_IO_RANGE_PORT   0 /* I/O port range */
> +#define XEN_HVMCTL_IO_RANGE_MEMORY 1 /* MMIO range */
> +#define XEN_HVMCTL_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
> +    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
> +};
> +
> +#define XEN_HVMCTL_PCI_SBDF(s, b, d, f) \
> +	((((s) & 0xffff) << 16) | \
> +	 (((b) & 0xff) << 8) | \
> +	 (((d) & 0x1f) << 3) | \
> +	 ((f) & 0x07))
> +
> +/*
> + * XEN_HVMCTL_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing
> + *                                  domain <domid>.
> + *
> + * Any registered I/O ranges will be automatically deregistered.
> + */
> +struct xen_hvm_destroy_ioreq_server {
> +    ioservid_t id; /* IN - server id */
> +};
> +
> +/*
> + * XEN_HVMCTL_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
> + *                                    servicing domain <domid>.
> + *
> + * The IOREQ Server will not be passed any emulation requests until it is in
> + * the enabled state.
> + * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
> + * XEN_HVMCTL_get_ioreq_server_info) are not meaningful until the IOREQ Server
> + * is in the enabled state.
> + */
> +struct xen_hvm_set_ioreq_server_state {
> +    ioservid_t id;   /* IN - server id */
> +    uint8_t enabled; /* IN - enabled? */
> +    uint8_t rsvd;    /* IN - must be zero */
> +};
> +
>  struct xen_hvmctl {
>      uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
>      domid_t domain;
> @@ -142,6 +268,12 @@ struct xen_hvmctl {
>  #define XEN_HVMCTL_set_mem_type                  6
>  #define XEN_HVMCTL_inject_trap                   7
>  #define XEN_HVMCTL_inject_msi                    8
> +#define XEN_HVMCTL_create_ioreq_server           9
> +#define XEN_HVMCTL_get_ioreq_server_info        10
> +#define XEN_HVMCTL_map_io_range_to_ioreq_server 11
> +#define XEN_HVMCTL_unmap_io_range_from_ioreq_server 12
> +#define XEN_HVMCTL_destroy_ioreq_server         13
> +#define XEN_HVMCTL_set_ioreq_server_state       14
>      uint16_t opaque;               /* Must be zero on initial invocation. */
>      union {
>          struct xen_hvm_set_pci_intx_level set_pci_intx_level;
> @@ -152,6 +284,12 @@ struct xen_hvmctl {
>          struct xen_hvm_set_mem_type set_mem_type;
>          struct xen_hvm_inject_trap inject_trap;
>          struct xen_hvm_inject_msi inject_msi;
> +        struct xen_hvm_create_ioreq_server create_ioreq_server;
> +        struct xen_hvm_get_ioreq_server_info get_ioreq_server_info;
> +        struct xen_hvm_io_range map_io_range_to_ioreq_server;
> +        struct xen_hvm_io_range unmap_io_range_from_ioreq_server;
> +        struct xen_hvm_destroy_ioreq_server destroy_ioreq_server;
> +        struct xen_hvm_set_ioreq_server_state set_ioreq_server_state;
>          uint8_t pad[120];
>      } u;
>  };
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -25,7 +25,6 @@
> 
>  #include "../xen.h"
>  #include "../trace.h"
> -#include "../event_channel.h"
> 
>  /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
>  #define HVMOP_set_param           0
> @@ -137,152 +136,6 @@ struct xen_hvm_get_mem_type {
>  typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
> 
> -/* Following tools-only interfaces may change in future. */
> -#if defined(__XEN__) || defined(__XEN_TOOLS__)
> -
> -/*
> - * IOREQ Servers
> - *
> - * The interface between an I/O emulator an Xen is called an IOREQ Server.
> - * A domain supports a single 'legacy' IOREQ Server which is instantiated if
> - * parameter...
> - *
> - * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
> - * ioreq structures), or...
> - * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
> - * ioreq ring), or...
> - * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
> - * to request buffered I/O emulation).
> - *
> - * The following hypercalls facilitate the creation of IOREQ Servers for
> - * 'secondary' emulators which are invoked to implement port I/O, memory, or
> - * PCI config space ranges which they explicitly register.
> - */
> -
> -typedef uint16_t ioservid_t;
> -
> -/*
> - * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
> - *                            emulator servicing domain <domid>.
> - *
> - * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
> - * the buffered ioreq ring will not be allocated and hence all emulation
> - * requestes to this server will be synchronous.
> - */
> -#define HVMOP_create_ioreq_server 17
> -struct xen_hvm_create_ioreq_server {
> -    domid_t domid;           /* IN - domain to be serviced */
> -#define HVM_IOREQSRV_BUFIOREQ_OFF    0
> -#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
> -/*
> - * Use this when read_pointer gets updated atomically and
> - * the pointer pair gets read atomically:
> - */
> -#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
> -    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
> -    ioservid_t id;           /* OUT - server id */
> -};
> -typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
> -
> -/*
> - * HVMOP_get_ioreq_server_info: Get all the information necessary to access
> - *                              IOREQ Server <id>.
> - *
> - * The emulator needs to map the synchronous ioreq structures and buffered
> - * ioreq ring (if it exists) that Xen uses to request emulation. These are
> - * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
> - * respectively. In addition, if the IOREQ Server is handling buffered
> - * emulation requests, the emulator needs to bind to event channel
> - * <bufioreq_port> to listen for them. (The event channels used for
> - * synchronous emulation requests are specified in the per-CPU ioreq
> - * structures in <ioreq_pfn>).
> - * If the IOREQ Server is not handling buffered emulation requests then the
> - * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
> - */
> -#define HVMOP_get_ioreq_server_info 18
> -struct xen_hvm_get_ioreq_server_info {
> -    domid_t domid;                 /* IN - domain to be serviced */
> -    ioservid_t id;                 /* IN - server id */
> -    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
> -    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
> -    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
> -};
> -typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
> -
> -/*
> - * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
> - *                                   for emulation by the client of IOREQ
> - *                                   Server <id>
> - * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
> - *                                       for emulation by the client of IOREQ
> - *                                       Server <id>
> - *
> - * There are three types of I/O that can be emulated: port I/O, memory accesses
> - * and PCI config space accesses. The <type> field denotes which type of range
> - * the <start> and <end> (inclusive) fields are specifying.
> - * PCI config space ranges are specified by segment/bus/device/function values
> - * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
> - *
> - * NOTE: unless an emulation request falls entirely within a range mapped
> - * by a secondary emulator, it will not be passed to that emulator.
> - */
> -#define HVMOP_map_io_range_to_ioreq_server 19
> -#define HVMOP_unmap_io_range_from_ioreq_server 20
> -struct xen_hvm_io_range {
> -    domid_t domid;               /* IN - domain to be serviced */
> -    ioservid_t id;               /* IN - server id */
> -    uint32_t type;               /* IN - type of range */
> -# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
> -# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
> -# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
> -    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
> -};
> -typedef struct xen_hvm_io_range xen_hvm_io_range_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
> -
> -#define HVMOP_PCI_SBDF(s,b,d,f)                 \
> -	((((s) & 0xffff) << 16) |                   \
> -	 (((b) & 0xff) << 8) |                      \
> -	 (((d) & 0x1f) << 3) |                      \
> -	 ((f) & 0x07))
> -
> -/*
> - * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
> - *                             <domid>.
> - *
> - * Any registered I/O ranges will be automatically deregistered.
> - */
> -#define HVMOP_destroy_ioreq_server 21
> -struct xen_hvm_destroy_ioreq_server {
> -    domid_t domid; /* IN - domain to be serviced */
> -    ioservid_t id; /* IN - server id */
> -};
> -typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
> -
> -/*
> - * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
> - *                               domain <domid>.
> - *
> - * The IOREQ Server will not be passed any emulation requests until it is in the
> - * enabled state.
> - * Note that the contents of the ioreq_pfn and bufioreq_fn (see
> - * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
> - * the enabled state.
> - */
> -#define HVMOP_set_ioreq_server_state 22
> -struct xen_hvm_set_ioreq_server_state {
> -    domid_t domid;   /* IN - domain to be serviced */
> -    ioservid_t id;   /* IN - server id */
> -    uint8_t enabled; /* IN - enabled? */
> -};
> -typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
> -
> -#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
> -
>  #if defined(__i386__) || defined(__x86_64__)
> 
>  /*
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
>      return xsm_default_action(action, current->domain, d);
>  }
> 
> -static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
> -{
> -    XSM_ASSERT_ACTION(XSM_DM_PRIV);
> -    return xsm_default_action(action, current->domain, d);
> -}
> -
>  static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
>  {
>      XSM_ASSERT_ACTION(XSM_DM_PRIV);
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -174,7 +174,6 @@ struct xsm_operations {
>      int (*do_mca) (void);
>      int (*shadow_control) (struct domain *d, uint32_t op);
>      int (*hvm_set_pci_link_route) (struct domain *d);
> -    int (*hvm_ioreq_server) (struct domain *d, int op);
>      int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
>      int (*apic) (struct domain *d, int cmd);
>      int (*memtype) (uint32_t access);
> @@ -648,11 +647,6 @@ static inline int xsm_hvm_set_pci_link_r
>      return xsm_ops->hvm_set_pci_link_route(d);
>  }
> 
> -static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
> -{
> -    return xsm_ops->hvm_ioreq_server(d, op);
> -}
> -
>  static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
>  {
>      return xsm_ops->mem_sharing_op(d, cd, op);
> --- a/xen/xsm/dummy.c
> +++ b/xen/xsm/dummy.c
> @@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
>  #ifdef CONFIG_X86
>      set_to_dummy_if_null(ops, do_mca);
>      set_to_dummy_if_null(ops, shadow_control);
> -    set_to_dummy_if_null(ops, hvm_ioreq_server);
>      set_to_dummy_if_null(ops, mem_sharing_op);
>      set_to_dummy_if_null(ops, apic);
>      set_to_dummy_if_null(ops, machine_memory_map);
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -1526,11 +1526,6 @@ static int flask_ioport_mapping(struct d
>      return flask_ioport_permission(d, start, end, access);
>  }
> 
> -static int flask_hvm_ioreq_server(struct domain *d, int op)
> -{
> -    return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
> -}
> -
>  static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
>  {
>      int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
> @@ -1799,7 +1794,6 @@ static struct xsm_operations flask_ops =
>  #ifdef CONFIG_X86
>      .do_mca = flask_do_mca,
>      .shadow_control = flask_shadow_control,
> -    .hvm_ioreq_server = flask_hvm_ioreq_server,
>      .mem_sharing_op = flask_mem_sharing_op,
>      .apic = flask_apic,
>      .machine_memory_map = flask_machine_memory_map,
> 
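
As a usage sketch (mine, not from the patch; domid and rc assumed to be in
scope), the unchanged libxc API keeps driving a secondary emulator's setup
the same way after the conversion:

    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    ioservid_t id;
    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_port;

    /* Instantiate a server with an atomically updated buffered ring. */
    rc = xc_hvm_create_ioreq_server(xch, domid,
                                    HVM_IOREQSRV_BUFIOREQ_ATOMIC, &id);

    /* Discover what to map and which event channel to bind. */
    if ( !rc )
        rc = xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                          &bufioreq_pfn, &bufioreq_port);

    /* Claim COM1's port range (is_mmio == 0 selects port I/O). */
    if ( !rc )
        rc = xc_hvm_map_io_range_to_ioreq_server(xch, domid, id, 0,
                                                 0x3f8, 0x3ff);

    /* Requests are only forwarded once the server is enabled. */
    if ( !rc )
        rc = xc_hvm_set_ioreq_server_state(xch, domid, id, 1);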


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 01/11] public / x86: introduce hvmctl hypercall
  2016-06-20 12:52 ` [PATCH 01/11] public / x86: introduce " Jan Beulich
  2016-06-21 10:14   ` Wei Liu
@ 2016-06-23 14:55   ` Andrew Cooper
  2016-06-23 15:10     ` Jan Beulich
  1 sibling, 1 reply; 31+ messages in thread
From: Andrew Cooper @ 2016-06-23 14:55 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Tim Deegan,
	Ian Jackson, Paul Durrant, dgdegra

On 20/06/16 13:52, Jan Beulich wrote:
> +/*
> + * Note that this value is effectively part of the ABI, even if we don't need
> + * to make it a formal part of it.  Hence this value may only be changed if
> + * accompanied by a suitable interface version increase.
> + */
> +#define HVMCTL_iter_shift 8
> +#define HVMCTL_iter_mask  ((1U << HVMCTL_iter_shift) - 1)
> +#define HVMCTL_iter_max   (1U << (16 + HVMCTL_iter_shift))

This (mis)use of the cmd parameter is surely no longer necessary, given
that there is space in xen_hvmctl_t to encode continuation information?

~Andrew


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 01/11] public / x86: introduce hvmctl hypercall
  2016-06-23 14:55   ` Andrew Cooper
@ 2016-06-23 15:10     ` Jan Beulich
  2016-06-23 15:35       ` Andrew Cooper
  0 siblings, 1 reply; 31+ messages in thread
From: Jan Beulich @ 2016-06-23 15:10 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Tim Deegan,
	Ian Jackson, Paul Durrant, xen-devel, dgdegra

>>> On 23.06.16 at 16:55, <andrew.cooper3@citrix.com> wrote:
> On 20/06/16 13:52, Jan Beulich wrote:
>> +/*
>> + * Note that this value is effectively part of the ABI, even if we don't need
>> + * to make it a formal part of it.  Hence this value may only be changed if
>> + * accompanied by a suitable interface version increase.
>> + */
>> +#define HVMCTL_iter_shift 8
>> +#define HVMCTL_iter_mask  ((1U << HVMCTL_iter_shift) - 1)
>> +#define HVMCTL_iter_max   (1U << (16 + HVMCTL_iter_shift))
> 
> This (mis)use of the cmd parameter is surely no longer necessary, given
> that there is space in xen_hvmctl_t to encode continuation information?

There's no misuse of cmd anymore. This is just used to make the 16-bit
continuation value (the opaque structure member) cover a more useful
range, and at once avoid doing the preemption check on every
iteration.
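
To illustrate (a sketch of the intended pattern, not code lifted from the
series; process_one() and more_work are placeholders):

    unsigned int iter = op.opaque << HVMCTL_iter_shift;

    while ( more_work && iter < HVMCTL_iter_max )
    {
        process_one(iter++);

        /* Preemption is checked only once per 256 iterations ... */
        if ( !(iter & HVMCTL_iter_mask) && hypercall_preempt_check() )
        {
            /* ... and the 16-bit opaque field still covers 2^24 items. */
            op.opaque = iter >> HVMCTL_iter_shift;
            rc = -ERESTART;
            break;
        }
    }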

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH 11/11] x86/HVM: serialize trap injecting producer and consumer
  2016-06-20 12:58 ` [PATCH 11/11] x86/HVM: serialize trap injecting producer and consumer Jan Beulich
@ 2016-06-23 15:14   ` Andrew Cooper
  0 siblings, 0 replies; 31+ messages in thread
From: Andrew Cooper @ 2016-06-23 15:14 UTC (permalink / raw)
  To: Jan Beulich, xen-devel

On 20/06/16 13:58, Jan Beulich wrote:
> Since injection works on a remote vCPU, and since there's no
> enforcement of the subject vCPU being paused, there's a potential race
> between the prodcing and consuming sides. Fix this by leveraging the

producing.

> vector field as synchronization variable.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
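
For reference, the race being closed follows the classic single-producer
/ single-consumer pattern, with the vector doubling as the flag that
publishes the rest of the payload. A generic sketch in C11 atomics (not
the patch itself; all names here are invented):

    #include <stdatomic.h>
    #include <stdint.h>

    #define VECTOR_UNSET    (-1)   /* no injection pending */
    #define VECTOR_UPDATING (-2)   /* producer is filling in the payload */

    struct pending_trap {
        atomic_int vector;          /* also the synchronization variable */
        uint32_t type, error_code;  /* payload, valid only once vector >= 0 */
        uintptr_t cr2;
    };

    /* Producer, possibly running on a remote CPU. */
    static int produce(struct pending_trap *t, int vector,
                       uint32_t type, uint32_t ec, uintptr_t cr2)
    {
        int expected = VECTOR_UNSET;

        /* Claim the slot; fail if an injection is already pending. */
        if ( !atomic_compare_exchange_strong(&t->vector, &expected,
                                             VECTOR_UPDATING) )
            return -1;

        t->type = type;
        t->error_code = ec;
        t->cr2 = cr2;

        /* Release: payload writes become visible before the vector does. */
        atomic_store_explicit(&t->vector, vector, memory_order_release);
        return 0;
    }

    /* Consumer, running on the subject vCPU's entry path. */
    static int consume(struct pending_trap *t)
    {
        /* Acquire: pairs with the producer's release store above. */
        int vector = atomic_load_explicit(&t->vector, memory_order_acquire);

        if ( vector < 0 )   /* unset, or the producer is still updating */
            return -1;

        /* ... inject using t->type / t->error_code / t->cr2 ... */

        atomic_store(&t->vector, VECTOR_UNSET);   /* free the slot */
        return vector;
    }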


* Re: [PATCH 00/11] hvmctl hypercall
  2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
                   ` (10 preceding siblings ...)
  2016-06-20 12:58 ` [PATCH 11/11] x86/HVM: serialize trap injecting producer and consumer Jan Beulich
@ 2016-06-23 15:15 ` Andrew Cooper
  11 siblings, 0 replies; 31+ messages in thread
From: Andrew Cooper @ 2016-06-23 15:15 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Tim Deegan,
	Ian Jackson, Paul Durrant, dgdegra

On 20/06/16 13:39, Jan Beulich wrote:
> A long while back separating out all control kind operations (intended
> for use by only the control domain or device model) from the currect
> hvmop hypercall has been discussed. This series aims at finally making
> this reality (at once allowing to streamline the associated XSM checking).
>
> 01: public / x86: introduce hvmctl hypercall
> 02: convert HVMOP_set_pci_intx_level
> 03: convert HVMOP_set_isa_irq_level
> 04: convert HVMOP_set_pci_link_route
> 05: convert HVMOP_track_dirty_vram
> 06: convert HVMOP_modified_memory
> 07: convert HVMOP_set_mem_type
> 08: convert HVMOP_inject_trap
> 09: convert HVMOP_inject_msi
> 10: convert HVMOP_*ioreq_server*
> 11: x86/HVM: serialize trap injecting producer and consumer
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>

Patches 2 through 10 are all mechanical and look fine.  All Reviewed-by:
Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 01/11] public / x86: introduce hvmctl hypercall
  2016-06-23 15:10     ` Jan Beulich
@ 2016-06-23 15:35       ` Andrew Cooper
  0 siblings, 0 replies; 31+ messages in thread
From: Andrew Cooper @ 2016-06-23 15:35 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Ian Jackson,
	Tim Deegan, Paul Durrant, xen-devel, dgdegra

On 23/06/16 16:10, Jan Beulich wrote:
>>>> On 23.06.16 at 16:55, <andrew.cooper3@citrix.com> wrote:
>> On 20/06/16 13:52, Jan Beulich wrote:
>>> +/*
>>> + * Note that this value is effectively part of the ABI, even if we don't 
>> need
>>> + * to make it a formal part of it.  Hence this value may only be changed if
>>> + * accompanied by a suitable interface version increase.
>>> + */
>>> +#define HVMCTL_iter_shift 8
>>> +#define HVMCTL_iter_mask  ((1U << HVMCTL_iter_shift) - 1)
>>> +#define HVMCTL_iter_max   (1U << (16 + HVMCTL_iter_shift))
>> This (mis)use of the cmd parameter is surely no longer necessary, given
>> that there is space in xen_hvmctl_t to encode continuation information?
> There's no misuse of cmd anymore. This is just use to make the 16-bit
> continuation value (the opaque structure member) cover a more useful
> range, and at once avoid doing the preemption check on every
> iteration.

Ah ok, but it does leave the minimum continuation granularity at 256
iterations, which could easily be too large, depending on the
underlying operation.

In this case, I think it would be far better to bump the cmd field to
32 bits, and opaque to 64 bits, which affords us far more flexibility.
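
A sketch of what that could look like (the fields around cmd and opaque
are assumptions on my part, not necessarily the posted layout):

    struct xen_hvmctl {
        uint16_t interface_version; /* XEN_HVMCTL_INTERFACE_VERSION */
        domid_t  domain;            /* subject domain */
        uint32_t cmd;               /* widened from 16 bits */
        uint64_t opaque;            /* widened from 16 bits; zero on first use */
        /* per-subop payload union u would follow here */
    };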

~Andrew


end of thread, other threads:[~2016-06-23 15:35 UTC | newest]

Thread overview: 31+ messages
2016-06-20 12:39 [PATCH 00/11] hvmctl hypercall Jan Beulich
2016-06-20 12:52 ` [PATCH 01/11] public / x86: introduce " Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-23 14:55   ` Andrew Cooper
2016-06-23 15:10     ` Jan Beulich
2016-06-23 15:35       ` Andrew Cooper
2016-06-20 12:53 ` [PATCH 02/11] hvmctl: convert HVMOP_set_pci_intx_level Jan Beulich
2016-06-20 14:32   ` Daniel De Graaf
2016-06-20 14:48     ` Ian Jackson
2016-06-20 15:25       ` Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-20 12:53 ` [PATCH 03/11] hvmctl: convert HVMOP_set_isa_irq_level Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-20 12:54 ` [PATCH 04/11] hvmctl: convert HVMOP_set_pci_link_route Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-20 12:54 ` [PATCH 05/11] hvmctl: convert HVMOP_track_dirty_vram Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-20 12:55 ` [PATCH 06/11] hvmctl: convert HVMOP_modified_memory Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-20 12:56 ` [PATCH 07/11] hvmctl: convert HVMOP_set_mem_type Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-20 12:56 ` [PATCH 08/11] hvmctl: convert HVMOP_inject_trap Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-20 12:57 ` [PATCH 09/11] hvmctl: convert HVMOP_inject_msi Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-20 12:57 ` [PATCH 10/11] hvmctl: convert HVMOP_*ioreq_server* Jan Beulich
2016-06-21 10:14   ` Wei Liu
2016-06-21 12:44   ` Paul Durrant
2016-06-20 12:58 ` [PATCH 11/11] x86/HVM: serialize trap injecting producer and consumer Jan Beulich
2016-06-23 15:14   ` Andrew Cooper
2016-06-23 15:15 ` [PATCH 00/11] hvmctl hypercall Andrew Cooper
