From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Paul Durrant <paul.durrant@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>
Subject: [PATCH v7 2/8] dm_op: convert HVMOP_*ioreq_server*
Date: Mon, 23 Jan 2017 13:59:43 +0000
Message-ID: <1485179989-1763-3-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1485179989-1763-1-git-send-email-paul.durrant@citrix.com>

Convert the HVMOP_*ioreq_server* operations into XEN_DMOP_* operations
implemented via the new dm_op hypercall, and switch the libxc wrappers
over from hypercall buffers to the do_dm_op() marshalling helper.

The definitions of HVM_IOREQSRV_BUFIOREQ_* have to persist in hvm_op.h
as they are already in use by callers of the libxc interface.
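
For illustration, a caller of the (unchanged) libxc API looks roughly
like this; start_emulator() is a hypothetical wrapper, not code from
the tree:

    #include <xenctrl.h>

    /* Sketch of an existing caller; HVM_IOREQSRV_BUFIOREQ_ATOMIC keeps
     * its value and meaning across the HVMOP -> DMOP conversion. */
    static int start_emulator(xc_interface *xch, domid_t domid,
                              ioservid_t *id)
    {
        return xc_hvm_create_ioreq_server(xch, domid,
                                          HVM_IOREQSRV_BUFIOREQ_ATOMIC, id);
    }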

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
Cc: Ian Jackson <ian.jackson@eu.citrix.com>

v6:
- Modify const_op as appropriate.

v4:
- #define uint64_aligned_t if necessary to handle compat code generation.

v3:
- Fix pad check.

v2:
- Addressed several comments from Jan.
---
 tools/libxc/xc_domain.c          | 212 ++++++++++++++++---------------------
 xen/arch/x86/hvm/dm.c            |  93 +++++++++++++++++
 xen/arch/x86/hvm/hvm.c           | 219 ---------------------------------------
 xen/arch/x86/hvm/ioreq.c         |  36 +++----
 xen/include/asm-x86/hvm/domain.h |   3 +-
 xen/include/public/hvm/dm_op.h   | 158 ++++++++++++++++++++++++++++
 xen/include/public/hvm/hvm_op.h  | 132 +----------------------
 xen/include/xsm/dummy.h          |   6 --
 xen/include/xsm/xsm.h            |   6 --
 xen/xsm/dummy.c                  |   1 -
 xen/xsm/flask/hooks.c            |   6 --
 11 files changed, 365 insertions(+), 507 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 296b852..419a897 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1417,24 +1417,24 @@ int xc_hvm_create_ioreq_server(xc_interface *xch,
                                int handle_bufioreq,
                                ioservid_t *id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
+    struct xen_dm_op op;
+    struct xen_dm_op_create_ioreq_server *data;
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->handle_bufioreq = handle_bufioreq;
+    op.op = XEN_DMOP_create_ioreq_server;
+    data = &op.u.create_ioreq_server;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_create_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->handle_bufioreq = handle_bufioreq;
+
+    rc = do_dm_op(xch, domid, 1, &op, sizeof(op));
+    if ( rc )
+        return rc;
 
-    *id = arg->id;
+    *id = data->id;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return 0;
 }
 
 int xc_hvm_get_ioreq_server_info(xc_interface *xch,
@@ -1444,84 +1444,71 @@ int xc_hvm_get_ioreq_server_info(xc_interface *xch,
                                  xen_pfn_t *bufioreq_pfn,
                                  evtchn_port_t *bufioreq_port)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
+    struct xen_dm_op op;
+    struct xen_dm_op_get_ioreq_server_info *data;
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
+    op.op = XEN_DMOP_get_ioreq_server_info;
+    data = &op.u.get_ioreq_server_info;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_get_ioreq_server_info,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-    if ( rc != 0 )
-        goto done;
+    data->id = id;
+
+    rc = do_dm_op(xch, domid, 1, &op, sizeof(op));
+    if ( rc )
+        return rc;
 
     if ( ioreq_pfn )
-        *ioreq_pfn = arg->ioreq_pfn;
+        *ioreq_pfn = data->ioreq_pfn;
 
     if ( bufioreq_pfn )
-        *bufioreq_pfn = arg->bufioreq_pfn;
+        *bufioreq_pfn = data->bufioreq_pfn;
 
     if ( bufioreq_port )
-        *bufioreq_port = arg->bufioreq_port;
+        *bufioreq_port = data->bufioreq_port;
 
-done:
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return 0;
 }
 
 int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t domid,
                                         ioservid_t id, int is_mmio,
                                         uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_ioreq_server_range *data;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
+    op.op = XEN_DMOP_map_io_range_to_ioreq_server;
+    data = &op.u.map_io_range_to_ioreq_server;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->id = id;
+    data->type = is_mmio ? XEN_DMOP_IO_RANGE_MEMORY : XEN_DMOP_IO_RANGE_PORT;
+    data->start = start;
+    data->end = end;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
                                             ioservid_t id, int is_mmio,
                                             uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_ioreq_server_range *data;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
+    op.op = XEN_DMOP_unmap_io_range_from_ioreq_server;
+    data = &op.u.unmap_io_range_from_ioreq_server;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->id = id;
+    data->type = is_mmio ? XEN_DMOP_IO_RANGE_MEMORY : XEN_DMOP_IO_RANGE_PORT;
+    data->start = start;
+    data->end = end;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1529,37 +1516,32 @@ int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
                                       uint8_t bus, uint8_t device,
                                       uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_ioreq_server_range *data;
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
+    op.op = XEN_DMOP_map_io_range_to_ioreq_server;
+    data = &op.u.map_io_range_to_ioreq_server;
+
+    data->id = id;
+    data->type = XEN_DMOP_IO_RANGE_PCI;
 
     /*
      * The underlying hypercall will deal with ranges of PCI SBDF
      * but, for simplicity, the API only uses singletons.
      */
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->start = data->end = XEN_DMOP_PCI_SBDF((uint64_t)segment,
+                                                (uint64_t)bus,
+                                                (uint64_t)device,
+                                                (uint64_t)function);
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1567,54 +1549,49 @@ int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
                                           uint8_t bus, uint8_t device,
                                           uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_ioreq_server_range *data;
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    op.op = XEN_DMOP_unmap_io_range_from_ioreq_server;
+    data = &op.u.unmap_io_range_from_ioreq_server;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    data->id = id;
+    data->type = XEN_DMOP_IO_RANGE_PCI;
+
+    /*
+     * The underlying hypercall will deal with ranges of PCI SBDF
+     * but, for simplicity, the API only uses singletons.
+     */
+    data->start = data->end = XEN_DMOP_PCI_SBDF((uint64_t)segment,
+                                                (uint64_t)bus,
+                                                (uint64_t)device,
+                                                (uint64_t)function);
+
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_destroy_ioreq_server(xc_interface *xch,
                                 domid_t domid,
                                 ioservid_t id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_destroy_ioreq_server *data;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
+    op.op = XEN_DMOP_destroy_ioreq_server;
+    data = &op.u.destroy_ioreq_server;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_destroy_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->id = id;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_set_ioreq_server_state(xc_interface *xch,
@@ -1622,23 +1599,18 @@ int xc_hvm_set_ioreq_server_state(xc_interface *xch,
                                   ioservid_t id,
                                   int enabled)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_set_ioreq_server_state *data;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->enabled = !!enabled;
+    op.op = XEN_DMOP_set_ioreq_server_state;
+    data = &op.u.set_ioreq_server_state;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_ioreq_server_state,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->id = id;
+    data->enabled = !!enabled;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_domain_setdebugging(xc_interface *xch,
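
The converted wrappers above are intended to be drop-in compatible, so
a secondary emulator's bootstrap sequence is unchanged. A minimal
sketch (the port range and PCI BDF below are arbitrary example values):

    #include <xenctrl.h>

    static int bootstrap(xc_interface *xch, domid_t domid)
    {
        ioservid_t id;
        xen_pfn_t ioreq_pfn, bufioreq_pfn;
        evtchn_port_t bufioreq_port;
        int rc;

        rc = xc_hvm_create_ioreq_server(xch, domid,
                                        HVM_IOREQSRV_BUFIOREQ_ATOMIC, &id);
        if ( rc )
            return rc;

        /* Discover the gmfns to map and the buffered-ioreq port. */
        rc = xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                          &bufioreq_pfn, &bufioreq_port);
        if ( rc )
            return rc;

        /* Claim a port-I/O range and a PCI device for this emulator. */
        rc = xc_hvm_map_io_range_to_ioreq_server(xch, domid, id,
                                                 0 /* port I/O */,
                                                 0x3f8, 0x3ff);
        if ( rc )
            return rc;

        rc = xc_hvm_map_pcidev_to_ioreq_server(xch, domid, id,
                                               0, 0, 3, 0 /* 0000:00:03.0 */);
        if ( rc )
            return rc;

        /* No requests are delivered until the server is enabled. */
        return xc_hvm_set_ioreq_server_state(xch, domid, id, 1);
    }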
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 738f8d1..72cb33a 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -68,8 +68,101 @@ static int dm_op(domid_t domid,
         goto out;
     }
 
+    rc = -EINVAL;
+    if ( op.pad )
+        goto out;
+
     switch ( op.op )
     {
+    case XEN_DMOP_create_ioreq_server:
+    {
+        struct domain *curr_d = current->domain;
+        struct xen_dm_op_create_ioreq_server *data =
+            &op.u.create_ioreq_server;
+
+        const_op = false;
+
+        rc = -EINVAL;
+        if ( data->pad[0] || data->pad[1] || data->pad[2] )
+            break;
+
+        rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
+                                     data->handle_bufioreq, &data->id);
+        break;
+    }
+
+    case XEN_DMOP_get_ioreq_server_info:
+    {
+        struct xen_dm_op_get_ioreq_server_info *data =
+            &op.u.get_ioreq_server_info;
+
+        const_op = false;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_get_ioreq_server_info(d, data->id,
+                                       &data->ioreq_pfn,
+                                       &data->bufioreq_pfn,
+                                       &data->bufioreq_port);
+        break;
+    }
+
+    case XEN_DMOP_map_io_range_to_ioreq_server:
+    {
+        const struct xen_dm_op_ioreq_server_range *data =
+            &op.u.map_io_range_to_ioreq_server;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_map_io_range_to_ioreq_server(d, data->id, data->type,
+                                              data->start, data->end);
+        break;
+    }
+
+    case XEN_DMOP_unmap_io_range_from_ioreq_server:
+    {
+        const struct xen_dm_op_ioreq_server_range *data =
+            &op.u.unmap_io_range_from_ioreq_server;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_unmap_io_range_from_ioreq_server(d, data->id, data->type,
+                                                  data->start, data->end);
+        break;
+    }
+
+    case XEN_DMOP_set_ioreq_server_state:
+    {
+        const struct xen_dm_op_set_ioreq_server_state *data =
+            &op.u.set_ioreq_server_state;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_set_ioreq_server_state(d, data->id, !!data->enabled);
+        break;
+    }
+
+    case XEN_DMOP_destroy_ioreq_server:
+    {
+        const struct xen_dm_op_destroy_ioreq_server *data =
+            &op.u.destroy_ioreq_server;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_destroy_ioreq_server(d, data->id);
+        break;
+    }
+
     default:
         rc = -EOPNOTSUPP;
         break;
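
Note the const_op handling above: only the two ops with OUT fields
(create and get-info) clear const_op, which tells the dm_op() epilogue
(introduced earlier in this series) to copy the updated op back to the
guest buffer. Roughly:

    /* Sketch only; copy_op_to_guest() is a hypothetical stand-in for
     * the real copy-back helper, not its actual name. */
    if ( !rc && !const_op )
        rc = copy_op_to_guest(bufs, &op);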
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 916c867..3bcaab8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4379,195 +4379,6 @@ static int hvmop_flush_tlb_all(void)
     return 0;
 }
 
-static int hvmop_create_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
-{
-    struct domain *curr_d = current->domain;
-    xen_hvm_create_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_create_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
-                                 op.handle_bufioreq, &op.id);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_get_ioreq_server_info(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
-{
-    xen_hvm_get_ioreq_server_info_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_get_ioreq_server_info);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_get_ioreq_server_info(d, op.id,
-                                   &op.ioreq_pfn,
-                                   &op.bufioreq_pfn, 
-                                   &op.bufioreq_port);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_map_io_range_to_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_map_io_range_to_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.type,
-                                          op.start, op.end);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_unmap_io_range_from_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_unmap_io_range_from_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.type,
-                                              op.start, op.end);
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_set_ioreq_server_state(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
-{
-    xen_hvm_set_ioreq_server_state_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_destroy_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
-{
-    xen_hvm_destroy_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_destroy_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_destroy_ioreq_server(d, op.id);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_set_evtchn_upcall_vector(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
 {
@@ -5284,36 +5095,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
     start_iter = op & ~mask;
     switch ( op &= mask )
     {
-    case HVMOP_create_ioreq_server:
-        rc = hvmop_create_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
-        break;
-    
-    case HVMOP_get_ioreq_server_info:
-        rc = hvmop_get_ioreq_server_info(
-            guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
-        break;
-    
-    case HVMOP_map_io_range_to_ioreq_server:
-        rc = hvmop_map_io_range_to_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-    
-    case HVMOP_unmap_io_range_from_ioreq_server:
-        rc = hvmop_unmap_io_range_from_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-
-    case HVMOP_set_ioreq_server_state:
-        rc = hvmop_set_ioreq_server_state(
-            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
-        break;
-    
-    case HVMOP_destroy_ioreq_server:
-        rc = hvmop_destroy_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
-        break;
-    
     case HVMOP_set_evtchn_upcall_vector:
         rc = hvmop_set_evtchn_upcall_vector(
             guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 26a0cb8..899ba64 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -514,9 +514,9 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
         char *name;
 
         rc = asprintf(&name, "ioreq_server %d %s", s->id,
-                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
-                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
-                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
+                      (i == XEN_DMOP_IO_RANGE_PORT) ? "port" :
+                      (i == XEN_DMOP_IO_RANGE_MEMORY) ? "memory" :
+                      (i == XEN_DMOP_IO_RANGE_PCI) ? "pci" :
                       "");
         if ( rc )
             goto fail;
@@ -834,9 +834,9 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
 
             switch ( type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
+            case XEN_DMOP_IO_RANGE_PORT:
+            case XEN_DMOP_IO_RANGE_MEMORY:
+            case XEN_DMOP_IO_RANGE_PCI:
                 r = s->range[type];
                 break;
 
@@ -886,9 +886,9 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
 
             switch ( type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
+            case XEN_DMOP_IO_RANGE_PORT:
+            case XEN_DMOP_IO_RANGE_MEMORY:
+            case XEN_DMOP_IO_RANGE_PCI:
                 r = s->range[type];
                 break;
 
@@ -1129,12 +1129,12 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
 
         /* PCI config data cycle */
 
-        sbdf = HVMOP_PCI_SBDF(0,
-                              PCI_BUS(CF8_BDF(cf8)),
-                              PCI_SLOT(CF8_BDF(cf8)),
-                              PCI_FUNC(CF8_BDF(cf8)));
+        sbdf = XEN_DMOP_PCI_SBDF(0,
+                                 PCI_BUS(CF8_BDF(cf8)),
+                                 PCI_SLOT(CF8_BDF(cf8)),
+                                 PCI_FUNC(CF8_BDF(cf8)));
 
-        type = HVMOP_IO_RANGE_PCI;
+        type = XEN_DMOP_IO_RANGE_PCI;
         addr = ((uint64_t)sbdf << 32) |
                CF8_ADDR_LO(cf8) |
                (p->addr & 3);
@@ -1155,7 +1155,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     else
     {
         type = (p->type == IOREQ_TYPE_PIO) ?
-                HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
+                XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
         addr = p->addr;
     }
 
@@ -1177,19 +1177,19 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         {
             unsigned long end;
 
-        case HVMOP_IO_RANGE_PORT:
+        case XEN_DMOP_IO_RANGE_PORT:
             end = addr + p->size - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_MEMORY:
+        case XEN_DMOP_IO_RANGE_MEMORY:
             end = addr + (p->size * p->count) - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_PCI:
+        case XEN_DMOP_IO_RANGE_PCI:
             if ( rangeset_contains_singleton(r, addr >> 32) )
             {
                 p->type = IOREQ_TYPE_PCI_CONFIG;
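
For reference, after this hunk a PCI config ioreq carries the SBDF in
the upper 32 bits of its address and the config-space offset in the low
bits. An emulator might decode it as follows (a sketch; 'p' stands for
the ioreq_t the emulator receives):

    /* Mirrors the encoding built in hvm_select_ioreq_server() above. */
    uint32_t sbdf = p->addr >> 32;
    uint16_t seg  = (sbdf >> 16) & 0xffff;
    uint8_t  bus  = (sbdf >> 8) & 0xff;
    uint8_t  dev  = (sbdf >> 3) & 0x1f;
    uint8_t  fn   = sbdf & 7;
    uint32_t off  = (uint32_t)p->addr; /* config-space offset */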
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index d55d180..350f013 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -33,6 +33,7 @@
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
 #include <public/hvm/hvm_op.h>
+#include <public/hvm/dm_op.h>
 
 struct hvm_ioreq_page {
     unsigned long gmfn;
@@ -47,7 +48,7 @@ struct hvm_ioreq_vcpu {
     bool_t           pending;
 };
 
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (XEN_DMOP_IO_RANGE_PCI + 1)
 #define MAX_NR_IO_RANGES  256
 
 struct hvm_ioreq_server {
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index 8627a64..dcd407b 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -28,8 +28,166 @@
 
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
+#include "../event_channel.h"
+
+#ifndef uint64_aligned_t
+#define uint64_aligned_t uint64_t
+#endif
+
+/*
+ * IOREQ Servers
+ *
+ * The interface between an I/O emulator and Xen is called an IOREQ Server.
+ * A domain supports a single 'legacy' IOREQ Server which is instantiated if
+ * parameter...
+ *
+ * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
+ * ioreq structures), or...
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
+ * ioreq ring), or...
+ * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
+ * to request buffered I/O emulation).
+ *
+ * The following hypercalls facilitate the creation of IOREQ Servers for
+ * 'secondary' emulators which are invoked to implement port I/O, memory, or
+ * PCI config space ranges which they explicitly register.
+ */
+
+typedef uint16_t ioservid_t;
+
+/*
+ * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
+ *                               secondary emulator.
+ *
+ * The <id> handed back is unique for the target domain. The value of
+ * <handle_bufioreq> should be one of HVM_IOREQSRV_BUFIOREQ_* defined in
+ * hvm_op.h. If the value is HVM_IOREQSRV_BUFIOREQ_OFF then the buffered
+ * ioreq ring will not be allocated and hence all emulation requests to
+ * this server will be synchronous.
+ */
+#define XEN_DMOP_create_ioreq_server 1
+
+struct xen_dm_op_create_ioreq_server {
+    /* IN - should server handle buffered ioreqs */
+    uint8_t handle_bufioreq;
+    uint8_t pad[3];
+    /* OUT - server id */
+    ioservid_t id;
+};
+
+/*
+ * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
+ *                                 access IOREQ Server <id>.
+ *
+ * The emulator needs to map the synchronous ioreq structures and buffered
+ * ioreq ring (if it exists) that Xen uses to request emulation. These are
+ * hosted in the target domain's gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * respectively. In addition, if the IOREQ Server is handling buffered
+ * emulation requests, the emulator needs to bind to event channel
+ * <bufioreq_port> to listen for them. (The event channels used for
+ * synchronous emulation requests are specified in the per-CPU ioreq
+ * structures in <ioreq_pfn>).
+ * If the IOREQ Server is not handling buffered emulation requests then the
+ * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ */
+#define XEN_DMOP_get_ioreq_server_info 2
+
+struct xen_dm_op_get_ioreq_server_info {
+    /* IN - server id */
+    ioservid_t id;
+    uint16_t pad;
+    /* OUT - buffered ioreq port */
+    evtchn_port_t bufioreq_port;
+    /* OUT - sync ioreq pfn */
+    uint64_aligned_t ioreq_pfn;
+    /* OUT - buffered ioreq pfn */
+    uint64_aligned_t bufioreq_pfn;
+};
+
+/*
+ * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for
+ *                                        emulation by the client of
+ *                                        IOREQ Server <id>.
+ * XEN_DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range
+ *                                            previously registered for
+ *                                            emulation by the client of
+ *                                            IOREQ Server <id>.
+ *
+ * There are three types of I/O that can be emulated: port I/O, memory
+ * accesses and PCI config space accesses. The <type> field denotes which
+ * type of range the <start> and <end> (inclusive) fields are specifying.
+ * PCI config space ranges are specified by segment/bus/device/function
+ * values which should be encoded using the XEN_DMOP_PCI_SBDF helper macro
+ * below.
+ *
+ * NOTE: unless an emulation request falls entirely within a range mapped
+ * by a secondary emulator, it will not be passed to that emulator.
+ */
+#define XEN_DMOP_map_io_range_to_ioreq_server 3
+#define XEN_DMOP_unmap_io_range_from_ioreq_server 4
+
+struct xen_dm_op_ioreq_server_range {
+    /* IN - server id */
+    ioservid_t id;
+    uint16_t pad;
+    /* IN - type of range */
+    uint32_t type;
+# define XEN_DMOP_IO_RANGE_PORT   0 /* I/O port range */
+# define XEN_DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
+# define XEN_DMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
+    /* IN - inclusive start and end of range */
+    uint64_aligned_t start, end;
+};
+
+#define XEN_DMOP_PCI_SBDF(s,b,d,f) \
+	((((s) & 0xffff) << 16) |  \
+	 (((b) & 0xff) << 8) |     \
+	 (((d) & 0x1f) << 3) |     \
+	 ((f) & 0x07))
+
+/*
+ * XEN_DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is
+ * in the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
+ * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
+ * is in the enabled state.
+ */
+#define XEN_DMOP_set_ioreq_server_state 5
+
+struct xen_dm_op_set_ioreq_server_state {
+    /* IN - server id */
+    ioservid_t id;
+    /* IN - enabled? */
+    uint8_t enabled;
+    uint8_t pad;
+};
+
+/*
+ * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id>.
+ *
+ * Any registered I/O ranges will be automatically deregistered.
+ */
+#define XEN_DMOP_destroy_ioreq_server 6
+
+struct xen_dm_op_destroy_ioreq_server {
+    /* IN - server id */
+    ioservid_t id;
+    uint16_t pad;
+};
+
 struct xen_dm_op {
     uint32_t op;
+    uint32_t pad;
+    union {
+        struct xen_dm_op_create_ioreq_server create_ioreq_server;
+        struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info;
+        struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server;
+        struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
+        struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
+        struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
+    } u;
 };
 
 #endif /* __XEN__ || __XEN_TOOLS__ */
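
As a quick worked example of the macro above, PCI device 0000:00:03.0
encodes as:

    uint32_t sbdf = XEN_DMOP_PCI_SBDF(0, 0, 3, 0);
    /* (0 << 16) | (0 << 8) | (3 << 3) | 0 == 0x18 */

This is the singleton value that xc_hvm_map_pcidev_to_ioreq_server()
passes as both <start> and <end>.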
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index b3e45cf..6fcd86d 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -243,37 +243,10 @@ typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
 
 /*
- * IOREQ Servers
- *
- * The interface between an I/O emulator an Xen is called an IOREQ Server.
- * A domain supports a single 'legacy' IOREQ Server which is instantiated if
- * parameter...
- *
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
- * ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
- * ioreq ring), or...
- * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
- * to request buffered I/O emulation).
- * 
- * The following hypercalls facilitate the creation of IOREQ Servers for
- * 'secondary' emulators which are invoked to implement port I/O, memory, or
- * PCI config space ranges which they explicitly register.
+ * Definitions relating to XEN_DMOP_create_ioreq_server. (Defined here for
+ * backwards compatibility).
  */
 
-typedef uint16_t ioservid_t;
-
-/*
- * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
- *                            emulator servicing domain <domid>.
- *
- * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
- * the buffered ioreq ring will not be allocated and hence all emulation
- * requestes to this server will be synchronous.
- */
-#define HVMOP_create_ioreq_server 17
-struct xen_hvm_create_ioreq_server {
-    domid_t domid;           /* IN - domain to be serviced */
 #define HVM_IOREQSRV_BUFIOREQ_OFF    0
 #define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
 /*
@@ -281,107 +254,6 @@ struct xen_hvm_create_ioreq_server {
  * the pointer pair gets read atomically:
  */
 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
-    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
-    ioservid_t id;           /* OUT - server id */
-};
-typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
-
-/*
- * HVMOP_get_ioreq_server_info: Get all the information necessary to access
- *                              IOREQ Server <id>. 
- *
- * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
- * respectively. In addition, if the IOREQ Server is handling buffered
- * emulation requests, the emulator needs to bind to event channel
- * <bufioreq_port> to listen for them. (The event channels used for
- * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
- * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
- */
-#define HVMOP_get_ioreq_server_info 18
-struct xen_hvm_get_ioreq_server_info {
-    domid_t domid;                 /* IN - domain to be serviced */
-    ioservid_t id;                 /* IN - server id */
-    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
-    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
-    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
-};
-typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
-
-/*
- * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
- *                                   for emulation by the client of IOREQ
- *                                   Server <id>
- * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
- *                                       for emulation by the client of IOREQ
- *                                       Server <id>
- *
- * There are three types of I/O that can be emulated: port I/O, memory accesses
- * and PCI config space accesses. The <type> field denotes which type of range
- * the <start> and <end> (inclusive) fields are specifying.
- * PCI config space ranges are specified by segment/bus/device/function values
- * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
- *
- * NOTE: unless an emulation request falls entirely within a range mapped
- * by a secondary emulator, it will not be passed to that emulator.
- */
-#define HVMOP_map_io_range_to_ioreq_server 19
-#define HVMOP_unmap_io_range_from_ioreq_server 20
-struct xen_hvm_io_range {
-    domid_t domid;               /* IN - domain to be serviced */
-    ioservid_t id;               /* IN - server id */
-    uint32_t type;               /* IN - type of range */
-# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
-# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
-# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
-    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
-};
-typedef struct xen_hvm_io_range xen_hvm_io_range_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
-
-#define HVMOP_PCI_SBDF(s,b,d,f)                 \
-	((((s) & 0xffff) << 16) |                   \
-	 (((b) & 0xff) << 8) |                      \
-	 (((d) & 0x1f) << 3) |                      \
-	 ((f) & 0x07))
-
-/*
- * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
- *                             <domid>.
- *
- * Any registered I/O ranges will be automatically deregistered.
- */
-#define HVMOP_destroy_ioreq_server 21
-struct xen_hvm_destroy_ioreq_server {
-    domid_t domid; /* IN - domain to be serviced */
-    ioservid_t id; /* IN - server id */
-};
-typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
-
-/*
- * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
- *                               domain <domid>.
- *
- * The IOREQ Server will not be passed any emulation requests until it is in the
- * enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
- * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
- * the enabled state.
- */
-#define HVMOP_set_ioreq_server_state 22
-struct xen_hvm_set_ioreq_server_state {
-    domid_t domid;   /* IN - domain to be serviced */
-    ioservid_t id;   /* IN - server id */
-    uint8_t enabled; /* IN - enabled? */    
-};
-typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
 
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index b206f5a..9f7c174 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -634,12 +634,6 @@ static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index e2d336f..b5845a2 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -166,7 +166,6 @@ struct xsm_operations {
     int (*hvm_set_isa_irq_level) (struct domain *d);
     int (*hvm_set_pci_link_route) (struct domain *d);
     int (*hvm_inject_msi) (struct domain *d);
-    int (*hvm_ioreq_server) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
     int (*memtype) (uint32_t access);
@@ -656,11 +655,6 @@ static inline int xsm_hvm_inject_msi (xsm_default_t def, struct domain *d)
     return xsm_ops->hvm_inject_msi(d);
 }
 
-static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
-{
-    return xsm_ops->hvm_ioreq_server(d, op);
-}
-
 static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
 {
     return xsm_ops->mem_sharing_op(d, cd, op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index a082b28..d544ec1 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -149,7 +149,6 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, hvm_set_isa_irq_level);
     set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
-    set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
     set_to_dummy_if_null(ops, machine_memory_map);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 383dd4f..1ce8a36 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1525,11 +1525,6 @@ static int flask_hvm_inject_msi(struct domain *d)
     return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
 }
 
-static int flask_hvm_ioreq_server(struct domain *d, int op)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
-}
-
 static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
 {
     int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
@@ -1808,7 +1803,6 @@ static struct xsm_operations flask_ops = {
     .hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
     .hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
     .hvm_inject_msi = flask_hvm_inject_msi,
-    .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
     .apic = flask_apic,
     .machine_memory_map = flask_machine_memory_map,
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
