From: Petre Pircalabu <ppircalabu@bitdefender.com>
To: xen-devel@lists.xenproject.org
Cc: Petre Pircalabu <ppircalabu@bitdefender.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>, Wei Liu <wl@xen.org>
Subject: [PATCH 1/9] tools/libxc: Consistent usage of xc_vm_event_* interface
Date: Thu, 30 May 2019 17:18:15 +0300
Message-ID: <a7acd18a3c4bcd288338de12d13ce1cb9fb6d8b2.1559224640.git.ppircalabu@bitdefender.com>
In-Reply-To: <cover.1559224640.git.ppircalabu@bitdefender.com>

Modified xc_mem_paging_enable to use xc_vm_event_enable directly and
moved the ring_page handling from the client (xenpaging) into libxc.

Restricted xc_vm_event_control usage to only the simplest domctls, which
do not expect any return values, and changed xc_vm_event_enable to call
do_domctl directly.

Removed xc_memshr_ring_enable/disable and xc_memshr_domain_resume.

Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>
---
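Note for reviewers (not part of the commit): a minimal sketch of the
caller-side contract after this change, modelled on the xenpaging hunk
below. Apart from the libxc calls themselves, the names (xch, domid) and
the surrounding error handling are illustrative only:

    uint32_t port;
    void *ring_page;

    /*
     * The enable call now maps the ring page inside libxc and returns it.
     * On failure it returns NULL with errno set (e.g. EBUSY if paging is
     * already enabled for the domain).
     */
    ring_page = xc_mem_paging_enable(xch, domid, &port);
    if ( ring_page == NULL )
    {
        PERROR("Error enabling paging");   /* errno holds the cause */
        return -1;
    }

    /* ... initialise the ring and consume events via <port> ... */
    /* On teardown the client unmaps the page it was handed back. */
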
 tools/libxc/include/xenctrl.h | 49 +--------------------------------
 tools/libxc/xc_mem_paging.c   | 23 +++++-----------
 tools/libxc/xc_memshr.c       | 34 -----------------------
 tools/libxc/xc_monitor.c      | 31 +++++++++++++++++----
 tools/libxc/xc_private.h      |  2 +-
 tools/libxc/xc_vm_event.c     | 64 ++++++++++++++++---------------------------
 tools/xenpaging/xenpaging.c   | 42 +++-------------------------
 7 files changed, 62 insertions(+), 183 deletions(-)
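
Likewise, with the port output parameter gone, the remaining
xc_vm_event_control() calls inside libxc reduce to plain domctls with no
return value beyond rc, e.g. (sketch):

    int rc = xc_vm_event_control(xch, domid, XEN_VM_EVENT_DISABLE,
                                 XEN_DOMCTL_VM_EVENT_OP_PAGING);
    if ( rc != 0 )
        PERROR("Error disabling paging");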

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 538007a..28fdbc0 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1949,7 +1949,7 @@ int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
  * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
  * support is considered experimental.
  */
-int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
+void *xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
 int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
 int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
 int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
@@ -2082,53 +2082,6 @@ int xc_memshr_control(xc_interface *xch,
                       uint32_t domid,
                       int enable);
 
-/* Create a communication ring in which the hypervisor will place ENOMEM
- * notifications.
- *
- * ENOMEM happens when unsharing pages: a Copy-on-Write duplicate needs to be
- * allocated, and thus the out-of-memory error occurr.
- *
- * For complete examples on how to plumb a notification ring, look into
- * xenpaging or xen-access.
- *
- * On receipt of a notification, the helper should ensure there is memory
- * available to the domain before retrying.
- *
- * If a domain encounters an ENOMEM condition when sharing and this ring
- * has not been set up, the hypervisor will crash the domain.
- *
- * Fails with:
- *  EINVAL if port is NULL
- *  EINVAL if the sharing ring has already been enabled
- *  ENOSYS if no guest gfn has been specified to host the ring via an hvm param
- *  EINVAL if the gfn for the ring has not been populated
- *  ENOENT if the gfn for the ring is paged out, or cannot be unshared
- *  EINVAL if the gfn for the ring cannot be written to
- *  EINVAL if the domain is dying
- *  ENOSPC if an event channel cannot be allocated for the ring
- *  ENOMEM if memory cannot be allocated for internal data structures
- *  EINVAL or EACCESS if the request is denied by the security policy
- */
-
-int xc_memshr_ring_enable(xc_interface *xch, 
-                          uint32_t domid,
-                          uint32_t *port);
-/* Disable the ring for ENOMEM communication.
- * May fail with EINVAL if the ring was not enabled in the first place.
- */
-int xc_memshr_ring_disable(xc_interface *xch, 
-                           uint32_t domid);
-
-/*
- * Calls below return EINVAL if sharing has not been enabled for the domain
- * Calls below return EINVAL if the domain is dying
- */
-/* Once a reponse to an ENOMEM notification is prepared, the tool can
- * notify the hypervisor to re-schedule the faulting vcpu of the domain with an
- * event channel kick and/or this call. */
-int xc_memshr_domain_resume(xc_interface *xch,
-                            uint32_t domid);
-
 /* Select a page for sharing. 
  *
  * A 64 bit opaque handle will be stored in handle.  The hypervisor ensures
diff --git a/tools/libxc/xc_mem_paging.c b/tools/libxc/xc_mem_paging.c
index a067706..08468fb 100644
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -37,35 +37,26 @@ static int xc_mem_paging_memop(xc_interface *xch, uint32_t domain_id,
     return do_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
 }
 
-int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
-                         uint32_t *port)
+void *xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
+                           uint32_t *port)
 {
-    if ( !port )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    return xc_vm_event_control(xch, domain_id,
-                               XEN_VM_EVENT_ENABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
-                               port);
+    return xc_vm_event_enable(xch, domain_id,
+                              XEN_DOMCTL_VM_EVENT_OP_PAGING,
+                              port);
 }
 
 int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
-                               NULL);
+                               XEN_DOMCTL_VM_EVENT_OP_PAGING);
 }
 
 int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
-                               NULL);
+                               XEN_DOMCTL_VM_EVENT_OP_PAGING);
 }
 
 int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
diff --git a/tools/libxc/xc_memshr.c b/tools/libxc/xc_memshr.c
index d5e135e..06f613a 100644
--- a/tools/libxc/xc_memshr.c
+++ b/tools/libxc/xc_memshr.c
@@ -41,31 +41,6 @@ int xc_memshr_control(xc_interface *xch,
     return do_domctl(xch, &domctl);
 }
 
-int xc_memshr_ring_enable(xc_interface *xch, 
-                          uint32_t domid,
-                          uint32_t *port)
-{
-    if ( !port )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    return xc_vm_event_control(xch, domid,
-                               XEN_VM_EVENT_ENABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
-                               port);
-}
-
-int xc_memshr_ring_disable(xc_interface *xch, 
-                           uint32_t domid)
-{
-    return xc_vm_event_control(xch, domid,
-                               XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
-                               NULL);
-}
-
 static int xc_memshr_memop(xc_interface *xch, uint32_t domid,
                             xen_mem_sharing_op_t *mso)
 {
@@ -200,15 +175,6 @@ int xc_memshr_range_share(xc_interface *xch,
     return xc_memshr_memop(xch, source_domain, &mso);
 }
 
-int xc_memshr_domain_resume(xc_interface *xch,
-                            uint32_t domid)
-{
-    return xc_vm_event_control(xch, domid,
-                               XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
-                               NULL);
-}
-
 int xc_memshr_debug_gfn(xc_interface *xch,
                         uint32_t domid,
                         unsigned long gfn)
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
index 4ac823e..d190c29 100644
--- a/tools/libxc/xc_monitor.c
+++ b/tools/libxc/xc_monitor.c
@@ -24,24 +24,43 @@
 
 void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port)
 {
-    return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
-                              port);
+    void *buffer;
+    int saved_errno;
+
+    /* Pause the domain for ring page setup */
+    if ( xc_domain_pause(xch, domain_id) )
+    {
+        PERROR("Unable to pause domain\n");
+        return NULL;
+    }
+
+    buffer = xc_vm_event_enable(xch, domain_id,
+                                HVM_PARAM_MONITOR_RING_PFN,
+                                port);
+    saved_errno = errno;
+    if ( xc_domain_unpause(xch, domain_id) )
+    {
+        if ( buffer )
+            saved_errno = errno;
+        PERROR("Unable to unpause domain");
+    }
+
+    errno = saved_errno;
+    return buffer;
 }
 
 int xc_monitor_disable(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
-                               NULL);
+                               XEN_DOMCTL_VM_EVENT_OP_MONITOR);
 }
 
 int xc_monitor_resume(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
-                               NULL);
+                               XEN_DOMCTL_VM_EVENT_OP_MONITOR);
 }
 
 int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index adc3b6a..663e78b 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -412,7 +412,7 @@ int xc_ffs64(uint64_t x);
  * vm_event operations. Internal use only.
  */
 int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode, uint32_t *port);
+                        unsigned int mode);
 /*
  * Enables vm_event and returns the mapped ring page indicated by param.
  * param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
diff --git a/tools/libxc/xc_vm_event.c b/tools/libxc/xc_vm_event.c
index a97c615..ea10366 100644
--- a/tools/libxc/xc_vm_event.c
+++ b/tools/libxc/xc_vm_event.c
@@ -23,20 +23,16 @@
 #include "xc_private.h"
 
 int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode, uint32_t *port)
+                        unsigned int mode)
 {
     DECLARE_DOMCTL;
-    int rc;
 
     domctl.cmd = XEN_DOMCTL_vm_event_op;
     domctl.domain = domain_id;
     domctl.u.vm_event_op.op = op;
     domctl.u.vm_event_op.mode = mode;
 
-    rc = do_domctl(xch, &domctl);
-    if ( !rc && port )
-        *port = domctl.u.vm_event_op.u.enable.port;
-    return rc;
+    return do_domctl(xch, &domctl);
 }
 
 void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
@@ -46,7 +42,8 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
     uint64_t pfn;
     xen_pfn_t ring_pfn, mmap_pfn;
     unsigned int op, mode;
-    int rc1, rc2, saved_errno;
+    int rc;
+    DECLARE_DOMCTL;
 
     if ( !port )
     {
@@ -54,17 +51,9 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
         return NULL;
     }
 
-    /* Pause the domain for ring page setup */
-    rc1 = xc_domain_pause(xch, domain_id);
-    if ( rc1 != 0 )
-    {
-        PERROR("Unable to pause domain\n");
-        return NULL;
-    }
-
     /* Get the pfn of the ring page */
-    rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
-    if ( rc1 != 0 )
+    rc = xc_hvm_param_get(xch, domain_id, param, &pfn);
+    if ( rc != 0 )
     {
         PERROR("Failed to get pfn of ring page\n");
         goto out;
@@ -72,13 +61,13 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
 
     ring_pfn = pfn;
     mmap_pfn = pfn;
-    rc1 = xc_get_pfn_type_batch(xch, domain_id, 1, &mmap_pfn);
-    if ( rc1 || mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+    rc = xc_get_pfn_type_batch(xch, domain_id, 1, &mmap_pfn);
+    if ( rc || mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
     {
         /* Page not in the physmap, try to populate it */
-        rc1 = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
+        rc = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
                                               &ring_pfn);
-        if ( rc1 != 0 )
+        if ( rc != 0 )
         {
             PERROR("Failed to populate ring pfn\n");
             goto out;
@@ -87,7 +76,7 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
 
     mmap_pfn = ring_pfn;
     ring_page = xc_map_foreign_pages(xch, domain_id, PROT_READ | PROT_WRITE,
-                                         &mmap_pfn, 1);
+                                     &mmap_pfn, 1);
     if ( !ring_page )
     {
         PERROR("Could not map the ring page\n");
@@ -117,40 +106,35 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
      */
     default:
         errno = EINVAL;
-        rc1 = -1;
+        rc = -1;
         goto out;
     }
 
-    rc1 = xc_vm_event_control(xch, domain_id, op, mode, port);
-    if ( rc1 != 0 )
+    domctl.cmd = XEN_DOMCTL_vm_event_op;
+    domctl.domain = domain_id;
+    domctl.u.vm_event_op.op = op;
+    domctl.u.vm_event_op.mode = mode;
+
+    rc = do_domctl(xch, &domctl);
+    if ( rc != 0 )
     {
         PERROR("Failed to enable vm_event\n");
         goto out;
     }
 
+    *port = domctl.u.vm_event_op.u.enable.port;
+
     /* Remove the ring_pfn from the guest's physmap */
-    rc1 = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
-    if ( rc1 != 0 )
+    rc = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
+    if ( rc != 0 )
         PERROR("Failed to remove ring page from guest physmap");
 
  out:
-    saved_errno = errno;
-
-    rc2 = xc_domain_unpause(xch, domain_id);
-    if ( rc1 != 0 || rc2 != 0 )
+    if ( rc != 0 )
     {
-        if ( rc2 != 0 )
-        {
-            if ( rc1 == 0 )
-                saved_errno = errno;
-            PERROR("Unable to unpause domain");
-        }
-
         if ( ring_page )
             xenforeignmemory_unmap(xch->fmem, ring_page, 1);
         ring_page = NULL;
-
-        errno = saved_errno;
     }
 
     return ring_page;
diff --git a/tools/xenpaging/xenpaging.c b/tools/xenpaging/xenpaging.c
index d0571ca..b4a3a5c 100644
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -337,40 +337,11 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
         goto err;
     }
 
-    /* Map the ring page */
-    xc_get_hvm_param(xch, paging->vm_event.domain_id, 
-                        HVM_PARAM_PAGING_RING_PFN, &ring_pfn);
-    mmap_pfn = ring_pfn;
-    paging->vm_event.ring_page = 
-        xc_map_foreign_pages(xch, paging->vm_event.domain_id,
-                             PROT_READ | PROT_WRITE, &mmap_pfn, 1);
-    if ( !paging->vm_event.ring_page )
-    {
-        /* Map failed, populate ring page */
-        rc = xc_domain_populate_physmap_exact(paging->xc_handle, 
-                                              paging->vm_event.domain_id,
-                                              1, 0, 0, &ring_pfn);
-        if ( rc != 0 )
-        {
-            PERROR("Failed to populate ring gfn\n");
-            goto err;
-        }
-
-        paging->vm_event.ring_page = 
-            xc_map_foreign_pages(xch, paging->vm_event.domain_id,
-                                 PROT_READ | PROT_WRITE,
-                                 &mmap_pfn, 1);
-        if ( !paging->vm_event.ring_page )
-        {
-            PERROR("Could not map the ring page\n");
-            goto err;
-        }
-    }
-    
     /* Initialise Xen */
-    rc = xc_mem_paging_enable(xch, paging->vm_event.domain_id,
-                             &paging->vm_event.evtchn_port);
-    if ( rc != 0 )
+    paging->vm_event.ring_page =
+            xc_mem_paging_enable(xch, paging->vm_event.domain_id,
+                                 &paging->vm_event.evtchn_port);
+    if ( paging->vm_event.ring_page == NULL )
     {
         switch ( errno ) {
             case EBUSY:
@@ -418,11 +389,6 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
                    (vm_event_sring_t *)paging->vm_event.ring_page,
                    PAGE_SIZE);
 
-    /* Now that the ring is set, remove it from the guest's physmap */
-    if ( xc_domain_decrease_reservation_exact(xch, 
-                    paging->vm_event.domain_id, 1, 0, &ring_pfn) )
-        PERROR("Failed to remove ring from guest physmap");
-
     /* Get max_pages from guest if not provided via cmdline */
     if ( !paging->max_pages )
     {
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Thread overview: 67+ messages
2019-05-30 14:18 [PATCH 0/9] Per vcpu vm_event channels Petre Pircalabu
2019-05-30 14:18 ` [Xen-devel] " Petre Pircalabu
2019-05-30 14:18 ` Petre Pircalabu [this message]
2019-05-30 14:18   ` [Xen-devel] [PATCH 1/9] tools/libxc: Consistent usage of xc_vm_event_* interface Petre Pircalabu
2019-05-31 23:01   ` Andrew Cooper
2019-05-31 23:01     ` [Xen-devel] " Andrew Cooper
2019-06-04 14:23     ` Petre Ovidiu PIRCALABU
2019-06-04 16:07       ` Andrew Cooper
2019-05-30 14:18 ` [PATCH 2/9] vm_event: Define VM_EVENT type Petre Pircalabu
2019-05-30 14:18   ` [Xen-devel] " Petre Pircalabu
2019-05-31 23:26   ` Andrew Cooper
2019-05-31 23:26     ` [Xen-devel] " Andrew Cooper
2019-06-03 22:22     ` Tamas K Lengyel
2019-06-03 22:22       ` [Xen-devel] " Tamas K Lengyel
2019-06-03 22:26       ` Tamas K Lengyel
2019-06-03 22:26         ` [Xen-devel] " Tamas K Lengyel
2019-06-04 16:09         ` Andrew Cooper
2019-06-04 10:12     ` Petre Ovidiu PIRCALABU
2019-06-04 10:12       ` [Xen-devel] " Petre Ovidiu PIRCALABU
2019-06-03 15:51   ` Jan Beulich
2019-06-03 15:51     ` [Xen-devel] " Jan Beulich
2019-05-30 14:18 ` [PATCH 3/9] vm_event: Make ‘local’ functions ‘static’ Petre Pircalabu
2019-05-30 14:18   ` [Xen-devel] " Petre Pircalabu
2019-05-31 23:28   ` Andrew Cooper
2019-05-31 23:28     ` [Xen-devel] " Andrew Cooper
2019-06-02  0:36   ` Tamas K Lengyel
2019-06-02  0:36     ` [Xen-devel] " Tamas K Lengyel
2019-05-30 14:18 ` [PATCH 4/9] vm_event: Remove "ring" suffix from vm_event_check_ring Petre Pircalabu
2019-05-30 14:18   ` [Xen-devel] " Petre Pircalabu
2019-05-31 23:32   ` Andrew Cooper
2019-05-31 23:32     ` [Xen-devel] " Andrew Cooper
2019-06-05 15:52   ` Tamas K Lengyel
2019-05-30 14:18 ` [PATCH 5/9] vm_event: Simplify vm_event interface Petre Pircalabu
2019-05-30 14:18   ` [Xen-devel] " Petre Pircalabu
2019-05-31 23:43   ` Andrew Cooper
2019-05-31 23:43     ` [Xen-devel] " Andrew Cooper
2019-06-01  0:06     ` Andrew Cooper
2019-06-01  0:06       ` [Xen-devel] " Andrew Cooper
2019-06-03 15:33       ` Petre Ovidiu PIRCALABU
2019-06-03 15:33         ` [Xen-devel] " Petre Ovidiu PIRCALABU
2019-05-30 14:18 ` [PATCH 6/9] vm_event: Move struct vm_event_domain to vm_event.c Petre Pircalabu
2019-05-30 14:18   ` [Xen-devel] " Petre Pircalabu
2019-05-31 23:44   ` Andrew Cooper
2019-05-31 23:44     ` [Xen-devel] " Andrew Cooper
2019-06-03 11:28     ` Petre Ovidiu PIRCALABU
2019-06-03 11:28       ` [Xen-devel] " Petre Ovidiu PIRCALABU
2019-06-05 15:53   ` Tamas K Lengyel
2019-05-30 14:18 ` [PATCH 7/9] vm_event: Decouple implementation details from interface Petre Pircalabu
2019-05-30 14:18   ` [Xen-devel] " Petre Pircalabu
2019-05-30 14:18 ` [PATCH 8/9] vm_event: Add vm_event_ng interface Petre Pircalabu
2019-05-30 14:18   ` [Xen-devel] " Petre Pircalabu
2019-06-04 14:43   ` Andrew Cooper
2019-06-05 17:01     ` Petre Ovidiu PIRCALABU
2019-06-06  8:37       ` Jan Beulich
2019-06-06 13:48         ` Petre Ovidiu PIRCALABU
2019-06-06 14:16           ` Jan Beulich
2019-05-30 14:18 ` [PATCH 9/9] xen-access: Add support for " Petre Pircalabu
2019-05-30 14:18   ` [Xen-devel] " Petre Pircalabu
2019-06-04 16:04   ` Andrew Cooper
2019-05-30 15:27 ` [PATCH 0/9] Per vcpu vm_event channels Tamas K Lengyel
2019-05-30 15:27   ` [Xen-devel] " Tamas K Lengyel
2019-05-31  7:07   ` Petre Ovidiu PIRCALABU
2019-05-31  7:07     ` [Xen-devel] " Petre Ovidiu PIRCALABU
2019-06-01  0:25 ` Andrew Cooper
2019-06-01  0:25   ` [Xen-devel] " Andrew Cooper
2019-06-03 14:04   ` Petre Ovidiu PIRCALABU
2019-06-03 14:04     ` [Xen-devel] " Petre Ovidiu PIRCALABU
