* [PATCH v5] common/vm_event: Initialize vm_event lists on domain creation
From: Alexandru Isaila @ 2017-08-29 14:17 UTC
  To: xen-devel
  Cc: tamas, wei.liu2, rcojocaru, George.Dunlap, andrew.cooper3,
	ian.jackson, tim, julien.grall, sstabellini, jbeulich,
	Alexandru Isaila

The patch splits the vm_event structure into three: vm_event_share,
vm_event_paging and vm_event_monitor. The allocation of each
structure is moved into vm_event_enable so that it is
allocated/initialized only when needed, and freed in vm_event_disable.

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V4:
	- Replaced all NULL checks with vm_event_check_ring

Note: Not tested on arm; compiled on both arm and x86.
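
In outline (an editorial sketch of the before/after data layout,
condensed from the diff below, not extra code in the patch):

    /* Before: one wrapper, unconditionally xzalloc'd in domain_create() */
    struct vm_event_per_domain {
        struct vm_event_domain share;   /* memory sharing */
        struct vm_event_domain paging;  /* memory paging */
        struct vm_event_domain monitor; /* monitor */
    };
    struct vm_event_per_domain *vm_event;

    /* After: three independent pointers in struct domain, each NULL
     * until its ring is enabled via vm_event_enable() and freed again
     * in vm_event_disable(). */
    struct vm_event_domain *vm_event_share;
    struct vm_event_domain *vm_event_paging;
    struct vm_event_domain *vm_event_monitor;
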
---
 xen/arch/arm/mem_access.c     |   2 +-
 xen/arch/x86/mm/mem_access.c  |   2 +-
 xen/arch/x86/mm/mem_paging.c  |   3 +-
 xen/arch/x86/mm/mem_sharing.c |   4 +-
 xen/arch/x86/mm/p2m.c         |  10 +--
 xen/common/domain.c           |  13 ++--
 xen/common/mem_access.c       |   2 +-
 xen/common/monitor.c          |   4 +-
 xen/common/vm_event.c         | 146 ++++++++++++++++++++++++------------------
 xen/drivers/passthrough/pci.c |   3 +-
 xen/include/xen/sched.h       |  18 ++----
 11 files changed, 115 insertions(+), 92 deletions(-)

diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
index e0888bb..a7f0cae 100644
--- a/xen/arch/arm/mem_access.c
+++ b/xen/arch/arm/mem_access.c
@@ -256,7 +256,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
     }
 
     /* Otherwise, check if there is a vm_event monitor subscriber */
-    if ( !vm_event_check_ring(&v->domain->vm_event->monitor) )
+    if ( !vm_event_check_ring(v->domain->vm_event_monitor) )
     {
         /* No listener */
         if ( p2m->access_required )
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 5adaf6d..414e38f 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -179,7 +179,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     gfn_unlock(p2m, gfn, 0);
 
     /* Otherwise, check if there is a memory event listener, and send the message along */
-    if ( !vm_event_check_ring(&d->vm_event->monitor) || !req_ptr )
+    if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr )
     {
         /* No listener */
         if ( p2m->access_required )
diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index a049e0d..54a94fa 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -22,6 +22,7 @@
 
 #include <asm/p2m.h>
 #include <xen/guest_access.h>
+#include <xen/vm_event.h>
 #include <xsm/xsm.h>
 
 int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
@@ -43,7 +44,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
         goto out;
 
     rc = -ENODEV;
-    if ( unlikely(!d->vm_event->paging.ring_page) )
+    if ( unlikely(!vm_event_check_ring(d->vm_event_paging)) )
         goto out;
 
     switch( mpo.op )
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 1f20ce7..12fb9cc 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -563,7 +563,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
     };
 
     if ( (rc = __vm_event_claim_slot(d, 
-                        &d->vm_event->share, allow_sleep)) < 0 )
+                        d->vm_event_share, allow_sleep)) < 0 )
         return rc;
 
     if ( v->domain == d )
@@ -572,7 +572,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
         vm_event_vcpu_pause(v);
     }
 
-    vm_event_put_request(d, &d->vm_event->share, &req);
+    vm_event_put_request(d, d->vm_event_share, &req);
 
     return 0;
 }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e8a57d1..6ae23be 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1454,7 +1454,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
      * correctness of the guest execution at this point.  If this is the only
      * page that happens to be paged-out, we'll be okay..  but it's likely the
      * guest will crash shortly anyways. */
-    int rc = vm_event_claim_slot(d, &d->vm_event->paging);
+    int rc = vm_event_claim_slot(d, d->vm_event_paging);
     if ( rc < 0 )
         return;
 
@@ -1468,7 +1468,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
         /* Evict will fail now, tag this request for pager */
         req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
 
-    vm_event_put_request(d, &d->vm_event->paging, &req);
+    vm_event_put_request(d, d->vm_event_paging, &req);
 }
 
 /**
@@ -1505,7 +1505,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* We're paging. There should be a ring */
-    int rc = vm_event_claim_slot(d, &d->vm_event->paging);
+    int rc = vm_event_claim_slot(d, d->vm_event_paging);
     if ( rc == -ENOSYS )
     {
         gdprintk(XENLOG_ERR, "Domain %hu paging gfn %lx yet no ring "
@@ -1543,7 +1543,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
-        vm_event_cancel_slot(d, &d->vm_event->paging);
+        vm_event_cancel_slot(d, d->vm_event_paging);
         return;
     }
 
@@ -1551,7 +1551,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     req.u.mem_paging.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 
-    vm_event_put_request(d, &d->vm_event->paging, &req);
+    vm_event_put_request(d, d->vm_event_paging, &req);
 }
 
 /**
diff --git a/xen/common/domain.c b/xen/common/domain.c
index b22aacc..30f507b 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -363,9 +363,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
         poolid = 0;
 
         err = -ENOMEM;
-        d->vm_event = xzalloc(struct vm_event_per_domain);
-        if ( !d->vm_event )
-            goto fail;
 
         d->pbuf = xzalloc_array(char, DOMAIN_PBUF_SIZE);
         if ( !d->pbuf )
@@ -403,7 +400,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
     if ( hardware_domain == d )
         hardware_domain = old_hwdom;
     atomic_set(&d->refcnt, DOMAIN_DESTROYED);
-    xfree(d->vm_event);
     xfree(d->pbuf);
     if ( init_status & INIT_arch )
         arch_domain_destroy(d);
@@ -820,7 +816,14 @@ static void complete_domain_destroy(struct rcu_head *head)
     free_xenoprof_pages(d);
 #endif
 
-    xfree(d->vm_event);
+#ifdef CONFIG_HAS_MEM_PAGING
+    xfree(d->vm_event_paging);
+#endif
+    xfree(d->vm_event_monitor);
+#ifdef CONFIG_HAS_MEM_SHARING
+    xfree(d->vm_event_share);
+#endif
+
     xfree(d->pbuf);
 
     for ( i = d->max_vcpus - 1; i >= 0; i-- )
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 19f63bb..1bf6824 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -52,7 +52,7 @@ int mem_access_memop(unsigned long cmd,
         goto out;
 
     rc = -ENODEV;
-    if ( unlikely(!d->vm_event->monitor.ring_page) )
+    if ( unlikely(!vm_event_check_ring(d->vm_event_monitor)) )
         goto out;
 
     switch ( mao.op )
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index 451f42f..70d38d4 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -92,7 +92,7 @@ int monitor_traps(struct vcpu *v, bool_t sync, vm_event_request_t *req)
     int rc;
     struct domain *d = v->domain;
 
-    rc = vm_event_claim_slot(d, &d->vm_event->monitor);
+    rc = vm_event_claim_slot(d, d->vm_event_monitor);
     switch ( rc )
     {
     case 0:
@@ -123,7 +123,7 @@ int monitor_traps(struct vcpu *v, bool_t sync, vm_event_request_t *req)
     }
 
     vm_event_fill_regs(req);
-    vm_event_put_request(d, &d->vm_event->monitor, req);
+    vm_event_put_request(d, d->vm_event_monitor, req);
 
     return rc;
 }
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 9291db6..a9b47e2 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -42,7 +42,7 @@
 static int vm_event_enable(
     struct domain *d,
     xen_domctl_vm_event_op_t *vec,
-    struct vm_event_domain *ved,
+    struct vm_event_domain **ved,
     int pause_flag,
     int param,
     xen_event_channel_notification_t notification_fn)
@@ -50,32 +50,37 @@ static int vm_event_enable(
     int rc;
     unsigned long ring_gfn = d->arch.hvm_domain.params[param];
 
+    if ( !*ved )
+        (*ved) = xzalloc(struct vm_event_domain);
+    if ( !*ved )
+        return -ENOMEM;
+
     /* Only one helper at a time. If the helper crashed,
      * the ring is in an undefined state and so is the guest.
      */
-    if ( ved->ring_page )
-        return -EBUSY;
+    if ( (*ved)->ring_page )
+        return -EBUSY;
 
     /* The parameter defaults to zero, and it should be
      * set to something */
     if ( ring_gfn == 0 )
         return -ENOSYS;
 
-    vm_event_ring_lock_init(ved);
-    vm_event_ring_lock(ved);
+    vm_event_ring_lock_init(*ved);
+    vm_event_ring_lock(*ved);
 
     rc = vm_event_init_domain(d);
 
     if ( rc < 0 )
         goto err;
 
-    rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
-                                    &ved->ring_page);
+    rc = prepare_ring_for_helper(d, ring_gfn, &(*ved)->ring_pg_struct,
+                                    &(*ved)->ring_page);
     if ( rc < 0 )
         goto err;
 
     /* Set the number of currently blocked vCPUs to 0. */
-    ved->blocked = 0;
+    (*ved)->blocked = 0;
 
     /* Allocate event channel */
     rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
@@ -83,26 +88,28 @@ static int vm_event_enable(
     if ( rc < 0 )
         goto err;
 
-    ved->xen_port = vec->port = rc;
+    (*ved)->xen_port = vec->port = rc;
 
     /* Prepare ring buffer */
-    FRONT_RING_INIT(&ved->front_ring,
-                    (vm_event_sring_t *)ved->ring_page,
+    FRONT_RING_INIT(&(*ved)->front_ring,
+                    (vm_event_sring_t *)(*ved)->ring_page,
                     PAGE_SIZE);
 
     /* Save the pause flag for this particular ring. */
-    ved->pause_flag = pause_flag;
+    (*ved)->pause_flag = pause_flag;
 
     /* Initialize the last-chance wait queue. */
-    init_waitqueue_head(&ved->wq);
+    init_waitqueue_head(&(*ved)->wq);
 
-    vm_event_ring_unlock(ved);
+    vm_event_ring_unlock((*ved));
     return 0;
 
  err:
-    destroy_ring_for_helper(&ved->ring_page,
-                            ved->ring_pg_struct);
-    vm_event_ring_unlock(ved);
+    destroy_ring_for_helper(&(*ved)->ring_page,
+                            (*ved)->ring_pg_struct);
+    vm_event_ring_unlock((*ved));
+    xfree(*ved);
+    *ved = NULL;
 
     return rc;
 }
@@ -187,41 +194,44 @@ void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
         vm_event_wake_blocked(d, ved);
 }
 
-static int vm_event_disable(struct domain *d, struct vm_event_domain *ved)
+static int vm_event_disable(struct domain *d, struct vm_event_domain **ved)
 {
-    if ( ved->ring_page )
+    if ( vm_event_check_ring(*ved) )
     {
         struct vcpu *v;
 
-        vm_event_ring_lock(ved);
+        vm_event_ring_lock(*ved);
 
-        if ( !list_empty(&ved->wq.list) )
+        if ( !list_empty(&(*ved)->wq.list) )
         {
-            vm_event_ring_unlock(ved);
+            vm_event_ring_unlock(*ved);
             return -EBUSY;
         }
 
         /* Free domU's event channel and leave the other one unbound */
-        free_xen_event_channel(d, ved->xen_port);
+        free_xen_event_channel(d, (*ved)->xen_port);
 
         /* Unblock all vCPUs */
         for_each_vcpu ( d, v )
         {
-            if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
+            if ( test_and_clear_bit((*ved)->pause_flag, &v->pause_flags) )
             {
                 vcpu_unpause(v);
-                ved->blocked--;
+                (*ved)->blocked--;
             }
         }
 
-        destroy_ring_for_helper(&ved->ring_page,
-                                ved->ring_pg_struct);
+        destroy_ring_for_helper(&(*ved)->ring_page,
+                                (*ved)->ring_pg_struct);
 
         vm_event_cleanup_domain(d);
 
-        vm_event_ring_unlock(ved);
+        vm_event_ring_unlock(*ved);
     }
 
+    xfree(*ved);
+    *ved = NULL;
+
     return 0;
 }
 
@@ -267,6 +277,9 @@ void vm_event_put_request(struct domain *d,
     RING_IDX req_prod;
     struct vcpu *curr = current;
 
+    if ( !vm_event_check_ring(ved) )
+        return;
+
     if ( curr->domain != d )
     {
         req->flags |= VM_EVENT_FLAG_FOREIGN;
@@ -434,6 +447,9 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
 
 void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
 {
+    if ( !vm_event_check_ring(ved) )
+        return;
+
     vm_event_ring_lock(ved);
     vm_event_release_slot(d, ved);
     vm_event_ring_unlock(ved);
@@ -482,7 +498,7 @@ static int vm_event_wait_slot(struct vm_event_domain *ved)
 
 bool_t vm_event_check_ring(struct vm_event_domain *ved)
 {
-    return (ved->ring_page != NULL);
+    return (ved != NULL && ved->ring_page != NULL);
 }
 
 /*
@@ -500,6 +516,9 @@ bool_t vm_event_check_ring(struct vm_event_domain *ved)
 int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
                           bool_t allow_sleep)
 {
+    if ( !vm_event_check_ring(ved) )
+        return -EOPNOTSUPP;
+
     if ( (current->domain == d) && allow_sleep )
         return vm_event_wait_slot(ved);
     else
@@ -510,24 +529,30 @@ int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_paging_notification(struct vcpu *v, unsigned int port)
 {
-    if ( likely(v->domain->vm_event->paging.ring_page != NULL) )
-        vm_event_resume(v->domain, &v->domain->vm_event->paging);
+    struct domain *domain = v->domain;
+
+    if ( likely(vm_event_check_ring(domain->vm_event_paging)) )
+        vm_event_resume(domain, domain->vm_event_paging);
 }
 #endif
 
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void monitor_notification(struct vcpu *v, unsigned int port)
 {
-    if ( likely(v->domain->vm_event->monitor.ring_page != NULL) )
-        vm_event_resume(v->domain, &v->domain->vm_event->monitor);
+    struct domain *domain = v->domain;
+
+    if ( likely(vm_event_check_ring(domain->vm_event_monitor)) )
+        vm_event_resume(domain, domain->vm_event_monitor);
 }
 
 #ifdef CONFIG_HAS_MEM_SHARING
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_sharing_notification(struct vcpu *v, unsigned int port)
 {
-    if ( likely(v->domain->vm_event->share.ring_page != NULL) )
-        vm_event_resume(v->domain, &v->domain->vm_event->share);
+    struct domain *domain = v->domain;
+
+    if ( likely(vm_event_check_ring(domain->vm_event_share)) )
+        vm_event_resume(domain, domain->vm_event_share);
 }
 #endif
 
@@ -535,7 +560,7 @@ static void mem_sharing_notification(struct vcpu *v, unsigned int port)
 void vm_event_cleanup(struct domain *d)
 {
 #ifdef CONFIG_HAS_MEM_PAGING
-    if ( d->vm_event->paging.ring_page )
+    if ( vm_event_check_ring(d->vm_event_paging) )
     {
         /* Destroying the wait queue head means waking up all
          * queued vcpus. This will drain the list, allowing
@@ -544,20 +569,20 @@ void vm_event_cleanup(struct domain *d)
          * Finally, because this code path involves previously
          * pausing the domain (domain_kill), unpausing the
          * vcpus causes no harm. */
-        destroy_waitqueue_head(&d->vm_event->paging.wq);
-        (void)vm_event_disable(d, &d->vm_event->paging);
+        destroy_waitqueue_head(&d->vm_event_paging->wq);
+        (void)vm_event_disable(d, &d->vm_event_paging);
     }
 #endif
-    if ( d->vm_event->monitor.ring_page )
+    if ( vm_event_check_ring(d->vm_event_monitor) )
     {
-        destroy_waitqueue_head(&d->vm_event->monitor.wq);
-        (void)vm_event_disable(d, &d->vm_event->monitor);
+        destroy_waitqueue_head(&d->vm_event_monitor->wq);
+        (void)vm_event_disable(d, &d->vm_event_monitor);
     }
 #ifdef CONFIG_HAS_MEM_SHARING
-    if ( d->vm_event->share.ring_page )
+    if ( vm_event_check_ring(d->vm_event_share) )
     {
-        destroy_waitqueue_head(&d->vm_event->share.wq);
-        (void)vm_event_disable(d, &d->vm_event->share);
+        destroy_waitqueue_head(&d->vm_event_share->wq);
+        (void)vm_event_disable(d, &d->vm_event_share);
     }
 #endif
 }
@@ -599,7 +624,6 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
 #ifdef CONFIG_HAS_MEM_PAGING
     case XEN_DOMCTL_VM_EVENT_OP_PAGING:
     {
-        struct vm_event_domain *ved = &d->vm_event->paging;
         rc = -EINVAL;
 
         switch( vec->op )
@@ -629,24 +653,24 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
                 break;
 
             /* domain_pause() not required here, see XSA-99 */
-            rc = vm_event_enable(d, vec, ved, _VPF_mem_paging,
+            rc = vm_event_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
                                  HVM_PARAM_PAGING_RING_PFN,
                                  mem_paging_notification);
         }
         break;
 
         case XEN_VM_EVENT_DISABLE:
-            if ( ved->ring_page )
+            if ( vm_event_check_ring(d->vm_event_paging) )
             {
                 domain_pause(d);
-                rc = vm_event_disable(d, ved);
+                rc = vm_event_disable(d, &d->vm_event_paging);
                 domain_unpause(d);
             }
             break;
 
         case XEN_VM_EVENT_RESUME:
-            if ( ved->ring_page )
-                vm_event_resume(d, ved);
+            if ( vm_event_check_ring(d->vm_event_paging) )
+                vm_event_resume(d, d->vm_event_paging);
             else
                 rc = -ENODEV;
             break;
@@ -661,7 +685,6 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
 
     case XEN_DOMCTL_VM_EVENT_OP_MONITOR:
     {
-        struct vm_event_domain *ved = &d->vm_event->monitor;
         rc = -EINVAL;
 
         switch( vec->op )
@@ -671,24 +694,24 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
             rc = arch_monitor_init_domain(d);
             if ( rc )
                 break;
-            rc = vm_event_enable(d, vec, ved, _VPF_mem_access,
+            rc = vm_event_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
                                  HVM_PARAM_MONITOR_RING_PFN,
                                  monitor_notification);
             break;
 
         case XEN_VM_EVENT_DISABLE:
-            if ( ved->ring_page )
+            if ( vm_event_check_ring(d->vm_event_monitor) )
             {
                 domain_pause(d);
-                rc = vm_event_disable(d, ved);
+                rc = vm_event_disable(d, &d->vm_event_monitor);
                 arch_monitor_cleanup_domain(d);
                 domain_unpause(d);
             }
             break;
 
         case XEN_VM_EVENT_RESUME:
-            if ( ved->ring_page )
-                vm_event_resume(d, ved);
+            if ( vm_event_check_ring(d->vm_event_monitor) )
+                vm_event_resume(d, d->vm_event_monitor);
             else
                 rc = -ENODEV;
             break;
@@ -703,7 +726,6 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
 #ifdef CONFIG_HAS_MEM_SHARING
     case XEN_DOMCTL_VM_EVENT_OP_SHARING:
     {
-        struct vm_event_domain *ved = &d->vm_event->share;
         rc = -EINVAL;
 
         switch( vec->op )
@@ -720,23 +742,23 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
                 break;
 
             /* domain_pause() not required here, see XSA-99 */
-            rc = vm_event_enable(d, vec, ved, _VPF_mem_sharing,
+            rc = vm_event_enable(d, vec, &d->vm_event_share, _VPF_mem_sharing,
                                  HVM_PARAM_SHARING_RING_PFN,
                                  mem_sharing_notification);
             break;
 
         case XEN_VM_EVENT_DISABLE:
-            if ( ved->ring_page )
+            if ( vm_event_check_ring(d->vm_event_share) )
             {
                 domain_pause(d);
-                rc = vm_event_disable(d, ved);
+                rc = vm_event_disable(d, &d->vm_event_share);
                 domain_unpause(d);
             }
             break;
 
         case XEN_VM_EVENT_RESUME:
-            if ( ved->ring_page )
-                vm_event_resume(d, ved);
+            if ( vm_event_check_ring(d->vm_event_share) )
+                vm_event_resume(d, d->vm_event_share);
             else
                 rc = -ENODEV;
             break;
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 27bdb71..391c473 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -21,6 +21,7 @@
 #include <xen/prefetch.h>
 #include <xen/iommu.h>
 #include <xen/irq.h>
+#include <xen/vm_event.h>
 #include <asm/hvm/irq.h>
 #include <xen/delay.h>
 #include <xen/keyhandler.h>
@@ -1365,7 +1366,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
      * enabled for this domain */
     if ( unlikely(!need_iommu(d) &&
             (d->arch.hvm_domain.mem_sharing_enabled ||
-             d->vm_event->paging.ring_page ||
+             vm_event_check_ring(d->vm_event_paging) ||
              p2m_get_hostp2m(d)->global_logdirty)) )
         return -EXDEV;
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 6673b27..e48487c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -295,16 +295,6 @@ struct vm_event_domain
     unsigned int last_vcpu_wake_up;
 };
 
-struct vm_event_per_domain
-{
-    /* Memory sharing support */
-    struct vm_event_domain share;
-    /* Memory paging support */
-    struct vm_event_domain paging;
-    /* VM event monitor support */
-    struct vm_event_domain monitor;
-};
-
 struct evtchn_port_ops;
 
 enum guest_type {
@@ -464,7 +454,13 @@ struct domain
     struct lock_profile_qhead profile_head;
 
     /* Various vm_events */
-    struct vm_event_per_domain *vm_event;
+
+    /* Memory sharing support */
+    struct vm_event_domain *vm_event_share;
+    /* Memory paging support */
+    struct vm_event_domain *vm_event_paging;
+    /* VM event monitor support */
+    struct vm_event_domain *vm_event_monitor;
 
     /*
      * Can be specified by the user. If that is not the case, it is
-- 
2.7.4



* Re: [PATCH v5] common/vm_event: Initialize vm_event lists on domain creation
From: Tamas K Lengyel @ 2017-08-29 15:14 UTC
  To: Alexandru Isaila
  Cc: Tim Deegan, Stefano Stabellini, wei.liu2, Razvan Cojocaru,
	George Dunlap, Andrew Cooper, Ian Jackson, Xen-devel,
	Julien Grall, Jan Beulich

On Tue, Aug 29, 2017 at 8:17 AM, Alexandru Isaila
<aisaila@bitdefender.com> wrote:
> The patch splits the vm_event structure into three: vm_event_share,
> vm_event_paging and vm_event_monitor. The allocation of each
> structure is moved into vm_event_enable so that it is
> allocated/initialized only when needed, and freed in vm_event_disable.
>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
>
> ---
> Changes since V4:
>         - Replaced all NULL checks with vm_event_check_ring
>
> Note: Not tested on arm; compiled on both arm and x86.

This looks good to me as is but could you at least check with the
xen-access test tool on x86 that it still works as expected?

Thanks,
Tamas


* Re: [PATCH v5] common/vm_event: Initialize vm_event lists on domain creation
From: Alexandru Stefan ISAILA @ 2017-08-29 15:48 UTC
  To: tamas
  Cc: tim, sstabellini, wei.liu2, rcojocaru, George.Dunlap,
	andrew.cooper3, ian.jackson, xen-devel, julien.grall, jbeulich

On Ma, 2017-08-29 at 09:14 -0600, Tamas K Lengyel wrote:
> On Tue, Aug 29, 2017 at 8:17 AM, Alexandru Isaila
> <aisaila@bitdefender.com> wrote:
> >
> > The patch splits the vm_event structure into three: vm_event_share,
> > vm_event_paging and vm_event_monitor. The allocation of each
> > structure is moved into vm_event_enable so that it is
> > allocated/initialized only when needed, and freed in vm_event_disable.
> >
> > Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> >
> > ---
> > Changes since V4:
> >         - Replaced all NULL checks with vm_event_check_ring
> >
> > Note: Not tested on arm; compiled on both arm and x86.
> This looks good to me as is but could you at least check with the
> xen-access test tool on x86 that it still works as expected?
>
>
I've just tested it and it works fine.


Thanks,
Alex


* Re: [PATCH v5] common/vm_event: Initialize vm_event lists on domain creation
From: Wei Liu @ 2017-08-29 15:59 UTC
  To: Alexandru Isaila
  Cc: tim, tamas, wei.liu2, rcojocaru, George.Dunlap, andrew.cooper3,
	ian.jackson, xen-devel, julien.grall, sstabellini, jbeulich

On Tue, Aug 29, 2017 at 05:17:05PM +0300, Alexandru Isaila wrote:
[...]
>  
>  /**
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index b22aacc..30f507b 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -363,9 +363,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
>          poolid = 0;
>  
>          err = -ENOMEM;
> -        d->vm_event = xzalloc(struct vm_event_per_domain);
> -        if ( !d->vm_event )
> -            goto fail;
>  
>          d->pbuf = xzalloc_array(char, DOMAIN_PBUF_SIZE);
>          if ( !d->pbuf )
> @@ -403,7 +400,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
>      if ( hardware_domain == d )
>          hardware_domain = old_hwdom;
>      atomic_set(&d->refcnt, DOMAIN_DESTROYED);
> -    xfree(d->vm_event);
>      xfree(d->pbuf);
>      if ( init_status & INIT_arch )
>          arch_domain_destroy(d);
> @@ -820,7 +816,14 @@ static void complete_domain_destroy(struct rcu_head *head)
>      free_xenoprof_pages(d);
>  #endif
>  
> -    xfree(d->vm_event);
> +#ifdef CONFIG_HAS_MEM_PAGING
> +    xfree(d->vm_event_paging);
> +#endif
> +    xfree(d->vm_event_monitor);

Why do you unconditionally xfree vm_event_monitor while you don't
unconditionally allocate it?

Not that this is strictly a bug but this deviates from the original
behaviour.

> +#ifdef CONFIG_HAS_MEM_SHARING
> +    xfree(d->vm_event_share);
> +#endif
> +
>      xfree(d->pbuf);
>  
>      for ( i = d->max_vcpus - 1; i >= 0; i-- )
> diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
> index 19f63bb..1bf6824 100644
> --- a/xen/common/mem_access.c
> +++ b/xen/common/mem_access.c
> @@ -52,7 +52,7 @@ int mem_access_memop(unsigned long cmd,
>          goto out;
>  
>      rc = -ENODEV;
> -    if ( unlikely(!d->vm_event->monitor.ring_page) )
> +    if ( unlikely(!vm_event_check_ring(d->vm_event_monitor)) )
>          goto out;
>  
>      switch ( mao.op )
> diff --git a/xen/common/monitor.c b/xen/common/monitor.c
> index 451f42f..70d38d4 100644
> --- a/xen/common/monitor.c
> +++ b/xen/common/monitor.c
> @@ -92,7 +92,7 @@ int monitor_traps(struct vcpu *v, bool_t sync, vm_event_request_t *req)
>      int rc;
>      struct domain *d = v->domain;
>  
> -    rc = vm_event_claim_slot(d, &d->vm_event->monitor);
> +    rc = vm_event_claim_slot(d, d->vm_event_monitor);
>      switch ( rc )
>      {
>      case 0:
> @@ -123,7 +123,7 @@ int monitor_traps(struct vcpu *v, bool_t sync, vm_event_request_t *req)
>      }
>  
>      vm_event_fill_regs(req);
> -    vm_event_put_request(d, &d->vm_event->monitor, req);
> +    vm_event_put_request(d, d->vm_event_monitor, req);
>  
>      return rc;
>  }
> diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
> index 9291db6..a9b47e2 100644
> --- a/xen/common/vm_event.c
> +++ b/xen/common/vm_event.c
> @@ -42,7 +42,7 @@
>  static int vm_event_enable(
>      struct domain *d,
>      xen_domctl_vm_event_op_t *vec,
> -    struct vm_event_domain *ved,
> +    struct vm_event_domain **ved,
>      int pause_flag,
>      int param,
>      xen_event_channel_notification_t notification_fn)
> @@ -50,32 +50,37 @@ static int vm_event_enable(
>      int rc;
>      unsigned long ring_gfn = d->arch.hvm_domain.params[param];
>  
> +    if ( !*ved )
> +        (*ved) = xzalloc(struct vm_event_domain);

Unnecessary parentheses.

> +    if ( !*ved )
> +        return -ENOMEM;
> +
>      /* Only one helper at a time. If the helper crashed,
>       * the ring is in an undefined state and so is the guest.
>       */
> -    if ( ved->ring_page )
> -        return -EBUSY;
> +    if ( (*ved)->ring_page )
> +        return -EBUSY;
>  
>      /* The parameter defaults to zero, and it should be
>       * set to something */
>      if ( ring_gfn == 0 )
>          return -ENOSYS;
>  
> -    vm_event_ring_lock_init(ved);
> -    vm_event_ring_lock(ved);
> +    vm_event_ring_lock_init(*ved);
> +    vm_event_ring_lock(*ved);
>  
>      rc = vm_event_init_domain(d);
>  
>      if ( rc < 0 )
>          goto err;
>  
> -    rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
> -                                    &ved->ring_page);
> +    rc = prepare_ring_for_helper(d, ring_gfn, &(*ved)->ring_pg_struct,
> +                                    &(*ved)->ring_page);

Since you are modifying this, please align the second line to "d".

>      if ( rc < 0 )
>          goto err;
>  
>      /* Set the number of currently blocked vCPUs to 0. */
> -    ved->blocked = 0;
> +    (*ved)->blocked = 0;
>  
>      /* Allocate event channel */
>      rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
> @@ -83,26 +88,28 @@ static int vm_event_enable(
>      if ( rc < 0 )
>          goto err;
>  
> -    ved->xen_port = vec->port = rc;
> +    (*ved)->xen_port = vec->port = rc;
>  
>      /* Prepare ring buffer */
> -    FRONT_RING_INIT(&ved->front_ring,
> -                    (vm_event_sring_t *)ved->ring_page,
> +    FRONT_RING_INIT(&(*ved)->front_ring,
> +                    (vm_event_sring_t *)(*ved)->ring_page,
>                      PAGE_SIZE);
>  
>      /* Save the pause flag for this particular ring. */
> -    ved->pause_flag = pause_flag;
> +    (*ved)->pause_flag = pause_flag;
>  
>      /* Initialize the last-chance wait queue. */
> -    init_waitqueue_head(&ved->wq);
> +    init_waitqueue_head(&(*ved)->wq);
>  
> -    vm_event_ring_unlock(ved);
> +    vm_event_ring_unlock((*ved));

Unnecessary parentheses.

>      return 0;
>  
>   err:
> -    destroy_ring_for_helper(&ved->ring_page,
> -                            ved->ring_pg_struct);
> -    vm_event_ring_unlock(ved);
> +    destroy_ring_for_helper(&(*ved)->ring_page,
> +                            (*ved)->ring_pg_struct);
> +    vm_event_ring_unlock((*ved));
> +    xfree(*ved);
> +    *ved = NULL;
>  
>      return rc;
>  }
> @@ -187,41 +194,44 @@ void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
>          vm_event_wake_blocked(d, ved);
>  }
>  
> -static int vm_event_disable(struct domain *d, struct vm_event_domain *ved)
> +static int vm_event_disable(struct domain *d, struct vm_event_domain **ved)
>  {

A lot of the code churn here and above could be avoided by changing ved
in the parameter list to something else (vedp?) and having a local
variable called

    struct vm_event_domain *ved = *vedp;

(I don't feel very strongly about this though)
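
Something along these lines (an illustrative sketch of the suggested
shape only, with the existing function body elided):

    static int vm_event_disable(struct domain *d,
                                struct vm_event_domain **vedp)
    {
        struct vm_event_domain *ved = *vedp;

        if ( vm_event_check_ring(ved) )
        {
            /* ... existing body, using plain 'ved' throughout ... */
        }

        xfree(ved);
        *vedp = NULL;

        return 0;
    }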

> -    if ( ved->ring_page )
> +    if ( vm_event_check_ring(*ved) )
>      {
>          struct vcpu *v;
>  
> -        vm_event_ring_lock(ved);
> +        vm_event_ring_lock(*ved);
>  
> -        if ( !list_empty(&ved->wq.list) )
> +        if ( !list_empty(&(*ved)->wq.list) )
>          {
> -            vm_event_ring_unlock(ved);
> +            vm_event_ring_unlock(*ved);
>              return -EBUSY;
>          }
>  
>          /* Free domU's event channel and leave the other one unbound */
> -        free_xen_event_channel(d, ved->xen_port);
> +        free_xen_event_channel(d, (*ved)->xen_port);
>  
>          /* Unblock all vCPUs */
>          for_each_vcpu ( d, v )
>          {
> -            if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
> +            if ( test_and_clear_bit((*ved)->pause_flag, &v->pause_flags) )
>              {
>                  vcpu_unpause(v);
> -                ved->blocked--;
> +                (*ved)->blocked--;
>              }
>          }
>  
> -        destroy_ring_for_helper(&ved->ring_page,
> -                                ved->ring_pg_struct);
> +        destroy_ring_for_helper(&(*ved)->ring_page,
> +                                (*ved)->ring_pg_struct);
>  
>          vm_event_cleanup_domain(d);
>  
> -        vm_event_ring_unlock(ved);
> +        vm_event_ring_unlock(*ved);
>      }
>  
> +    xfree(*ved);
> +    *ved = NULL;
> +
>      return 0;
>  }
>  
> @@ -267,6 +277,9 @@ void vm_event_put_request(struct domain *d,
>      RING_IDX req_prod;
>      struct vcpu *curr = current;
>  
> +    if ( !vm_event_check_ring(ved) )
> +        return;
> +
>      if ( curr->domain != d )
>      {
>          req->flags |= VM_EVENT_FLAG_FOREIGN;
> @@ -434,6 +447,9 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
>  
>  void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
>  {
> +    if ( !vm_event_check_ring(ved) )
> +        return;
> +
>      vm_event_ring_lock(ved);
>      vm_event_release_slot(d, ved);
>      vm_event_ring_unlock(ved);
> @@ -482,7 +498,7 @@ static int vm_event_wait_slot(struct vm_event_domain *ved)
>  
>  bool_t vm_event_check_ring(struct vm_event_domain *ved)
>  {
> -    return (ved->ring_page != NULL);
> +    return (ved != NULL && ved->ring_page != NULL);

ved && ved->ring_page

>  }
>  
>  /*
[...]
> diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
> index 27bdb71..391c473 100644
> --- a/xen/drivers/passthrough/pci.c
> +++ b/xen/drivers/passthrough/pci.c
> @@ -21,6 +21,7 @@
>  #include <xen/prefetch.h>
>  #include <xen/iommu.h>
>  #include <xen/irq.h>
> +#include <xen/vm_event.h>
>  #include <asm/hvm/irq.h>
>  #include <xen/delay.h>
>  #include <xen/keyhandler.h>
> @@ -1365,7 +1366,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
>       * enabled for this domain */
>      if ( unlikely(!need_iommu(d) &&
>              (d->arch.hvm_domain.mem_sharing_enabled ||
> -             d->vm_event->paging.ring_page ||
> +             vm_event_check_ring(d->vm_event_paging) ||
>               p2m_get_hostp2m(d)->global_logdirty)) )
>          return -EXDEV;
>  
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 6673b27..e48487c 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -295,16 +295,6 @@ struct vm_event_domain
>      unsigned int last_vcpu_wake_up;
>  };
>  
> -struct vm_event_per_domain
> -{
> -    /* Memory sharing support */
> -    struct vm_event_domain share;
> -    /* Memory paging support */
> -    struct vm_event_domain paging;
> -    /* VM event monitor support */
> -    struct vm_event_domain monitor;
> -};
> -
>  struct evtchn_port_ops;
>  
>  enum guest_type {
> @@ -464,7 +454,13 @@ struct domain
>      struct lock_profile_qhead profile_head;
>  
>      /* Various vm_events */
> -    struct vm_event_per_domain *vm_event;
> +
> +    /* Memory sharing support */
> +    struct vm_event_domain *vm_event_share;
> +    /* Memory paging support */
> +    struct vm_event_domain *vm_event_paging;
> +    /* VM event monitor support */
> +    struct vm_event_domain *vm_event_monitor;

Please consider placing them inside CONFIG options like you did above.
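
I.e. something like the following sketch, reusing the Kconfig symbols
already used for the frees in domain.c above (monitor stays
unconditional, matching the patch):

    /* Various vm_events */

    #ifdef CONFIG_HAS_MEM_SHARING
        /* Memory sharing support */
        struct vm_event_domain *vm_event_share;
    #endif
    #ifdef CONFIG_HAS_MEM_PAGING
        /* Memory paging support */
        struct vm_event_domain *vm_event_paging;
    #endif
        /* VM event monitor support */
        struct vm_event_domain *vm_event_monitor;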


* Re: [PATCH v5] common/vm_event: Initialize vm_event lists on domain creation
From: Tamas K Lengyel @ 2017-08-29 17:38 UTC
  To: Wei Liu
  Cc: Tim Deegan, Stefano Stabellini, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Ian Jackson, Xen-devel, Julien Grall, Jan Beulich,
	Alexandru Isaila

On Tue, Aug 29, 2017 at 9:59 AM, Wei Liu <wei.liu2@citrix.com> wrote:
> On Tue, Aug 29, 2017 at 05:17:05PM +0300, Alexandru Isaila wrote:
> [...]
>>
>>  /**
>> diff --git a/xen/common/domain.c b/xen/common/domain.c
>> index b22aacc..30f507b 100644
>> --- a/xen/common/domain.c
>> +++ b/xen/common/domain.c
>> @@ -363,9 +363,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
>>          poolid = 0;
>>
>>          err = -ENOMEM;
>> -        d->vm_event = xzalloc(struct vm_event_per_domain);
>> -        if ( !d->vm_event )
>> -            goto fail;
>>
>>          d->pbuf = xzalloc_array(char, DOMAIN_PBUF_SIZE);
>>          if ( !d->pbuf )
>> @@ -403,7 +400,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
>>      if ( hardware_domain == d )
>>          hardware_domain = old_hwdom;
>>      atomic_set(&d->refcnt, DOMAIN_DESTROYED);
>> -    xfree(d->vm_event);
>>      xfree(d->pbuf);
>>      if ( init_status & INIT_arch )
>>          arch_domain_destroy(d);
>> @@ -820,7 +816,14 @@ static void complete_domain_destroy(struct rcu_head *head)
>>      free_xenoprof_pages(d);
>>  #endif
>>
>> -    xfree(d->vm_event);
>> +#ifdef CONFIG_HAS_MEM_PAGING
>> +    xfree(d->vm_event_paging);
>> +#endif
>> +    xfree(d->vm_event_monitor);
>
> Why do you unconditionally xfree vm_event_monitor while you don't
> unconditionally allocate it?
>
> Not that this is strictly a bug but this deviates from the original
> behaviour.

Isn't xfree NULL safe?


* Re: [PATCH v5] common/vm_event: Initialize vm_event lists on domain creation
From: Wei Liu @ 2017-08-29 17:41 UTC
  To: Tamas K Lengyel
  Cc: Tim Deegan, Stefano Stabellini, Wei Liu, Razvan Cojocaru,
	George Dunlap, Andrew Cooper, Ian Jackson, Xen-devel,
	Julien Grall, Jan Beulich, Alexandru Isaila

On Tue, Aug 29, 2017 at 11:38:12AM -0600, Tamas K Lengyel wrote:
> On Tue, Aug 29, 2017 at 9:59 AM, Wei Liu <wei.liu2@citrix.com> wrote:
> > On Tue, Aug 29, 2017 at 05:17:05PM +0300, Alexandru Isaila wrote:
> > [...]
> >>
> >>  /**
> >> diff --git a/xen/common/domain.c b/xen/common/domain.c
> >> index b22aacc..30f507b 100644
> >> --- a/xen/common/domain.c
> >> +++ b/xen/common/domain.c
> >> @@ -363,9 +363,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
> >>          poolid = 0;
> >>
> >>          err = -ENOMEM;
> >> -        d->vm_event = xzalloc(struct vm_event_per_domain);
> >> -        if ( !d->vm_event )
> >> -            goto fail;
> >>
> >>          d->pbuf = xzalloc_array(char, DOMAIN_PBUF_SIZE);
> >>          if ( !d->pbuf )
> >> @@ -403,7 +400,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
> >>      if ( hardware_domain == d )
> >>          hardware_domain = old_hwdom;
> >>      atomic_set(&d->refcnt, DOMAIN_DESTROYED);
> >> -    xfree(d->vm_event);
> >>      xfree(d->pbuf);
> >>      if ( init_status & INIT_arch )
> >>          arch_domain_destroy(d);
> >> @@ -820,7 +816,14 @@ static void complete_domain_destroy(struct rcu_head *head)
> >>      free_xenoprof_pages(d);
> >>  #endif
> >>
> >> -    xfree(d->vm_event);
> >> +#ifdef CONFIG_HAS_MEM_PAGING
> >> +    xfree(d->vm_event_paging);
> >> +#endif
> >> +    xfree(d->vm_event_monitor);
> >
> > Why do you unconditionally xfree vm_event_monitor while you don't
> > unconditionally allocate it?
> >
> > Not that this is strictly a bug but this deviates from the original
> > behaviour.
> 
> Isn't xfree NULL safe?

Yes, it is.
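
(For reference, a sketch of the contract only, not Xen's actual
allocator implementation:)

    /* xfree() follows the usual free() convention: passing NULL
     * is a harmless no-op, so no guard is needed at call sites. */
    void xfree(void *p)
    {
        if ( p == NULL )
            return;
        /* ... return the allocation to the pool ... */
    }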


* Re: [PATCH v5] common/vm_event: Initialize vm_event lists on domain creation
From: Razvan Cojocaru @ 2017-08-30  7:06 UTC
  To: Wei Liu, Tamas K Lengyel
  Cc: Tim Deegan, Stefano Stabellini, George Dunlap, Andrew Cooper,
	Ian Jackson, Xen-devel, Julien Grall, Jan Beulich,
	Alexandru Isaila

On 08/29/2017 08:41 PM, Wei Liu wrote:
>>>> -    xfree(d->vm_event);
>>>> +#ifdef CONFIG_HAS_MEM_PAGING
>>>> +    xfree(d->vm_event_paging);
>>>> +#endif
>>>> +    xfree(d->vm_event_monitor);
>>> Why do you unconditionally xfree these vm_event_monitor while you don't
>>> unconditionally allocate them?
>>>
>>> Not that this is strictly a bug but this deviates from the original
>>> behaviour.
>> Isn't xfree NULL safe?
> Yes, it is.

That was my suggestion: since xfree() can handle NULL, I thought the
patch would have minimal changes and be easier to review if we omitted
the unnecessary lines.


Thanks,
Razvan


* Re: [PATCH v5] common/vm_event: Initialize vm_event lists on domain creation
From: Alexandru Stefan ISAILA @ 2017-08-30  8:29 UTC
  To: wei.liu2
  Cc: tim, sstabellini, rcojocaru, George.Dunlap, andrew.cooper3,
	ian.jackson, xen-devel, julien.grall, tamas, jbeulich


> >
> > -static int vm_event_disable(struct domain *d, struct
> > vm_event_domain *ved)
> > +static int vm_event_disable(struct domain *d, struct
> > vm_event_domain **ved)
> >  {
> > A lot of the code churn here and above could be avoided by changing
> > ved in the parameter list to something else (vedp?) and having a
> > local variable called
>
>     struct vm_event_domain *ved = *vedp;
>
> (I don't feel very strongly about this though)
>
I don't think it is necessary, but the decision is up to the
maintainers.


Regards,
Alex

