From: Alexandru Isaila <aisaila@bitdefender.com>
To: xen-devel@lists.xen.org
Cc: tamas@tklengyel.com, wei.liu2@citrix.com,
	rcojocaru@bitdefender.com, George.Dunlap@eu.citrix.com,
	andrew.cooper3@citrix.com, ian.jackson@eu.citrix.com,
	tim@xen.org, julien.grall@arm.com, sstabellini@kernel.org,
	jbeulich@suse.com, Alexandru Isaila <aisaila@bitdefender.com>
Subject: [PATCH v4] common/vm_event: Initialize vm_event lists on domain creation
Date: Mon, 28 Aug 2017 13:54:07 +0300
Message-ID: <1503917647-24030-1-git-send-email-aisaila@bitdefender.com>

This patch splits vm_event into three structures: vm_event_share,
vm_event_paging and vm_event_monitor. The allocation of each
structure is moved to vm_event_enable so that it is only
allocated/initialized when needed, and freed in vm_event_disable.

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V3:
	- Moved the d->vm_event_paging check into the if condition
	  below in assign_device()

Note: Not tested on ARM; compiled on both ARM and x86.
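
For context (editor's illustration): the interface change is easiest to
see in isolation. Below is a minimal standalone C sketch of the
allocate-on-enable / free-on-disable pattern this patch introduces;
all names are hypothetical stand-ins (calloc/free stand in for Xen's
xzalloc/xfree), so treat it as illustrative only, not code from this
series.

    #include <stdlib.h>

    struct vm_event_domain_sketch {
        void *ring_page;             /* non-NULL once a ring is mapped */
    };

    /* The double pointer lets the callee update the per-domain field
     * (e.g. d->vm_event_monitor) in place on first enable. */
    static int sketch_enable(struct vm_event_domain_sketch **ved)
    {
        if ( !*ved )
        {
            *ved = calloc(1, sizeof(**ved));   /* xzalloc in Xen */
            if ( !*ved )
                return -1;                     /* -ENOMEM */
        }
        /* ... map the ring page, bind the event channel, etc. ... */
        return 0;
    }

    /* Free and clear the field so later checks see "not enabled". */
    static void sketch_disable(struct vm_event_domain_sketch **ved)
    {
        /* ... tear down the ring if (*ved)->ring_page is set ... */
        free(*ved);                            /* xfree in Xen */
        *ved = NULL;
    }

Consumers must now guard against the field itself being NULL before
looking at ring_page, which is why checks such as
"!d->vm_event->monitor.ring_page" become
"!d->vm_event_monitor || !d->vm_event_monitor->ring_page" throughout.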
---
 xen/arch/arm/mem_access.c     |   2 +-
 xen/arch/x86/mm/mem_access.c  |   2 +-
 xen/arch/x86/mm/mem_paging.c  |   2 +-
 xen/arch/x86/mm/mem_sharing.c |   4 +-
 xen/arch/x86/mm/p2m.c         |  10 +--
 xen/common/domain.c           |  13 ++--
 xen/common/mem_access.c       |   2 +-
 xen/common/monitor.c          |   4 +-
 xen/common/vm_event.c         | 156 +++++++++++++++++++++++++-----------------
 xen/drivers/passthrough/pci.c |   2 +-
 xen/include/xen/sched.h       |  18 ++---
 11 files changed, 124 insertions(+), 91 deletions(-)

diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
index e0888bb..a7f0cae 100644
--- a/xen/arch/arm/mem_access.c
+++ b/xen/arch/arm/mem_access.c
@@ -256,7 +256,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
     }
 
     /* Otherwise, check if there is a vm_event monitor subscriber */
-    if ( !vm_event_check_ring(&v->domain->vm_event->monitor) )
+    if ( !vm_event_check_ring(v->domain->vm_event_monitor) )
     {
         /* No listener */
         if ( p2m->access_required )
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 5adaf6d..414e38f 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -179,7 +179,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     gfn_unlock(p2m, gfn, 0);
 
     /* Otherwise, check if there is a memory event listener, and send the message along */
-    if ( !vm_event_check_ring(&d->vm_event->monitor) || !req_ptr )
+    if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr )
     {
         /* No listener */
         if ( p2m->access_required )
diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index a049e0d..20214ac 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -43,7 +43,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
         goto out;
 
     rc = -ENODEV;
-    if ( unlikely(!d->vm_event->paging.ring_page) )
+    if ( !d->vm_event_paging || unlikely(!d->vm_event_paging->ring_page) )
         goto out;
 
     switch( mpo.op )
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 1f20ce7..12fb9cc 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -563,7 +563,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
     };
 
     if ( (rc = __vm_event_claim_slot(d, 
-                        &d->vm_event->share, allow_sleep)) < 0 )
+                        d->vm_event_share, allow_sleep)) < 0 )
         return rc;
 
     if ( v->domain == d )
@@ -572,7 +572,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
         vm_event_vcpu_pause(v);
     }
 
-    vm_event_put_request(d, &d->vm_event->share, &req);
+    vm_event_put_request(d, d->vm_event_share, &req);
 
     return 0;
 }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e8a57d1..6ae23be 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1454,7 +1454,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
      * correctness of the guest execution at this point.  If this is the only
      * page that happens to be paged-out, we'll be okay..  but it's likely the
      * guest will crash shortly anyways. */
-    int rc = vm_event_claim_slot(d, &d->vm_event->paging);
+    int rc = vm_event_claim_slot(d, d->vm_event_paging);
     if ( rc < 0 )
         return;
 
@@ -1468,7 +1468,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
         /* Evict will fail now, tag this request for pager */
         req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
 
-    vm_event_put_request(d, &d->vm_event->paging, &req);
+    vm_event_put_request(d, d->vm_event_paging, &req);
 }
 
 /**
@@ -1505,7 +1505,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* We're paging. There should be a ring */
-    int rc = vm_event_claim_slot(d, &d->vm_event->paging);
+    int rc = vm_event_claim_slot(d, d->vm_event_paging);
     if ( rc == -ENOSYS )
     {
         gdprintk(XENLOG_ERR, "Domain %hu paging gfn %lx yet no ring "
@@ -1543,7 +1543,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
-        vm_event_cancel_slot(d, &d->vm_event->paging);
+        vm_event_cancel_slot(d, d->vm_event_paging);
         return;
     }
 
@@ -1551,7 +1551,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     req.u.mem_paging.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 
-    vm_event_put_request(d, &d->vm_event->paging, &req);
+    vm_event_put_request(d, d->vm_event_paging, &req);
 }
 
 /**
diff --git a/xen/common/domain.c b/xen/common/domain.c
index b22aacc..30f507b 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -363,9 +363,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
         poolid = 0;
 
         err = -ENOMEM;
-        d->vm_event = xzalloc(struct vm_event_per_domain);
-        if ( !d->vm_event )
-            goto fail;
 
         d->pbuf = xzalloc_array(char, DOMAIN_PBUF_SIZE);
         if ( !d->pbuf )
@@ -403,7 +400,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
     if ( hardware_domain == d )
         hardware_domain = old_hwdom;
     atomic_set(&d->refcnt, DOMAIN_DESTROYED);
-    xfree(d->vm_event);
     xfree(d->pbuf);
     if ( init_status & INIT_arch )
         arch_domain_destroy(d);
@@ -820,7 +816,14 @@ static void complete_domain_destroy(struct rcu_head *head)
     free_xenoprof_pages(d);
 #endif
 
-    xfree(d->vm_event);
+#ifdef CONFIG_HAS_MEM_PAGING
+    xfree(d->vm_event_paging);
+#endif
+    xfree(d->vm_event_monitor);
+#ifdef CONFIG_HAS_MEM_SHARING
+    xfree(d->vm_event_share);
+#endif
+
     xfree(d->pbuf);
 
     for ( i = d->max_vcpus - 1; i >= 0; i-- )
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 19f63bb..10ea4ae 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -52,7 +52,7 @@ int mem_access_memop(unsigned long cmd,
         goto out;
 
     rc = -ENODEV;
-    if ( unlikely(!d->vm_event->monitor.ring_page) )
+    if ( !d->vm_event_monitor || unlikely(!d->vm_event_monitor->ring_page) )
         goto out;
 
     switch ( mao.op )
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index 451f42f..70d38d4 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -92,7 +92,7 @@ int monitor_traps(struct vcpu *v, bool_t sync, vm_event_request_t *req)
     int rc;
     struct domain *d = v->domain;
 
-    rc = vm_event_claim_slot(d, &d->vm_event->monitor);
+    rc = vm_event_claim_slot(d, d->vm_event_monitor);
     switch ( rc )
     {
     case 0:
@@ -123,7 +123,7 @@ int monitor_traps(struct vcpu *v, bool_t sync, vm_event_request_t *req)
     }
 
     vm_event_fill_regs(req);
-    vm_event_put_request(d, &d->vm_event->monitor, req);
+    vm_event_put_request(d, d->vm_event_monitor, req);
 
     return rc;
 }
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 9291db6..e8bba1e 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -42,7 +42,7 @@
 static int vm_event_enable(
     struct domain *d,
     xen_domctl_vm_event_op_t *vec,
-    struct vm_event_domain *ved,
+    struct vm_event_domain **ved,
     int pause_flag,
     int param,
     xen_event_channel_notification_t notification_fn)
@@ -50,32 +50,37 @@ static int vm_event_enable(
     int rc;
     unsigned long ring_gfn = d->arch.hvm_domain.params[param];
 
+    if ( !*ved )
+        *ved = xzalloc(struct vm_event_domain);
+    if ( !*ved )
+        return -ENOMEM;
+
     /* Only one helper at a time. If the helper crashed,
      * the ring is in an undefined state and so is the guest.
      */
-    if ( ved->ring_page )
-        return -EBUSY;
+    if ( (*ved)->ring_page )
+        return -EBUSY;
 
     /* The parameter defaults to zero, and it should be
      * set to something */
     if ( ring_gfn == 0 )
         return -ENOSYS;
 
-    vm_event_ring_lock_init(ved);
-    vm_event_ring_lock(ved);
+    vm_event_ring_lock_init(*ved);
+    vm_event_ring_lock(*ved);
 
     rc = vm_event_init_domain(d);
 
     if ( rc < 0 )
         goto err;
 
-    rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
-                                    &ved->ring_page);
+    rc = prepare_ring_for_helper(d, ring_gfn, &(*ved)->ring_pg_struct,
+                                    &(*ved)->ring_page);
     if ( rc < 0 )
         goto err;
 
     /* Set the number of currently blocked vCPUs to 0. */
-    ved->blocked = 0;
+    (*ved)->blocked = 0;
 
     /* Allocate event channel */
     rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
@@ -83,26 +88,28 @@ static int vm_event_enable(
     if ( rc < 0 )
         goto err;
 
-    ved->xen_port = vec->port = rc;
+    (*ved)->xen_port = vec->port = rc;
 
     /* Prepare ring buffer */
-    FRONT_RING_INIT(&ved->front_ring,
-                    (vm_event_sring_t *)ved->ring_page,
+    FRONT_RING_INIT(&(*ved)->front_ring,
+                    (vm_event_sring_t *)(*ved)->ring_page,
                     PAGE_SIZE);
 
     /* Save the pause flag for this particular ring. */
-    ved->pause_flag = pause_flag;
+    (*ved)->pause_flag = pause_flag;
 
     /* Initialize the last-chance wait queue. */
-    init_waitqueue_head(&ved->wq);
+    init_waitqueue_head(&(*ved)->wq);
 
-    vm_event_ring_unlock(ved);
+    vm_event_ring_unlock(*ved);
     return 0;
 
  err:
-    destroy_ring_for_helper(&ved->ring_page,
-                            ved->ring_pg_struct);
-    vm_event_ring_unlock(ved);
+    destroy_ring_for_helper(&(*ved)->ring_page,
+                            (*ved)->ring_pg_struct);
+    vm_event_ring_unlock(*ved);
+    xfree(*ved);
+    *ved = NULL;
 
     return rc;
 }
@@ -187,41 +194,44 @@ void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
         vm_event_wake_blocked(d, ved);
 }
 
-static int vm_event_disable(struct domain *d, struct vm_event_domain *ved)
+static int vm_event_disable(struct domain *d, struct vm_event_domain **ved)
 {
-    if ( ved->ring_page )
+    if ( *ved && (*ved)->ring_page )
     {
         struct vcpu *v;
 
-        vm_event_ring_lock(ved);
+        vm_event_ring_lock(*ved);
 
-        if ( !list_empty(&ved->wq.list) )
+        if ( !list_empty(&(*ved)->wq.list) )
         {
-            vm_event_ring_unlock(ved);
+            vm_event_ring_unlock(*ved);
             return -EBUSY;
         }
 
         /* Free domU's event channel and leave the other one unbound */
-        free_xen_event_channel(d, ved->xen_port);
+        free_xen_event_channel(d, (*ved)->xen_port);
 
         /* Unblock all vCPUs */
         for_each_vcpu ( d, v )
         {
-            if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
+            if ( test_and_clear_bit((*ved)->pause_flag, &v->pause_flags) )
             {
                 vcpu_unpause(v);
-                ved->blocked--;
+                (*ved)->blocked--;
             }
         }
 
-        destroy_ring_for_helper(&ved->ring_page,
-                                ved->ring_pg_struct);
+        destroy_ring_for_helper(&(*ved)->ring_page,
+                                (*ved)->ring_pg_struct);
 
         vm_event_cleanup_domain(d);
 
-        vm_event_ring_unlock(ved);
+        vm_event_ring_unlock(*ved);
     }
 
+    xfree(*ved);
+    *ved = NULL;
+
     return 0;
 }
 
@@ -267,6 +277,9 @@ void vm_event_put_request(struct domain *d,
     RING_IDX req_prod;
     struct vcpu *curr = current;
 
+    if ( !ved )
+        return;
+
     if ( curr->domain != d )
     {
         req->flags |= VM_EVENT_FLAG_FOREIGN;
@@ -434,6 +447,9 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
 
 void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
 {
+    if ( !ved )
+        return;
+
     vm_event_ring_lock(ved);
     vm_event_release_slot(d, ved);
     vm_event_ring_unlock(ved);
@@ -500,6 +516,9 @@ bool_t vm_event_check_ring(struct vm_event_domain *ved)
 int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
                           bool_t allow_sleep)
 {
+    if ( !ved )
+        return -EOPNOTSUPP;
+
     if ( (current->domain == d) && allow_sleep )
         return vm_event_wait_slot(ved);
     else
@@ -510,24 +529,30 @@ int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_paging_notification(struct vcpu *v, unsigned int port)
 {
-    if ( likely(v->domain->vm_event->paging.ring_page != NULL) )
-        vm_event_resume(v->domain, &v->domain->vm_event->paging);
+    struct domain *domain = v->domain;
+
+    if ( likely(domain->vm_event_paging->ring_page != NULL) )
+        vm_event_resume(domain, domain->vm_event_paging);
 }
 #endif
 
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void monitor_notification(struct vcpu *v, unsigned int port)
 {
-    if ( likely(v->domain->vm_event->monitor.ring_page != NULL) )
-        vm_event_resume(v->domain, &v->domain->vm_event->monitor);
+    struct domain *domain = v->domain;
+
+    if ( likely(domain->vm_event_monitor->ring_page != NULL) )
+        vm_event_resume(domain, domain->vm_event_monitor);
 }
 
 #ifdef CONFIG_HAS_MEM_SHARING
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_sharing_notification(struct vcpu *v, unsigned int port)
 {
-    if ( likely(v->domain->vm_event->share.ring_page != NULL) )
-        vm_event_resume(v->domain, &v->domain->vm_event->share);
+    struct domain *domain = v->domain;
+
+    if ( likely(domain->vm_event_share->ring_page != NULL) )
+        vm_event_resume(domain, domain->vm_event_share);
 }
 #endif
 
@@ -535,7 +560,7 @@ static void mem_sharing_notification(struct vcpu *v, unsigned int port)
 void vm_event_cleanup(struct domain *d)
 {
 #ifdef CONFIG_HAS_MEM_PAGING
-    if ( d->vm_event->paging.ring_page )
+    if ( d->vm_event_paging && d->vm_event_paging->ring_page )
     {
         /* Destroying the wait queue head means waking up all
          * queued vcpus. This will drain the list, allowing
@@ -544,20 +569,20 @@ void vm_event_cleanup(struct domain *d)
          * Finally, because this code path involves previously
          * pausing the domain (domain_kill), unpausing the
          * vcpus causes no harm. */
-        destroy_waitqueue_head(&d->vm_event->paging.wq);
-        (void)vm_event_disable(d, &d->vm_event->paging);
+        destroy_waitqueue_head(&d->vm_event_paging->wq);
+        (void)vm_event_disable(d, &d->vm_event_paging);
     }
 #endif
-    if ( d->vm_event->monitor.ring_page )
+    if ( d->vm_event_monitor && d->vm_event_monitor->ring_page )
     {
-        destroy_waitqueue_head(&d->vm_event->monitor.wq);
-        (void)vm_event_disable(d, &d->vm_event->monitor);
+        destroy_waitqueue_head(&d->vm_event_monitor->wq);
+        (void)vm_event_disable(d, &d->vm_event_monitor);
     }
 #ifdef CONFIG_HAS_MEM_SHARING
-    if ( d->vm_event->share.ring_page )
+    if ( d->vm_event_share && d->vm_event_share->ring_page )
     {
-        destroy_waitqueue_head(&d->vm_event->share.wq);
-        (void)vm_event_disable(d, &d->vm_event->share);
+        destroy_waitqueue_head(&d->vm_event_share->wq);
+        (void)vm_event_disable(d, &d->vm_event_share);
     }
 #endif
 }
@@ -599,7 +624,6 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
 #ifdef CONFIG_HAS_MEM_PAGING
     case XEN_DOMCTL_VM_EVENT_OP_PAGING:
     {
-        struct vm_event_domain *ved = &d->vm_event->paging;
         rc = -EINVAL;
 
         switch( vec->op )
@@ -629,24 +653,28 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
                 break;
 
             /* domain_pause() not required here, see XSA-99 */
-            rc = vm_event_enable(d, vec, ved, _VPF_mem_paging,
+            rc = vm_event_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
                                  HVM_PARAM_PAGING_RING_PFN,
                                  mem_paging_notification);
         }
         break;
 
         case XEN_VM_EVENT_DISABLE:
-            if ( ved->ring_page )
+            if ( !d->vm_event_paging )
+                break;
+            if ( d->vm_event_paging->ring_page )
             {
                 domain_pause(d);
-                rc = vm_event_disable(d, ved);
+                rc = vm_event_disable(d, &d->vm_event_paging);
                 domain_unpause(d);
             }
             break;
 
         case XEN_VM_EVENT_RESUME:
-            if ( ved->ring_page )
-                vm_event_resume(d, ved);
+            if ( !d->vm_event_paging )
+                break;
+            if ( d->vm_event_paging->ring_page )
+                vm_event_resume(d, d->vm_event_paging);
             else
                 rc = -ENODEV;
             break;
@@ -661,7 +689,6 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
 
     case XEN_DOMCTL_VM_EVENT_OP_MONITOR:
     {
-        struct vm_event_domain *ved = &d->vm_event->monitor;
         rc = -EINVAL;
 
         switch( vec->op )
@@ -671,24 +698,28 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
             rc = arch_monitor_init_domain(d);
             if ( rc )
                 break;
-            rc = vm_event_enable(d, vec, ved, _VPF_mem_access,
+            rc = vm_event_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
                                  HVM_PARAM_MONITOR_RING_PFN,
                                  monitor_notification);
             break;
 
         case XEN_VM_EVENT_DISABLE:
-            if ( ved->ring_page )
+            if ( !d->vm_event_monitor )
+                break;
+            if ( d->vm_event_monitor->ring_page )
             {
                 domain_pause(d);
-                rc = vm_event_disable(d, ved);
+                rc = vm_event_disable(d, &d->vm_event_monitor);
                 arch_monitor_cleanup_domain(d);
                 domain_unpause(d);
             }
             break;
 
         case XEN_VM_EVENT_RESUME:
-            if ( ved->ring_page )
-                vm_event_resume(d, ved);
+            if ( !d->vm_event_monitor )
+                break;
+            if ( d->vm_event_monitor->ring_page )
+                vm_event_resume(d, d->vm_event_monitor);
             else
                 rc = -ENODEV;
             break;
@@ -703,7 +734,6 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
 #ifdef CONFIG_HAS_MEM_SHARING
     case XEN_DOMCTL_VM_EVENT_OP_SHARING:
     {
-        struct vm_event_domain *ved = &d->vm_event->share;
         rc = -EINVAL;
 
         switch( vec->op )
@@ -720,23 +750,27 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
                 break;
 
             /* domain_pause() not required here, see XSA-99 */
-            rc = vm_event_enable(d, vec, ved, _VPF_mem_sharing,
+            rc = vm_event_enable(d, vec, &d->vm_event_share, _VPF_mem_sharing,
                                  HVM_PARAM_SHARING_RING_PFN,
                                  mem_sharing_notification);
             break;
 
         case XEN_VM_EVENT_DISABLE:
-            if ( ved->ring_page )
+            if ( !d->vm_event_share )
+                break;
+            if ( d->vm_event_share->ring_page )
             {
                 domain_pause(d);
-                rc = vm_event_disable(d, ved);
+                rc = vm_event_disable(d, &d->vm_event_share);
                 domain_unpause(d);
             }
             break;
 
         case XEN_VM_EVENT_RESUME:
-            if ( ved->ring_page )
-                vm_event_resume(d, ved);
+            if ( !d->vm_event_share )
+                break;
+            if ( d->vm_event_share->ring_page )
+                vm_event_resume(d, d->vm_event_share);
             else
                 rc = -ENODEV;
             break;
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 27bdb71..f547b48 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1365,7 +1365,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
      * enabled for this domain */
     if ( unlikely(!need_iommu(d) &&
             (d->arch.hvm_domain.mem_sharing_enabled ||
-             d->vm_event->paging.ring_page ||
+             (d->vm_event_paging && d->vm_event_paging->ring_page) ||
              p2m_get_hostp2m(d)->global_logdirty)) )
         return -EXDEV;
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 6673b27..e48487c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -295,16 +295,6 @@ struct vm_event_domain
     unsigned int last_vcpu_wake_up;
 };
 
-struct vm_event_per_domain
-{
-    /* Memory sharing support */
-    struct vm_event_domain share;
-    /* Memory paging support */
-    struct vm_event_domain paging;
-    /* VM event monitor support */
-    struct vm_event_domain monitor;
-};
-
 struct evtchn_port_ops;
 
 enum guest_type {
@@ -464,7 +454,13 @@ struct domain
     struct lock_profile_qhead profile_head;
 
     /* Various vm_events */
-    struct vm_event_per_domain *vm_event;
+
+    /* Memory sharing support */
+    struct vm_event_domain *vm_event_share;
+    /* Memory paging support */
+    struct vm_event_domain *vm_event_paging;
+    /* VM event monitor support */
+    struct vm_event_domain *vm_event_monitor;
 
     /*
      * Can be specified by the user. If that is not the case, it is
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
