From: Petre Pircalabu <ppircalabu@bitdefender.com>
To: xen-devel@lists.xenproject.org
Cc: Petre Pircalabu <ppircalabu@bitdefender.com>,
	Alexandru Isaila <aisaila@bitdefender.com>,
	Tamas K Lengyel <tamas@tklengyel.com>,
	Razvan Cojocaru <rcojocaru@bitdefender.com>
Subject: [Xen-devel] [PATCH v2 06/10] vm_event: Decouple implementation details from interface.
Date: Tue, 16 Jul 2019 20:06:20 +0300
Message-ID: <880b61f88b9d19b3ef2bd43713caaab0528a190e.1563293545.git.ppircalabu@bitdefender.com>
In-Reply-To: <cover.1563293545.git.ppircalabu@bitdefender.com>

To accommodate a second implementation of the vm_event subsystem, decouple
the current ring-based one from the xen/vm_event.h interface.

Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>
---
 xen/common/vm_event.c      | 368 ++++++++++++++++++++++-----------------------
 xen/include/xen/vm_event.h |  60 +++++++-
 2 files changed, 236 insertions(+), 192 deletions(-)
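
The shape of the change, reduced to a standalone sketch: the interface-visible
struct gains an ops table, each backend embeds it and recovers its own type
with container_of(), and callers dispatch through guarded inline wrappers.
All names below are simplified stand-ins for illustration only, not the
actual Xen code:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
      ((type *)((char *)(ptr) - offsetof(type, member)))

  struct event_domain;

  struct event_ops {                         /* cf. struct vm_event_ops */
      bool (*check)(struct event_domain *ed);
      void (*put_request)(struct event_domain *ed, int req);
  };

  struct event_domain {                      /* cf. struct vm_event_domain */
      const struct event_ops *ops;
  };

  /* Interface-side wrapper: guard, then dispatch (cf. vm_event_put_request). */
  static inline void event_put_request(struct event_domain *ed, int req)
  {
      if ( ed && ed->ops->check(ed) )
          ed->ops->put_request(ed, req);
  }

  /* One backend: a "ring" implementation embedding the base object. */
  struct event_ring_domain {                 /* cf. struct vm_event_ring_domain */
      struct event_domain ed;
      int free_slots;
  };

  #define to_ring(_ed) container_of((_ed), struct event_ring_domain, ed)

  static bool ring_check(struct event_domain *ed)
  {
      return to_ring(ed)->free_slots > 0;    /* cf. vm_event_ring_check() */
  }

  static void ring_put_request(struct event_domain *ed, int req)
  {
      struct event_ring_domain *impl = to_ring(ed);

      impl->free_slots--;
      printf("ring backend queued request %d (%d slots left)\n",
             req, impl->free_slots);
  }

  static const struct event_ops ring_ops = {
      .check       = ring_check,
      .put_request = ring_put_request,
  };

  int main(void)
  {
      struct event_ring_domain r = { .ed.ops = &ring_ops, .free_slots = 64 };

      event_put_request(&r.ed, 1);   /* resolves to ring_put_request() */
      return 0;
  }

The payoff is that the interface header only ever sees the base struct and
the ops pointer; the ring bookkeeping (producers, wait queue, ring page)
stays private to vm_event.c.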

diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 21895c2..e6a7a29 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -35,12 +35,13 @@
 #define xen_rmb()  smp_rmb()
 #define xen_wmb()  smp_wmb()
 
-/* VM event */
-struct vm_event_domain
+#define to_ring(_ved) container_of((_ved), struct vm_event_ring_domain, ved)
+
+/* VM event ring implementation */
+struct vm_event_ring_domain
 {
-    /* Domain reference */
-    struct domain *d;
-    spinlock_t lock;
+    /* VM event domain */
+    struct vm_event_domain ved;
     /* The ring has 64 entries */
     unsigned char foreign_producers;
     unsigned char target_producers;
@@ -61,7 +62,9 @@ struct vm_event_domain
     unsigned int last_vcpu_wake_up;
 };
 
-static int vm_event_enable(
+static const struct vm_event_ops vm_event_ring_ops;
+
+static int vm_event_ring_enable(
     struct domain *d,
     struct xen_domctl_vm_event_op *vec,
     struct vm_event_domain **p_ved,
@@ -71,7 +74,7 @@ static int vm_event_enable(
 {
     int rc;
     unsigned long ring_gfn = d->arch.hvm.params[param];
-    struct vm_event_domain *ved;
+    struct vm_event_ring_domain *impl;
 
     /*
      * Only one connected agent at a time.  If the helper crashed, the ring is
@@ -84,28 +87,28 @@ static int vm_event_enable(
     if ( ring_gfn == 0 )
         return -EOPNOTSUPP;
 
-    ved = xzalloc(struct vm_event_domain);
-    if ( !ved )
+    impl = xzalloc(struct vm_event_ring_domain);
+    if ( !impl )
         return -ENOMEM;
 
     /* Trivial setup. */
-    spin_lock_init(&ved->lock);
-    init_waitqueue_head(&ved->wq);
-    ved->pause_flag = pause_flag;
+    spin_lock_init(&impl->ved.lock);
+    init_waitqueue_head(&impl->wq);
+    impl->ved.d = d;
+    impl->ved.ops = &vm_event_ring_ops;
+    impl->pause_flag = pause_flag;
 
     rc = vm_event_init_domain(d);
     if ( rc < 0 )
         goto err;
 
-    ved->d = d;
-
-    rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
-                                 &ved->ring_page);
+    rc = prepare_ring_for_helper(d, ring_gfn, &impl->ring_pg_struct,
+                                 &impl->ring_page);
     if ( rc < 0 )
         goto err;
 
-    FRONT_RING_INIT(&ved->front_ring,
-                    (vm_event_sring_t *)ved->ring_page,
+    FRONT_RING_INIT(&impl->front_ring,
+                    (vm_event_sring_t *)impl->ring_page,
                     PAGE_SIZE);
 
     rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
@@ -113,26 +116,26 @@ static int vm_event_enable(
     if ( rc < 0 )
         goto err;
 
-    ved->xen_port = vec->u.enable.port = rc;
+    impl->xen_port = vec->u.enable.port = rc;
 
     /* Success.  Fill in the domain's appropriate ved. */
-    *p_ved = ved;
+    *p_ved = &impl->ved;
 
     return 0;
 
  err:
-    destroy_ring_for_helper(&ved->ring_page, ved->ring_pg_struct);
-    xfree(ved);
+    destroy_ring_for_helper(&impl->ring_page, impl->ring_pg_struct);
+    xfree(impl);
 
     return rc;
 }
 
-static unsigned int vm_event_ring_available(struct vm_event_domain *ved)
+static unsigned int vm_event_ring_available(struct vm_event_ring_domain *impl)
 {
-    int avail_req = RING_FREE_REQUESTS(&ved->front_ring);
+    int avail_req = RING_FREE_REQUESTS(&impl->front_ring);
 
-    avail_req -= ved->target_producers;
-    avail_req -= ved->foreign_producers;
+    avail_req -= impl->target_producers;
+    avail_req -= impl->foreign_producers;
 
     BUG_ON(avail_req < 0);
 
@@ -140,38 +143,38 @@ static unsigned int vm_event_ring_available(struct vm_event_domain *ved)
 }
 
 /*
- * vm_event_wake_blocked() will wakeup vcpus waiting for room in the
+ * vm_event_ring_wake_blocked() will wake up vCPUs waiting for room in the
  * ring. These vCPUs were paused on their way out after placing an event,
  * but need to be resumed where the ring is capable of processing at least
  * one event from them.
  */
-static void vm_event_wake_blocked(struct vm_event_domain *ved)
+static void vm_event_ring_wake_blocked(struct vm_event_ring_domain *impl)
 {
     struct vcpu *v;
-    unsigned int i, j, k, avail_req = vm_event_ring_available(ved);
-    struct domain *d = ved->d;
+    unsigned int i, j, k, avail_req = vm_event_ring_available(impl);
+    struct domain *d = impl->ved.d;
 
-    if ( avail_req == 0 || ved->blocked == 0 )
+    if ( avail_req == 0 || impl->blocked == 0 )
         return;
 
     /* We remember which vcpu last woke up to avoid scanning always linearly
      * from zero and starving higher-numbered vcpus under high load */
-    for ( i = ved->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++ )
+    for ( i = impl->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++ )
     {
         k = i % d->max_vcpus;
         v = d->vcpu[k];
         if ( !v )
             continue;
 
-        if ( !ved->blocked || avail_req == 0 )
+        if ( !impl->blocked || avail_req == 0 )
             break;
 
-        if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
+        if ( test_and_clear_bit(impl->pause_flag, &v->pause_flags) )
         {
             vcpu_unpause(v);
             avail_req--;
-            ved->blocked--;
-            ved->last_vcpu_wake_up = k;
+            impl->blocked--;
+            impl->last_vcpu_wake_up = k;
         }
     }
 }
@@ -181,93 +184,90 @@ static void vm_event_wake_blocked(struct vm_event_domain *ved)
  * was unable to do so, it is queued on a wait queue.  These are woken as
  * needed, and take precedence over the blocked vCPUs.
  */
-static void vm_event_wake_queued(struct vm_event_domain *ved)
+static void vm_event_ring_wake_queued(struct vm_event_ring_domain *impl)
 {
-    unsigned int avail_req = vm_event_ring_available(ved);
+    unsigned int avail_req = vm_event_ring_available(impl);
 
     if ( avail_req > 0 )
-        wake_up_nr(&ved->wq, avail_req);
+        wake_up_nr(&impl->wq, avail_req);
 }
 
 /*
- * vm_event_wake() will wakeup all vcpus waiting for the ring to
+ * vm_event_ring_wake() will wake up all vCPUs waiting for the ring to
  * become available.  If we have queued vCPUs, they get top priority. We
  * are guaranteed that they will go through code paths that will eventually
- * call vm_event_wake() again, ensuring that any blocked vCPUs will get
+ * call vm_event_ring_wake() again, ensuring that any blocked vCPUs will get
  * unpaused once all the queued vCPUs have made it through.
  */
-void vm_event_wake(struct vm_event_domain *ved)
+static void vm_event_ring_wake(struct vm_event_ring_domain *impl)
 {
-    if ( !list_empty(&ved->wq.list) )
-        vm_event_wake_queued(ved);
+    if ( !list_empty(&impl->wq.list) )
+        vm_event_ring_wake_queued(impl);
     else
-        vm_event_wake_blocked(ved);
+        vm_event_ring_wake_blocked(impl);
 }
 
-static int vm_event_disable(struct domain *d, struct vm_event_domain **p_ved)
+static int vm_event_ring_disable(struct vm_event_domain **p_ved)
 {
-    struct vm_event_domain *ved = *p_ved;
-
-    if ( vm_event_check(ved) )
-    {
-        struct vcpu *v;
+    struct vcpu *v;
+    struct domain *d = (*p_ved)->d;
+    struct vm_event_ring_domain *impl = to_ring(*p_ved);
 
-        spin_lock(&ved->lock);
+    spin_lock(&impl->ved.lock);
 
-        if ( !list_empty(&ved->wq.list) )
-        {
-            spin_unlock(&ved->lock);
-            return -EBUSY;
-        }
+    if ( !list_empty(&impl->wq.list) )
+    {
+        spin_unlock(&impl->ved.lock);
+        return -EBUSY;
+    }
 
-        /* Free domU's event channel and leave the other one unbound */
-        free_xen_event_channel(d, ved->xen_port);
+    /* Free domU's event channel and leave the other one unbound */
+    free_xen_event_channel(d, impl->xen_port);
 
-        /* Unblock all vCPUs */
-        for_each_vcpu ( d, v )
+    /* Unblock all vCPUs */
+    for_each_vcpu ( d, v )
+    {
+        if ( test_and_clear_bit(impl->pause_flag, &v->pause_flags) )
         {
-            if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
-            {
-                vcpu_unpause(v);
-                ved->blocked--;
-            }
+            vcpu_unpause(v);
+            impl->blocked--;
         }
+    }
 
-        destroy_ring_for_helper(&ved->ring_page, ved->ring_pg_struct);
+    destroy_ring_for_helper(&impl->ring_page, impl->ring_pg_struct);
 
-        vm_event_cleanup_domain(d);
+    vm_event_cleanup_domain(d);
 
-        spin_unlock(&ved->lock);
-    }
+    spin_unlock(&impl->ved.lock);
 
-    xfree(ved);
+    xfree(impl);
     *p_ved = NULL;
-
     return 0;
 }
 
-static void vm_event_release_slot(struct vm_event_domain *ved)
+static void vm_event_ring_release_slot(struct vm_event_ring_domain *impl)
 {
     /* Update the accounting */
-    if ( current->domain == ved->d )
-        ved->target_producers--;
+    if ( current->domain == impl->ved.d )
+        impl->target_producers--;
     else
-        ved->foreign_producers--;
+        impl->foreign_producers--;
 
     /* Kick any waiters */
-    vm_event_wake(ved);
+    vm_event_ring_wake(impl);
 }
 
 /*
- * vm_event_mark_and_pause() tags vcpu and put it to sleep.
- * The vcpu will resume execution in vm_event_wake_blocked().
+ * vm_event_ring_mark_and_pause() tags the vcpu and puts it to sleep.
+ * The vcpu will resume execution in vm_event_ring_wake_blocked().
  */
-static void vm_event_mark_and_pause(struct vcpu *v, struct vm_event_domain *ved)
+static void vm_event_ring_mark_and_pause(struct vcpu *v,
+                                         struct vm_event_ring_domain *impl)
 {
-    if ( !test_and_set_bit(ved->pause_flag, &v->pause_flags) )
+    if ( !test_and_set_bit(impl->pause_flag, &v->pause_flags) )
     {
         vcpu_pause_nosync(v);
-        ved->blocked++;
+        impl->blocked++;
     }
 }
 
@@ -277,34 +277,31 @@ static void vm_event_mark_and_pause(struct vcpu *v, struct vm_event_domain *ved)
  * overly full and its continued execution would cause stalling and excessive
  * waiting.  The vCPU will be automatically unpaused when the ring clears.
  */
-void vm_event_put_request(struct vm_event_domain *ved,
-                          vm_event_request_t *req)
+static void vm_event_ring_put_request(struct vm_event_domain *ved,
+                                      vm_event_request_t *req)
 {
     vm_event_front_ring_t *front_ring;
     int free_req;
     unsigned int avail_req;
     RING_IDX req_prod;
     struct vcpu *curr = current;
-    struct domain *d = ved->d;
-
-    if( !vm_event_check(ved) )
-        return;
+    struct vm_event_ring_domain *impl = to_ring(ved);
 
-    if ( curr->domain != d )
+    if ( curr->domain != ved->d )
     {
         req->flags |= VM_EVENT_FLAG_FOREIGN;
 
         if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
             gdprintk(XENLOG_WARNING, "d%dv%d was not paused.\n",
-                     d->domain_id, req->vcpu_id);
+                     ved->d->domain_id, req->vcpu_id);
     }
 
     req->version = VM_EVENT_INTERFACE_VERSION;
 
-    spin_lock(&ved->lock);
+    spin_lock(&impl->ved.lock);
 
     /* Due to the reservations, this step must succeed. */
-    front_ring = &ved->front_ring;
+    front_ring = &impl->front_ring;
     free_req = RING_FREE_REQUESTS(front_ring);
     ASSERT(free_req > 0);
 
@@ -318,31 +315,31 @@ void vm_event_put_request(struct vm_event_domain *ved,
     RING_PUSH_REQUESTS(front_ring);
 
     /* We've actually *used* our reservation, so release the slot. */
-    vm_event_release_slot(ved);
+    vm_event_ring_release_slot(impl);
 
     /* Give this vCPU a black eye if necessary, on the way out.
      * See the comments above wake_blocked() for more information
      * on how this mechanism works to avoid waiting. */
-    avail_req = vm_event_ring_available(ved);
-    if( curr->domain == d && avail_req < d->max_vcpus &&
+    avail_req = vm_event_ring_available(impl);
+    if( curr->domain == ved->d && avail_req < ved->d->max_vcpus &&
         !atomic_read(&curr->vm_event_pause_count) )
-        vm_event_mark_and_pause(curr, ved);
+        vm_event_ring_mark_and_pause(curr, impl);
 
-    spin_unlock(&ved->lock);
+    spin_unlock(&impl->ved.lock);
 
-    notify_via_xen_event_channel(d, ved->xen_port);
+    notify_via_xen_event_channel(ved->d, impl->xen_port);
 }
 
-static int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
-                                 vm_event_response_t *rsp)
+static int vm_event_ring_get_response(struct vm_event_ring_domain *impl,
+                                      vm_event_response_t *rsp)
 {
     vm_event_front_ring_t *front_ring;
     RING_IDX rsp_cons;
     int rc = 0;
 
-    spin_lock(&ved->lock);
+    spin_lock(&impl->ved.lock);
 
-    front_ring = &ved->front_ring;
+    front_ring = &impl->front_ring;
     rsp_cons = front_ring->rsp_cons;
 
     if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
@@ -358,12 +355,12 @@ static int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
 
     /* Kick any waiters -- since we've just consumed an event,
      * there may be additional space available in the ring. */
-    vm_event_wake(ved);
+    vm_event_ring_wake(impl);
 
     rc = 1;
 
  out:
-    spin_unlock(&ved->lock);
+    spin_unlock(&impl->ved.lock);
 
     return rc;
 }
@@ -376,10 +373,13 @@ static int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
  * Note: responses are handled the same way regardless of which ring they
  * arrive on.
  */
-static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
+static int vm_event_ring_resume(struct vm_event_ring_domain *impl)
 {
     vm_event_response_t rsp;
 
+    if ( unlikely(!impl || !vm_event_check(&impl->ved)) )
+         return -ENODEV;
+
     /*
      * vm_event_resume() runs in either XEN_VM_EVENT_* domctls, or
      * EVTCHN_send context from the introspection consumer. Both contexts
@@ -388,13 +388,10 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
      * below, this covers the case where we would need to iterate over all
 * of them more succinctly.
      */
-    ASSERT(d != current->domain);
-
-    if ( unlikely(!vm_event_check(ved)) )
-         return -ENODEV;
+    ASSERT(impl->ved.d != current->domain);
 
     /* Pull all responses off the ring. */
-    while ( vm_event_get_response(d, ved, &rsp) )
+    while ( vm_event_ring_get_response(impl, &rsp) )
     {
         struct vcpu *v;
 
@@ -405,7 +402,7 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
         }
 
         /* Validate the vcpu_id in the response. */
-        v = domain_vcpu(d, rsp.vcpu_id);
+        v = domain_vcpu(impl->ved.d, rsp.vcpu_id);
         if ( !v )
             continue;
 
@@ -419,7 +416,7 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
         {
 #ifdef CONFIG_HAS_MEM_PAGING
             if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
-                p2m_mem_paging_resume(d, &rsp);
+                p2m_mem_paging_resume(impl->ved.d, &rsp);
 #endif
 
             /*
@@ -439,7 +436,7 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
              * Check in arch-specific handler to avoid bitmask overhead when
              * not supported.
              */
-            vm_event_toggle_singlestep(d, v, &rsp);
+            vm_event_toggle_singlestep(impl->ved.d, v, &rsp);
 
             /* Check for altp2m switch */
             if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
@@ -459,72 +456,69 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
     return 0;
 }
 
-void vm_event_cancel_slot(struct vm_event_domain *ved)
+static void vm_event_ring_cancel_slot(struct vm_event_domain *ved)
 {
-    if( !vm_event_check(ved) )
-        return;
-
     spin_lock(&ved->lock);
-    vm_event_release_slot(ved);
+    vm_event_ring_release_slot(to_ring(ved));
     spin_unlock(&ved->lock);
 }
 
-static int vm_event_grab_slot(struct vm_event_domain *ved, int foreign)
+static int vm_event_ring_grab_slot(struct vm_event_ring_domain *impl, int foreign)
 {
     unsigned int avail_req;
     int rc;
 
-    if ( !ved->ring_page )
+    if ( !impl->ring_page )
         return -EOPNOTSUPP;
 
-    spin_lock(&ved->lock);
+    spin_lock(&impl->ved.lock);
 
-    avail_req = vm_event_ring_available(ved);
+    avail_req = vm_event_ring_available(impl);
 
     rc = -EBUSY;
     if ( avail_req == 0 )
         goto out;
 
     if ( !foreign )
-        ved->target_producers++;
+        impl->target_producers++;
     else
-        ved->foreign_producers++;
+        impl->foreign_producers++;
 
     rc = 0;
 
  out:
-    spin_unlock(&ved->lock);
+    spin_unlock(&impl->ved.lock);
 
     return rc;
 }
 
 /* Simple try_grab wrapper for use in the wait_event() macro. */
-static int vm_event_wait_try_grab(struct vm_event_domain *ved, int *rc)
+static int vm_event_ring_wait_try_grab(struct vm_event_ring_domain *impl, int *rc)
 {
-    *rc = vm_event_grab_slot(ved, 0);
+    *rc = vm_event_ring_grab_slot(impl, 0);
 
     return *rc;
 }
 
-/* Call vm_event_grab_slot() until the ring doesn't exist, or is available. */
-static int vm_event_wait_slot(struct vm_event_domain *ved)
+/* Call vm_event_ring_grab_slot() until a slot is available or the ring is gone. */
+static int vm_event_ring_wait_slot(struct vm_event_ring_domain *impl)
 {
     int rc = -EBUSY;
 
-    wait_event(ved->wq, vm_event_wait_try_grab(ved, &rc) != -EBUSY);
+    wait_event(impl->wq, vm_event_ring_wait_try_grab(impl, &rc) != -EBUSY);
 
     return rc;
 }
 
-bool vm_event_check(struct vm_event_domain *ved)
+static bool vm_event_ring_check(struct vm_event_domain *ved)
 {
-    return ved && ved->ring_page;
+    return to_ring(ved)->ring_page != NULL;
 }
 
 /*
  * Determines whether or not the current vCPU belongs to the target domain,
  * and calls the appropriate wait function.  If it is a guest vCPU, then we
- * use vm_event_wait_slot() to reserve a slot.  As long as there is a ring,
+ * use vm_event_ring_wait_slot() to reserve a slot.  As long as there is a ring,
  * this function will always return 0 for a guest.  For a non-guest, we check
  * for space and return -EBUSY if the ring is not available.
  *
@@ -533,36 +527,33 @@ bool vm_event_check(struct vm_event_domain *ved)
  *               0: a spot has been reserved
  *
  */
-int __vm_event_claim_slot(struct vm_event_domain *ved, bool allow_sleep)
+static int vm_event_ring_claim_slot(struct vm_event_domain *ved, bool allow_sleep)
 {
-    if ( !vm_event_check(ved) )
-        return -EOPNOTSUPP;
-
     if ( (current->domain == ved->d) && allow_sleep )
-        return vm_event_wait_slot(ved);
+        return vm_event_ring_wait_slot(to_ring(ved));
     else
-        return vm_event_grab_slot(ved, current->domain != ved->d);
+        return vm_event_ring_grab_slot(to_ring(ved), current->domain != ved->d);
 }
 
 #ifdef CONFIG_HAS_MEM_PAGING
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_paging_notification(struct vcpu *v, unsigned int port)
 {
-    vm_event_resume(v->domain, v->domain->vm_event_paging);
+    vm_event_ring_resume(to_ring(v->domain->vm_event_paging));
 }
 #endif
 
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void monitor_notification(struct vcpu *v, unsigned int port)
 {
-    vm_event_resume(v->domain, v->domain->vm_event_monitor);
+    vm_event_ring_resume(to_ring(v->domain->vm_event_monitor));
 }
 
 #ifdef CONFIG_HAS_MEM_SHARING
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_sharing_notification(struct vcpu *v, unsigned int port)
 {
-    vm_event_resume(v->domain, v->domain->vm_event_share);
+    vm_event_ring_resume(to_ring(v->domain->vm_event_share));
 }
 #endif
 
@@ -571,32 +562,32 @@ void vm_event_cleanup(struct domain *d)
 {
 #ifdef CONFIG_HAS_MEM_PAGING
     if ( vm_event_check(d->vm_event_paging) )
-    {
-        /* Destroying the wait queue head means waking up all
-         * queued vcpus. This will drain the list, allowing
-         * the disable routine to complete. It will also drop
-         * all domain refs the wait-queued vcpus are holding.
-         * Finally, because this code path involves previously
-         * pausing the domain (domain_kill), unpausing the
-         * vcpus causes no harm. */
-        destroy_waitqueue_head(&d->vm_event_paging->wq);
-        (void)vm_event_disable(d, &d->vm_event_paging);
-    }
+        d->vm_event_paging->ops->cleanup(&d->vm_event_paging);
 #endif
+
     if ( vm_event_check(d->vm_event_monitor) )
-    {
-        destroy_waitqueue_head(&d->vm_event_monitor->wq);
-        (void)vm_event_disable(d, &d->vm_event_monitor);
-    }
+        d->vm_event_monitor->ops->cleanup(&d->vm_event_monitor);
+
 #ifdef CONFIG_HAS_MEM_SHARING
     if ( vm_event_check(d->vm_event_share) )
-    {
-        destroy_waitqueue_head(&d->vm_event_share->wq);
-        (void)vm_event_disable(d, &d->vm_event_share);
-    }
+        d->vm_event_share->ops->cleanup(&d->vm_event_share);
 #endif
 }
 
+static void vm_event_ring_cleanup(struct vm_event_domain **_ved)
+{
+    struct vm_event_ring_domain *impl = to_ring(*_ved);
+    /* Destroying the wait queue head means waking up all
+     * queued vcpus. This will drain the list, allowing
+     * the disable routine to complete. It will also drop
+     * all domain refs the wait-queued vcpus are holding.
+     * Finally, because this code path involves previously
+     * pausing the domain (domain_kill), unpausing the
+     * vcpus causes no harm. */
+    destroy_waitqueue_head(&impl->wq);
+    (void)vm_event_ring_disable(_ved);
+}
+
 int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
 {
     int rc;
@@ -666,23 +657,22 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
                 break;
 
             /* domain_pause() not required here, see XSA-99 */
-            rc = vm_event_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
+            rc = vm_event_ring_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
                                  HVM_PARAM_PAGING_RING_PFN,
                                  mem_paging_notification);
         }
         break;
 
         case XEN_VM_EVENT_DISABLE:
-            if ( vm_event_check(d->vm_event_paging) )
-            {
-                domain_pause(d);
-                rc = vm_event_disable(d, &d->vm_event_paging);
-                domain_unpause(d);
-            }
+            if ( !vm_event_check(d->vm_event_paging) )
+                break;
+            domain_pause(d);
+            rc = vm_event_ring_disable(&d->vm_event_paging);
+            domain_unpause(d);
             break;
 
         case XEN_VM_EVENT_RESUME:
-            rc = vm_event_resume(d, d->vm_event_paging);
+            rc = vm_event_ring_resume(to_ring(d->vm_event_paging));
             break;
 
         default:
@@ -704,23 +694,22 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
             rc = arch_monitor_init_domain(d);
             if ( rc )
                 break;
-            rc = vm_event_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
+            rc = vm_event_ring_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
                                  HVM_PARAM_MONITOR_RING_PFN,
                                  monitor_notification);
             break;
 
         case XEN_VM_EVENT_DISABLE:
-            if ( vm_event_check(d->vm_event_monitor) )
-            {
-                domain_pause(d);
-                rc = vm_event_disable(d, &d->vm_event_monitor);
-                arch_monitor_cleanup_domain(d);
-                domain_unpause(d);
-            }
+            if ( !vm_event_check(d->vm_event_monitor) )
+                break;
+            domain_pause(d);
+            rc = vm_event_ring_disable(&d->vm_event_monitor);
+            arch_monitor_cleanup_domain(d);
+            domain_unpause(d);
             break;
 
         case XEN_VM_EVENT_RESUME:
-            rc = vm_event_resume(d, d->vm_event_monitor);
+            rc = vm_event_ring_resume(to_ring(d->vm_event_monitor));
             break;
 
         default:
@@ -749,22 +738,21 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
                 break;
 
             /* domain_pause() not required here, see XSA-99 */
-            rc = vm_event_enable(d, vec, &d->vm_event_share, _VPF_mem_sharing,
+            rc = vm_event_ring_enable(d, vec, &d->vm_event_share, _VPF_mem_sharing,
                                  HVM_PARAM_SHARING_RING_PFN,
                                  mem_sharing_notification);
             break;
 
         case XEN_VM_EVENT_DISABLE:
-            if ( vm_event_check(d->vm_event_share) )
-            {
-                domain_pause(d);
-                rc = vm_event_disable(d, &d->vm_event_share);
-                domain_unpause(d);
-            }
+            if ( !vm_event_check(d->vm_event_share) )
+                break;
+            domain_pause(d);
+            rc = vm_event_ring_disable(&d->vm_event_share);
+            domain_unpause(d);
             break;
 
         case XEN_VM_EVENT_RESUME:
-            rc = vm_event_resume(d, d->vm_event_share);
+            rc = vm_event_ring_resume(to_ring(d->vm_event_share));
             break;
 
         default:
@@ -816,6 +804,14 @@ void vm_event_vcpu_unpause(struct vcpu *v)
     vcpu_unpause(v);
 }
 
+static const struct vm_event_ops vm_event_ring_ops = {
+    .check = vm_event_ring_check,
+    .cleanup = vm_event_ring_cleanup,
+    .claim_slot = vm_event_ring_claim_slot,
+    .cancel_slot = vm_event_ring_cancel_slot,
+    .put_request = vm_event_ring_put_request
+};
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index ff30999..21a3f50 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -23,14 +23,43 @@
 #ifndef __VM_EVENT_H__
 #define __VM_EVENT_H__
 
-#include <xen/sched.h>
+#include <xen/errno.h>
+#include <xen/spinlock.h>
+#include <xen/types.h>
 #include <public/vm_event.h>
 
+struct domain;
+struct vm_event_domain;
+
+struct vm_event_ops
+{
+    bool (*check)(struct vm_event_domain *ved);
+    void (*cleanup)(struct vm_event_domain **_ved);
+    int (*claim_slot)(struct vm_event_domain *ved, bool allow_sleep);
+    void (*cancel_slot)(struct vm_event_domain *ved);
+    void (*put_request)(struct vm_event_domain *ved, vm_event_request_t *req);
+};
+
+struct vm_event_domain
+{
+    /* Domain reference */
+    struct domain *d;
+
+    /* vm_event_ops */
+    const struct vm_event_ops *ops;
+
+    /* vm_event domain lock */
+    spinlock_t lock;
+};
+
 /* Clean up on domain destruction */
 void vm_event_cleanup(struct domain *d);
 
 /* Returns whether the VM event domain has been set up */
-bool vm_event_check(struct vm_event_domain *ved);
+static inline bool vm_event_check(struct vm_event_domain *ved)
+{
+    return (ved) && ved->ops->check(ved);
+}
 
 /* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
  * available space and the caller is a foreign domain. If the guest itself
@@ -45,7 +74,14 @@ bool vm_event_check(struct vm_event_domain *ved);
  * cancel_slot(), both of which are guaranteed to
  * succeed.
  */
-int __vm_event_claim_slot(struct vm_event_domain *ved, bool allow_sleep);
+static inline int __vm_event_claim_slot(struct vm_event_domain *ved, bool allow_sleep)
+{
+    if ( !vm_event_check(ved) )
+        return -EOPNOTSUPP;
+
+    return ved->ops->claim_slot(ved, allow_sleep);
+}
+
 static inline int vm_event_claim_slot(struct vm_event_domain *ved)
 {
     return __vm_event_claim_slot(ved, true);
@@ -56,10 +92,22 @@ static inline int vm_event_claim_slot_nosleep(struct vm_event_domain *ved)
     return __vm_event_claim_slot(ved, false);
 }
 
-void vm_event_cancel_slot(struct vm_event_domain *ved);
+static inline void vm_event_cancel_slot(struct vm_event_domain *ved)
+{
+    if ( !vm_event_check(ved) )
+        return;
 
-void vm_event_put_request(struct vm_event_domain *ved,
-                          vm_event_request_t *req);
+    ved->ops->cancel_slot(ved);
+}
+
+static inline void vm_event_put_request(struct vm_event_domain *ved,
+                                        vm_event_request_t *req)
+{
+    if ( !vm_event_check(ved) )
+        return;
+
+    ved->ops->put_request(ved, req);
+}
 
 int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec);
 
-- 
2.7.4
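
For completeness, here is what a hypothetical second backend could look like
under this split, reusing the declarations from the sketch above (again
simplified stand-ins, not the implementation this series adds later):

  /* A made-up second backend: fixed per-vCPU slots instead of a shared ring. */
  struct event_channel_domain {
      struct event_domain ed;        /* same embedded base */
      int slot[8];                   /* one pending request per vCPU, say */
  };

  static bool channel_check(struct event_domain *ed)
  {
      (void)ed;
      return true;                   /* always ready once enabled */
  }

  static void channel_put_request(struct event_domain *ed, int req)
  {
      struct event_channel_domain *c =
          container_of(ed, struct event_channel_domain, ed);

      c->slot[0] = req;              /* toy placement, no signalling */
  }

  static const struct event_ops channel_ops = {
      .check       = channel_check,
      .put_request = channel_put_request,
  };

Nothing in the interface header changes: enabling such a backend is just a
matter of allocating the struct and pointing ed.ops at channel_ops, after
which every caller going through event_put_request() dispatches to it
unmodified.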


