From: Tamas K Lengyel <tamas.lengyel@zentific.com>
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, wei.liu2@citrix.com,
	ian.campbell@citrix.com, steve@zentific.com,
	stefano.stabellini@eu.citrix.com, jun.nakajima@intel.com,
	tim@xen.org, ian.jackson@eu.citrix.com, eddie.dong@intel.com,
	andres@lagarcavilla.org, jbeulich@suse.com,
	Tamas K Lengyel <tamas.lengyel@zentific.com>,
	rshriram@cs.ubc.ca, keir@xen.org, dgdegra@tycho.nsa.gov,
	yanghy@cn.fujitsu.com
Subject: [PATCH V6 11/13] xen/vm_event: Relocate memop checks
Date: Wed, 18 Feb 2015 01:11:41 +0100
Message-ID: <1424218303-11331-12-git-send-email-tamas.lengyel@zentific.com>
In-Reply-To: <1424218303-11331-1-git-send-email-tamas.lengyel@zentific.com>

The memop handling for paging/sharing (copying the guest arguments, taking
the RCU lock on the target domain and performing the XSM check) doesn't
really have anything to do with vm_event, so in this patch we relocate it
from do_vm_event_op into mem_paging_memop and mem_sharing_memop. This is
already the approach taken in mem_access_memop, so here we just make
things consistent.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
v6: Don't pass the superfluous cmd to the memops.
    Unlock the RCU read locks in sharing/paging.
    Style fixes.
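
For reference, the call path for a paging memop after this patch is roughly
the following (condensed from the hunks below; only the nominate op is shown
and the copy-back of the result is omitted):

    /* x86_64/mm.c and compat/mm.c now dispatch straight to the handler: */
    case XENMEM_paging_op:
        return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));

    /* ...and the handler copies the arguments, takes the RCU lock and
     * performs the XSM check itself before acting on the op: */
    int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
    {
        xen_mem_paging_op_t mpo;
        struct domain *d;
        int rc;

        if ( copy_from_guest(&mpo, arg, 1) )
            return -EFAULT;

        rc = rcu_lock_live_remote_domain_by_id(mpo.domain, &d);
        if ( rc )
            return rc;

        rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_paging_op);
        if ( rc )
            goto out;

        rc = -ENODEV;
        if ( unlikely(!d->vm_event->paging.ring_page) )
            goto out;

        switch ( mpo.op )
        {
        case XENMEM_paging_op_nominate:
            rc = p2m_mem_paging_nominate(d, mpo.gfn);
            break;
        default:
            rc = -ENOSYS;
            break;
        }

    out:
        rcu_unlock_domain(d);
        return rc;
    }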
---
 xen/arch/x86/mm/mem_paging.c      |  44 +++++++++++---
 xen/arch/x86/mm/mem_sharing.c     | 125 +++++++++++++++++++++++++-------------
 xen/arch/x86/x86_64/compat/mm.c   |  26 +-------
 xen/arch/x86/x86_64/mm.c          |  24 +-------
 xen/common/vm_event.c             |  43 -------------
 xen/include/asm-x86/mem_paging.h  |   2 +-
 xen/include/asm-x86/mem_sharing.h |   3 +-
 xen/include/xen/vm_event.h        |   1 -
 8 files changed, 127 insertions(+), 141 deletions(-)

diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index e63d8c1..cbdee99 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -22,34 +22,60 @@
 
 
 #include <asm/p2m.h>
-#include <xen/vm_event.h>
+#include <xen/guest_access.h>
+#include <xsm/xsm.h>
 
-
-int mem_paging_memop(struct domain *d, xen_mem_paging_op_t *mpo)
+int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
 {
-    int rc = -ENODEV;
-    if ( unlikely(!d->vm_event->paging.ring_page) )
+    int rc;
+    xen_mem_paging_op_t mpo;
+    struct domain *d;
+    bool_t copyback = 0;
+
+    rc = -EFAULT;
+    if ( copy_from_guest(&mpo, arg, 1) )
         return rc;
 
-    switch( mpo->op )
+    rc = rcu_lock_live_remote_domain_by_id(mpo.domain, &d);
+    if ( rc )
+        return rc;
+
+    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_paging_op);
+    if ( rc )
+        goto out;
+
+    rc = -ENODEV;
+    if ( unlikely(!d->vm_event->paging.ring_page) )
+        goto out;
+
+    switch( mpo.op )
     {
     case XENMEM_paging_op_nominate:
-        rc = p2m_mem_paging_nominate(d, mpo->gfn);
+        rc = p2m_mem_paging_nominate(d, mpo.gfn);
         break;
 
     case XENMEM_paging_op_evict:
-        rc = p2m_mem_paging_evict(d, mpo->gfn);
+        rc = p2m_mem_paging_evict(d, mpo.gfn);
         break;
 
     case XENMEM_paging_op_prep:
-        rc = p2m_mem_paging_prep(d, mpo->gfn, mpo->buffer);
+    {
+        rc = p2m_mem_paging_prep(d, mpo.gfn, mpo.buffer);
+        if ( !rc )
+            copyback = 1;
         break;
+    }
 
     default:
         rc = -ENOSYS;
         break;
     }
 
+    if ( copyback && __copy_to_guest(arg, &mpo, 1) )
+        rc = -EFAULT;
+
+out:
+    rcu_unlock_domain(d);
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 4959407..2d49bc4 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -28,6 +28,7 @@
 #include <xen/grant_table.h>
 #include <xen/sched.h>
 #include <xen/rcupdate.h>
+#include <xen/guest_access.h>
 #include <xen/vm_event.h>
 #include <asm/page.h>
 #include <asm/string.h>
@@ -1293,39 +1294,66 @@ int relinquish_shared_pages(struct domain *d)
     return rc;
 }
 
-int mem_sharing_memop(struct domain *d, xen_mem_sharing_op_t *mec)
+int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
 {
-    int rc = 0;
+    int rc;
+    xen_mem_sharing_op_t mso;
+    struct domain *d;
+
+    rc = -EFAULT;
+    if ( copy_from_guest(&mso, arg, 1) )
+        return rc;
+
+    if ( mso.op == XENMEM_sharing_op_audit )
+        return mem_sharing_audit();
+
+    rc = rcu_lock_live_remote_domain_by_id(mso.domain, &d);
+    if ( rc )
+        return rc;
 
     /* Only HAP is supported */
+    rc = -ENODEV;
     if ( !hap_enabled(d) || !d->arch.hvm_domain.mem_sharing_enabled )
-         return -ENODEV;
+         goto out;
 
-    switch(mec->op)
+    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_sharing_op);
+    if ( rc )
+        goto out;
+
+    rc = -ENODEV;
+    if ( unlikely(!d->vm_event->share.ring_page) )
+        goto out;
+
+    switch ( mso.op )
     {
         case XENMEM_sharing_op_nominate_gfn:
         {
-            unsigned long gfn = mec->u.nominate.u.gfn;
+            unsigned long gfn = mso.u.nominate.u.gfn;
             shr_handle_t handle;
+
+            rc = -EINVAL;
             if ( !mem_sharing_enabled(d) )
-                return -EINVAL;
+                goto out;
+
             rc = mem_sharing_nominate_page(d, gfn, 0, &handle);
-            mec->u.nominate.handle = handle;
+            mso.u.nominate.handle = handle;
         }
         break;
 
         case XENMEM_sharing_op_nominate_gref:
         {
-            grant_ref_t gref = mec->u.nominate.u.grant_ref;
+            grant_ref_t gref = mso.u.nominate.u.grant_ref;
             unsigned long gfn;
             shr_handle_t handle;
 
+            rc = -EINVAL;
             if ( !mem_sharing_enabled(d) )
-                return -EINVAL;
+                goto out;
             if ( mem_sharing_gref_to_gfn(d, gref, &gfn) < 0 )
-                return -EINVAL;
+                goto out;
+
             rc = mem_sharing_nominate_page(d, gfn, 3, &handle);
-            mec->u.nominate.handle = handle;
+            mso.u.nominate.handle = handle;
         }
         break;
 
@@ -1335,57 +1363,61 @@ int mem_sharing_memop(struct domain *d, xen_mem_sharing_op_t *mec)
             struct domain *cd;
             shr_handle_t sh, ch;
 
+            rc = -EINVAL;
             if ( !mem_sharing_enabled(d) )
-                return -EINVAL;
+                goto out;
 
-            rc = rcu_lock_live_remote_domain_by_id(mec->u.share.client_domain,
+            rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
                                                    &cd);
             if ( rc )
-                return rc;
+                goto out;
 
-            rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mec->op);
+            rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
             if ( rc )
             {
                 rcu_unlock_domain(cd);
-                return rc;
+                goto out;
             }
 
             if ( !mem_sharing_enabled(cd) )
             {
                 rcu_unlock_domain(cd);
-                return -EINVAL;
+                rc = -EINVAL;
+                goto out;
             }
 
-            if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mec->u.share.source_gfn) )
+            if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
             {
                 grant_ref_t gref = (grant_ref_t) 
                                     (XENMEM_SHARING_OP_FIELD_GET_GREF(
-                                        mec->u.share.source_gfn));
+                                        mso.u.share.source_gfn));
                 if ( mem_sharing_gref_to_gfn(d, gref, &sgfn) < 0 )
                 {
                     rcu_unlock_domain(cd);
-                    return -EINVAL;
+                    rc = -EINVAL;
+                    goto out;
                 }
             } else {
-                sgfn  = mec->u.share.source_gfn;
+                sgfn  = mso.u.share.source_gfn;
             }
 
-            if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mec->u.share.client_gfn) )
+            if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) )
             {
                 grant_ref_t gref = (grant_ref_t) 
                                     (XENMEM_SHARING_OP_FIELD_GET_GREF(
-                                        mec->u.share.client_gfn));
+                                        mso.u.share.client_gfn));
                 if ( mem_sharing_gref_to_gfn(cd, gref, &cgfn) < 0 )
                 {
                     rcu_unlock_domain(cd);
-                    return -EINVAL;
+                    rc = -EINVAL;
+                    goto out;
                 }
             } else {
-                cgfn  = mec->u.share.client_gfn;
+                cgfn  = mso.u.share.client_gfn;
             }
 
-            sh = mec->u.share.source_handle;
-            ch = mec->u.share.client_handle;
+            sh = mso.u.share.source_handle;
+            ch = mso.u.share.client_handle;
 
             rc = mem_sharing_share_pages(d, sgfn, sh, cd, cgfn, ch); 
 
@@ -1399,37 +1431,40 @@ int mem_sharing_memop(struct domain *d, xen_mem_sharing_op_t *mec)
             struct domain *cd;
             shr_handle_t sh;
 
+            rc = -EINVAL;
             if ( !mem_sharing_enabled(d) )
-                return -EINVAL;
+                goto out;
 
-            rc = rcu_lock_live_remote_domain_by_id(mec->u.share.client_domain,
+            rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
                                                    &cd);
             if ( rc )
-                return rc;
+                goto out;
 
-            rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mec->op);
+            rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
             if ( rc )
             {
                 rcu_unlock_domain(cd);
-                return rc;
+                goto out;
             }
 
             if ( !mem_sharing_enabled(cd) )
             {
                 rcu_unlock_domain(cd);
-                return -EINVAL;
+                rc = -EINVAL;
+                goto out;
             }
 
-            if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mec->u.share.source_gfn) )
+            if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
             {
                 /* Cannot add a gref to the physmap */
                 rcu_unlock_domain(cd);
-                return -EINVAL;
+                rc = -EINVAL;
+                goto out;
             }
 
-            sgfn    = mec->u.share.source_gfn;
-            sh      = mec->u.share.source_handle;
-            cgfn    = mec->u.share.client_gfn;
+            sgfn    = mso.u.share.source_gfn;
+            sh      = mso.u.share.source_handle;
+            cgfn    = mso.u.share.client_gfn;
 
             rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn); 
 
@@ -1440,7 +1475,10 @@ int mem_sharing_memop(struct domain *d, xen_mem_sharing_op_t *mec)
         case XENMEM_sharing_op_resume:
         {
             if ( !mem_sharing_enabled(d) )
-                return -EINVAL;
+            {
+                rc = -EINVAL;
+                goto out;
+            }
 
             vm_event_resume(d, &d->vm_event->share);
         }
@@ -1448,14 +1486,14 @@ int mem_sharing_memop(struct domain *d, xen_mem_sharing_op_t *mec)
 
         case XENMEM_sharing_op_debug_gfn:
         {
-            unsigned long gfn = mec->u.debug.u.gfn;
+            unsigned long gfn = mso.u.debug.u.gfn;
             rc = mem_sharing_debug_gfn(d, gfn);
         }
         break;
 
         case XENMEM_sharing_op_debug_gref:
         {
-            grant_ref_t gref = mec->u.debug.u.gref;
+            grant_ref_t gref = mso.u.debug.u.gref;
             rc = mem_sharing_debug_gref(d, gref);
         }
         break;
@@ -1465,6 +1503,11 @@ int mem_sharing_memop(struct domain *d, xen_mem_sharing_op_t *mec)
             break;
     }
 
+    if ( !rc && __copy_to_guest(arg, &mso, 1) )
+        rc = -EFAULT;
+
+out:
+    rcu_unlock_domain(d);
     return rc;
 }
 
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 85f138b..db1fe35 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -1,9 +1,9 @@
 #include <xen/event.h>
-#include <xen/vm_event.h>
 #include <xen/mem_access.h>
 #include <xen/multicall.h>
 #include <compat/memory.h>
 #include <compat/xen.h>
+#include <asm/mem_paging.h>
 #include <asm/mem_sharing.h>
 
 int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries)
@@ -187,30 +187,10 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         return mem_sharing_get_nr_shared_mfns();
 
     case XENMEM_paging_op:
-    {
-        xen_mem_paging_op_t mpo;
-
-        if ( copy_from_guest(&mpo, arg, 1) )
-            return -EFAULT;
-        rc = do_vm_event_op(cmd, mpo.domain, &mpo);
-        if ( !rc && __copy_to_guest(arg, &mpo, 1) )
-            return -EFAULT;
-        break;
-    }
+        return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
 
     case XENMEM_sharing_op:
-    {
-        xen_mem_sharing_op_t mso;
-
-        if ( copy_from_guest(&mso, arg, 1) )
-            return -EFAULT;
-        if ( mso.op == XENMEM_sharing_op_audit )
-            return mem_sharing_audit(); 
-        rc = do_vm_event_op(cmd, mso.domain, &mso);
-        if ( !rc && __copy_to_guest(arg, &mso, 1) )
-            return -EFAULT;
-        break;
-    }
+        return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
 
     default:
         rc = -ENOSYS;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 1e2bd1a..a99ee80 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -26,7 +26,6 @@
 #include <xen/nodemask.h>
 #include <xen/guest_access.h>
 #include <xen/hypercall.h>
-#include <xen/vm_event.h>
 #include <xen/mem_access.h>
 #include <asm/current.h>
 #include <asm/asm_defns.h>
@@ -37,6 +36,7 @@
 #include <asm/msr.h>
 #include <asm/setup.h>
 #include <asm/numa.h>
+#include <asm/mem_paging.h>
 #include <asm/mem_sharing.h>
 #include <public/memory.h>
 
@@ -984,28 +984,10 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         return mem_sharing_get_nr_shared_mfns();
 
     case XENMEM_paging_op:
-    {
-        xen_mem_paging_op_t mpo;
-        if ( copy_from_guest(&mpo, arg, 1) )
-            return -EFAULT;
-        rc = do_vm_event_op(cmd, mpo.domain, &mpo);
-        if ( !rc && __copy_to_guest(arg, &mpo, 1) )
-            return -EFAULT;
-        break;
-    }
+        return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
 
     case XENMEM_sharing_op:
-    {
-        xen_mem_sharing_op_t mso;
-        if ( copy_from_guest(&mso, arg, 1) )
-            return -EFAULT;
-        if ( mso.op == XENMEM_sharing_op_audit )
-            return mem_sharing_audit(); 
-        rc = do_vm_event_op(cmd, mso.domain, &mso);
-        if ( !rc && __copy_to_guest(arg, &mso, 1) )
-            return -EFAULT;
-        break;
-    }
+        return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
 
     default:
         rc = -ENOSYS;
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 83e6021..4fbb918 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -27,15 +27,6 @@
 #include <xen/vm_event.h>
 #include <xen/mem_access.h>
 #include <asm/p2m.h>
-
-#ifdef HAS_MEM_PAGING
-#include <asm/mem_paging.h>
-#endif
-
-#ifdef HAS_MEM_SHARING
-#include <asm/mem_sharing.h>
-#endif
-
 #include <xsm/xsm.h>
 
 /* for public/io/ring.h macros */
@@ -512,40 +503,6 @@ static void mem_sharing_notification(struct vcpu *v, unsigned int port)
 }
 #endif
 
-int do_vm_event_op(int op, uint32_t domain, void *arg)
-{
-    int ret;
-    struct domain *d;
-
-    ret = rcu_lock_live_remote_domain_by_id(domain, &d);
-    if ( ret )
-        return ret;
-
-    ret = xsm_vm_event_op(XSM_DM_PRIV, d, op);
-    if ( ret )
-        goto out;
-
-    switch (op)
-    {
-#ifdef HAS_MEM_PAGING
-        case XENMEM_paging_op:
-            ret = mem_paging_memop(d, arg);
-            break;
-#endif
-#ifdef HAS_MEM_SHARING
-        case XENMEM_sharing_op:
-            ret = mem_sharing_memop(d, arg);
-            break;
-#endif
-        default:
-            ret = -ENOSYS;
-    }
-
- out:
-    rcu_unlock_domain(d);
-    return ret;
-}
-
 /* Clean up on domain destruction */
 void vm_event_cleanup(struct domain *d)
 {
diff --git a/xen/include/asm-x86/mem_paging.h b/xen/include/asm-x86/mem_paging.h
index 5f0f91b..4179eb7 100644
--- a/xen/include/asm-x86/mem_paging.h
+++ b/xen/include/asm-x86/mem_paging.h
@@ -23,7 +23,7 @@
 #ifndef __ASM_X86_MEM_PAGING_H__
 #define __ASM_X86_MEM_PAGING_H__
 
-int mem_paging_memop(struct domain *d, xen_mem_paging_op_t *mpo);
+int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg);
 
 #endif /*__ASM_X86_MEM_PAGING_H__ */
 
diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h
index da99d46..e4a1024 100644
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -90,8 +90,7 @@ static inline int mem_sharing_unshare_page(struct domain *d,
  */
 int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
                                 bool_t allow_sleep);
-int mem_sharing_memop(struct domain *d, 
-                       xen_mem_sharing_op_t *mec);
+int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg);
 int mem_sharing_domctl(struct domain *d, 
                        xen_domctl_mem_sharing_op_t *mec);
 int mem_sharing_audit(void);
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 8daffd7..c4b99c2 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -69,7 +69,6 @@ int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
 
 void vm_event_resume(struct domain *d, struct vm_event_domain *ved);
 
-int do_vm_event_op(int op, uint32_t domain, void *arg);
 int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
                      XEN_GUEST_HANDLE_PARAM(void) u_domctl);
 
-- 
2.1.4
