* [PATCH 0/2] XENMEM_add_to_physmap_batch
@ 2013-12-20 13:04 Jan Beulich
  2013-12-20 13:07 ` [PATCH 1/2] rename XENMEM_add_to_physmap_{range => batch} (v2) Jan Beulich
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Jan Beulich @ 2013-12-20 13:04 UTC (permalink / raw)
  To: xen-devel; +Cc: Keir Fraser, Ian Campbell

1: rename XENMEM_add_to_physmap_{range => batch} (v2)
2: compat wrapper for XENMEM_add_to_physmap_batch

Signed-off-by: Jan Beulich <jbeulich@suse.com>


* [PATCH 1/2] rename XENMEM_add_to_physmap_{range => batch} (v2)
  2013-12-20 13:04 [PATCH 0/2] XENMEM_add_to_physmap_batch Jan Beulich
@ 2013-12-20 13:07 ` Jan Beulich
  2014-01-07 12:24   ` Ian Campbell
  2013-12-20 13:08 ` [PATCH 2/2] compat wrapper for XENMEM_add_to_physmap_batch Jan Beulich
  2014-01-07 15:52 ` [PATCH 0/2] XENMEM_add_to_physmap_batch Keir Fraser
  2 siblings, 1 reply; 7+ messages in thread
From: Jan Beulich @ 2013-12-20 13:07 UTC (permalink / raw)
  To: xen-devel; +Cc: Keir Fraser, Ian Campbell


The use of "range" here wasn't really correct - there are no ranges
involved. As the comment in the public header already correctly said,
all this is about is batching of XENMEM_add_to_physmap calls (with
the addition of having a way to specify a foreign domain for
XENMAPSPACE_gmfn_foreign).
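
For readers coming to the interface cold, here is a minimal, hypothetical
guest-side sketch of the batched sub-op in use (the map_foreign_batch()
helper name and the HYPERVISOR_memory_op()/set_xen_guest_handle() wrappers
are assumptions about the guest's environment, not part of this patch):

    /* Hypothetical sketch: batch-map 'count' frames of domain 'fdom' into
     * our own physmap with a single XENMEM_add_to_physmap_batch call.
     * Per-index failures are reported through the errs[] array. */
    static int map_foreign_batch(domid_t fdom, xen_ulong_t *idxs,
                                 xen_pfn_t *gpfns, int *errs, uint16_t count)
    {
        struct xen_add_to_physmap_batch xatpb = {
            .domid         = DOMID_SELF,               /* map into ourselves */
            .space         = XENMAPSPACE_gmfn_foreign,
            .size          = count,
            .foreign_domid = fdom,                     /* source of the frames */
        };

        set_xen_guest_handle(xatpb.idxs, idxs);   /* GFNs in the foreign domain */
        set_xen_guest_handle(xatpb.gpfns, gpfns); /* where they should appear */
        set_xen_guest_handle(xatpb.errs, errs);   /* per-index error codes */

        return HYPERVISOR_memory_op(XENMEM_add_to_physmap_batch, &xatpb);
    }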

Suggested-by: Ian Campbell <Ian.Campbell@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: fix the compatibility DEFINE_XEN_GUEST_HANDLE()

--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -595,54 +595,54 @@ static int xenmem_add_to_physmap(struct 
     return rc;
 }
 
-static int xenmem_add_to_physmap_range(struct domain *d,
-                                       struct xen_add_to_physmap_range *xatpr,
+static int xenmem_add_to_physmap_batch(struct domain *d,
+                                       struct xen_add_to_physmap_batch *xatpb,
                                        unsigned int start)
 {
     unsigned int done = 0;
     int rc;
 
-    if ( xatpr->size < start )
+    if ( xatpb->size < start )
         return -EILSEQ;
 
-    guest_handle_add_offset(xatpr->idxs, start);
-    guest_handle_add_offset(xatpr->gpfns, start);
-    guest_handle_add_offset(xatpr->errs, start);
-    xatpr->size -= start;
+    guest_handle_add_offset(xatpb->idxs, start);
+    guest_handle_add_offset(xatpb->gpfns, start);
+    guest_handle_add_offset(xatpb->errs, start);
+    xatpb->size -= start;
 
-    while ( xatpr->size > done )
+    while ( xatpb->size > done )
     {
         xen_ulong_t idx;
         xen_pfn_t gpfn;
 
-        if ( unlikely(__copy_from_guest_offset(&idx, xatpr->idxs, 0, 1)) )
+        if ( unlikely(__copy_from_guest_offset(&idx, xatpb->idxs, 0, 1)) )
         {
             rc = -EFAULT;
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gpfn, xatpr->gpfns, 0, 1)) )
+        if ( unlikely(__copy_from_guest_offset(&gpfn, xatpb->gpfns, 0, 1)) )
         {
             rc = -EFAULT;
             goto out;
         }
 
-        rc = xenmem_add_to_physmap_one(d, xatpr->space,
-                                       xatpr->foreign_domid,
+        rc = xenmem_add_to_physmap_one(d, xatpb->space,
+                                       xatpb->foreign_domid,
                                        idx, gpfn);
 
-        if ( unlikely(__copy_to_guest_offset(xatpr->errs, 0, &rc, 1)) )
+        if ( unlikely(__copy_to_guest_offset(xatpb->errs, 0, &rc, 1)) )
         {
             rc = -EFAULT;
             goto out;
         }
 
-        guest_handle_add_offset(xatpr->idxs, 1);
-        guest_handle_add_offset(xatpr->gpfns, 1);
-        guest_handle_add_offset(xatpr->errs, 1);
+        guest_handle_add_offset(xatpb->idxs, 1);
+        guest_handle_add_offset(xatpb->gpfns, 1);
+        guest_handle_add_offset(xatpb->errs, 1);
 
         /* Check for continuation if it's not the last iteration. */
-        if ( xatpr->size > ++done && hypercall_preempt_check() )
+        if ( xatpb->size > ++done && hypercall_preempt_check() )
         {
             rc = start + done;
             goto out;
@@ -797,7 +797,7 @@ long do_memory_op(unsigned long cmd, XEN
         if ( copy_from_guest(&xatp, arg, 1) )
             return -EFAULT;
 
-        /* Foreign mapping is only possible via add_to_physmap_range. */
+        /* Foreign mapping is only possible via add_to_physmap_batch. */
         if ( xatp.space == XENMAPSPACE_gmfn_foreign )
             return -ENOSYS;
 
@@ -824,29 +824,29 @@ long do_memory_op(unsigned long cmd, XEN
         return rc;
     }
 
-    case XENMEM_add_to_physmap_range:
+    case XENMEM_add_to_physmap_batch:
     {
-        struct xen_add_to_physmap_range xatpr;
+        struct xen_add_to_physmap_batch xatpb;
         struct domain *d;
 
-        BUILD_BUG_ON((typeof(xatpr.size))-1 >
+        BUILD_BUG_ON((typeof(xatpb.size))-1 >
                      (UINT_MAX >> MEMOP_EXTENT_SHIFT));
 
         /* Check for malicious or buggy input. */
-        if ( start_extent != (typeof(xatpr.size))start_extent )
+        if ( start_extent != (typeof(xatpb.size))start_extent )
             return -EDOM;
 
-        if ( copy_from_guest(&xatpr, arg, 1) ||
-             !guest_handle_okay(xatpr.idxs, xatpr.size) ||
-             !guest_handle_okay(xatpr.gpfns, xatpr.size) ||
-             !guest_handle_okay(xatpr.errs, xatpr.size) )
+        if ( copy_from_guest(&xatpb, arg, 1) ||
+             !guest_handle_okay(xatpb.idxs, xatpb.size) ||
+             !guest_handle_okay(xatpb.gpfns, xatpb.size) ||
+             !guest_handle_okay(xatpb.errs, xatpb.size) )
             return -EFAULT;
 
         /* This mapspace is unsupported for this hypercall. */
-        if ( xatpr.space == XENMAPSPACE_gmfn_range )
+        if ( xatpb.space == XENMAPSPACE_gmfn_range )
             return -EOPNOTSUPP;
 
-        d = rcu_lock_domain_by_any_id(xatpr.domid);
+        d = rcu_lock_domain_by_any_id(xatpb.domid);
         if ( d == NULL )
             return -ESRCH;
 
@@ -857,7 +857,7 @@ long do_memory_op(unsigned long cmd, XEN
             return rc;
         }
 
-        rc = xenmem_add_to_physmap_range(d, &xatpr, start_extent);
+        rc = xenmem_add_to_physmap_batch(d, &xatpb, start_extent);
 
         rcu_unlock_domain(d);
 
--- a/xen/include/public/arch-arm.h
+++ b/xen/include/public/arch-arm.h
@@ -79,7 +79,7 @@
  *
  *   In addition the following arch specific sub-ops:
  *    * XENMEM_add_to_physmap
- *    * XENMEM_add_to_physmap_range
+ *    * XENMEM_add_to_physmap_batch
  *
  *  HYPERVISOR_domctl
  *   All generic sub-operations, with the exception of:
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -207,8 +207,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_machphys_map
 #define XENMAPSPACE_gmfn         2 /* GMFN */
 #define XENMAPSPACE_gmfn_range   3 /* GMFN range, XENMEM_add_to_physmap only. */
 #define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
-                                    * XENMEM_add_to_physmap_range only.
-                                    */
+                                    * XENMEM_add_to_physmap_batch only. */
 /* ` } */
 
 /*
@@ -238,8 +237,8 @@ typedef struct xen_add_to_physmap xen_ad
 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
 
 /* A batched version of add_to_physmap. */
-#define XENMEM_add_to_physmap_range 23
-struct xen_add_to_physmap_range {
+#define XENMEM_add_to_physmap_batch 23
+struct xen_add_to_physmap_batch {
     /* IN */
     /* Which domain to change the mapping for. */
     domid_t domid;
@@ -260,8 +259,15 @@ struct xen_add_to_physmap_range {
     /* Per index error code. */
     XEN_GUEST_HANDLE(int) errs;
 };
-typedef struct xen_add_to_physmap_range xen_add_to_physmap_range_t;
-DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
+#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
+#define xen_add_to_physmap_range xen_add_to_physmap_batch
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
+#endif
 
 /*
  * Unmaps the page appearing at a particular GPFN from the specified guest's




* [PATCH 2/2] compat wrapper for XENMEM_add_to_physmap_batch
  2013-12-20 13:04 [PATCH 0/2] XENMEM_add_to_physmap_batch Jan Beulich
  2013-12-20 13:07 ` [PATCH 1/2] rename XENMEM_add_to_physmap_{range => batch} (v2) Jan Beulich
@ 2013-12-20 13:08 ` Jan Beulich
  2014-01-07 15:52 ` [PATCH 0/2] XENMEM_add_to_physmap_batch Keir Fraser
  2 siblings, 0 replies; 7+ messages in thread
From: Jan Beulich @ 2013-12-20 13:08 UTC (permalink / raw)
  To: xen-devel; +Cc: Keir Fraser, Ian Campbell


Signed-off-by: Jan Beulich <jbeulich@suse.com>
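
For context on what the wrapper below has to do: a 32-bit (compat) guest
supplies the structure with 32-bit handles and 32-bit array elements, so the
entries are widened into the per-vCPU translation area before the native
handler can consume them. Roughly, and only as an illustration - the real
compat structure is generated from the public header:

    /* Illustration only: the compat layout mirrors the public structure,
     * but handles and array element types are 32 bits wide.  This is why
     * compat_memory_op() copies idxs/gpfns into native-width arrays placed
     * after the translated structure in the translation area. */
    struct compat_add_to_physmap_batch {
        domid_t domid;
        uint16_t space;
        uint16_t size;
        domid_t foreign_domid;
        COMPAT_HANDLE(compat_ulong_t) idxs;   /* 32-bit pointer, 32-bit elements */
        COMPAT_HANDLE(compat_pfn_t) gpfns;    /* 32-bit pointer, 32-bit elements */
        COMPAT_HANDLE(int) errs;              /* same width; only the handle
                                               * itself needs translating */
    };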

--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -27,12 +27,14 @@ int compat_memory_op(unsigned int cmd, X
             struct xen_memory_reservation *rsrv;
             struct xen_memory_exchange *xchg;
             struct xen_add_to_physmap *atp;
+            struct xen_add_to_physmap_batch *atpb;
             struct xen_remove_from_physmap *xrfp;
         } nat;
         union {
             struct compat_memory_reservation rsrv;
             struct compat_memory_exchange xchg;
             struct compat_add_to_physmap atp;
+            struct compat_add_to_physmap_batch atpb;
         } cmp;
 
         set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
@@ -200,6 +202,60 @@ int compat_memory_op(unsigned int cmd, X
 
             break;
 
+        case XENMEM_add_to_physmap_batch:
+        {
+            unsigned int limit = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.atpb))
+                                 / (sizeof(nat.atpb->idxs.p) + sizeof(nat.atpb->gpfns.p));
+            xen_ulong_t *idxs = (void *)(nat.atpb + 1);
+            xen_pfn_t *gpfns = (void *)(idxs + limit);
+
+            if ( copy_from_guest(&cmp.atpb, compat, 1) ||
+                 !compat_handle_okay(cmp.atpb.idxs, cmp.atpb.size) ||
+                 !compat_handle_okay(cmp.atpb.gpfns, cmp.atpb.size) ||
+                 !compat_handle_okay(cmp.atpb.errs, cmp.atpb.size) )
+                return -EFAULT;
+
+            end_extent = start_extent + limit;
+            if ( end_extent > cmp.atpb.size )
+                end_extent = cmp.atpb.size;
+
+            idxs -= start_extent;
+            gpfns -= start_extent;
+
+            for ( i = start_extent; i < end_extent; ++i )
+            {
+                compat_ulong_t idx;
+                compat_pfn_t gpfn;
+
+                if ( __copy_from_compat_offset(&idx, cmp.atpb.idxs, i, 1) ||
+                     __copy_from_compat_offset(&gpfn, cmp.atpb.gpfns, i, 1) )
+                    return -EFAULT;
+                idxs[i] = idx;
+                gpfns[i] = gpfn;
+            }
+
+#define XLAT_add_to_physmap_batch_HNDL_idxs(_d_, _s_) \
+            set_xen_guest_handle((_d_)->idxs, idxs)
+#define XLAT_add_to_physmap_batch_HNDL_gpfns(_d_, _s_) \
+            set_xen_guest_handle((_d_)->gpfns, gpfns)
+#define XLAT_add_to_physmap_batch_HNDL_errs(_d_, _s_) \
+            guest_from_compat_handle((_d_)->errs, (_s_)->errs)
+
+            XLAT_add_to_physmap_batch(nat.atpb, &cmp.atpb);
+
+#undef XLAT_add_to_physmap_batch_HNDL_errs
+#undef XLAT_add_to_physmap_batch_HNDL_gpfns
+#undef XLAT_add_to_physmap_batch_HNDL_idxs
+
+            if ( end_extent < cmp.atpb.size )
+            {
+                nat.atpb->size = end_extent;
+                ++split;
+            }
+
+            break;
+        }
+
         case XENMEM_remove_from_physmap:
         {
             struct compat_remove_from_physmap cmp;
@@ -321,6 +377,10 @@ int compat_memory_op(unsigned int cmd, X
             break;
         }
 
+        case XENMEM_add_to_physmap_batch:
+            start_extent = end_extent;
+            break;
+
         case XENMEM_maximum_ram_page:
         case XENMEM_current_reservation:
         case XENMEM_maximum_reservation:
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -55,6 +55,7 @@
 !	kexec_image			kexec.h
 !	kexec_range			kexec.h
 !	add_to_physmap			memory.h
+!	add_to_physmap_batch		memory.h
 !	foreign_memory_map		memory.h
 !	memory_exchange			memory.h
 !	memory_map			memory.h





* Re: [PATCH 1/2] rename XENMEM_add_to_physmap_{range => batch} (v2)
  2013-12-20 13:07 ` [PATCH 1/2] rename XENMEM_add_to_physmap_{range => batch} (v2) Jan Beulich
@ 2014-01-07 12:24   ` Ian Campbell
  2014-01-07 12:31     ` Jan Beulich
  0 siblings, 1 reply; 7+ messages in thread
From: Ian Campbell @ 2014-01-07 12:24 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Keir Fraser

On Fri, 2013-12-20 at 13:07 +0000, Jan Beulich wrote:
> The use of "range" here wasn't really correct - there are no ranges
> involved. As the comment in the public header already correctly said,
> all this is about is batching of XENMEM_add_to_physmap calls (with
> the addition of having a way to specify a foreign domain for
> XENMAPSPACE_gmfn_foreign).
> 
> Suggested-by: Ian Campbell <Ian.Campbell@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Acked-by: Ian Campbell <ian.campbell@citrix.com>

Were you targeting this one at 4.4?

> [...]


* Re: [PATCH 1/2] rename XENMEM_add_to_physmap_{range => batch} (v2)
  2014-01-07 12:24   ` Ian Campbell
@ 2014-01-07 12:31     ` Jan Beulich
  2014-01-07 13:05       ` Ian Campbell
  0 siblings, 1 reply; 7+ messages in thread
From: Jan Beulich @ 2014-01-07 12:31 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Keir Fraser

>>> On 07.01.14 at 13:24, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> On Fri, 2013-12-20 at 13:07 +0000, Jan Beulich wrote:
>> The use of "range" here wasn't really correct - there are no ranges
>> involved. As the comment in the public header already correctly said,
>> all this is about is batching of XENMEM_add_to_physmap calls (with
>> the addition of having a way to specify a foreign domain for
>> XENMAPSPACE_gmfn_foreign).
>> 
>> Suggested-by: Ian Campbell <Ian.Campbell@citrix.com>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> Acked-by: Ian Campbell <ian.campbell@citrix.com>
> 
> Were you targeting this one at 4.4?

Yes, so that the old form of it doesn't go into widespread use.
Also implied by ...

>> +#if __XEN_INTERFACE_VERSION__ < 0x00040400
>> +#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
>> +#define xen_add_to_physmap_range xen_add_to_physmap_batch
>> +typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
>> +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
>> +#endif

... the conditional here.

Jan
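
Concretely, what that conditional preserves - a hypothetical consumer built
against the pre-4.4 interface keeps compiling unchanged, since the old names
are plain aliases of the new ones (the HYPERVISOR_memory_op() wrapper and the
header paths are assumptions about the guest build environment):

    /* Hypothetical pre-4.4 consumer.  __XEN_INTERFACE_VERSION__ is normally
     * set by the build system; anything below 0x00040400 keeps the old
     * identifiers available as aliases of the *_batch ones. */
    #define __XEN_INTERFACE_VERSION__ 0x00040300
    #include <xen/xen.h>
    #include <xen/memory.h>

    long old_style_map(struct xen_add_to_physmap_range *xatpr)
    {
        /* Expands to XENMEM_add_to_physmap_batch / xen_add_to_physmap_batch. */
        return HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, xatpr);
    }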


* Re: [PATCH 1/2] rename XENMEM_add_to_physmap_{range => batch} (v2)
  2014-01-07 12:31     ` Jan Beulich
@ 2014-01-07 13:05       ` Ian Campbell
  0 siblings, 0 replies; 7+ messages in thread
From: Ian Campbell @ 2014-01-07 13:05 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Keir Fraser

On Tue, 2014-01-07 at 12:31 +0000, Jan Beulich wrote:
> >>> On 07.01.14 at 13:24, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > On Fri, 2013-12-20 at 13:07 +0000, Jan Beulich wrote:
> >> The use of "range" here wasn't really correct - there are no ranges
> >> involved. As the comment in the public header already correctly said,
> >> all this is about is batching of XENMEM_add_to_physmap calls (with
> >> the addition of having a way to specify a foreign domain for
> >> XENMAPSPACE_gmfn_foreign).
> >> 
> >> Suggested-by: Ian Campbell <Ian.Campbell@citrix.com>
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> Acked-by: Ian Campbell <ian.campbell@citrix.com>
> > 
> > Were you targeting this one at 4.4?
> 
> Yes, so that the old form of it doesn't go into widespread use.
> Also implied by ...
> 
> >> +#if __XEN_INTERFACE_VERSION__ < 0x00040400
> >> +#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
> >> +#define xen_add_to_physmap_range xen_add_to_physmap_batch
> >> +typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
> >> +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
> >> +#endif
> 
> ... the conditional here.

Yes, that's what I inferred.

So if you need a release Ack then I think you can have one from me in
George's absence. The risk is basically build breakage, which should be
trivially caught, and the benefit is not exposing a broken API in the new
release.

Ian.


* Re: [PATCH 0/2] XENMEM_add_to_physmap_batch
  2013-12-20 13:04 [PATCH 0/2] XENMEM_add_to_physmap_batch Jan Beulich
  2013-12-20 13:07 ` [PATCH 1/2] rename XENMEM_add_to_physmap_{range => batch} (v2) Jan Beulich
  2013-12-20 13:08 ` [PATCH 2/2] compat wrapper for XENMEM_add_to_physmap_batch Jan Beulich
@ 2014-01-07 15:52 ` Keir Fraser
  2 siblings, 0 replies; 7+ messages in thread
From: Keir Fraser @ 2014-01-07 15:52 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Ian Campbell

On 20/12/2013 13:04, "Jan Beulich" <JBeulich@suse.com> wrote:

> 1: rename XENMEM_add_to_physmap_{range => batch} (v2)
> 2: compat wrapper for XENMEM_add_to_physmap_batch
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Keir Fraser <keir@xen.org>
