All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3] dm_op: Add xendevicemodel_modified_memory_bulk.
@ 2017-03-28 13:18 Jennifer Herbert
  2017-03-29 10:38 ` Jan Beulich
  0 siblings, 1 reply; 5+ messages in thread
From: Jennifer Herbert @ 2017-03-28 13:18 UTC (permalink / raw)
  To: Xen-devel
  Cc: Wei Liu, Jennifer Herbert, Ian Jackson, Paul Durrant,
	Jan Beulich, Andrew Cooper

From: Jennifer Herbert <Jennifer.Herbert@citrix.com>

This new lib devicemodel call allows multiple extents of pages to be
marked as modified in a single call.  This is something needed for a
use case I'm working on.

The xen side of the modified_memory call has been modified to accept
an array of extents.  The devicemodel library either provides an array
of length 1, to support the original library function, or a second
function allows an array to be provided.

Signed-off-by: Jennifer Herbert <Jennifer.Herbert@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Paul Durrant <paul.durrant@citrix.com>
---
Changes as discussed on thread.

 tools/libs/devicemodel/core.c                   |   30 ++++--
 tools/libs/devicemodel/include/xendevicemodel.h |   19 +++-
 xen/arch/x86/hvm/dm.c                           |  115 +++++++++++++++--------
 xen/include/public/hvm/dm_op.h                  |   22 ++++-
 4 files changed, 133 insertions(+), 53 deletions(-)

diff --git a/tools/libs/devicemodel/core.c b/tools/libs/devicemodel/core.c
index a85cb49..f9e37a5 100644
--- a/tools/libs/devicemodel/core.c
+++ b/tools/libs/devicemodel/core.c
@@ -434,22 +434,36 @@ int xendevicemodel_track_dirty_vram(
                              dirty_bitmap, (size_t)(nr + 7) / 8);
 }
 
-int xendevicemodel_modified_memory(
-    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
-    uint32_t nr)
+int xendevicemodel_modified_memory_bulk(
+    xendevicemodel_handle *dmod, domid_t domid,
+    struct xen_dm_op_modified_memory_extent *extents, uint32_t nr)
 {
     struct xen_dm_op op;
-    struct xen_dm_op_modified_memory *data;
+    struct xen_dm_op_modified_memory *header;
+    size_t extents_size = nr * sizeof(struct xen_dm_op_modified_memory_extent);
 
     memset(&op, 0, sizeof(op));
 
     op.op = XEN_DMOP_modified_memory;
-    data = &op.u.modified_memory;
+    header = &op.u.modified_memory;
 
-    data->first_pfn = first_pfn;
-    data->nr = nr;
+    header->nr_extents = nr;
+    header->pfns_processed = 0;
 
-    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
+    return xendevicemodel_op(dmod, domid, 2, &op, sizeof(op),
+                             extents, extents_size);
+}
+
+int xendevicemodel_modified_memory(
+    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
+    uint32_t nr)
+{
+    struct xen_dm_op_modified_memory_extent extent;
+
+    extent.first_pfn = first_pfn;
+    extent.nr = nr;
+
+    return xendevicemodel_modified_memory_bulk(dmod, domid, &extent, 1);
 }
 
 int xendevicemodel_set_mem_type(
diff --git a/tools/libs/devicemodel/include/xendevicemodel.h b/tools/libs/devicemodel/include/xendevicemodel.h
index b3f600e..9c62bf9 100644
--- a/tools/libs/devicemodel/include/xendevicemodel.h
+++ b/tools/libs/devicemodel/include/xendevicemodel.h
@@ -236,8 +236,8 @@ int xendevicemodel_track_dirty_vram(
     uint32_t nr, unsigned long *dirty_bitmap);
 
 /**
- * This function notifies the hypervisor that a set of domain pages
- * have been modified.
+ * This function notifies the hypervisor that a set of contiguous
+ * domain pages have been modified.
  *
  * @parm dmod a handle to an open devicemodel interface.
  * @parm domid the domain id to be serviced
@@ -250,6 +250,21 @@ int xendevicemodel_modified_memory(
     uint32_t nr);
 
 /**
+ * This function notifies the hypervisor that a set of discontiguous
+ * domain pages have been modified.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm extents an array of extent structs, which each hold
+                 a start_pfn and nr (number of pfns).
+ * @parm nr the number of extents in the array
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_modified_memory_bulk(
+    xendevicemodel_handle *dmod, domid_t domid,
+    struct xen_dm_op_modified_memory_extent extents[], uint32_t nr);
+
+/**
  * This function notifies the hypervisor that a set of domain pages
  * are to be treated in a specific way. (See the definition of
  * hvmmem_type_t).
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 2122c45..b5031ce 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -119,56 +119,96 @@ static int set_isa_irq_level(struct domain *d, uint8_t isa_irq,
 }
 
 static int modified_memory(struct domain *d,
-                           struct xen_dm_op_modified_memory *data)
+                           struct xen_dm_op_modified_memory *header,
+                           xen_dm_op_buf_t* buf)
 {
-    xen_pfn_t last_pfn = data->first_pfn + data->nr - 1;
-    unsigned int iter = 0;
-    int rc = 0;
-
-    if ( (data->first_pfn > last_pfn) ||
-         (last_pfn > domain_get_maximum_gpfn(d)) )
-        return -EINVAL;
+    /* Process maximum of 256 pfns before checking for continuation */
+    const unsigned int cont_check_interval = 0x100;
+    unsigned int rem_extents =  header->nr_extents;
+    unsigned int batch_rem_pfns = cont_check_interval;
 
     if ( !paging_mode_log_dirty(d) )
         return 0;
 
-    while ( iter < data->nr )
+    if ( (buf->size / sizeof(struct xen_dm_op_modified_memory_extent)) <
+         rem_extents )
+        return -EINVAL;
+
+    while ( rem_extents > 0)
     {
-        unsigned long pfn = data->first_pfn + iter;
-        struct page_info *page;
+        struct xen_dm_op_modified_memory_extent extent;
+        unsigned int batch_nr;
+        xen_pfn_t pfn;
+        xen_pfn_t end_pfn;
+        unsigned int *pfns_already_done;
 
-        page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
-        if ( page )
+        if ( copy_from_guest_offset(&extent, buf->h, rem_extents - 1, 1) )
+            return -EFAULT;
+        /*
+         * In the case of continuation, header->opaque contains the
+         * number of pfns already processed for this extent
+         */
+        pfns_already_done = &header->opaque;
+
+        if (*pfns_already_done >= extent.nr || extent.pad)
+            return -EINVAL;
+
+        pfn = extent.first_pfn + *pfns_already_done;
+        batch_nr = extent.nr - *pfns_already_done;
+
+        if ( batch_nr > batch_rem_pfns )
         {
-            mfn_t gmfn = _mfn(page_to_mfn(page));
-
-            paging_mark_dirty(d, gmfn);
-            /*
-             * These are most probably not page tables any more
-             * don't take a long time and don't die either.
-             */
-            sh_remove_shadows(d, gmfn, 1, 0);
-            put_page(page);
+           batch_nr = batch_rem_pfns;
+           *pfns_already_done += batch_nr;
+        }
+        else
+        {
+            rem_extents--;
+            *pfns_already_done = 0;
         }
 
-        iter++;
+        batch_rem_pfns -= batch_nr;
+        end_pfn = pfn + batch_nr;
+
+        if ( (pfn >= end_pfn) ||
+             (end_pfn > domain_get_maximum_gpfn(d)) )
+            return -EINVAL;
+
+        header->nr_extents = rem_extents;
+
+        while ( pfn < end_pfn )
+        {
+            struct page_info *page;
+            page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
+
+            if ( page )
+            {
+                mfn_t gmfn = _mfn(page_to_mfn(page));
+
+                paging_mark_dirty(d, gmfn);
+                /*
+                 * These are most probably not page tables any more
+                 * don't take a long time and don't die either.
+                 */
+                sh_remove_shadows(d, gmfn, 1, 0);
+                put_page(page);
+            }
+            pfn++;
+        }
 
         /*
-         * Check for continuation every 256th iteration and if the
-         * iteration is not the last.
+         * Check for continuation every 256th pfn and if the
+         * pfn is not the last.
          */
-        if ( (iter < data->nr) && ((iter & 0xff) == 0) &&
-             hypercall_preempt_check() )
+        if ( (batch_rem_pfns == 0) && (rem_extents > 0) )
         {
-            data->first_pfn += iter;
-            data->nr -= iter;
+            if ( hypercall_preempt_check() )
+                return -ERESTART;
 
-            rc = -ERESTART;
-            break;
+            batch_rem_pfns = cont_check_interval;
         }
     }
-
-    return rc;
+    return 0;
 }
 
 static bool allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
@@ -441,13 +481,8 @@ static int dm_op(domid_t domid,
         struct xen_dm_op_modified_memory *data =
             &op.u.modified_memory;
 
-        const_op = false;
-
-        rc = -EINVAL;
-        if ( data->pad )
-            break;
-
-        rc = modified_memory(d, data);
+        rc = modified_memory(d, data, &bufs[1]);
+        const_op = (rc != 0);
         break;
     }
 
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index f54cece..34c831a 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -237,13 +237,29 @@ struct xen_dm_op_set_pci_link_route {
  * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
  *                           an emulator.
  *
- * NOTE: In the event of a continuation, the @first_pfn is set to the
- *       value of the pfn of the remaining set of pages and @nr reduced
- *       to the size of the remaining set.
+ * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
+ * @nr_extents entries.
+ * @opaque must be initially set to 0.
+ *
+ * On error, @nr_extents will contain the index+1 of the extent that
+ * had the error.  It is not defined if or which pages may have been
+ * marked as dirty, in this event.
+ *
+ * @opaque must be initially set to 0.
  */
 #define XEN_DMOP_modified_memory 11
 
 struct xen_dm_op_modified_memory {
+    /*
+     * IN - Number of extents to be processed
+     * OUT -returns n+1 for failing extent
+     */
+    uint32_t nr_extents;
+    /* IN/OUT - Must be set to 0 */
+    uint32_t opaque;
+};
+
+struct xen_dm_op_modified_memory_extent {
     /* IN - number of contiguous pages modified */
     uint32_t nr;
     uint32_t pad;
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] dm_op: Add xendevicemodel_modified_memory_bulk.
  2017-03-28 13:18 [PATCH v3] dm_op: Add xendevicemodel_modified_memory_bulk Jennifer Herbert
@ 2017-03-29 10:38 ` Jan Beulich
  2017-03-29 14:35   ` Jennifer Herbert
  0 siblings, 1 reply; 5+ messages in thread
From: Jan Beulich @ 2017-03-29 10:38 UTC (permalink / raw)
  To: Jennifer Herbert
  Cc: Andrew Cooper, Paul Durrant, Wei Liu, Ian Jackson, Xen-devel

>>> On 28.03.17 at 15:18, <jennifer.herbert@citrix.com> wrote:
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -119,56 +119,96 @@ static int set_isa_irq_level(struct domain *d, uint8_t isa_irq,
>  }
>  
>  static int modified_memory(struct domain *d,
> -                           struct xen_dm_op_modified_memory *data)
> +                           struct xen_dm_op_modified_memory *header,
> +                           xen_dm_op_buf_t* buf)
>  {
> -    xen_pfn_t last_pfn = data->first_pfn + data->nr - 1;
> -    unsigned int iter = 0;
> -    int rc = 0;
> -
> -    if ( (data->first_pfn > last_pfn) ||
> -         (last_pfn > domain_get_maximum_gpfn(d)) )
> -        return -EINVAL;
> +    /* Process maximum of 256 pfns before checking for continuation */
> +    const unsigned int cont_check_interval = 0x100;
> +    unsigned int rem_extents =  header->nr_extents;
> +    unsigned int batch_rem_pfns = cont_check_interval;
>  
>      if ( !paging_mode_log_dirty(d) )
>          return 0;
>  
> -    while ( iter < data->nr )
> +    if ( (buf->size / sizeof(struct xen_dm_op_modified_memory_extent)) <
> +         rem_extents )
> +        return -EINVAL;

I'm sorry for noticing this only now, but I think this together with the
open coded copy below calls for a copy_buf_from_guest_offset()
helper.

> +    while ( rem_extents > 0)
>      {
> -        unsigned long pfn = data->first_pfn + iter;
> -        struct page_info *page;
> +        struct xen_dm_op_modified_memory_extent extent;
> +        unsigned int batch_nr;
> +        xen_pfn_t pfn;
> +        xen_pfn_t end_pfn;
> +        unsigned int *pfns_already_done;

Perhaps drop "already"? Personally I also wouldn't mind you
dropping the variable altogether and using header->opaque
directly, but I guess that's too "opaque" for your taste?

> -        page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
> -        if ( page )
> +        if ( copy_from_guest_offset(&extent, buf->h, rem_extents - 1, 1) )
> +            return -EFAULT;
> +        /*
> +         * In the case of continuation, header->opaque contains the
> +         * number of pfns already processed for this extent
> +         */
> +        pfns_already_done = &header->opaque;

If you want to keep the variable, this should be moved out of the
loop (as being loop invariant).

> +        if (*pfns_already_done >= extent.nr || extent.pad)
> +            return -EINVAL;
> +
> +        pfn = extent.first_pfn + *pfns_already_done;
> +        batch_nr = extent.nr - *pfns_already_done;
> +
> +        if ( batch_nr > batch_rem_pfns )
>          {
> -            mfn_t gmfn = _mfn(page_to_mfn(page));
> -
> -            paging_mark_dirty(d, gmfn);
> -            /*
> -             * These are most probably not page tables any more
> -             * don't take a long time and don't die either.
> -             */
> -            sh_remove_shadows(d, gmfn, 1, 0);
> -            put_page(page);
> +           batch_nr = batch_rem_pfns;
> +           *pfns_already_done += batch_nr;
> +        }
> +        else
> +        {
> +            rem_extents--;
> +            *pfns_already_done = 0;
>          }
>  
> -        iter++;
> +        batch_rem_pfns -= batch_nr;
> +        end_pfn = pfn + batch_nr;
> +
> +        if ( (pfn >= end_pfn) ||
> +             (end_pfn > domain_get_maximum_gpfn(d)) )
> +            return -EINVAL;

I think these checks should be done of the extent as a whole, not
just the portion you mean to process in this batch.

> +        header->nr_extents = rem_extents;
> +
> +        while ( pfn < end_pfn )
> +        {
> +            struct page_info *page;
> +            page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);

Either make this the initializer, or have a blank line between
declaration and statements.

> +            if ( page )
> +            {
> +                mfn_t gmfn = _mfn(page_to_mfn(page));
> +
> +                paging_mark_dirty(d, gmfn);
> +                /*
> +                 * These are most probably not page tables any more
> +                 * don't take a long time and don't die either.
> +                 */
> +                sh_remove_shadows(d, gmfn, 1, 0);
> +                put_page(page);
> +            }
> +            pfn++;
> +        }

Would you mind bringing this in for() form?

>          /*
> -         * Check for continuation every 256th iteration and if the
> -         * iteration is not the last.
> +         * Check for continuation every 256th pfn and if the
> +         * pfn is not the last.
>           */
> -        if ( (iter < data->nr) && ((iter & 0xff) == 0) &&
> -             hypercall_preempt_check() )
> +        if ( (batch_rem_pfns == 0) && (rem_extents > 0) )

Looking at this again I think it would be best to drop the mention
of 256 here, instead talking about a fully handled batch or some
such.

> @@ -441,13 +481,8 @@ static int dm_op(domid_t domid,
>          struct xen_dm_op_modified_memory *data =
>              &op.u.modified_memory;
>  
> -        const_op = false;
> -
> -        rc = -EINVAL;
> -        if ( data->pad )
> -            break;
> -
> -        rc = modified_memory(d, data);
> +        rc = modified_memory(d, data, &bufs[1]);
> +        const_op = (rc != 0);

Isn't this wrong now, i.e. don't you need to copy back the
header now in all cases?

> --- a/xen/include/public/hvm/dm_op.h
> +++ b/xen/include/public/hvm/dm_op.h
> @@ -237,13 +237,29 @@ struct xen_dm_op_set_pci_link_route {
>   * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
>   *                           an emulator.
>   *
> - * NOTE: In the event of a continuation, the @first_pfn is set to the
> - *       value of the pfn of the remaining set of pages and @nr reduced
> - *       to the size of the remaining set.
> + * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
> + * @nr_extents entries.
> + * @opaque must be initially set to 0.
> + *
> + * On error, @nr_extents will contain the index+1 of the extent that
> + * had the error.  It is not defined if or which pages may have been
> + * marked as dirty, in this event.
> + *
> + * @opaque must be initially set to 0.

Please say so just once.

>  struct xen_dm_op_modified_memory {
> +    /*
> +     * IN - Number of extents to be processed
> +     * OUT -returns n+1 for failing extent
> +     */
> +    uint32_t nr_extents;

This n+1 thing is somewhat odd, but I guess it can't be prevented
without going through extra hoops.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] dm_op: Add xendevicemodel_modified_memory_bulk.
  2017-03-29 10:38 ` Jan Beulich
@ 2017-03-29 14:35   ` Jennifer Herbert
  2017-03-29 14:54     ` Jan Beulich
  0 siblings, 1 reply; 5+ messages in thread
From: Jennifer Herbert @ 2017-03-29 14:35 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Andrew Cooper, Paul Durrant, Wei Liu, Ian Jackson, Xen-devel



On 29/03/17 11:38, Jan Beulich wrote:
>>>> On 28.03.17 at 15:18, <jennifer.herbert@citrix.com> wrote:
>> Perhaps drop "already"? Personally I also wouldn't mind you dropping 
>> the variable altogether and using header->opaque directly, but I 
>> guess that's too "opaque" for your taste? 

It would make the code too opaque for my taste, so I'll just drop the 
'already' bit.

>> @@ -441,13 +481,8 @@ static int dm_op(domid_t domid,
>>           struct xen_dm_op_modified_memory *data =
>>               &op.u.modified_memory;
>>   
>> -        const_op = false;
>> -
>> -        rc = -EINVAL;
>> -        if ( data->pad )
>> -            break;
>> -
>> -        rc = modified_memory(d, data);
>> +        rc = modified_memory(d, data, &bufs[1]);
>> +        const_op = (rc != 0);
> Isn't this wrong now, i.e. don't you need to copy back the
> header now in all cases?

I only define what I'll set nr_extents to in case of error, and of 
course opaque
is opaque.
If I were to write back, I'd be writing back 0 to nr_extents - which 
wouldn’t really
mean anything since I’m not defining the order for which I’m processing 
them.
In fact the only thing it tells you is that extent 0 is the last one 
processed, which
I don't think it's all that useful.

Ideally I'd prefer to leave it untouched on success, but the original 
value is lost on
continuation, this would be more involved.

By only writing back on error, I hoped to improve efficiency for the 
common case,
(especially for existing use with calls of one extent).  (I know it's 
only a small difference)
If you want me to write back - what do you want me to write back for 
success?


Cheers,

-jenny


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] dm_op: Add xendevicemodel_modified_memory_bulk.
  2017-03-29 14:35   ` Jennifer Herbert
@ 2017-03-29 14:54     ` Jan Beulich
  2017-03-29 15:38       ` Jennifer Herbert
  0 siblings, 1 reply; 5+ messages in thread
From: Jan Beulich @ 2017-03-29 14:54 UTC (permalink / raw)
  To: Jennifer Herbert
  Cc: Andrew Cooper, Paul Durrant, Wei Liu, Ian Jackson, Xen-devel

>>> On 29.03.17 at 16:35, <Jennifer.Herbert@citrix.com> wrote:
> On 29/03/17 11:38, Jan Beulich wrote:
>>>>> On 28.03.17 at 15:18, <jennifer.herbert@citrix.com> wrote:
>>> @@ -441,13 +481,8 @@ static int dm_op(domid_t domid,
>>>           struct xen_dm_op_modified_memory *data =
>>>               &op.u.modified_memory;
>>>   
>>> -        const_op = false;
>>> -
>>> -        rc = -EINVAL;
>>> -        if ( data->pad )
>>> -            break;
>>> -
>>> -        rc = modified_memory(d, data);
>>> +        rc = modified_memory(d, data, &bufs[1]);
>>> +        const_op = (rc != 0);
>> Isn't this wrong now, i.e. don't you need to copy back the
>> header now in all cases?
> 
> I only define what I'll set nr_extents to in case of error, and of 
> course opaque
> is opaque.

Well, but you do need the opaque value for the continuation,
don't you? In which case you need to also write back on
-ERESTART. And as you say you need to write back in case
of error. So I'd expect

       const_op = !rc;

> By only writing back on error, I hoped to improve efficiency for the 
> common case,
> (especially for existing use with calls of one extent).  (I know its 
> only a small difference)
> If you want me to write back - what do you want me to write back for 
> success?

Right, avoiding to write something useless is sensible. If anything,
the original value of nr_extents would make sense to be written
back, but that value was long lost by that time.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] dm_op: Add xendevicemodel_modified_memory_bulk.
  2017-03-29 14:54     ` Jan Beulich
@ 2017-03-29 15:38       ` Jennifer Herbert
  0 siblings, 0 replies; 5+ messages in thread
From: Jennifer Herbert @ 2017-03-29 15:38 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Andrew Cooper, Paul Durrant, Wei Liu, Ian Jackson, Xen-devel

On 29/03/17 15:54, Jan Beulich wrote:
>>>>>> On 28.03.17 at 15:18, <jennifer.herbert@citrix.com> wrote:
>>>> @@ -441,13 +481,8 @@ static int dm_op(domid_t domid,
>>>>            struct xen_dm_op_modified_memory *data =
>>>>                &op.u.modified_memory;
>>>>    
>>>> -        const_op = false;
>>>> -
>>>> -        rc = -EINVAL;
>>>> -        if ( data->pad )
>>>> -            break;
>>>> -
>>>> -        rc = modified_memory(d, data);
>>>> +        rc = modified_memory(d, data, &bufs[1]);
>>>> +        const_op = (rc != 0);
>>> Isn't this wrong now, i.e. don't you need to copy back the
>>> header now in all cases?
>> I only define what I'll set nr_extents to in case of error, and of
>> course opaque
>> is opaque.
> Well, but you do need the opaque value for the continuation,
> don't you? In which case you need to also write back on
> -ERESTART. And as you say you need to write back in case
> of error. So I'd expect
>
>         const_op = !rc;
>

Quite right, see you point now - I didn't notice I'd inverted the logic.

-jenny


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2017-03-29 15:38 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-03-28 13:18 [PATCH v3] dm_op: Add xendevicemodel_modified_memory_bulk Jennifer Herbert
2017-03-29 10:38 ` Jan Beulich
2017-03-29 14:35   ` Jennifer Herbert
2017-03-29 14:54     ` Jan Beulich
2017-03-29 15:38       ` Jennifer Herbert

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.