All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies
@ 2021-07-19 14:21 Denis Plotnikov
  2021-07-23  9:59 ` [PING][PATCH " Denis Plotnikov
  2021-08-03 15:05 ` [PATCH " Michael S. Tsirkin
  0 siblings, 2 replies; 7+ messages in thread
From: Denis Plotnikov @ 2021-07-19 14:21 UTC (permalink / raw)
  To: qemu-devel; +Cc: yc-core, mst

On vhost-user-blk migration, qemu normally sends a number of commands
to enable logging if VHOST_USER_PROTOCOL_F_LOG_SHMFD is negotiated.
Qemu sends VHOST_USER_SET_FEATURES to enable buffers logging and
VHOST_USER_SET_VRING_ADDR per each started ring to enable "used ring"
data logging.
The issue is that qemu doesn't wait for a reply from the vhost daemon
for these commands, which may result in races between qemu's expectation
of logging having started and the actual start of logging in the vhost daemon.

The race can appear as follows: on migration setup, qemu enables dirty page
logging by sending VHOST_USER_SET_FEATURES. The command doesn't arrive to a
vhost-user-blk daemon immediately and the daemon needs some time to turn the
logging on internally. If qemu doesn't wait for a reply after sending the
command, qemu may start migrating memory pages to a destination. At this time,
the logging may not be actually turned on in the daemon but some guest pages,
which the daemon is about to write to, may have already been transferred
without logging to the destination. Since the logging wasn't turned on,
those pages won't be transferred again as dirty. So we may end up with
corrupted data on the destination.
The same scenario is applicable for "used ring" data logging, which is
turned on with VHOST_USER_SET_VRING_ADDR command.

To resolve this issue, this patch makes qemu wait for the commands' results
explicitly if VHOST_USER_PROTOCOL_F_REPLY_ACK is negotiated and
logging is enabled.

Signed-off-by: Denis Plotnikov <den-plotnikov@yandex-team.ru>
---
v1 -> v2:
  * send reply only when logging is enabled [mst]

v0 -> v1:
  * send reply for SET_VRING_ADDR, SET_FEATURES only [mst]
  
 hw/virtio/vhost-user.c | 37 ++++++++++++++++++++++++++++++++++---
 1 file changed, 34 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index ee57abe04526..133588b3961e 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -1095,6 +1095,11 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
     return 0;
 }
 
+static bool log_enabled(uint64_t features)
+{
+    return !!(features & (0x1ULL << VHOST_F_LOG_ALL));
+}
+
 static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                      struct vhost_vring_addr *addr)
 {
@@ -1105,10 +1110,21 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
         .hdr.size = sizeof(msg.payload.addr),
     };
 
+    bool reply_supported = virtio_has_feature(dev->protocol_features,
+                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
+
+    if (reply_supported && log_enabled(msg.hdr.flags)) {
+        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
+    }
+
     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
         return -1;
     }
 
+    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
+        return process_message_reply(dev, &msg);
+    }
+
     return 0;
 }
 
@@ -1288,7 +1304,8 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
     return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
 }
 
-static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
+static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
+                              bool need_reply)
 {
     VhostUserMsg msg = {
         .hdr.request = request,
@@ -1297,23 +1314,37 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
         .hdr.size = sizeof(msg.payload.u64),
     };
 
+    if (need_reply) {
+        bool reply_supported = virtio_has_feature(dev->protocol_features,
+                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
+        if (reply_supported) {
+            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
+        }
+    }
+
     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
         return -1;
     }
 
+    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
+        return process_message_reply(dev, &msg);
+    }
+
     return 0;
 }
 
 static int vhost_user_set_features(struct vhost_dev *dev,
                                    uint64_t features)
 {
-    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
+    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
+                              log_enabled(features));
 }
 
 static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                             uint64_t features)
 {
-    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
+    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
+                              false);
 }
 
 static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PING][PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies
  2021-07-19 14:21 [PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies Denis Plotnikov
@ 2021-07-23  9:59 ` Denis Plotnikov
  2021-07-29 10:56   ` [PING][PING][PATCH " Denis Plotnikov
  2021-08-03 15:05 ` [PATCH " Michael S. Tsirkin
  1 sibling, 1 reply; 7+ messages in thread
From: Denis Plotnikov @ 2021-07-23  9:59 UTC (permalink / raw)
  To: qemu-devel; +Cc: yc-core, mst

[-- Attachment #1: Type: text/plain, Size: 5107 bytes --]

ping!

On 19.07.2021 17:21, Denis Plotnikov wrote:
> On vhost-user-blk migration, qemu normally sends a number of commands
> to enable logging if VHOST_USER_PROTOCOL_F_LOG_SHMFD is negotiated.
> Qemu sends VHOST_USER_SET_FEATURES to enable buffers logging and
> VHOST_USER_SET_VRING_ADDR per each started ring to enable "used ring"
> data logging.
> The issue is that qemu doesn't wait for reply from the vhost daemon
> for these commands which may result in races between qemu expectation
> of logging starting and actual login starting in vhost daemon.
>
> The race can appear as follows: on migration setup, qemu enables dirty page
> logging by sending VHOST_USER_SET_FEATURES. The command doesn't arrive to a
> vhost-user-blk daemon immediately and the daemon needs some time to turn the
> logging on internally. If qemu doesn't wait for reply, after sending the
> command, qemu may start migrate memory pages to a destination. At this time,
> the logging may not be actually turned on in the daemon but some guest pages,
> which the daemon is about to write to, may have already been transferred
> without logging to the destination. Since the logging wasn't turned on,
> those pages won't be transferred again as dirty. So we may end up with
> corrupted data on the destination.
> The same scenario is applicable for "used ring" data logging, which is
> turned on with VHOST_USER_SET_VRING_ADDR command.
>
> To resolve this issue, this patch makes qemu wait for the commands result
> explicilty if VHOST_USER_PROTOCOL_F_REPLY_ACK is negotiated and
> logging is enabled.
>
> Signed-off-by: Denis Plotnikov <den-plotnikov@yandex-team.ru>
> ---
> v1 -> v2:
>    * send reply only when logging is enabled [mst]
>
> v0 -> v1:
>    * send reply for SET_VRING_ADDR, SET_FEATURES only [mst]
>    
>   hw/virtio/vhost-user.c | 37 ++++++++++++++++++++++++++++++++++---
>   1 file changed, 34 insertions(+), 3 deletions(-)
>
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index ee57abe04526..133588b3961e 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -1095,6 +1095,11 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
>       return 0;
>   }
>   
> +static bool log_enabled(uint64_t features)
> +{
> +    return !!(features & (0x1ULL << VHOST_F_LOG_ALL));
> +}
> +
>   static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>                                        struct vhost_vring_addr *addr)
>   {
> @@ -1105,10 +1110,21 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>           .hdr.size = sizeof(msg.payload.addr),
>       };
>   
> +    bool reply_supported = virtio_has_feature(dev->protocol_features,
> +                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
> +
> +    if (reply_supported && log_enabled(msg.hdr.flags)) {
> +        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
> +    }
> +
>       if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>           return -1;
>       }
>   
> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
> +        return process_message_reply(dev, &msg);
> +    }
> +
>       return 0;
>   }
>   
> @@ -1288,7 +1304,8 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
>       return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
>   }
>   
> -static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
> +static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
> +                              bool need_reply)
>   {
>       VhostUserMsg msg = {
>           .hdr.request = request,
> @@ -1297,23 +1314,37 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
>           .hdr.size = sizeof(msg.payload.u64),
>       };
>   
> +    if (need_reply) {
> +        bool reply_supported = virtio_has_feature(dev->protocol_features,
> +                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
> +        if (reply_supported) {
> +            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
> +        }
> +    }
> +
>       if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>           return -1;
>       }
>   
> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
> +        return process_message_reply(dev, &msg);
> +    }
> +
>       return 0;
>   }
>   
>   static int vhost_user_set_features(struct vhost_dev *dev,
>                                      uint64_t features)
>   {
> -    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
> +    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
> +                              log_enabled(features));
>   }
>   
>   static int vhost_user_set_protocol_features(struct vhost_dev *dev,
>                                               uint64_t features)
>   {
> -    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
> +    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
> +                              false);
>   }
>   
>   static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)

[-- Attachment #2: Type: text/html, Size: 5375 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PING][PING][PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies
  2021-07-23  9:59 ` [PING][PATCH " Denis Plotnikov
@ 2021-07-29 10:56   ` Denis Plotnikov
  2021-07-29 12:53     ` Philippe Mathieu-Daudé
  0 siblings, 1 reply; 7+ messages in thread
From: Denis Plotnikov @ 2021-07-29 10:56 UTC (permalink / raw)
  To: qemu-devel; +Cc: yc-core, mst

[-- Attachment #1: Type: text/plain, Size: 5283 bytes --]


On 23.07.2021 12:59, Denis Plotnikov wrote:
>
> ping!
>
> On 19.07.2021 17:21, Denis Plotnikov wrote:
>> On vhost-user-blk migration, qemu normally sends a number of commands
>> to enable logging if VHOST_USER_PROTOCOL_F_LOG_SHMFD is negotiated.
>> Qemu sends VHOST_USER_SET_FEATURES to enable buffers logging and
>> VHOST_USER_SET_VRING_ADDR per each started ring to enable "used ring"
>> data logging.
>> The issue is that qemu doesn't wait for reply from the vhost daemon
>> for these commands which may result in races between qemu expectation
>> of logging starting and actual login starting in vhost daemon.
>>
>> The race can appear as follows: on migration setup, qemu enables dirty page
>> logging by sending VHOST_USER_SET_FEATURES. The command doesn't arrive to a
>> vhost-user-blk daemon immediately and the daemon needs some time to turn the
>> logging on internally. If qemu doesn't wait for reply, after sending the
>> command, qemu may start migrate memory pages to a destination. At this time,
>> the logging may not be actually turned on in the daemon but some guest pages,
>> which the daemon is about to write to, may have already been transferred
>> without logging to the destination. Since the logging wasn't turned on,
>> those pages won't be transferred again as dirty. So we may end up with
>> corrupted data on the destination.
>> The same scenario is applicable for "used ring" data logging, which is
>> turned on with VHOST_USER_SET_VRING_ADDR command.
>>
>> To resolve this issue, this patch makes qemu wait for the commands result
>> explicilty if VHOST_USER_PROTOCOL_F_REPLY_ACK is negotiated and
>> logging is enabled.
>>
>> Signed-off-by: Denis Plotnikov<den-plotnikov@yandex-team.ru>
>> ---
>> v1 -> v2:
>>    * send reply only when logging is enabled [mst]
>>
>> v0 -> v1:
>>    * send reply for SET_VRING_ADDR, SET_FEATURES only [mst]
>>    
>>   hw/virtio/vhost-user.c | 37 ++++++++++++++++++++++++++++++++++---
>>   1 file changed, 34 insertions(+), 3 deletions(-)
>>
>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>> index ee57abe04526..133588b3961e 100644
>> --- a/hw/virtio/vhost-user.c
>> +++ b/hw/virtio/vhost-user.c
>> @@ -1095,6 +1095,11 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
>>       return 0;
>>   }
>>   
>> +static bool log_enabled(uint64_t features)
>> +{
>> +    return !!(features & (0x1ULL << VHOST_F_LOG_ALL));
>> +}
>> +
>>   static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>>                                        struct vhost_vring_addr *addr)
>>   {
>> @@ -1105,10 +1110,21 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>>           .hdr.size = sizeof(msg.payload.addr),
>>       };
>>   
>> +    bool reply_supported = virtio_has_feature(dev->protocol_features,
>> +                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
>> +
>> +    if (reply_supported && log_enabled(msg.hdr.flags)) {
>> +        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
>> +    }
>> +
>>       if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>>           return -1;
>>       }
>>   
>> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
>> +        return process_message_reply(dev, &msg);
>> +    }
>> +
>>       return 0;
>>   }
>>   
>> @@ -1288,7 +1304,8 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
>>       return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
>>   }
>>   
>> -static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
>> +static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
>> +                              bool need_reply)
>>   {
>>       VhostUserMsg msg = {
>>           .hdr.request = request,
>> @@ -1297,23 +1314,37 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
>>           .hdr.size = sizeof(msg.payload.u64),
>>       };
>>   
>> +    if (need_reply) {
>> +        bool reply_supported = virtio_has_feature(dev->protocol_features,
>> +                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
>> +        if (reply_supported) {
>> +            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
>> +        }
>> +    }
>> +
>>       if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>>           return -1;
>>       }
>>   
>> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
>> +        return process_message_reply(dev, &msg);
>> +    }
>> +
>>       return 0;
>>   }
>>   
>>   static int vhost_user_set_features(struct vhost_dev *dev,
>>                                      uint64_t features)
>>   {
>> -    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
>> +    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
>> +                              log_enabled(features));
>>   }
>>   
>>   static int vhost_user_set_protocol_features(struct vhost_dev *dev,
>>                                               uint64_t features)
>>   {
>> -    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
>> +    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
>> +                              false);
>>   }
>>   
>>   static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)

[-- Attachment #2: Type: text/html, Size: 5725 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PING][PING][PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies
  2021-07-29 10:56   ` [PING][PING][PATCH " Denis Plotnikov
@ 2021-07-29 12:53     ` Philippe Mathieu-Daudé
  2021-07-29 16:13       ` Stefan Hajnoczi
  0 siblings, 1 reply; 7+ messages in thread
From: Philippe Mathieu-Daudé @ 2021-07-29 12:53 UTC (permalink / raw)
  To: Denis Plotnikov, qemu-devel
  Cc: Kevin Wolf, yc-core, mst, Raphael Norwitz, Stefan Hajnoczi,
	Stefano Garzarella

Cc more ppl.

On 7/29/21 12:56 PM, Denis Plotnikov wrote:
> 
> On 23.07.2021 12:59, Denis Plotnikov wrote:
>>
>> ping!
>>
>> On 19.07.2021 17:21, Denis Plotnikov wrote:
>>> On vhost-user-blk migration, qemu normally sends a number of commands
>>> to enable logging if VHOST_USER_PROTOCOL_F_LOG_SHMFD is negotiated.
>>> Qemu sends VHOST_USER_SET_FEATURES to enable buffers logging and
>>> VHOST_USER_SET_VRING_ADDR per each started ring to enable "used ring"
>>> data logging.
>>> The issue is that qemu doesn't wait for reply from the vhost daemon
>>> for these commands which may result in races between qemu expectation
>>> of logging starting and actual login starting in vhost daemon.
>>>
>>> The race can appear as follows: on migration setup, qemu enables dirty page
>>> logging by sending VHOST_USER_SET_FEATURES. The command doesn't arrive to a
>>> vhost-user-blk daemon immediately and the daemon needs some time to turn the
>>> logging on internally. If qemu doesn't wait for reply, after sending the
>>> command, qemu may start migrate memory pages to a destination. At this time,
>>> the logging may not be actually turned on in the daemon but some guest pages,
>>> which the daemon is about to write to, may have already been transferred
>>> without logging to the destination. Since the logging wasn't turned on,
>>> those pages won't be transferred again as dirty. So we may end up with
>>> corrupted data on the destination.
>>> The same scenario is applicable for "used ring" data logging, which is
>>> turned on with VHOST_USER_SET_VRING_ADDR command.
>>>
>>> To resolve this issue, this patch makes qemu wait for the commands result
>>> explicilty if VHOST_USER_PROTOCOL_F_REPLY_ACK is negotiated and
>>> logging is enabled.
>>>
>>> Signed-off-by: Denis Plotnikov <den-plotnikov@yandex-team.ru>
>>> ---
>>> v1 -> v2:
>>>   * send reply only when logging is enabled [mst]
>>>
>>> v0 -> v1:
>>>   * send reply for SET_VRING_ADDR, SET_FEATURES only [mst]
>>>   
>>>  hw/virtio/vhost-user.c | 37 ++++++++++++++++++++++++++++++++++---
>>>  1 file changed, 34 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>>> index ee57abe04526..133588b3961e 100644
>>> --- a/hw/virtio/vhost-user.c
>>> +++ b/hw/virtio/vhost-user.c
>>> @@ -1095,6 +1095,11 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
>>>      return 0;
>>>  }
>>>  
>>> +static bool log_enabled(uint64_t features)
>>> +{
>>> +    return !!(features & (0x1ULL << VHOST_F_LOG_ALL));
>>> +}
>>> +
>>>  static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>>>                                       struct vhost_vring_addr *addr)
>>>  {
>>> @@ -1105,10 +1110,21 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>>>          .hdr.size = sizeof(msg.payload.addr),
>>>      };
>>>  
>>> +    bool reply_supported = virtio_has_feature(dev->protocol_features,
>>> +                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
>>> +
>>> +    if (reply_supported && log_enabled(msg.hdr.flags)) {
>>> +        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
>>> +    }
>>> +
>>>      if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>>>          return -1;
>>>      }
>>>  
>>> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
>>> +        return process_message_reply(dev, &msg);
>>> +    }
>>> +
>>>      return 0;
>>>  }
>>>  
>>> @@ -1288,7 +1304,8 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
>>>      return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
>>>  }
>>>  
>>> -static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
>>> +static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
>>> +                              bool need_reply)
>>>  {
>>>      VhostUserMsg msg = {
>>>          .hdr.request = request,
>>> @@ -1297,23 +1314,37 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
>>>          .hdr.size = sizeof(msg.payload.u64),
>>>      };
>>>  
>>> +    if (need_reply) {
>>> +        bool reply_supported = virtio_has_feature(dev->protocol_features,
>>> +                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
>>> +        if (reply_supported) {
>>> +            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
>>> +        }
>>> +    }
>>> +
>>>      if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>>>          return -1;
>>>      }
>>>  
>>> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
>>> +        return process_message_reply(dev, &msg);
>>> +    }
>>> +
>>>      return 0;
>>>  }
>>>  
>>>  static int vhost_user_set_features(struct vhost_dev *dev,
>>>                                     uint64_t features)
>>>  {
>>> -    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
>>> +    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
>>> +                              log_enabled(features));
>>>  }
>>>  
>>>  static int vhost_user_set_protocol_features(struct vhost_dev *dev,
>>>                                              uint64_t features)
>>>  {
>>> -    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
>>> +    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
>>> +                              false);
>>>  }
>>>  
>>>  static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PING][PING][PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies
  2021-07-29 12:53     ` Philippe Mathieu-Daudé
@ 2021-07-29 16:13       ` Stefan Hajnoczi
  0 siblings, 0 replies; 7+ messages in thread
From: Stefan Hajnoczi @ 2021-07-29 16:13 UTC (permalink / raw)
  To: Philippe Mathieu-Daudé
  Cc: Kevin Wolf, mst, qemu-devel, Raphael Norwitz, Denis Plotnikov,
	yc-core, Stefano Garzarella

[-- Attachment #1: Type: text/plain, Size: 5970 bytes --]

On Thu, Jul 29, 2021 at 02:53:53PM +0200, Philippe Mathieu-Daudé wrote:
> Cc more ppl.

This needs to go through Michael Tsirkin's tree.

Stefan

> 
> On 7/29/21 12:56 PM, Denis Plotnikov wrote:
> > 
> > On 23.07.2021 12:59, Denis Plotnikov wrote:
> >>
> >> ping!
> >>
> >> On 19.07.2021 17:21, Denis Plotnikov wrote:
> >>> On vhost-user-blk migration, qemu normally sends a number of commands
> >>> to enable logging if VHOST_USER_PROTOCOL_F_LOG_SHMFD is negotiated.
> >>> Qemu sends VHOST_USER_SET_FEATURES to enable buffers logging and
> >>> VHOST_USER_SET_VRING_ADDR per each started ring to enable "used ring"
> >>> data logging.
> >>> The issue is that qemu doesn't wait for reply from the vhost daemon
> >>> for these commands which may result in races between qemu expectation
> >>> of logging starting and actual login starting in vhost daemon.
> >>>
> >>> The race can appear as follows: on migration setup, qemu enables dirty page
> >>> logging by sending VHOST_USER_SET_FEATURES. The command doesn't arrive to a
> >>> vhost-user-blk daemon immediately and the daemon needs some time to turn the
> >>> logging on internally. If qemu doesn't wait for reply, after sending the
> >>> command, qemu may start migrate memory pages to a destination. At this time,
> >>> the logging may not be actually turned on in the daemon but some guest pages,
> >>> which the daemon is about to write to, may have already been transferred
> >>> without logging to the destination. Since the logging wasn't turned on,
> >>> those pages won't be transferred again as dirty. So we may end up with
> >>> corrupted data on the destination.
> >>> The same scenario is applicable for "used ring" data logging, which is
> >>> turned on with VHOST_USER_SET_VRING_ADDR command.
> >>>
> >>> To resolve this issue, this patch makes qemu wait for the commands result
> >>> explicilty if VHOST_USER_PROTOCOL_F_REPLY_ACK is negotiated and
> >>> logging is enabled.
> >>>
> >>> Signed-off-by: Denis Plotnikov <den-plotnikov@yandex-team.ru>
> >>> ---
> >>> v1 -> v2:
> >>>   * send reply only when logging is enabled [mst]
> >>>
> >>> v0 -> v1:
> >>>   * send reply for SET_VRING_ADDR, SET_FEATURES only [mst]
> >>>   
> >>>  hw/virtio/vhost-user.c | 37 ++++++++++++++++++++++++++++++++++---
> >>>  1 file changed, 34 insertions(+), 3 deletions(-)
> >>>
> >>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> >>> index ee57abe04526..133588b3961e 100644
> >>> --- a/hw/virtio/vhost-user.c
> >>> +++ b/hw/virtio/vhost-user.c
> >>> @@ -1095,6 +1095,11 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
> >>>      return 0;
> >>>  }
> >>>  
> >>> +static bool log_enabled(uint64_t features)
> >>> +{
> >>> +    return !!(features & (0x1ULL << VHOST_F_LOG_ALL));
> >>> +}
> >>> +
> >>>  static int vhost_user_set_vring_addr(struct vhost_dev *dev,
> >>>                                       struct vhost_vring_addr *addr)
> >>>  {
> >>> @@ -1105,10 +1110,21 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
> >>>          .hdr.size = sizeof(msg.payload.addr),
> >>>      };
> >>>  
> >>> +    bool reply_supported = virtio_has_feature(dev->protocol_features,
> >>> +                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
> >>> +
> >>> +    if (reply_supported && log_enabled(msg.hdr.flags)) {
> >>> +        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
> >>> +    }
> >>> +
> >>>      if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
> >>>          return -1;
> >>>      }
> >>>  
> >>> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
> >>> +        return process_message_reply(dev, &msg);
> >>> +    }
> >>> +
> >>>      return 0;
> >>>  }
> >>>  
> >>> @@ -1288,7 +1304,8 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
> >>>      return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
> >>>  }
> >>>  
> >>> -static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
> >>> +static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
> >>> +                              bool need_reply)
> >>>  {
> >>>      VhostUserMsg msg = {
> >>>          .hdr.request = request,
> >>> @@ -1297,23 +1314,37 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
> >>>          .hdr.size = sizeof(msg.payload.u64),
> >>>      };
> >>>  
> >>> +    if (need_reply) {
> >>> +        bool reply_supported = virtio_has_feature(dev->protocol_features,
> >>> +                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
> >>> +        if (reply_supported) {
> >>> +            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
> >>> +        }
> >>> +    }
> >>> +
> >>>      if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
> >>>          return -1;
> >>>      }
> >>>  
> >>> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
> >>> +        return process_message_reply(dev, &msg);
> >>> +    }
> >>> +
> >>>      return 0;
> >>>  }
> >>>  
> >>>  static int vhost_user_set_features(struct vhost_dev *dev,
> >>>                                     uint64_t features)
> >>>  {
> >>> -    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
> >>> +    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
> >>> +                              log_enabled(features));
> >>>  }
> >>>  
> >>>  static int vhost_user_set_protocol_features(struct vhost_dev *dev,
> >>>                                              uint64_t features)
> >>>  {
> >>> -    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
> >>> +    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
> >>> +                              false);
> >>>  }
> >>>  
> >>>  static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
> 

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies
  2021-07-19 14:21 [PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies Denis Plotnikov
  2021-07-23  9:59 ` [PING][PATCH " Denis Plotnikov
@ 2021-08-03 15:05 ` Michael S. Tsirkin
  2021-08-09  9:05   ` Denis Plotnikov
  1 sibling, 1 reply; 7+ messages in thread
From: Michael S. Tsirkin @ 2021-08-03 15:05 UTC (permalink / raw)
  To: Denis Plotnikov; +Cc: qemu-devel, yc-core

On Mon, Jul 19, 2021 at 05:21:38PM +0300, Denis Plotnikov wrote:
> On vhost-user-blk migration, qemu normally sends a number of commands
> to enable logging if VHOST_USER_PROTOCOL_F_LOG_SHMFD is negotiated.
> Qemu sends VHOST_USER_SET_FEATURES to enable buffers logging and
> VHOST_USER_SET_VRING_ADDR per each started ring to enable "used ring"
> data logging.
> The issue is that qemu doesn't wait for reply from the vhost daemon
> for these commands which may result in races between qemu expectation
> of logging starting and actual logging starting in vhost daemon.
> 
> The race can appear as follows: on migration setup, qemu enables dirty page
> logging by sending VHOST_USER_SET_FEATURES. The command doesn't arrive to a
> vhost-user-blk daemon immediately and the daemon needs some time to turn the
> logging on internally. If qemu doesn't wait for reply, after sending the
> command, qemu may start migrating memory pages to a destination. At this time,
> the logging may not be actually turned on in the daemon but some guest pages,
> which the daemon is about to write to, may have already been transferred
> without logging to the destination. Since the logging wasn't turned on,
> those pages won't be transferred again as dirty. So we may end up with
> corrupted data on the destination.
> The same scenario is applicable for "used ring" data logging, which is
> turned on with VHOST_USER_SET_VRING_ADDR command.
> 
> To resolve this issue, this patch makes qemu wait for the commands result
> explicitly if VHOST_USER_PROTOCOL_F_REPLY_ACK is negotiated and
> logging is enabled.
> 
> Signed-off-by: Denis Plotnikov <den-plotnikov@yandex-team.ru>
> ---
> v1 -> v2:
>   * send reply only when logging is enabled [mst]
> 
> v0 -> v1:
>   * send reply for SET_VRING_ADDR, SET_FEATURES only [mst]
>   
>  hw/virtio/vhost-user.c | 37 ++++++++++++++++++++++++++++++++++---
>  1 file changed, 34 insertions(+), 3 deletions(-)
> 
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index ee57abe04526..133588b3961e 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -1095,6 +1095,11 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
>      return 0;
>  }
>  
> +static bool log_enabled(uint64_t features)
> +{
> +    return !!(features & (0x1ULL << VHOST_F_LOG_ALL));
> +}
> +
>  static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>                                       struct vhost_vring_addr *addr)
>  {
> @@ -1105,10 +1110,21 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>          .hdr.size = sizeof(msg.payload.addr),
>      };
>  
> +    bool reply_supported = virtio_has_feature(dev->protocol_features,
> +                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
> +
> +    if (reply_supported && log_enabled(msg.hdr.flags)) {
> +        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
> +    }
> +
>      if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>          return -1;
>      }
>  
> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
> +        return process_message_reply(dev, &msg);
> +    }
> +
>      return 0;
>  }
>

OK this is good, but the problem is that we then still have a race
if VHOST_USER_PROTOCOL_F_REPLY_ACK is not set. Bummer.

Let's send VHOST_USER_GET_FEATURES in this case to flush out outstanding
messages?

  
> @@ -1288,7 +1304,8 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
>      return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
>  }
>  
> -static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
> +static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
> +                              bool need_reply)
>  {
>      VhostUserMsg msg = {
>          .hdr.request = request,
> @@ -1297,23 +1314,37 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
>          .hdr.size = sizeof(msg.payload.u64),
>      };
>  
> +    if (need_reply) {
> +        bool reply_supported = virtio_has_feature(dev->protocol_features,
> +                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
> +        if (reply_supported) {
> +            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
> +        }
> +    }
> +
>      if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>          return -1;
>      }
>  
> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
> +        return process_message_reply(dev, &msg);
> +    }
> +
>      return 0;
>  }
>  
>  static int vhost_user_set_features(struct vhost_dev *dev,
>                                     uint64_t features)
>  {
> -    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
> +    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
> +                              log_enabled(features));
>  }
>  
>  static int vhost_user_set_protocol_features(struct vhost_dev *dev,
>                                              uint64_t features)
>  {
> -    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
> +    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
> +                              false);
>  }
>  
>  static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
> -- 
> 2.25.1



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies
  2021-08-03 15:05 ` [PATCH " Michael S. Tsirkin
@ 2021-08-09  9:05   ` Denis Plotnikov
  0 siblings, 0 replies; 7+ messages in thread
From: Denis Plotnikov @ 2021-08-09  9:05 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: qemu-devel, yc-core


On 03.08.2021 18:05, Michael S. Tsirkin wrote:
> On Mon, Jul 19, 2021 at 05:21:38PM +0300, Denis Plotnikov wrote:
>> On vhost-user-blk migration, qemu normally sends a number of commands
>> to enable logging if VHOST_USER_PROTOCOL_F_LOG_SHMFD is negotiated.
>> Qemu sends VHOST_USER_SET_FEATURES to enable buffers logging and
>> VHOST_USER_SET_VRING_ADDR per each started ring to enable "used ring"
>> data logging.
>> The issue is that qemu doesn't wait for reply from the vhost daemon
>> for these commands which may result in races between qemu expectation
>> of logging starting and actual logging starting in vhost daemon.
>>
>> The race can appear as follows: on migration setup, qemu enables dirty page
>> logging by sending VHOST_USER_SET_FEATURES. The command doesn't arrive to a
>> vhost-user-blk daemon immediately and the daemon needs some time to turn the
>> logging on internally. If qemu doesn't wait for reply, after sending the
>> command, qemu may start migrating memory pages to a destination. At this time,
>> the logging may not be actually turned on in the daemon but some guest pages,
>> which the daemon is about to write to, may have already been transferred
>> without logging to the destination. Since the logging wasn't turned on,
>> those pages won't be transferred again as dirty. So we may end up with
>> corrupted data on the destination.
>> The same scenario is applicable for "used ring" data logging, which is
>> turned on with VHOST_USER_SET_VRING_ADDR command.
>>
>> To resolve this issue, this patch makes qemu wait for the commands result
>> explicitly if VHOST_USER_PROTOCOL_F_REPLY_ACK is negotiated and
>> logging is enabled.
>>
>> Signed-off-by: Denis Plotnikov <den-plotnikov@yandex-team.ru>
>> ---
>> v1 -> v2:
>>    * send reply only when logging is enabled [mst]
>>
>> v0 -> v1:
>>    * send reply for SET_VRING_ADDR, SET_FEATURES only [mst]
>>    
>>   hw/virtio/vhost-user.c | 37 ++++++++++++++++++++++++++++++++++---
>>   1 file changed, 34 insertions(+), 3 deletions(-)
>>
>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>> index ee57abe04526..133588b3961e 100644
>> --- a/hw/virtio/vhost-user.c
>> +++ b/hw/virtio/vhost-user.c
>> @@ -1095,6 +1095,11 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
>>       return 0;
>>   }
>>   
>> +static bool log_enabled(uint64_t features)
>> +{
>> +    return !!(features & (0x1ULL << VHOST_F_LOG_ALL));
>> +}
>> +
>>   static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>>                                        struct vhost_vring_addr *addr)
>>   {
>> @@ -1105,10 +1110,21 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
>>           .hdr.size = sizeof(msg.payload.addr),
>>       };
>>   
>> +    bool reply_supported = virtio_has_feature(dev->protocol_features,
>> +                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
>> +
>> +    if (reply_supported && log_enabled(msg.hdr.flags)) {
>> +        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
>> +    }
>> +
>>       if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>>           return -1;
>>       }
>>   
>> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
>> +        return process_message_reply(dev, &msg);
>> +    }
>> +
>>       return 0;
>>   }
>>
> OK this is good, but the problem is that we then still have a race
> if VHOST_USER_PROTOCOL_F_REPLY_ACK is not set. Bummer.
>
> Let's send VHOST_USER_GET_FEATURES in this case to flush out outstanding
> messages?
Ok, I've already sent v3 with related changes.
>    
>> @@ -1288,7 +1304,8 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
>>       return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
>>   }
>>   
>> -static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
>> +static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
>> +                              bool need_reply)
>>   {
>>       VhostUserMsg msg = {
>>           .hdr.request = request,
>> @@ -1297,23 +1314,37 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
>>           .hdr.size = sizeof(msg.payload.u64),
>>       };
>>   
>> +    if (need_reply) {
>> +        bool reply_supported = virtio_has_feature(dev->protocol_features,
>> +                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
>> +        if (reply_supported) {
>> +            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
>> +        }
>> +    }
>> +
>>       if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
>>           return -1;
>>       }
>>   
>> +    if (msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
>> +        return process_message_reply(dev, &msg);
>> +    }
>> +
>>       return 0;
>>   }
>>   
>>   static int vhost_user_set_features(struct vhost_dev *dev,
>>                                      uint64_t features)
>>   {
>> -    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
>> +    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
>> +                              log_enabled(features));
>>   }
>>   
>>   static int vhost_user_set_protocol_features(struct vhost_dev *dev,
>>                                               uint64_t features)
>>   {
>> -    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
>> +    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
>> +                              false);
>>   }
>>   
>>   static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
>> -- 
>> 2.25.1


^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2021-08-09  9:09 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-07-19 14:21 [PATCH v2] vhost: make SET_VRING_ADDR, SET_FEATURES send replies Denis Plotnikov
2021-07-23  9:59 ` [PING][PATCH " Denis Plotnikov
2021-07-29 10:56   ` [PING][PING][PATCH " Denis Plotnikov
2021-07-29 12:53     ` Philippe Mathieu-Daudé
2021-07-29 16:13       ` Stefan Hajnoczi
2021-08-03 15:05 ` [PATCH " Michael S. Tsirkin
2021-08-09  9:05   ` Denis Plotnikov

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.