* [PATCH] vhost: add support for mandatory barriers
From: Vincent Whitchurch @ 2021-08-23  8:14 UTC
  To: Michael S. Tsirkin, Jason Wang
  Cc: kernel, Vincent Whitchurch, kvm, virtualization, netdev, linux-kernel

vhost always uses SMP-conditional barriers, but these may not be
sufficient when vhost is used to communicate between heterogeneous
processors in an AMP configuration, especially since they're NOPs on
!SMP builds.

To solve this, use the virtio_*() barrier functions and ask them for
non-weak barriers if requested by userspace.
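
For reference, the virtio_*() helpers (a simplified sketch of the
definitions in include/linux/virtio_ring.h) choose between the
virt_*() barriers, which unlike smp_*() are not compiled away on !SMP
builds, and the mandatory variants:

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();	/* SMP-strength, kept even on !SMP */
	else
		mb();		/* mandatory full barrier */
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		dma_rmb();	/* mandatory read barrier (DMA variant) */
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		dma_wmb();	/* mandatory write barrier (DMA variant) */
}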

Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
---
 drivers/vhost/vhost.c      | 23 ++++++++++++++---------
 drivers/vhost/vhost.h      |  2 ++
 include/uapi/linux/vhost.h |  2 ++
 3 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index b9e853e6094d..f7172e1bc395 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -500,6 +500,7 @@ void vhost_dev_init(struct vhost_dev *dev,
 		vq->indirect = NULL;
 		vq->heads = NULL;
 		vq->dev = dev;
+		vq->weak_barriers = true;
 		mutex_init(&vq->mutex);
 		vhost_vq_reset(dev, vq);
 		if (vq->handle_kick)
@@ -1801,6 +1802,10 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 		if (ctx)
 			eventfd_ctx_put(ctx);
 		break;
+	case VHOST_SET_STRONG_BARRIERS:
+		for (i = 0; i < d->nvqs; ++i)
+			d->vqs[i]->weak_barriers = false;
+		break;
 	default:
 		r = -ENOIOCTLCMD;
 		break;
@@ -1927,7 +1932,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 	int i, r;
 
 	/* Make sure data written is seen before log. */
-	smp_wmb();
+	virtio_wmb(vq->weak_barriers);
 
 	if (vq->iotlb) {
 		for (i = 0; i < count; i++) {
@@ -1964,7 +1969,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		/* Make sure the flag is seen before log. */
-		smp_wmb();
+		virtio_wmb(vq->weak_barriers);
 		/* Log used flag write. */
 		used = &vq->used->flags;
 		log_used(vq, (used - (void __user *)vq->used),
@@ -1982,7 +1987,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
 	if (unlikely(vq->log_used)) {
 		void __user *used;
 		/* Make sure the event is seen before log. */
-		smp_wmb();
+		virtio_wmb(vq->weak_barriers);
 		/* Log avail event write */
 		used = vhost_avail_event(vq);
 		log_used(vq, (used - (void __user *)vq->used),
@@ -2228,7 +2233,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 		/* Only get avail ring entries after they have been
 		 * exposed by guest.
 		 */
-		smp_rmb();
+		virtio_rmb(vq->weak_barriers);
 	}
 
 	/* Grab the next descriptor number they're advertising, and increment
@@ -2367,7 +2372,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 	}
 	if (unlikely(vq->log_used)) {
 		/* Make sure data is seen before log. */
-		smp_wmb();
+		virtio_wmb(vq->weak_barriers);
 		/* Log used ring entry write. */
 		log_used(vq, ((void __user *)used - (void __user *)vq->used),
 			 count * sizeof *used);
@@ -2402,14 +2407,14 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 	r = __vhost_add_used_n(vq, heads, count);
 
 	/* Make sure buffer is written before we update index. */
-	smp_wmb();
+	virtio_wmb(vq->weak_barriers);
 	if (vhost_put_used_idx(vq)) {
 		vq_err(vq, "Failed to increment used idx");
 		return -EFAULT;
 	}
 	if (unlikely(vq->log_used)) {
 		/* Make sure used idx is seen before log. */
-		smp_wmb();
+		virtio_wmb(vq->weak_barriers);
 		/* Log used index update. */
 		log_used(vq, offsetof(struct vring_used, idx),
 			 sizeof vq->used->idx);
@@ -2428,7 +2433,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	/* Flush out used index updates. This is paired
 	 * with the barrier that the Guest executes when enabling
 	 * interrupts. */
-	smp_mb();
+	virtio_mb(vq->weak_barriers);
 
 	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
 	    unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2530,7 +2535,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	}
 	/* They could have slipped one in as we were doing that: make
 	 * sure it's written, then check again. */
-	smp_mb();
+	virtio_mb(vq->weak_barriers);
 	r = vhost_get_avail_idx(vq, &avail_idx);
 	if (r) {
 		vq_err(vq, "Failed to check avail idx at %p: %d\n",
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 638bb640d6b4..5bd20d0db457 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -108,6 +108,8 @@ struct vhost_virtqueue {
 	bool log_used;
 	u64 log_addr;
 
+	bool weak_barriers;
+
 	struct iovec iov[UIO_MAXIOV];
 	struct iovec iotlb_iov[64];
 	struct iovec *indirect;
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index c998860d7bbc..4b8656307f51 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -97,6 +97,8 @@
 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
 #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
 
+#define VHOST_SET_STRONG_BARRIERS _IO(VHOST_VIRTIO, 0x27)
+
 /* VHOST_NET specific defines */
 
 /* Attach virtio net ring to a raw socket, or tap device.
-- 
2.28.0


* Re: [PATCH] vhost: add support for mandatory barriers
From: Michael S. Tsirkin @ 2021-08-23 21:19 UTC
  To: Vincent Whitchurch
  Cc: Jason Wang, kernel, kvm, virtualization, netdev, linux-kernel

On Mon, Aug 23, 2021 at 10:14:37AM +0200, Vincent Whitchurch wrote:
> vhost always uses SMP-conditional barriers, but these may not be
> sufficient when vhost is used to communicate between heterogeneous
> processors in an AMP configuration, especially since they're NOPs on
> !SMP builds.
> 
> To solve this, use the virtio_*() barrier functions and ask them for
> non-weak barriers if requested by userspace.
> 
> Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>

I am inclined to say let's (ab)use VIRTIO_F_ORDER_PLATFORM for this.
Jason, what do you think?

Also, is the use of the DMA barrier variants really the intended thing
here? Could you point me at some examples, please?
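
For illustration, a minimal sketch of the feature-bit approach on the
vhost side; the helper below is hypothetical, and it assumes userspace
passes VIRTIO_F_ORDER_PLATFORM in through VHOST_SET_FEATURES (the
backend would also have to advertise the bit):

static inline bool vhost_weak_barriers(struct vhost_virtqueue *vq)
{
	/* Mandatory barriers when the platform requires real ordering,
	 * e.g. for a non-coherent AMP transport. */
	return !vhost_has_feature(vq, VIRTIO_F_ORDER_PLATFORM);
}

The barrier call sites would then pass vhost_weak_barriers(vq) instead
of a per-vq flag set by a new ioctl.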


> [... patch snipped ...]



* Re: [PATCH] vhost: add support for mandatory barriers
From: Jason Wang @ 2021-08-24  2:56 UTC
  To: Michael S. Tsirkin
  Cc: Vincent Whitchurch, kernel, kvm, virtualization, netdev, linux-kernel

On Tue, Aug 24, 2021 at 5:20 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Mon, Aug 23, 2021 at 10:14:37AM +0200, Vincent Whitchurch wrote:
> > vhost always uses SMP-conditional barriers, but these may not be
> > sufficient when vhost is used to communicate between heterogeneous
> > processors in an AMP configuration, especially since they're NOPs on
> > !SMP builds.
> >
> > To solve this, use the virtio_*() barrier functions and ask them for
> > non-weak barriers if requested by userspace.
> >
> > Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
>
> I am inclined to say let's (ab)use VIRTIO_F_ORDER_PLATFORM for this.
> Jason, what do you think?

Yes, it looks fine to me.

>
> Also, is the use of the DMA barrier variants really the intended thing
> here? Could you point me at some examples, please?

Yes, we need to know which setups actually need this.

Thanks

> > [... patch snipped ...]



* Re: [PATCH] vhost: add support for mandatory barriers
From: Vincent Whitchurch @ 2021-08-24  7:23 UTC
  To: Michael S. Tsirkin
  Cc: Jason Wang, kernel, kvm, virtualization, netdev, linux-kernel

On Mon, Aug 23, 2021 at 11:19:56PM +0200, Michael S. Tsirkin wrote:
> On Mon, Aug 23, 2021 at 10:14:37AM +0200, Vincent Whitchurch wrote:
> > vhost always uses SMP-conditional barriers, but these may not be
> > sufficient when vhost is used to communicate between heterogeneous
> > processors in an AMP configuration, especially since they're NOPs on
> > !SMP builds.
> > 
> > To solve this, use the virtio_*() barrier functions and ask them for
> > non-weak barriers if requested by userspace.
> > 
> > Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
> 
> I am inclined to say let's (ab)use VIRTIO_F_ORDER_PLATFORM for this.
> Jason, what do you think?

OK, thanks, I'll look into that.

> Also, is the use of the DMA barrier variants really the intended thing
> here? Could you point me at some examples, please?

I'm using this on an ARM-based SoC.  The main processor, a Cortex-A53
(arm64), runs the virtio drivers.  The SoC also has a Cortex-A5 (arm32)
which runs the virtio device end using vhost.  There is no cache
coherency between the two processors, so to each other they look like
any other DMA-capable hardware.
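
For concreteness, this is roughly how our userspace side requests
mandatory barriers with this patch applied (the helper name is just
for illustration):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Open a vhost device and ask for mandatory barriers on all of its
 * virtqueues, since the remote A5 is a non-coherent observer. */
static int open_vhost_strong(const char *path)
{
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	if (ioctl(fd, VHOST_SET_OWNER) < 0 ||
	    ioctl(fd, VHOST_SET_STRONG_BARRIERS) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}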
