[RFC,V2,8/8] vhost: event suppression for packed ring
diff mbox series

Message ID 1522035533-11786-9-git-send-email-jasowang@redhat.com
State New, archived
Headers show
Series
  • Packed ring for vhost
Related show

Commit Message

Jason Wang March 26, 2018, 3:38 a.m. UTC
This patch introduces basic support for event suppression aka driver
and device area. Compile tested only.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 drivers/vhost/vhost.c            | 169 ++++++++++++++++++++++++++++++++++++---
 drivers/vhost/vhost.h            |  10 ++-
 include/uapi/linux/virtio_ring.h |  19 +++++
 3 files changed, 183 insertions(+), 15 deletions(-)

Comments

Tiwei Bie March 30, 2018, 2:05 a.m. UTC | #1
On Mon, Mar 26, 2018 at 11:38:53AM +0800, Jason Wang wrote:
> This patch introduces basic support for event suppression aka driver
> and device area. Compile tested only.
> 
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
[...]
> +
> +static bool vhost_notify_packed(struct vhost_dev *dev,
> +				struct vhost_virtqueue *vq)
> +{
> +	__virtio16 event_off_wrap, event_flags;
> +	__u16 old, new;
> +	bool v, wrap;
> +	int off;
> +
> +	/* Flush out used descriptors updates. This is paired
> +	 * with the barrier that the Guest executes when enabling
> +	 * interrupts.
> +	 */
> +	smp_mb();
> +
> +	if (vhost_get_avail(vq, event_flags,
> +			   &vq->driver_event->desc_event_flags) < 0) {
> +		vq_err(vq, "Failed to get driver desc_event_flags");
> +		return true;
> +	}
> +
> +	if (!(event_flags & cpu_to_vhost16(vq, RING_EVENT_FLAGS_DESC)))
> +		return event_flags ==
> +		       cpu_to_vhost16(vq, RING_EVENT_FLAGS_ENABLE);

Maybe it would be better not to use '&' here, because these flags
are not defined as bits which can be ORed or ANDed. Instead, they
are defined as values:

0x0  enable
0x1  disable
0x2  desc
0x3  reserved

> +
> +	/* Read desc event flags before event_off and event_wrap */
> +	smp_rmb();
> +
> +	if (vhost_get_avail(vq, event_off_wrap,
> +			    &vq->driver_event->desc_event_off_warp) < 0) {
> +		vq_err(vq, "Failed to get driver desc_event_off/wrap");
> +		return true;
> +	}
> +
> +	off = vhost16_to_cpu(vq, event_off_wrap);
> +
> +	wrap = off & 0x1;
> +	off >>= 1;

Based on the definitions below from the spec, the wrap counter is
the most significant bit.

struct pvirtq_event_suppress {
	le16 {
		desc_event_off : 15; /* Descriptor Ring Change Event Offset */
		desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
	} desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
	le16 {
		desc_event_flags : 2, /* Descriptor Ring Change Event Flags */
		reserved : 14; /* Reserved, set to 0 */
	} flags;
};

> +
> +
> +	old = vq->signalled_used;
> +	v = vq->signalled_used_valid;
> +	new = vq->signalled_used = vq->last_used_idx;
> +	vq->signalled_used_valid = true;
> +
> +	if (unlikely(!v))
> +		return true;
> +
> +	return vhost_vring_packed_need_event(vq, new, old, off) &&
> +	       wrap == vq->used_wrap_counter;
> +}
> +
> +static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
> +{
> +	if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
> +		return vhost_notify_packed(dev, vq);
> +	else
> +		return vhost_notify_split(dev, vq);
> +}
> +
>  /* This actually signals the guest, using eventfd. */
>  void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>  {
> @@ -2789,7 +2911,17 @@ static bool vhost_enable_notify_packed(struct vhost_dev *dev,
>  	__virtio16 flags;
>  	int ret;
>  
> -	/* FIXME: disable notification through device area */
> +	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
> +		return false;
> +	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
> +
> +	flags = cpu_to_vhost16(vq, RING_EVENT_FLAGS_ENABLE);
> +	ret = vhost_update_device_flags(vq, flags);
> +	if (ret) {
> +		vq_err(vq, "Failed to enable notification at %p: %d\n",
> +		       &vq->device_event->desc_event_flags, ret);
> +		return false;
> +	}
>  
>  	/* They could have slipped one in as we were doing that: make
>  	 * sure it's written, then check again. */
> @@ -2855,7 +2987,18 @@ EXPORT_SYMBOL_GPL(vhost_enable_notify);
>  static void vhost_disable_notify_packed(struct vhost_dev *dev,
>  					struct vhost_virtqueue *vq)
>  {
> -	/* FIXME: disable notification through device area */
> +	__virtio16 flags;
> +	int r;
> +
> +	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
> +		return;
> +	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
> +
> +	flags = cpu_to_vhost16(vq, RING_EVENT_FLAGS_DISABLE);
> +	r = vhost_update_device_flags(vq, flags);
> +	if (r)
> +		vq_err(vq, "Failed to enable notification at %p: %d\n",
> +		       &vq->device_event->desc_event_flags, r);
>  }
>  
>  static void vhost_disable_notify_split(struct vhost_dev *dev,
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index 8a9df4f..02d7a36 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -96,8 +96,14 @@ struct vhost_virtqueue {
>  		struct vring_desc __user *desc;
>  		struct vring_desc_packed __user *desc_packed;

Do you think it'd be better to name the desc type
struct vring_packed_desc? That would be consistent
with other names, like:

struct vring_packed;
struct vring_packed_desc_event;

>  	};
> -	struct vring_avail __user *avail;
> -	struct vring_used __user *used;
> +	union {
> +		struct vring_avail __user *avail;
> +		struct vring_packed_desc_event __user *driver_event;
> +	};
> +	union {
> +		struct vring_used __user *used;
> +		struct vring_packed_desc_event __user *device_event;
> +	};
>  	const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
>  	struct file *kick;
>  	struct eventfd_ctx *call_ctx;
> diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
> index e297580..7cdbf06 100644
> --- a/include/uapi/linux/virtio_ring.h
> +++ b/include/uapi/linux/virtio_ring.h
> @@ -75,6 +75,25 @@ struct vring_desc_packed {
>  	__virtio16 flags;
>  };
>  
> +/* Enable events */
> +#define RING_EVENT_FLAGS_ENABLE 0x0
> +/* Disable events */
> +#define RING_EVENT_FLAGS_DISABLE 0x1
> +/*
> + * Enable events for a specific descriptor
> + * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
> + * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
> + */
> +#define RING_EVENT_FLAGS_DESC 0x2
> +/* The value 0x3 is reserved */
> +
> +struct vring_packed_desc_event {
> +	/* Descriptor Ring Change Event Offset and Wrap Counter */
> +	__virtio16 desc_event_off_warp;
> +	/* Descriptor Ring Change Event Flags */
> +	__virtio16 desc_event_flags;

Do you think it'd be better to remove the prefix (desc_event_) from
the fields? That would be consistent with other definitions, e.g.:

struct vring_packed_desc {
        /* Buffer Address. */
        __virtio64 addr;
        /* Buffer Length. */
        __virtio32 len;
        /* Buffer ID. */
        __virtio16 id;
        /* The flags depending on descriptor type. */
        __virtio16 flags;
};

> +};
> +
>  /* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
>  struct vring_desc {
>  	/* Address (guest-physical). */
> -- 
> 2.7.4
>
Jason Wang March 30, 2018, 2:46 a.m. UTC | #2
On 2018年03月30日 10:05, Tiwei Bie wrote:
> On Mon, Mar 26, 2018 at 11:38:53AM +0800, Jason Wang wrote:
>> This patch introduces basic support for event suppression aka driver
>> and device area. Compile tested only.
>>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
> [...]
>> +
>> +static bool vhost_notify_packed(struct vhost_dev *dev,
>> +				struct vhost_virtqueue *vq)
>> +{
>> +	__virtio16 event_off_wrap, event_flags;
>> +	__u16 old, new;
>> +	bool v, wrap;
>> +	int off;
>> +
>> +	/* Flush out used descriptors updates. This is paired
>> +	 * with the barrier that the Guest executes when enabling
>> +	 * interrupts.
>> +	 */
>> +	smp_mb();
>> +
>> +	if (vhost_get_avail(vq, event_flags,
>> +			   &vq->driver_event->desc_event_flags) < 0) {
>> +		vq_err(vq, "Failed to get driver desc_event_flags");
>> +		return true;
>> +	}
>> +
>> +	if (!(event_flags & cpu_to_vhost16(vq, RING_EVENT_FLAGS_DESC)))
>> +		return event_flags ==
>> +		       cpu_to_vhost16(vq, RING_EVENT_FLAGS_ENABLE);
> Maybe it would be better to not use '&' here. Because these flags
> are not defined as bits which can be ORed or ANDed. Instead, they
> are defined as values:
>
> 0x0  enable
> 0x1  disable
> 0x2  desc
> 0x3  reserved

Yes the code seems tricky. Let me fix it in next version.

>
>> +
>> +	/* Read desc event flags before event_off and event_wrap */
>> +	smp_rmb();
>> +
>> +	if (vhost_get_avail(vq, event_off_wrap,
>> +			    &vq->driver_event->desc_event_off_warp) < 0) {
>> +		vq_err(vq, "Failed to get driver desc_event_off/wrap");
>> +		return true;
>> +	}
>> +
>> +	off = vhost16_to_cpu(vq, event_off_wrap);
>> +
>> +	wrap = off & 0x1;
>> +	off >>= 1;
> Based on the below definitions in spec, wrap counter is
> the most significant bit.
>
> struct pvirtq_event_suppress {
> 	le16 {
> 		desc_event_off : 15; /* Descriptor Ring Change Event Offset */
> 		desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
> 	} desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
> 	le16 {
> 		desc_event_flags : 2, /* Descriptor Ring Change Event Flags */
> 		reserved : 14; /* Reserved, set to 0 */
> 	} flags;
> };

Will fix this in next version.

>
>> +
>> +
>> +	old = vq->signalled_used;
>> +	v = vq->signalled_used_valid;
>> +	new = vq->signalled_used = vq->last_used_idx;
>> +	vq->signalled_used_valid = true;
>> +
>> +	if (unlikely(!v))
>> +		return true;
>> +
>> +	return vhost_vring_packed_need_event(vq, new, old, off) &&
>> +	       wrap == vq->used_wrap_counter;
>> +}
>> +
>> +static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>> +{
>> +	if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
>> +		return vhost_notify_packed(dev, vq);
>> +	else
>> +		return vhost_notify_split(dev, vq);
>> +}
>> +
>>   /* This actually signals the guest, using eventfd. */
>>   void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>>   {
>> @@ -2789,7 +2911,17 @@ static bool vhost_enable_notify_packed(struct vhost_dev *dev,
>>   	__virtio16 flags;
>>   	int ret;
>>   
>> -	/* FIXME: disable notification through device area */
>> +	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
>> +		return false;
>> +	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
>> +
>> +	flags = cpu_to_vhost16(vq, RING_EVENT_FLAGS_ENABLE);
>> +	ret = vhost_update_device_flags(vq, flags);
>> +	if (ret) {
>> +		vq_err(vq, "Failed to enable notification at %p: %d\n",
>> +		       &vq->device_event->desc_event_flags, ret);
>> +		return false;
>> +	}
>>   
>>   	/* They could have slipped one in as we were doing that: make
>>   	 * sure it's written, then check again. */
>> @@ -2855,7 +2987,18 @@ EXPORT_SYMBOL_GPL(vhost_enable_notify);
>>   static void vhost_disable_notify_packed(struct vhost_dev *dev,
>>   					struct vhost_virtqueue *vq)
>>   {
>> -	/* FIXME: disable notification through device area */
>> +	__virtio16 flags;
>> +	int r;
>> +
>> +	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
>> +		return;
>> +	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
>> +
>> +	flags = cpu_to_vhost16(vq, RING_EVENT_FLAGS_DISABLE);
>> +	r = vhost_update_device_flags(vq, flags);
>> +	if (r)
>> +		vq_err(vq, "Failed to enable notification at %p: %d\n",
>> +		       &vq->device_event->desc_event_flags, r);
>>   }
>>   
>>   static void vhost_disable_notify_split(struct vhost_dev *dev,
>> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
>> index 8a9df4f..02d7a36 100644
>> --- a/drivers/vhost/vhost.h
>> +++ b/drivers/vhost/vhost.h
>> @@ -96,8 +96,14 @@ struct vhost_virtqueue {
>>   		struct vring_desc __user *desc;
>>   		struct vring_desc_packed __user *desc_packed;
> Do you think it'd be better to name the desc type as
> struct vring_packed_desc?

Ok. Will do.

> And it will be consistent
> with other names, like:
>
> struct vring_packed;
> struct vring_packed_desc_event;
>
>>   	};
>> -	struct vring_avail __user *avail;
>> -	struct vring_used __user *used;
>> +	union {
>> +		struct vring_avail __user *avail;
>> +		struct vring_packed_desc_event __user *driver_event;
>> +	};
>> +	union {
>> +		struct vring_used __user *used;
>> +		struct vring_packed_desc_event __user *device_event;
>> +	};
>>   	const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
>>   	struct file *kick;
>>   	struct eventfd_ctx *call_ctx;
>> diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
>> index e297580..7cdbf06 100644
>> --- a/include/uapi/linux/virtio_ring.h
>> +++ b/include/uapi/linux/virtio_ring.h
>> @@ -75,6 +75,25 @@ struct vring_desc_packed {
>>   	__virtio16 flags;
>>   };
>>   
>> +/* Enable events */
>> +#define RING_EVENT_FLAGS_ENABLE 0x0
>> +/* Disable events */
>> +#define RING_EVENT_FLAGS_DISABLE 0x1
>> +/*
>> + * Enable events for a specific descriptor
>> + * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
>> + * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
>> + */
>> +#define RING_EVENT_FLAGS_DESC 0x2
>> +/* The value 0x3 is reserved */
>> +
>> +struct vring_packed_desc_event {
>> +	/* Descriptor Ring Change Event Offset and Wrap Counter */
>> +	__virtio16 desc_event_off_warp;
>> +	/* Descriptor Ring Change Event Flags */
>> +	__virtio16 desc_event_flags;
> Do you think it'd be better to remove the prefix (desc_event_) for
> the fields. And it will be consistent with other definitions, e.g.:
>
> struct vring_packed_desc {
>          /* Buffer Address. */
>          __virtio64 addr;
>          /* Buffer Length. */
>          __virtio32 len;
>          /* Buffer ID. */
>          __virtio16 id;
>          /* The flags depending on descriptor type. */
>          __virtio16 flags;
> };

Yes. Let me do it in next version.

Thanks for the review!

>> +};
>> +
>>   /* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
>>   struct vring_desc {
>>   	/* Address (guest-physical). */
>> -- 
>> 2.7.4
>>

Patch
diff mbox series

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 6177e4d..ff83a2e 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1143,10 +1143,15 @@  static int vq_access_ok_packed(struct vhost_virtqueue *vq, unsigned int num,
 			       struct vring_used __user *used)
 {
 	struct vring_desc_packed *packed = (struct vring_desc_packed *)desc;
+	struct vring_packed_desc_event *driver_event =
+		(struct vring_packed_desc_event *)avail;
+	struct vring_packed_desc_event *device_event =
+		(struct vring_packed_desc_event *)used;
 
-	/* FIXME: check device area and driver area */
 	return access_ok(VERIFY_READ, packed, num * sizeof(*packed)) &&
-	       access_ok(VERIFY_WRITE, packed, num * sizeof(*packed));
+	       access_ok(VERIFY_WRITE, packed, num * sizeof(*packed)) &&
+	       access_ok(VERIFY_READ, driver_event, sizeof(*driver_event)) &&
+	       access_ok(VERIFY_WRITE, device_event, sizeof(*device_event));
 }
 
 static int vq_access_ok_split(struct vhost_virtqueue *vq, unsigned int num,
@@ -1222,14 +1227,27 @@  static int iotlb_access_ok(struct vhost_virtqueue *vq,
 	return true;
 }
 
-int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
+int vq_iotlb_prefetch_packed(struct vhost_virtqueue *vq)
+{
+	int num = vq->num;
+
+	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
+			       num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
+	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->desc,
+			       num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
+	       iotlb_access_ok(vq, VHOST_ACCESS_RO,
+			       (u64)(uintptr_t)vq->driver_event,
+			       sizeof(*vq->driver_event), VHOST_ADDR_AVAIL) &&
+	       iotlb_access_ok(vq, VHOST_ACCESS_WO,
+			       (u64)(uintptr_t)vq->device_event,
+			       sizeof(*vq->device_event), VHOST_ADDR_USED);
+}
+
+int vq_iotlb_prefetch_split(struct vhost_virtqueue *vq)
 {
 	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 	unsigned int num = vq->num;
 
-	if (!vq->iotlb)
-		return 1;
-
 	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
 			       num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
 	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
@@ -1241,6 +1259,17 @@  int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
 			       num * sizeof(*vq->used->ring) + s,
 			       VHOST_ADDR_USED);
 }
+
+int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
+{
+	if (!vq->iotlb)
+		return 1;
+
+	if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
+		return vq_iotlb_prefetch_packed(vq);
+	else
+		return vq_iotlb_prefetch_split(vq);
+}
 EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
 
 /* Can we log writes? */
@@ -1756,6 +1785,29 @@  static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 	return 0;
 }
 
+static int vhost_update_device_flags(struct vhost_virtqueue *vq,
+				     __virtio16 device_flags)
+{
+	void __user *flags;
+
+	if (vhost_put_user(vq, cpu_to_vhost16(vq, device_flags),
+			   &vq->device_event->desc_event_flags,
+			   VHOST_ADDR_DESC) < 0)
+		return -EFAULT;
+	if (unlikely(vq->log_used)) {
+		/* Make sure the flag is seen before log. */
+		smp_wmb();
+		/* Log used flag write. */
+		flags = &vq->device_event->desc_event_flags;
+		log_write(vq->log_base, vq->log_addr +
+			  (flags - (void __user *)vq->device_event),
+			  sizeof(vq->used->flags));
+		if (vq->log_ctx)
+			eventfd_signal(vq->log_ctx, 1);
+	}
+	return 0;
+}
+
 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
 {
 	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
@@ -2667,16 +2719,13 @@  int vhost_add_used_n(struct vhost_virtqueue *vq,
 }
 EXPORT_SYMBOL_GPL(vhost_add_used_n);
 
-static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+static bool vhost_notify_split(struct vhost_dev *dev,
+			       struct vhost_virtqueue *vq)
 {
 	__u16 old, new;
 	__virtio16 event;
 	bool v;
 
-	/* FIXME: check driver area */
-	if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
-		return false;
-
 	/* Flush out used index updates. This is paired
 	 * with the barrier that the Guest executes when enabling
 	 * interrupts. */
@@ -2709,6 +2758,79 @@  static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
 }
 
+static bool vhost_idx_diff(struct vhost_virtqueue *vq, u16 old, u16 new)
+{
+	if (new > old)
+		return new - old;
+	return  (new + vq->num - old);
+}
+
+static bool vhost_vring_packed_need_event(struct vhost_virtqueue *vq,
+					  __u16 event_off, __u16 new,
+					  __u16 old)
+{
+	return (__u16)(vhost_idx_diff(vq, new, event_off) - 1) <
+	       (__u16)vhost_idx_diff(vq, new, old);
+}
+
+static bool vhost_notify_packed(struct vhost_dev *dev,
+				struct vhost_virtqueue *vq)
+{
+	__virtio16 event_off_wrap, event_flags;
+	__u16 old, new;
+	bool v, wrap;
+	int off;
+
+	/* Flush out used descriptors updates. This is paired
+	 * with the barrier that the Guest executes when enabling
+	 * interrupts.
+	 */
+	smp_mb();
+
+	if (vhost_get_avail(vq, event_flags,
+			   &vq->driver_event->desc_event_flags) < 0) {
+		vq_err(vq, "Failed to get driver desc_event_flags");
+		return true;
+	}
+
+	if (!(event_flags & cpu_to_vhost16(vq, RING_EVENT_FLAGS_DESC)))
+		return event_flags ==
+		       cpu_to_vhost16(vq, RING_EVENT_FLAGS_ENABLE);
+
+	/* Read desc event flags before event_off and event_wrap */
+	smp_rmb();
+
+	if (vhost_get_avail(vq, event_off_wrap,
+			    &vq->driver_event->desc_event_off_warp) < 0) {
+		vq_err(vq, "Failed to get driver desc_event_off/wrap");
+		return true;
+	}
+
+	off = vhost16_to_cpu(vq, event_off_wrap);
+
+	wrap = off & 0x1;
+	off >>= 1;
+
+	old = vq->signalled_used;
+	v = vq->signalled_used_valid;
+	new = vq->signalled_used = vq->last_used_idx;
+	vq->signalled_used_valid = true;
+
+	if (unlikely(!v))
+		return true;
+
+	return vhost_vring_packed_need_event(vq, new, old, off) &&
+	       wrap == vq->used_wrap_counter;
+}
+
+static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+	if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
+		return vhost_notify_packed(dev, vq);
+	else
+		return vhost_notify_split(dev, vq);
+}
+
 /* This actually signals the guest, using eventfd. */
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
@@ -2789,7 +2911,17 @@  static bool vhost_enable_notify_packed(struct vhost_dev *dev,
 	__virtio16 flags;
 	int ret;
 
-	/* FIXME: disable notification through device area */
+	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
+		return false;
+	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
+
+	flags = cpu_to_vhost16(vq, RING_EVENT_FLAGS_ENABLE);
+	ret = vhost_update_device_flags(vq, flags);
+	if (ret) {
+		vq_err(vq, "Failed to enable notification at %p: %d\n",
+		       &vq->device_event->desc_event_flags, ret);
+		return false;
+	}
 
 	/* They could have slipped one in as we were doing that: make
 	 * sure it's written, then check again. */
@@ -2855,7 +2987,18 @@  EXPORT_SYMBOL_GPL(vhost_enable_notify);
 static void vhost_disable_notify_packed(struct vhost_dev *dev,
 					struct vhost_virtqueue *vq)
 {
-	/* FIXME: disable notification through device area */
+	__virtio16 flags;
+	int r;
+
+	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
+		return;
+	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
+
+	flags = cpu_to_vhost16(vq, RING_EVENT_FLAGS_DISABLE);
+	r = vhost_update_device_flags(vq, flags);
+	if (r)
+		vq_err(vq, "Failed to enable notification at %p: %d\n",
+		       &vq->device_event->desc_event_flags, r);
 }
 
 static void vhost_disable_notify_split(struct vhost_dev *dev,
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 8a9df4f..02d7a36 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -96,8 +96,14 @@  struct vhost_virtqueue {
 		struct vring_desc __user *desc;
 		struct vring_desc_packed __user *desc_packed;
 	};
-	struct vring_avail __user *avail;
-	struct vring_used __user *used;
+	union {
+		struct vring_avail __user *avail;
+		struct vring_packed_desc_event __user *driver_event;
+	};
+	union {
+		struct vring_used __user *used;
+		struct vring_packed_desc_event __user *device_event;
+	};
 	const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
 	struct file *kick;
 	struct eventfd_ctx *call_ctx;
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index e297580..7cdbf06 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -75,6 +75,25 @@  struct vring_desc_packed {
 	__virtio16 flags;
 };
 
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+struct vring_packed_desc_event {
+	/* Descriptor Ring Change Event Offset and Wrap Counter */
+	__virtio16 desc_event_off_warp;
+	/* Descriptor Ring Change Event Flags */
+	__virtio16 desc_event_flags;
+};
+
 /* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
 struct vring_desc {
 	/* Address (guest-physical). */