* [Qemu-devel] [PATCH v5] vhost-user interrupt management fixes
@ 2016-02-18 14:12 Victor Kaplansky
2016-02-19 9:02 ` Didier Pallard
0 siblings, 1 reply; 3+ messages in thread
From: Victor Kaplansky @ 2016-02-18 14:12 UTC (permalink / raw)
To: qemu-devel
Cc: thibaut.collet, Jason Wang, jmg, Didier Pallard, Michael S. Tsirkin
Since guest_mask_notifier can not be used in vhost-user mode due
to buffering implied by unix control socket, force
use_mask_notifier on virtio devices of vhost-user interfaces, and
send correct callfd to the guest at vhost start.
Using guest_notifier_mask function in vhost-user case may
break interrupt mask paradigm, because mask/unmask is not
really done when returning from guest_notifier_mask call, instead
message is posted in a unix socket, and processed later.
Add an option boolean flag 'use_mask_notifier' to disable the use
of guest_notifier_mask in virtio pci.
Signed-off-by: Didier Pallard <didier.pallard@6wind.com>
Signed-off-by: Victor Kaplansky <victork@redhat.com>
---
v5 changes:
- rebased to mst tree.
- removed a trailing space.
v4 changes:
In response to Michael S. Tsirkin comments:
- changed the name of new field to use_guest_notifier_mask
- clarified comments
- vhost_virtqueue_mask() called for unmask
- cosmetic changes
- moved the initialization of the new field from virtio_reset
to virtio_init.
v3 changes:
In response to Michael S. Tsirkin comments:
- vhost_net.c: removed dependency on virtio-pci.h
- vhost.c: simplified the check for vhost-user backend,
replaced by checking use_mask_notifier; added comment
explaining why vring for vhost-user initialized in
unmasked state;
- cosmetic fixes.
v2 changes:
- a new boolean field is added to all virtio devices instead
of defining a property in some virtio-pci devices.
include/hw/virtio/virtio.h | 1 +
hw/net/vhost_net.c | 15 +++++++++++++--
hw/virtio/vhost.c | 9 +++++++++
hw/virtio/virtio-pci.c | 14 ++++++++------
hw/virtio/virtio.c | 1 +
5 files changed, 32 insertions(+), 8 deletions(-)
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 108cdb0f..c38a2fef 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -90,6 +90,7 @@ struct VirtIODevice
VMChangeStateEntry *vmstate;
char *bus_name;
uint8_t device_endian;
+ bool use_guest_notifier_mask;
QLIST_HEAD(, VirtQueue) *vector_queues;
};
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index b2428324..6e1032fc 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -284,8 +284,19 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
}
for (i = 0; i < total_queues; i++) {
- vhost_net_set_vq_index(get_vhost_net(ncs[i].peer), i * 2);
- }
+ struct vhost_net *net;
+
+ net = get_vhost_net(ncs[i].peer);
+ vhost_net_set_vq_index(net, i * 2);
+
+ /* Suppress the masking guest notifiers on vhost user
+ * because vhost user doesn't interrupt masking/unmasking
+ * properly.
+ */
+ if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+ dev->use_guest_notifier_mask = false;
+ }
+ }
r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
if (r < 0) {
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 9f8ac38c..72d0c9e9 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -875,6 +875,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
/* Clear and discard previous events if any. */
event_notifier_test_and_clear(&vq->masked_notifier);
+ /* Init vring in unmasked state, unless guest_notifier_mask
+ * will do it later.
+ */
+ if (!vdev->use_guest_notifier_mask) {
+ /* TODO: check and handle errors. */
+ vhost_virtqueue_mask(dev, vdev, idx, false);
+ }
+
return 0;
fail_kick:
@@ -1167,6 +1175,7 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
struct vhost_vring_file file;
if (mask) {
+ assert(vdev->use_guest_notifier_mask);
file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
} else {
file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 5494ff4a..440776c0 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
/* If guest supports masking, set up irqfd now.
* Otherwise, delay until unmasked in the frontend.
*/
- if (k->guest_notifier_mask) {
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
if (ret < 0) {
kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -822,7 +822,7 @@ undo:
if (vector >= msix_nr_vectors_allocated(dev)) {
continue;
}
- if (k->guest_notifier_mask) {
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
/* If guest supports masking, clean up irqfd now.
* Otherwise, it was cleaned when masked in the frontend.
*/
- if (k->guest_notifier_mask) {
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
/* If guest supports masking, irqfd is already setup, unmask it.
* Otherwise, set it up now.
*/
- if (k->guest_notifier_mask) {
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
k->guest_notifier_mask(vdev, queue_no, false);
/* Test after unmasking to avoid losing events. */
if (k->guest_notifier_pending &&
@@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
/* If guest supports masking, keep irqfd but mask it.
* Otherwise, clean it up now.
*/
- if (k->guest_notifier_mask) {
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
k->guest_notifier_mask(vdev, queue_no, true);
} else {
kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
@@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
event_notifier_cleanup(notifier);
}
- if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
+ if (!msix_enabled(&proxy->pci_dev) &&
+ vdev->use_guest_notifier_mask &&
+ vdc->guest_notifier_mask) {
vdc->guest_notifier_mask(vdev, n, !assign);
}
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 90f25451..e365960b 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -1677,6 +1677,7 @@ void virtio_init(VirtIODevice *vdev, const char *name,
vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
vdev);
vdev->device_endian = virtio_default_endian();
+ vdev->use_guest_notifier_mask = true;
}
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
--
Victor
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [Qemu-devel] [PATCH v5] vhost-user interrupt management fixes
2016-02-18 14:12 [Qemu-devel] [PATCH v5] vhost-user interrupt management fixes Victor Kaplansky
@ 2016-02-19 9:02 ` Didier Pallard
2016-02-20 18:05 ` Michael S. Tsirkin
0 siblings, 1 reply; 3+ messages in thread
From: Didier Pallard @ 2016-02-19 9:02 UTC (permalink / raw)
To: Victor Kaplansky, qemu-devel
Cc: thibaut.collet, Jason Wang, jmg, Michael S. Tsirkin
Hi victor,
I'm sorry, i didn't get time to work on this patch.
Thanks for your work.
didier
On 02/18/2016 03:12 PM, Victor Kaplansky wrote:
> Since guest_mask_notifier can not be used in vhost-user mode due
> to buffering implied by unix control socket, force
> use_mask_notifier on virtio devices of vhost-user interfaces, and
> send correct callfd to the guest at vhost start.
>
> Using guest_notifier_mask function in vhost-user case may
> break interrupt mask paradigm, because mask/unmask is not
> really done when returning from guest_notifier_mask call, instead
> message is posted in a unix socket, and processed later.
>
> Add an option boolean flag 'use_mask_notifier' to disable the use
> of guest_notifier_mask in virtio pci.
>
> Signed-off-by: Didier Pallard <didier.pallard@6wind.com>
> Signed-off-by: Victor Kaplansky <victork@redhat.com>
> ---
> v5 changes:
> - rebased to mst tree.
> - removed a traling space.
>
> v4 changes:
> In respond to Michael S. Tsirkin comments:
> - changed the name of new field to use_guest_notifier_mask
> - clarified comments
> - vhost_virtqueue_mask() called for unmask
> - cosmetic changes
> - moved the initialization of the new field from virtio_reset
> to virtio_init.
>
> v3 changes:
> In respond to Michael S. Tsirkin comments:
> - vhost_net.c: removed dependency on virtio-pci.h
> - vhost.c: simplified the check for vhost-user backend,
> replaced by checking use_mask_notifier; added comment
> explaining why vring for vhost-user initialized in
> unmasked state;
> - cosmetic fixes.
>
> v2 changes:
> - a new boolean field is added to all virtio devices instead
> of defining a property in some virtio-pci devices.
>
> include/hw/virtio/virtio.h | 1 +
> hw/net/vhost_net.c | 15 +++++++++++++--
> hw/virtio/vhost.c | 9 +++++++++
> hw/virtio/virtio-pci.c | 14 ++++++++------
> hw/virtio/virtio.c | 1 +
> 5 files changed, 32 insertions(+), 8 deletions(-)
>
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index 108cdb0f..c38a2fef 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -90,6 +90,7 @@ struct VirtIODevice
> VMChangeStateEntry *vmstate;
> char *bus_name;
> uint8_t device_endian;
> + bool use_guest_notifier_mask;
> QLIST_HEAD(, VirtQueue) *vector_queues;
> };
>
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index b2428324..6e1032fc 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -284,8 +284,19 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
> }
>
> for (i = 0; i < total_queues; i++) {
> - vhost_net_set_vq_index(get_vhost_net(ncs[i].peer), i * 2);
> - }
> + struct vhost_net *net;
> +
> + net = get_vhost_net(ncs[i].peer);
> + vhost_net_set_vq_index(net, i * 2);
> +
> + /* Suppress the masking guest notifiers on vhost user
> + * because vhost user doesn't interrupt masking/unmasking
> + * properly.
> + */
> + if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
> + dev->use_guest_notifier_mask = false;
> + }
> + }
>
> r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
> if (r < 0) {
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 9f8ac38c..72d0c9e9 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -875,6 +875,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
> /* Clear and discard previous events if any. */
> event_notifier_test_and_clear(&vq->masked_notifier);
>
> + /* Init vring in unmasked state, unless guest_notifier_mask
> + * will do it later.
> + */
> + if (!vdev->use_guest_notifier_mask) {
> + /* TODO: check and handle errors. */
> + vhost_virtqueue_mask(dev, vdev, idx, false);
> + }
> +
> return 0;
>
> fail_kick:
> @@ -1167,6 +1175,7 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
> struct vhost_vring_file file;
>
> if (mask) {
> + assert(vdev->use_guest_notifier_mask);
> file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
> } else {
> file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 5494ff4a..440776c0 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> /* If guest supports masking, set up irqfd now.
> * Otherwise, delay until unmasked in the frontend.
> */
> - if (k->guest_notifier_mask) {
> + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> if (ret < 0) {
> kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -822,7 +822,7 @@ undo:
> if (vector >= msix_nr_vectors_allocated(dev)) {
> continue;
> }
> - if (k->guest_notifier_mask) {
> + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> }
> kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> /* If guest supports masking, clean up irqfd now.
> * Otherwise, it was cleaned when masked in the frontend.
> */
> - if (k->guest_notifier_mask) {
> + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> }
> kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
> /* If guest supports masking, irqfd is already setup, unmask it.
> * Otherwise, set it up now.
> */
> - if (k->guest_notifier_mask) {
> + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> k->guest_notifier_mask(vdev, queue_no, false);
> /* Test after unmasking to avoid losing events. */
> if (k->guest_notifier_pending &&
> @@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
> /* If guest supports masking, keep irqfd but mask it.
> * Otherwise, clean it up now.
> */
> - if (k->guest_notifier_mask) {
> + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> k->guest_notifier_mask(vdev, queue_no, true);
> } else {
> kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> @@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
> event_notifier_cleanup(notifier);
> }
>
> - if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
> + if (!msix_enabled(&proxy->pci_dev) &&
> + vdev->use_guest_notifier_mask &&
> + vdc->guest_notifier_mask) {
> vdc->guest_notifier_mask(vdev, n, !assign);
> }
>
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index 90f25451..e365960b 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -1677,6 +1677,7 @@ void virtio_init(VirtIODevice *vdev, const char *name,
> vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
> vdev);
> vdev->device_endian = virtio_default_endian();
> + vdev->use_guest_notifier_mask = true;
> }
>
> hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [Qemu-devel] [PATCH v5] vhost-user interrupt management fixes
2016-02-19 9:02 ` Didier Pallard
@ 2016-02-20 18:05 ` Michael S. Tsirkin
0 siblings, 0 replies; 3+ messages in thread
From: Michael S. Tsirkin @ 2016-02-20 18:05 UTC (permalink / raw)
To: Didier Pallard
Cc: thibaut.collet, Jason Wang, Victor Kaplansky, qemu-devel, jmg
On Fri, Feb 19, 2016 at 10:02:16AM +0100, Didier Pallard wrote:
> Hi victor,
>
> I'm sorry, i didn't get time to work on this patch.
> Thanks for your work.
>
> didier
Could you please confirm that the patch works for you?
>
> On 02/18/2016 03:12 PM, Victor Kaplansky wrote:
> >Since guest_mask_notifier can not be used in vhost-user mode due
> >to buffering implied by unix control socket, force
> >use_mask_notifier on virtio devices of vhost-user interfaces, and
> >send correct callfd to the guest at vhost start.
> >
> >Using guest_notifier_mask function in vhost-user case may
> >break interrupt mask paradigm, because mask/unmask is not
> >really done when returning from guest_notifier_mask call, instead
> >message is posted in a unix socket, and processed later.
> >
> >Add an option boolean flag 'use_mask_notifier' to disable the use
> >of guest_notifier_mask in virtio pci.
> >
> >Signed-off-by: Didier Pallard <didier.pallard@6wind.com>
> >Signed-off-by: Victor Kaplansky <victork@redhat.com>
> >---
> >v5 changes:
> > - rebased to mst tree.
> > - removed a traling space.
> >
> >v4 changes:
> > In respond to Michael S. Tsirkin comments:
> > - changed the name of new field to use_guest_notifier_mask
> > - clarified comments
> > - vhost_virtqueue_mask() called for unmask
> > - cosmetic changes
> > - moved the initialization of the new field from virtio_reset
> > to virtio_init.
> >
> >v3 changes:
> > In respond to Michael S. Tsirkin comments:
> > - vhost_net.c: removed dependency on virtio-pci.h
> > - vhost.c: simplified the check for vhost-user backend,
> > replaced by checking use_mask_notifier; added comment
> > explaining why vring for vhost-user initialized in
> > unmasked state;
> > - cosmetic fixes.
> >
> >v2 changes:
> > - a new boolean field is added to all virtio devices instead
> > of defining a property in some virtio-pci devices.
> >
> > include/hw/virtio/virtio.h | 1 +
> > hw/net/vhost_net.c | 15 +++++++++++++--
> > hw/virtio/vhost.c | 9 +++++++++
> > hw/virtio/virtio-pci.c | 14 ++++++++------
> > hw/virtio/virtio.c | 1 +
> > 5 files changed, 32 insertions(+), 8 deletions(-)
> >
> >diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> >index 108cdb0f..c38a2fef 100644
> >--- a/include/hw/virtio/virtio.h
> >+++ b/include/hw/virtio/virtio.h
> >@@ -90,6 +90,7 @@ struct VirtIODevice
> > VMChangeStateEntry *vmstate;
> > char *bus_name;
> > uint8_t device_endian;
> >+ bool use_guest_notifier_mask;
> > QLIST_HEAD(, VirtQueue) *vector_queues;
> > };
> >diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> >index b2428324..6e1032fc 100644
> >--- a/hw/net/vhost_net.c
> >+++ b/hw/net/vhost_net.c
> >@@ -284,8 +284,19 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
> > }
> > for (i = 0; i < total_queues; i++) {
> >- vhost_net_set_vq_index(get_vhost_net(ncs[i].peer), i * 2);
> >- }
> >+ struct vhost_net *net;
> >+
> >+ net = get_vhost_net(ncs[i].peer);
> >+ vhost_net_set_vq_index(net, i * 2);
> >+
> >+ /* Suppress the masking guest notifiers on vhost user
> >+ * because vhost user doesn't interrupt masking/unmasking
> >+ * properly.
> >+ */
> >+ if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
> >+ dev->use_guest_notifier_mask = false;
> >+ }
> >+ }
> > r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
> > if (r < 0) {
> >diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> >index 9f8ac38c..72d0c9e9 100644
> >--- a/hw/virtio/vhost.c
> >+++ b/hw/virtio/vhost.c
> >@@ -875,6 +875,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
> > /* Clear and discard previous events if any. */
> > event_notifier_test_and_clear(&vq->masked_notifier);
> >+ /* Init vring in unmasked state, unless guest_notifier_mask
> >+ * will do it later.
> >+ */
> >+ if (!vdev->use_guest_notifier_mask) {
> >+ /* TODO: check and handle errors. */
> >+ vhost_virtqueue_mask(dev, vdev, idx, false);
> >+ }
> >+
> > return 0;
> > fail_kick:
> >@@ -1167,6 +1175,7 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
> > struct vhost_vring_file file;
> > if (mask) {
> >+ assert(vdev->use_guest_notifier_mask);
> > file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
> > } else {
> > file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
> >diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> >index 5494ff4a..440776c0 100644
> >--- a/hw/virtio/virtio-pci.c
> >+++ b/hw/virtio/virtio-pci.c
> >@@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> > /* If guest supports masking, set up irqfd now.
> > * Otherwise, delay until unmasked in the frontend.
> > */
> >- if (k->guest_notifier_mask) {
> >+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> > if (ret < 0) {
> > kvm_virtio_pci_vq_vector_release(proxy, vector);
> >@@ -822,7 +822,7 @@ undo:
> > if (vector >= msix_nr_vectors_allocated(dev)) {
> > continue;
> > }
> >- if (k->guest_notifier_mask) {
> >+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> > }
> > kvm_virtio_pci_vq_vector_release(proxy, vector);
> >@@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> > /* If guest supports masking, clean up irqfd now.
> > * Otherwise, it was cleaned when masked in the frontend.
> > */
> >- if (k->guest_notifier_mask) {
> >+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> > }
> > kvm_virtio_pci_vq_vector_release(proxy, vector);
> >@@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
> > /* If guest supports masking, irqfd is already setup, unmask it.
> > * Otherwise, set it up now.
> > */
> >- if (k->guest_notifier_mask) {
> >+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > k->guest_notifier_mask(vdev, queue_no, false);
> > /* Test after unmasking to avoid losing events. */
> > if (k->guest_notifier_pending &&
> >@@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
> > /* If guest supports masking, keep irqfd but mask it.
> > * Otherwise, clean it up now.
> > */
> >- if (k->guest_notifier_mask) {
> >+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > k->guest_notifier_mask(vdev, queue_no, true);
> > } else {
> > kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> >@@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
> > event_notifier_cleanup(notifier);
> > }
> >- if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
> >+ if (!msix_enabled(&proxy->pci_dev) &&
> >+ vdev->use_guest_notifier_mask &&
> >+ vdc->guest_notifier_mask) {
> > vdc->guest_notifier_mask(vdev, n, !assign);
> > }
> >diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> >index 90f25451..e365960b 100644
> >--- a/hw/virtio/virtio.c
> >+++ b/hw/virtio/virtio.c
> >@@ -1677,6 +1677,7 @@ void virtio_init(VirtIODevice *vdev, const char *name,
> > vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
> > vdev);
> > vdev->device_endian = virtio_default_endian();
> >+ vdev->use_guest_notifier_mask = true;
> > }
> > hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2016-02-20 18:05 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-02-18 14:12 [Qemu-devel] [PATCH v5] vhost-user interrupt management fixes Victor Kaplansky
2016-02-19 9:02 ` Didier Pallard
2016-02-20 18:05 ` Michael S. Tsirkin
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.