* [PATCH v6 0/9] vhost-vdpa: add support for configure interrupt
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

These patches add support for the configure interrupt.

The code has been tested with vp-vdpa (which supports the configure
interrupt) and with vdpa_sim (which does not).

Tested on both the virtio-pci bus and the virtio-mmio bus.

Changes in v2:
Add support for the virtio-mmio bus
Activate the notifier only while the backend supports the configure interrupt
Misc fixes from v1

Changes in v3:
Fix the coding style problems

Changes in v4:
Misc fixes from v3
Merge set_config_notifier into set_guest_notifier
When vdpa starts, check the feature via VIRTIO_NET_F_STATUS

Changes in v5:
Misc fixes from v4
Split out the code that introduces the configure interrupt type and its callback function
The configure interrupt is now initialized on all virtio-pci and virtio-mmio buses,
but only activated while using the vhost-vdpa driver

Changes in v6:
Misc fixes from v5
Decouple the virtqueue from interrupt setup and misc processing
Fix the bug in virtio_net_handle_rx
Use -1 as the queue number to identify the configure interrupt

Cindy Lu (9):
  hw: Add check for queue number
  virtio-pci:decouple virtqueue from interrupt setting process
  vhost: add new call back function for config interrupt
  vhost-vdpa: add support for config interrupt call back
  vhost:add support for configure interrupt
  virtio-mmio: add support for configure interrupt
  virtio-pci: add support for configure interrupt
  virtio: decouple virtqueue from set notifier fd handler
  virtio-net: add peer_deleted check in virtio_net_handle_rx

 hw/display/vhost-user-gpu.c       |   8 +-
 hw/net/vhost_net.c                |   9 ++
 hw/net/virtio-net.c               |  20 ++-
 hw/s390x/virtio-ccw.c             |   6 +-
 hw/virtio/trace-events            |   2 +
 hw/virtio/vhost-user-fs.c         |  11 +-
 hw/virtio/vhost-vdpa.c            |   7 +
 hw/virtio/vhost-vsock-common.c    |   8 +-
 hw/virtio/vhost.c                 |  70 +++++++++-
 hw/virtio/virtio-crypto.c         |   8 +-
 hw/virtio/virtio-mmio.c           |  30 ++++-
 hw/virtio/virtio-pci.c            | 212 +++++++++++++++++++-----------
 hw/virtio/virtio.c                |  37 ++++--
 include/hw/virtio/vhost-backend.h |   3 +
 include/hw/virtio/vhost.h         |   3 +
 include/hw/virtio/virtio.h        |   4 +-
 include/net/vhost_net.h           |   3 +
 17 files changed, 336 insertions(+), 105 deletions(-)

-- 
2.21.3




* [PATCH v6 1/9] hw: Add check for queue number
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

In order to support the configure interrupt, queue number -1 will be
used to denote the configure interrupt. Since none of these devices
support the configure interrupt, add a check here: if the idx is -1,
the function returns early.
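
For context, the guard added to every device below boils down to an
early return on the sentinel index. A minimal stand-alone sketch of
that shape (the names here are illustrative stand-ins, not QEMU
identifiers):

    #include <stdbool.h>

    /* -1 is the sentinel meaning "configure interrupt, not a virtqueue". */
    static bool guest_notifier_pending(int idx)
    {
        if (idx == -1) {
            /* These devices have no vhost config interrupt to query. */
            return false;
        }
        /* ... the normal per-virtqueue pending check would go here ... */
        return false;
    }

    int main(void)
    {
        return guest_notifier_pending(-1) ? 1 : 0;
    }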

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/display/vhost-user-gpu.c    |  8 ++++++--
 hw/net/virtio-net.c            | 10 +++++++---
 hw/virtio/vhost-user-fs.c      | 11 +++++++----
 hw/virtio/vhost-vsock-common.c |  8 ++++++--
 hw/virtio/virtio-crypto.c      |  8 ++++++--
 5 files changed, 32 insertions(+), 13 deletions(-)

diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
index 51f1747c4a..d8e26cedf1 100644
--- a/hw/display/vhost-user-gpu.c
+++ b/hw/display/vhost-user-gpu.c
@@ -490,7 +490,9 @@ static bool
 vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
 {
     VhostUserGPU *g = VHOST_USER_GPU(vdev);
-
+    if (idx == -1) {
+        return false;
+    }
     return vhost_virtqueue_pending(&g->vhost->dev, idx);
 }
 
@@ -498,7 +500,9 @@ static void
 vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
 {
     VhostUserGPU *g = VHOST_USER_GPU(vdev);
-
+    if (idx == -1) {
+        return;
+    }
     vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
 }
 
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 9179013ac4..78ccaa228c 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3060,7 +3060,10 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
     VirtIONet *n = VIRTIO_NET(vdev);
     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
     assert(n->vhost_started);
-    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
+    if (idx != -1) {
+        return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
+    }
+    return false;
 }
 
 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
@@ -3069,8 +3072,9 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
     VirtIONet *n = VIRTIO_NET(vdev);
     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
     assert(n->vhost_started);
-    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
-                             vdev, idx, mask);
+    if (idx != -1) {
+        vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
+     }
 }
 
 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index 1bc5d03a00..37424c2193 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -142,18 +142,21 @@ static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
      */
 }
 
-static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
-                                            bool mask)
+static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
 {
     VHostUserFS *fs = VHOST_USER_FS(vdev);
-
+    if (idx == -1) {
+        return;
+    }
     vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
 }
 
 static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
 {
     VHostUserFS *fs = VHOST_USER_FS(vdev);
-
+    if (idx == -1) {
+        return false;
+    }
     return vhost_virtqueue_pending(&fs->vhost_dev, idx);
 }
 
diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index 5b2ebf3496..0adf823d37 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -100,7 +100,9 @@ static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                             bool mask)
 {
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
-
+    if (idx == -1) {
+        return;
+    }
     vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, mask);
 }
 
@@ -108,7 +110,9 @@ static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev,
                                                int idx)
 {
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
-
+    if (idx == -1) {
+        return false;
+    }
     return vhost_virtqueue_pending(&vvc->vhost_dev, idx);
 }
 
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index 54f9bbb789..c47f4ffb24 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -947,7 +947,9 @@ static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
     int queue = virtio_crypto_vq2q(idx);
 
     assert(vcrypto->vhost_started);
-
+    if (idx == -1) {
+        return;
+    }
     cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask);
 }
 
@@ -957,7 +959,9 @@ static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
     int queue = virtio_crypto_vq2q(idx);
 
     assert(vcrypto->vhost_started);
-
+    if (idx == -1) {
+        return false;
+    }
     return cryptodev_vhost_virtqueue_pending(vdev, queue, idx);
 }
 
-- 
2.21.3




* [PATCH v6 2/9] virtio-pci:decouple virtqueue from interrupt setting process
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

The interrupt/vector code is currently coupled to the virtqueue
number. This patch decouples the virtqueue number from these
functions.
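
Sketched abstractly, the refactor moves the queue-to-notifier lookup
from the irqfd helpers into their callers, so the helpers can later
be reused for a notifier that has no queue number at all (the config
notifier). A stand-alone model with stand-in types, not the QEMU API:

    typedef struct EventNotifier { int fd; } EventNotifier;
    typedef struct VirtQueue { EventNotifier guest_notifier; } VirtQueue;

    /* After the patch, the helper takes the notifier directly... */
    static int irqfd_use(EventNotifier *n, unsigned int vector)
    {
        (void)vector;
        return n->fd >= 0 ? 0 : -1;
    }

    /* ...and the caller resolves it from the queue before the call. */
    static int vector_use_one(VirtQueue *vq, unsigned int vector)
    {
        EventNotifier *n = &vq->guest_notifier;
        return irqfd_use(n, vector);
    }

    int main(void)
    {
        VirtQueue vq = { .guest_notifier = { .fd = 5 } };
        return vector_use_one(&vq, 0);
    }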

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/virtio/virtio-pci.c | 51 ++++++++++++++++++++++++------------------
 1 file changed, 29 insertions(+), 22 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 36524a5728..2b7e6cc0d9 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -691,23 +691,17 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
 }
 
 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
-                                 unsigned int queue_no,
+                                 EventNotifier *n,
                                  unsigned int vector)
 {
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
-    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
-    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
 }
 
 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
-                                      unsigned int queue_no,
+                                      EventNotifier *n ,
                                       unsigned int vector)
 {
-    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
-    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
     int ret;
 
@@ -722,7 +716,8 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
     unsigned int vector;
     int ret, queue_no;
-
+    VirtQueue *vq;
+    EventNotifier *n;
     for (queue_no = 0; queue_no < nvqs; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
@@ -739,7 +734,9 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
          * Otherwise, delay until unmasked in the frontend.
          */
         if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+            vq = virtio_get_queue(vdev, queue_no);
+            n = virtio_queue_get_guest_notifier(vq);
+            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
             if (ret < 0) {
                 kvm_virtio_pci_vq_vector_release(proxy, vector);
                 goto undo;
@@ -755,7 +752,9 @@ undo:
             continue;
         }
         if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+            vq = virtio_get_queue(vdev, queue_no);
+            n = virtio_queue_get_guest_notifier(vq);
+            kvm_virtio_pci_irqfd_release(proxy, n, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
     }
@@ -769,7 +768,8 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
     unsigned int vector;
     int queue_no;
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-
+    VirtQueue *vq;
+    EventNotifier *n;
     for (queue_no = 0; queue_no < nvqs; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
@@ -782,7 +782,9 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
          * Otherwise, it was cleaned when masked in the frontend.
          */
         if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+            vq = virtio_get_queue(vdev, queue_no);
+            n = virtio_queue_get_guest_notifier(vq);
+            kvm_virtio_pci_irqfd_release(proxy, n, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
     }
@@ -791,12 +793,11 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
-                                       MSIMessage msg)
+                                       MSIMessage msg,
+                                        EventNotifier *n)
 {
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
-    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     VirtIOIRQFD *irqfd;
     int ret = 0;
 
@@ -823,14 +824,15 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
             event_notifier_set(n);
         }
     } else {
-        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
     }
     return ret;
 }
 
 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                              unsigned int queue_no,
-                                             unsigned int vector)
+                                             unsigned int vector,
+                                             EventNotifier *n)
 {
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
@@ -841,7 +843,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
     if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, true);
     } else {
-        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+        kvm_virtio_pci_irqfd_release(proxy, n, vector);
     }
 }
 
@@ -851,6 +853,7 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+    EventNotifier *n;
     int ret, index, unmasked = 0;
 
     while (vq) {
@@ -859,7 +862,8 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
             break;
         }
         if (index < proxy->nvqs_with_notifiers) {
-            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
+            n = virtio_queue_get_guest_notifier(vq);
+            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg, n);
             if (ret < 0) {
                 goto undo;
             }
@@ -875,7 +879,8 @@ undo:
     while (vq && unmasked >= 0) {
         index = virtio_get_queue_index(vq);
         if (index < proxy->nvqs_with_notifiers) {
-            virtio_pci_vq_vector_mask(proxy, index, vector);
+            n = virtio_queue_get_guest_notifier(vq);
+            virtio_pci_vq_vector_mask(proxy, index, vector, n);
             --unmasked;
         }
         vq = virtio_vector_next_queue(vq);
@@ -888,15 +893,17 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+    EventNotifier *n;
     int index;
 
     while (vq) {
         index = virtio_get_queue_index(vq);
+         n = virtio_queue_get_guest_notifier(vq);
         if (!virtio_queue_get_num(vdev, index)) {
             break;
         }
         if (index < proxy->nvqs_with_notifiers) {
-            virtio_pci_vq_vector_mask(proxy, index, vector);
+            virtio_pci_vq_vector_mask(proxy, index, vector, n);
         }
         vq = virtio_vector_next_queue(vq);
     }
-- 
2.21.3




* [PATCH v6 3/9] vhost: add new call back function for config interrupt
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

To support the configure interrupt, we need to add a new
callback function for the config interrupt.
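
A hedged stand-alone model of how such an optional VhostOps hook is
typically consumed: the pointer stays NULL for backends that lack it,
so callers must check before dispatching (stand-in types below, not
the real vhost headers):

    #include <stddef.h>

    struct vhost_dev { int dummy; };

    typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev, int *fd);

    struct VhostOps {
        vhost_set_config_call_op vhost_set_config_call;
    };

    static int set_config_call(struct vhost_dev *dev,
                               const struct VhostOps *ops, int *fd)
    {
        if (!ops->vhost_set_config_call) {
            return -1; /* backend does not support config interrupts */
        }
        return ops->vhost_set_config_call(dev, fd);
    }

    int main(void)
    {
        struct vhost_dev dev = { 0 };
        struct VhostOps ops = { NULL }; /* a backend without the hook */
        int fd = -1;
        return set_config_call(&dev, &ops, &fd) == -1 ? 0 : 1;
    }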

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 include/hw/virtio/vhost-backend.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 8a6f8e2a7a..adaf6982d2 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -125,6 +125,8 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
 
 typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
 
+typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
+                                       int *fd);
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_backend_init vhost_backend_init;
@@ -170,6 +172,7 @@ typedef struct VhostOps {
     vhost_vq_get_addr_op  vhost_vq_get_addr;
     vhost_get_device_id_op vhost_get_device_id;
     vhost_force_iommu_op vhost_force_iommu;
+    vhost_set_config_call_op vhost_set_config_call;
 } VhostOps;
 
 extern const VhostOps user_ops;
-- 
2.21.3




* [PATCH v6 4/9] vhost-vdpa: add support for config interrupt call back
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

Add a new callback function in vhost-vdpa; this callback is only
supported by the vhost-vdpa backend.
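
For reference, at the kernel boundary this callback is a plain ioctl
on the vhost-vdpa device fd. A hedged stand-alone sketch, assuming a
kernel whose linux/vhost.h defines VHOST_VDPA_SET_CONFIG_CALL and a
device node named /dev/vhost-vdpa-0 (error handling trimmed):

    #include <fcntl.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/vhost.h>

    int main(void)
    {
        int dev = open("/dev/vhost-vdpa-0", O_RDWR);
        int call_fd = eventfd(0, EFD_NONBLOCK);

        if (dev < 0 || call_fd < 0) {
            return 1;
        }
        /* Attach: the kernel signals call_fd on config-space changes. */
        ioctl(dev, VHOST_VDPA_SET_CONFIG_CALL, &call_fd);

        /* Detach again, as QEMU does in vhost_dev_stop(), by passing -1. */
        int off = -1;
        ioctl(dev, VHOST_VDPA_SET_CONFIG_CALL, &off);

        close(call_fd);
        close(dev);
        return 0;
    }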

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/virtio/trace-events | 2 ++
 hw/virtio/vhost-vdpa.c | 7 +++++++
 2 files changed, 9 insertions(+)

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 2060a144a2..6710835b46 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -52,6 +52,8 @@ vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index:
 vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
 vhost_vdpa_set_owner(void *dev) "dev: %p"
 vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
+vhost_vdpa_set_config_call(void *dev, int *fd)"dev: %p fd: %p"
+
 
 # virtio.c
 virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 01d2101d09..9ba2a2bed4 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -545,6 +545,12 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
     trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
 }
+static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
+                                       int *fd)
+{
+    trace_vhost_vdpa_set_config_call(dev, fd);
+    return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, fd);
+}
 
 static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                      uint64_t *features)
@@ -611,4 +617,5 @@ const VhostOps vdpa_ops = {
         .vhost_get_device_id = vhost_vdpa_get_device_id,
         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
         .vhost_force_iommu = vhost_vdpa_force_iommu,
+        .vhost_set_config_call = vhost_vdpa_set_config_call,
 };
-- 
2.21.3




* [PATCH v6 5/9] vhost:add support for configure interrupt
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

Add configure notifier support in vhost and the related drivers.
When the backend supports VIRTIO_NET_F_STATUS, set up the configure
interrupt in vhost_dev_start and release the related resources in
vhost_dev_stop.
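
The masking logic added here boils down to swapping which eventfd the
backend signals: masked events land in a shadow notifier that is
polled later via vhost_config_pending(). A minimal stand-alone model
(field names are stand-ins mirroring masked_config_notifier and
config_notifier):

    #include <stdbool.h>

    struct config_notifiers {
        int masked_fd;   /* models hdev->masked_config_notifier */
        int live_fd;     /* models vdev->config_notifier */
    };

    /* Masked: events accumulate in the shadow notifier.
     * Unmasked: they fire directly into the live config notifier. */
    static int config_call_fd(const struct config_notifiers *n, bool mask)
    {
        return mask ? n->masked_fd : n->live_fd;
    }

    int main(void)
    {
        struct config_notifiers n = { .masked_fd = 3, .live_fd = 4 };
        return config_call_fd(&n, true) == 3 ? 0 : 1;
    }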

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/net/vhost_net.c         |  9 +++++
 hw/net/virtio-net.c        |  6 ++++
 hw/virtio/vhost.c          | 70 ++++++++++++++++++++++++++++++++++++--
 hw/virtio/virtio.c         | 22 ++++++++++++
 include/hw/virtio/vhost.h  |  3 ++
 include/hw/virtio/virtio.h |  4 +++
 include/net/vhost_net.h    |  3 ++
 7 files changed, 115 insertions(+), 2 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 24d555e764..12e30dc25e 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -426,6 +426,15 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
     vhost_virtqueue_mask(&net->dev, dev, idx, mask);
 }
 
+bool vhost_net_config_pending(VHostNetState *net, int idx)
+{
+    return vhost_config_pending(&net->dev, idx);
+}
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
+                              bool mask)
+{
+    vhost_config_mask(&net->dev, dev,  mask);
+}
 VHostNetState *get_vhost_net(NetClientState *nc)
 {
     VHostNetState *vhost_net = 0;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 78ccaa228c..43b912453a 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3063,6 +3063,9 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
     if (idx != -1) {
         return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
     }
+    if (idx == -1) {
+        return vhost_net_config_pending(get_vhost_net(nc->peer), idx);
+   }
     return false;
 }
 
@@ -3075,6 +3078,9 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
     if (idx != -1) {
         vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
      }
+    if (idx == -1) {
+        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
+     }
 }
 
 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 614ccc2bcb..162a5dd90c 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -21,6 +21,7 @@
 #include "qemu/error-report.h"
 #include "qemu/memfd.h"
 #include "standard-headers/linux/vhost_types.h"
+#include "standard-headers/linux/virtio_net.h"
 #include "exec/address-spaces.h"
 #include "hw/virtio/virtio-bus.h"
 #include "hw/virtio/virtio-access.h"
@@ -1313,6 +1314,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
             goto fail;
         }
     }
+    r = event_notifier_init(&hdev->masked_config_notifier, 0);
+    if (r < 0) {
+        return r;
+    }
 
     if (busyloop_timeout) {
         for (i = 0; i < hdev->nvqs; ++i) {
@@ -1405,6 +1410,7 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
     for (i = 0; i < hdev->nvqs; ++i) {
         vhost_virtqueue_cleanup(hdev->vqs + i);
     }
+    event_notifier_cleanup(&hdev->masked_config_notifier);
     if (hdev->mem) {
         /* those are only safe after successful init */
         memory_listener_unregister(&hdev->memory_listener);
@@ -1498,6 +1504,16 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
     return event_notifier_test_and_clear(&vq->masked_notifier);
 }
 
+bool vhost_config_pending(struct vhost_dev *hdev, int n)
+{
+    assert(hdev->vhost_ops);
+
+    if ((hdev->started == false) ||
+        (hdev->vhost_ops->vhost_set_config_call == NULL)) {
+        return false;
+    }
+    return event_notifier_test_and_clear(&hdev->masked_config_notifier);
+}
 /* Mask/unmask events from this vq. */
 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
@@ -1522,6 +1538,30 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
     }
 }
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
+                         bool mask)
+{
+   int fd;
+   int r;
+   EventNotifier *masked_config_notifier = &hdev->masked_config_notifier;
+   EventNotifier *config_notifier = &vdev->config_notifier;
+   assert(hdev->vhost_ops);
+
+   if ((hdev->started == false) ||
+        (hdev->vhost_ops->vhost_set_config_call == NULL)) {
+        return ;
+    }
+    if (mask) {
+        assert(vdev->use_guest_notifier_mask);
+        fd = event_notifier_get_fd(masked_config_notifier);
+    } else {
+        fd = event_notifier_get_fd(config_notifier);
+    }
+   r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
+   if (r < 0) {
+        error_report("vhost_set_config_call failed");
+    }
+}
 
 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                             uint64_t features)
@@ -1701,6 +1741,7 @@ int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
     int i, r;
+    int fd = 0;
 
     /* should only be called after backend is connected */
     assert(hdev->vhost_ops);
@@ -1732,7 +1773,10 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
             goto fail_vq;
         }
     }
-
+    event_notifier_test_and_clear(&hdev->masked_config_notifier);
+    if (!vdev->use_guest_notifier_mask) {
+        vhost_config_mask(hdev, vdev,  true);
+    }
     if (hdev->log_enabled) {
         uint64_t log_base;
 
@@ -1749,6 +1793,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
             goto fail_log;
         }
     }
+
     if (hdev->vhost_ops->vhost_dev_start) {
         r = hdev->vhost_ops->vhost_dev_start(hdev, true);
         if (r) {
@@ -1766,6 +1811,19 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
         }
     }
+   if (!(hdev->features & (0x1ULL << VIRTIO_NET_F_STATUS))) {
+        return 0;
+    }
+    if (hdev->vhost_ops->vhost_set_config_call) {
+        fd = event_notifier_get_fd(&vdev->config_notifier);
+        r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
+        if (!r) {
+            event_notifier_set(&vdev->config_notifier);
+        }
+        if (r) {
+            goto fail_log;
+         }
+    }
     return 0;
 fail_log:
     vhost_log_put(hdev, false);
@@ -1788,10 +1846,18 @@ fail_features:
 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
     int i;
+    int fd;
 
     /* should only be called after backend is connected */
     assert(hdev->vhost_ops);
-
+    event_notifier_test_and_clear(&hdev->masked_config_notifier);
+    event_notifier_test_and_clear(&vdev->config_notifier);
+    if ((hdev->features & (0x1ULL << VIRTIO_NET_F_STATUS))) {
+        if (hdev->vhost_ops->vhost_set_config_call) {
+            fd = -1;
+            hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
+        }
+    }
     if (hdev->vhost_ops->vhost_dev_start) {
         hdev->vhost_ops->vhost_dev_start(hdev, false);
     }
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index ceb58fda6c..5dff29c981 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3502,6 +3502,14 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
     }
 }
 
+static void virtio_config_read(EventNotifier *n)
+{
+    VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
+
+    if (event_notifier_test_and_clear(n)) {
+        virtio_notify_config(vdev);
+    }
+}
 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                 bool with_irqfd)
 {
@@ -3517,6 +3525,16 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
         virtio_queue_guest_notifier_read(&vq->guest_notifier);
     }
 }
+void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
+                                                bool with_irqfd)
+{
+    if (assign && !with_irqfd) {
+        event_notifier_set_handler(&vdev->config_notifier,
+                                   virtio_config_read);
+    } else {
+       event_notifier_set_handler(&vdev->config_notifier, NULL);
+    }
+}
 
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
 {
@@ -3591,6 +3609,10 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
     return &vq->host_notifier;
 }
 
+EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev)
+{
+    return &vdev->config_notifier;
+}
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
 {
     vq->host_notifier_enabled = enabled;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 4a8bc75415..22efa7008e 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -91,6 +91,7 @@ struct vhost_dev {
     QLIST_HEAD(, vhost_iommu) iommu_list;
     IOMMUNotifier n;
     const VhostDevConfigOps *config_ops;
+    EventNotifier masked_config_notifier;
 };
 
 struct vhost_net {
@@ -108,6 +109,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+bool vhost_config_pending(struct vhost_dev *hdev, int n);
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev,  bool mask);
 
 /* Test and clear masked event pending status.
  * Should be called after unmask to avoid losing events.
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index b7ece7a6a8..b0b714f6d4 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -108,6 +108,7 @@ struct VirtIODevice
     bool use_guest_notifier_mask;
     AddressSpace *dma_as;
     QLIST_HEAD(, VirtQueue) *vector_queues;
+    EventNotifier config_notifier;
 };
 
 struct VirtioDeviceClass {
@@ -310,11 +311,14 @@ uint16_t virtio_get_queue_index(VirtQueue *vq);
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                 bool with_irqfd);
+void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
+                                                bool with_irqfd);
 int virtio_device_start_ioeventfd(VirtIODevice *vdev);
 int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
 void virtio_device_release_ioeventfd(VirtIODevice *vdev);
 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev);
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
 void virtio_queue_host_notifier_read(EventNotifier *n);
 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 172b0051d8..0d38c97c94 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -36,6 +36,9 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
 bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
 void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                               int idx, bool mask);
+bool vhost_net_config_pending(VHostNetState *net, int n);
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
+                              bool mask);
 int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
 VHostNetState *get_vhost_net(NetClientState *nc);
 
-- 
2.21.3




* [PATCH v6 6/9] virtio-mmio: add support for configure interrupt
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

Add configure interrupt support for the virtio-mmio bus. This
interrupt only works while the backend is vhost-vdpa.
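
The hook added below follows the usual assign/deassign pairing for
notifiers: init the eventfd and install the handler on assign, and
tear both down in reverse order otherwise. A stand-alone model of
that shape (stub functions, not the QEMU event-notifier API):

    #include <stdbool.h>

    static int  notifier_init(void)    { return 0; }
    static void notifier_cleanup(void) { }
    static void set_handler(bool on)   { (void)on; }

    static int set_config_notifier(bool assign)
    {
        int r = 0;
        if (assign) {
            r = notifier_init();      /* create the eventfd... */
            set_handler(true);        /* ...then install the handler */
        } else {
            set_handler(false);       /* tear down in reverse order */
            notifier_cleanup();
        }
        return r;
    }

    int main(void)
    {
        int r = set_config_notifier(true);
        set_config_notifier(false);
        return r;
    }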

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/virtio/virtio-mmio.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index e1b5c3b81e..d8cb368728 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -632,7 +632,26 @@ static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
 
     return 0;
 }
+static int virtio_mmio_set_config_notifier(DeviceState *d,  bool assign)
+{
+    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+        VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
 
+    EventNotifier *notifier = virtio_get_config_notifier(vdev);
+    int r = 0;
+    if (assign) {
+        r = event_notifier_init(notifier, 0);
+        virtio_set_config_notifier_fd_handler(vdev, true, false);
+    } else {
+        virtio_set_config_notifier_fd_handler(vdev, false, false);
+        event_notifier_cleanup(notifier);
+    }
+        if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
+            vdc->guest_notifier_mask(vdev, -1, !assign);
+    }
+    return r;
+}
 static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                            bool assign)
 {
@@ -654,8 +673,15 @@ static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
             goto assign_error;
         }
     }
+   r = virtio_mmio_set_config_notifier(d, assign);
+   if (r < 0) {
+            goto config_assign_error;
+   }
 
     return 0;
+config_assign_error:
+    assert(assign);
+    r = virtio_mmio_set_config_notifier(d, false);
 
 assign_error:
     /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
-- 
2.21.3




* [PATCH v6 7/9] virtio-pci: add support for configure interrupt
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

Add support for the configure interrupt: use kvm_irqfd_assign to set
the gsi in the kernel. When the host kernel signals the configure
notifier via eventfd_signal, this ultimately injects an MSI-X
interrupt into the guest.
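
The kernel-side plumbing referred to here is the KVM irqfd mechanism:
an eventfd is tied to a guest GSI with the KVM_IRQFD ioctl, so a
signal on the fd is injected as a guest interrupt without a trip
through userspace. A hedged fragment showing just the shape of that
call (VM creation and GSI routing are elided):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Bind event_fd to the guest interrupt line `gsi` on VM `vm_fd`. */
    static int irqfd_assign(int vm_fd, int event_fd, unsigned int gsi)
    {
        struct kvm_irqfd irqfd;

        memset(&irqfd, 0, sizeof(irqfd));
        irqfd.fd  = event_fd;
        irqfd.gsi = gsi;
        return ioctl(vm_fd, KVM_IRQFD, &irqfd);
    }

    int main(void)
    {
        /* Needs a real KVM VM fd to do anything useful. */
        return irqfd_assign(-1, -1, 0) < 0 ? 0 : 1;
    }
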
---
 hw/virtio/virtio-pci.c | 186 ++++++++++++++++++++++++++---------------
 1 file changed, 120 insertions(+), 66 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 2b7e6cc0d9..07d28dd367 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -664,12 +664,10 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
 }
 
 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
-                                        unsigned int queue_no,
                                         unsigned int vector)
 {
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
     int ret;
-
     if (irqfd->users == 0) {
         ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
         if (ret < 0) {
@@ -708,93 +706,120 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
     ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
     assert(ret == 0);
 }
-
-static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
+                                      EventNotifier **n, unsigned int *vector)
 {
     PCIDevice *dev = &proxy->pci_dev;
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-    unsigned int vector;
-    int ret, queue_no;
     VirtQueue *vq;
-    EventNotifier *n;
-    for (queue_no = 0; queue_no < nvqs; queue_no++) {
+
+    if (queue_no == -1) {
+        *n = virtio_get_config_notifier(vdev);
+        *vector = vdev->config_vector;
+    } else {
         if (!virtio_queue_get_num(vdev, queue_no)) {
-            break;
-        }
-        vector = virtio_queue_vector(vdev, queue_no);
-        if (vector >= msix_nr_vectors_allocated(dev)) {
-            continue;
-        }
-        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
-        if (ret < 0) {
-            goto undo;
-        }
-        /* If guest supports masking, set up irqfd now.
-         * Otherwise, delay until unmasked in the frontend.
-         */
-        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            vq = virtio_get_queue(vdev, queue_no);
-            n = virtio_queue_get_guest_notifier(vq);
-            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
-            if (ret < 0) {
-                kvm_virtio_pci_vq_vector_release(proxy, vector);
-                goto undo;
-            }
+            return -1;
         }
+        *vector = virtio_queue_vector(vdev, queue_no);
+        vq = virtio_get_queue(vdev, queue_no);
+        *n = virtio_queue_get_guest_notifier(vq);
+    }
+    if (*vector >= msix_nr_vectors_allocated(dev)) {
+        return -1;
     }
     return 0;
+}
 
+static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
+{
+    unsigned int vector;
+    int ret;
+    EventNotifier *n;
+    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+    if (ret < 0) {
+        return ret;
+    }
+    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
+    if (ret < 0) {
+        goto undo;
+    }
+    ret = kvm_virtio_pci_irqfd_use(proxy,  n, vector);
+    if (ret < 0) {
+        goto undo;
+    }
+    return 0;
 undo:
-    while (--queue_no >= 0) {
-        vector = virtio_queue_vector(vdev, queue_no);
-        if (vector >= msix_nr_vectors_allocated(dev)) {
-            continue;
-        }
-        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            vq = virtio_get_queue(vdev, queue_no);
-            n = virtio_queue_get_guest_notifier(vq);
-            kvm_virtio_pci_irqfd_release(proxy, n, vector);
-        }
-        kvm_virtio_pci_vq_vector_release(proxy, vector);
+    kvm_virtio_pci_irqfd_release(proxy, n, vector);
+    return ret;
+}
+static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+{
+    int queue_no;
+    int ret = 0;
+    for (queue_no = 0; queue_no < nvqs; queue_no++) {
+        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
     }
     return ret;
 }
 
-static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
+{
+    return kvm_virtio_pci_vector_use_one(proxy, -1);
+ }
+
+static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
+                        int queue_no)
 {
-    PCIDevice *dev = &proxy->pci_dev;
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     unsigned int vector;
-    int queue_no;
-    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-    VirtQueue *vq;
     EventNotifier *n;
+    int ret;
+    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+    if (ret < 0) {
+        return;
+    }
+
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+        kvm_virtio_pci_irqfd_release(proxy, n, vector);
+    }
+    kvm_virtio_pci_vq_vector_release(proxy, vector);
+}
+static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+{
+    int queue_no;
+
     for (queue_no = 0; queue_no < nvqs; queue_no++) {
-        if (!virtio_queue_get_num(vdev, queue_no)) {
-            break;
-        }
-        vector = virtio_queue_vector(vdev, queue_no);
-        if (vector >= msix_nr_vectors_allocated(dev)) {
-            continue;
-        }
-        /* If guest supports masking, clean up irqfd now.
-         * Otherwise, it was cleaned when masked in the frontend.
-         */
-        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            vq = virtio_get_queue(vdev, queue_no);
-            n = virtio_queue_get_guest_notifier(vq);
-            kvm_virtio_pci_irqfd_release(proxy, n, vector);
-        }
-        kvm_virtio_pci_vq_vector_release(proxy, vector);
+        kvm_virtio_pci_vector_release_one(proxy, queue_no);
     }
 }
 
+static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
+{
+    kvm_virtio_pci_vector_release_one(proxy, -1);
+}
+static int virtio_pci_set_config_notifier(DeviceState *d,  bool assign)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    EventNotifier *notifier = virtio_get_config_notifier(vdev);
+    int r = 0;
+    if (assign) {
+        r = event_notifier_init(notifier, 0);
+        virtio_set_config_notifier_fd_handler(vdev, true, true);
+        kvm_virtio_pci_vector_config_use(proxy);
+    } else {
+        virtio_set_config_notifier_fd_handler(vdev, false, true);
+        kvm_virtio_pci_vector_config_release(proxy);
+        event_notifier_cleanup(notifier);
+    }
+    return r;
+}
 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg,
-                                        EventNotifier *n)
+                                       EventNotifier *n)
 {
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
@@ -871,9 +896,16 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
         }
         vq = virtio_vector_next_queue(vq);
     }
-
+    n = virtio_get_config_notifier(vdev);
+    ret = virtio_pci_vq_vector_unmask(proxy, -1, vector, msg, n);
+    if (ret < 0) {
+        goto config_undo;
+    }
     return 0;
 
+ config_undo:
+    n = virtio_get_config_notifier(vdev);
+    virtio_pci_vq_vector_mask(proxy, -1, vector, n);
 undo:
     vq = virtio_vector_first_queue(vdev, vector);
     while (vq && unmasked >= 0) {
@@ -907,6 +939,8 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
         }
         vq = virtio_vector_next_queue(vq);
     }
+    n = virtio_get_config_notifier(vdev);
+    virtio_pci_vq_vector_mask(proxy, -1, vector,  n);
 }
 
 static void virtio_pci_vector_poll(PCIDevice *dev,
@@ -920,7 +954,7 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
     unsigned int vector;
     EventNotifier *notifier;
     VirtQueue *vq;
-
+    /*check for every queue*/
     for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
@@ -940,6 +974,21 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
             msix_set_pending(dev, vector);
         }
     }
+    /*check for config interrupt*/
+
+   vector = vdev->config_vector;
+   notifier = virtio_get_config_notifier(vdev);
+   if (vector < vector_start || vector >= vector_end ||
+            !msix_is_masked(dev, vector)) {
+        return;
+   }
+   if (k->guest_notifier_pending) {
+        if (k->guest_notifier_pending(vdev, -1)) {
+            msix_set_pending(dev, vector);
+        }
+   } else if (event_notifier_test_and_clear(notifier)) {
+        msix_set_pending(dev, vector);
+   }
 }
 
 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
@@ -1027,6 +1076,10 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
                 goto assign_error;
             }
         }
+            r = virtio_pci_set_config_notifier(d, assign);
+            if (r < 0) {
+                goto config_error;
+         }
         r = msix_set_vector_notifiers(&proxy->pci_dev,
                                       virtio_pci_vector_unmask,
                                       virtio_pci_vector_mask,
@@ -1043,7 +1096,8 @@ notifiers_error:
         assert(assign);
         kvm_virtio_pci_vector_release(proxy, nvqs);
     }
-
+ config_error:
+        kvm_virtio_pci_vector_config_release(proxy);
 assign_error:
     /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
     assert(assign);
-- 
2.21.3




* [PATCH v6 8/9] virtio: decouple virtqueue from set notifier fd handler
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

This patch decouples the virtqueue from
virtio_queue_set_guest_notifier_fd_handler; here queue number -1
means the configure interrupt. In that case the function will
install virtio_config_read as the fd handler.
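
The resulting dispatch is a two-way selection on the queue number; a
minimal stand-alone model (function names are stand-ins for
virtio_queue_guest_notifier_read and virtio_config_read):

    typedef void (*notifier_handler)(void);

    static void vq_notifier_read(void)     { /* virtqueue notification */ }
    static void config_notifier_read(void) { /* config-change notification */ }

    static notifier_handler pick_handler(int queue_no)
    {
        /* -1 selects the device-wide config notifier, anything
         * else the per-virtqueue guest notifier. */
        return queue_no == -1 ? config_notifier_read : vq_notifier_read;
    }

    int main(void)
    {
        pick_handler(-1)();  /* config interrupt path */
        pick_handler(0)();   /* virtqueue 0 path */
        return 0;
    }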

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/s390x/virtio-ccw.c      |  6 +++---
 hw/virtio/virtio-mmio.c    |  8 ++++----
 hw/virtio/virtio-pci.c     |  9 +++++----
 hw/virtio/virtio.c         | 35 +++++++++++++++++------------------
 include/hw/virtio/virtio.h |  4 +---
 5 files changed, 30 insertions(+), 32 deletions(-)

diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 4582e94ae7..5d73c99d30 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -989,11 +989,11 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
         if (r < 0) {
             return r;
         }
-        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
+        virtio_set_notifier_fd_handler(vdev, n, true, with_irqfd);
         if (with_irqfd) {
             r = virtio_ccw_add_irqfd(dev, n);
             if (r) {
-                virtio_queue_set_guest_notifier_fd_handler(vq, false,
+                virtio_set_notifier_fd_handler(vdev, n, false,
                                                            with_irqfd);
                 return r;
             }
@@ -1017,7 +1017,7 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
         if (with_irqfd) {
             virtio_ccw_remove_irqfd(dev, n);
         }
-        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
+        virtio_set_notifier_fd_handler(vdev, n, false, with_irqfd);
         event_notifier_cleanup(notifier);
     }
     return 0;
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index d8cb368728..4ea55001be 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -620,9 +620,9 @@ static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
         if (r < 0) {
             return r;
         }
-        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
+        virtio_set_notifier_fd_handler(vdev, n, true, with_irqfd);
     } else {
-        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
+        virtio_set_notifier_fd_handler(vdev, n, false, with_irqfd);
         event_notifier_cleanup(notifier);
     }
 
@@ -642,9 +642,9 @@ static int virtio_mmio_set_config_notifier(DeviceState *d,  bool assign)
     int r = 0;
     if (assign) {
         r = event_notifier_init(notifier, 0);
-        virtio_set_config_notifier_fd_handler(vdev, true, false);
+        virtio_set_notifier_fd_handler(vdev, -1, true, false);
     } else {
-        virtio_set_config_notifier_fd_handler(vdev, false, false);
+        virtio_set_notifier_fd_handler(vdev, -1, false, false);
         event_notifier_cleanup(notifier);
     }
         if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 07d28dd367..5033b3db4f 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -806,10 +806,10 @@ static int virtio_pci_set_config_notifier(DeviceState *d,  bool assign)
     int r = 0;
     if (assign) {
         r = event_notifier_init(notifier, 0);
-        virtio_set_config_notifier_fd_handler(vdev, true, true);
+        virtio_set_notifier_fd_handler(vdev, -1, true, true);
         kvm_virtio_pci_vector_config_use(proxy);
     } else {
-        virtio_set_config_notifier_fd_handler(vdev, false, true);
+        virtio_set_notifier_fd_handler(vdev, -1, false, true);
         kvm_virtio_pci_vector_config_release(proxy);
         event_notifier_cleanup(notifier);
     }
@@ -1005,9 +1005,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
         if (r < 0) {
             return r;
         }
-        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
+        virtio_set_notifier_fd_handler(vdev, n, true, with_irqfd);
     } else {
-        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
+        virtio_set_notifier_fd_handler(vdev, n, false, with_irqfd);
         event_notifier_cleanup(notifier);
     }
 
@@ -1049,6 +1049,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
         msix_unset_vector_notifiers(&proxy->pci_dev);
         if (proxy->vector_irqfd) {
             kvm_virtio_pci_vector_release(proxy, nvqs);
+            kvm_virtio_pci_vector_config_release(proxy);
             g_free(proxy->vector_irqfd);
             proxy->vector_irqfd = NULL;
         }
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 5dff29c981..8f0087deac 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3510,32 +3510,31 @@ static void virtio_config_read(EventNotifier *n)
         virtio_notify_config(vdev);
     }
 }
-void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
-                                                bool with_irqfd)
+
+void virtio_set_notifier_fd_handler(VirtIODevice *vdev, int queue_no,
+                               bool assign, bool with_irqfd)
 {
-    if (assign && !with_irqfd) {
-        event_notifier_set_handler(&vq->guest_notifier,
-                                   virtio_queue_guest_notifier_read);
+    EventNotifier *e ;
+    EventNotifierHandler *handler;
+    if (queue_no != -1) {
+        VirtQueue *vq = virtio_get_queue(vdev, queue_no);
+        e = &vq->guest_notifier;
+        handler = virtio_queue_guest_notifier_read;
+    } else {
+        e = &vdev->config_notifier;
+        handler = virtio_config_read;
+   }
+   if (assign && !with_irqfd) {
+        event_notifier_set_handler(e, handler);
     } else {
-        event_notifier_set_handler(&vq->guest_notifier, NULL);
+        event_notifier_set_handler(e, NULL);
     }
     if (!assign) {
         /* Test and clear notifier before closing it,
          * in case poll callback didn't have time to run. */
-        virtio_queue_guest_notifier_read(&vq->guest_notifier);
+        handler(e);
     }
 }
-void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
-                                                bool with_irqfd)
-{
-    if (assign && !with_irqfd) {
-        event_notifier_set_handler(&vdev->config_notifier,
-                                   virtio_config_read);
-    } else {
-       event_notifier_set_handler(&vdev->config_notifier, NULL);
-    }
-}
-
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
 {
     return &vq->guest_notifier;
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index b0b714f6d4..d22f5a3e7e 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -309,9 +309,7 @@ void virtio_queue_update_used_idx(VirtIODevice *vdev, int n);
 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
 uint16_t virtio_get_queue_index(VirtQueue *vq);
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
-void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
-                                                bool with_irqfd);
-void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
+void virtio_set_notifier_fd_handler(VirtIODevice *vdev, int n, bool assign,
                                                 bool with_irqfd);
 int virtio_device_start_ioeventfd(VirtIODevice *vdev);
 int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
-- 
2.21.3




* [PATCH v6 9/9] virtio-net: add peer_deleted check in virtio_net_handle_rx
From: Cindy Lu @ 2021-04-27  3:39 UTC
  To: lulu, mst, jasowang, qemu-devel

During testing, we found that this function keeps running after the
peer has been deleted, which causes a crash. So add a check for
this.
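
The fix reduces to a lifetime guard at the top of the RX handler; a
minimal stand-alone model (stand-in types):

    #include <stdbool.h>
    #include <stddef.h>

    struct nic_state { bool peer_deleted; };

    static void handle_rx(struct nic_state *nic)
    {
        if (nic == NULL || nic->peer_deleted) {
            return; /* peer already torn down: nothing safe to flush */
        }
        /* ... flush queued packets for this queue here ... */
    }

    int main(void)
    {
        struct nic_state nic = { .peer_deleted = true };
        handle_rx(&nic);  /* returns early instead of crashing */
        return 0;
    }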

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/net/virtio-net.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 43b912453a..1be3f8e76f 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1403,7 +1403,9 @@ static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
     int queue_index = vq2q(virtio_get_queue_index(vq));
-
+    if (n->nic->peer_deleted) {
+        return;
+    }
     qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
 }
 
-- 
2.21.3




* Re: [PATCH v6 0/9] vhost-vdpa: add support for configure interrupt
From: no-reply @ 2021-04-27  3:57 UTC (permalink / raw)
  To: lulu; +Cc: jasowang, qemu-devel, lulu, mst

Patchew URL: https://patchew.org/QEMU/20210427033951.29805-1-lulu@redhat.com/



Hi,

This series seems to have some coding style problems. See output below for
more information:

Type: series
Message-id: 20210427033951.29805-1-lulu@redhat.com
Subject: [PATCH v6 0/9] vhost-vdpa: add support for configure interrupt

=== TEST SCRIPT BEGIN ===
#!/bin/bash
git rev-parse base > /dev/null || exit 0
git config --local diff.renamelimit 0
git config --local diff.renames True
git config --local diff.algorithm histogram
./scripts/checkpatch.pl --mailback base..
=== TEST SCRIPT END ===

Updating 3c8cf5a9c21ff8782164d1def7f44bd888713384
From https://github.com/patchew-project/qemu
 - [tag update]      patchew/20210424162229.3312116-1-f4bug@amsat.org -> patchew/20210424162229.3312116-1-f4bug@amsat.org
 * [new tag]         patchew/20210427033951.29805-1-lulu@redhat.com -> patchew/20210427033951.29805-1-lulu@redhat.com
Switched to a new branch 'test'
75b5f19 virtio-net: add peer_deleted check in virtio_net_handle_rx
e6c2a2d virtio: decouple virtqueue from set notifier fd handler
d7e3243 virtio-pci: add support for configure interrupt
55946c5 virtio-mmio: add support for configure interrupt
0e8ea81 vhost:add support for configure interrupt
665e11d vhost-vdpa: add support for config interrupt call back
e83f793 vhost: add new call back function for config interrupt
3ce0821 virtio-pci:decouple virtqueue from interrupt setting process
d4debe8 hw: Add check for queue number

=== OUTPUT BEGIN ===
1/9 Checking commit d4debe818e21 (hw: Add check for queue number)
2/9 Checking commit 3ce082180cca (virtio-pci:decouple virtqueue from interrupt setting process)
3/9 Checking commit e83f7938542d (vhost: add new call back function for config interrupt)
4/9 Checking commit 665e11d3c98f (vhost-vdpa: add support for config interrupt call back)
5/9 Checking commit 0e8ea81cf5a0 (vhost:add support for configure interrupt)
6/9 Checking commit 55946c55ee11 (virtio-mmio: add support for configure interrupt)
7/9 Checking commit d7e3243f82ec (virtio-pci: add support for configure interrupt)
ERROR: Missing Signed-off-by: line(s)

total: 1 errors, 0 warnings, 266 lines checked

Patch 7/9 has style problems, please review.  If any of these errors
are false positives report them to the maintainer, see
CHECKPATCH in MAINTAINERS.

8/9 Checking commit e6c2a2d36bce (virtio: decouple virtqueue from set notifier fd handler)
9/9 Checking commit 75b5f193ac32 (virtio-net: add peer_deleted check in virtio_net_handle_rx)
=== OUTPUT END ===

Test command exited with code: 1


The full log is available at
http://patchew.org/logs/20210427033951.29805-1-lulu@redhat.com/testing.checkpatch/?type=message.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com


* Re: [PATCH v6 1/9] hw: Add check for queue number
  2021-04-27  3:39 ` [PATCH v6 1/9] hw: Add check for queue number Cindy Lu
@ 2021-04-27  5:39   ` Jason Wang
  2021-04-29  3:08     ` Cindy Lu
  0 siblings, 1 reply; 20+ messages in thread
From: Jason Wang @ 2021-04-27  5:39 UTC (permalink / raw)
  To: Cindy Lu, mst, qemu-devel


On 2021/4/27 11:39 AM, Cindy Lu wrote:
> In order to support configure interrupt. we will use queue number -1
> as configure interrupt
> since all these device are not support the configure interrupt
> So we will add an check here, if the idx is -1, the function
> will return;


The title is confusing since the change is specific to the guest notifiers.

A better one would be "virtio: guest notifier support for config interrupt"


>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
>   hw/display/vhost-user-gpu.c    |  8 ++++++--
>   hw/net/virtio-net.c            | 10 +++++++---
>   hw/virtio/vhost-user-fs.c      | 11 +++++++----
>   hw/virtio/vhost-vsock-common.c |  8 ++++++--
>   hw/virtio/virtio-crypto.c      |  8 ++++++--
>   5 files changed, 32 insertions(+), 13 deletions(-)
>
> diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
> index 51f1747c4a..d8e26cedf1 100644
> --- a/hw/display/vhost-user-gpu.c
> +++ b/hw/display/vhost-user-gpu.c
> @@ -490,7 +490,9 @@ static bool
>   vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
>   {
>       VhostUserGPU *g = VHOST_USER_GPU(vdev);
> -
> +    if (idx == -1) {


Let's introduce a macro for this instead of the magic number.
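
For example, something along these lines; the macro name here is just a
sketch, pick whatever reads best:

    /* sketch: a named constant instead of the bare -1 */
    #define VIRTIO_CONFIG_IRQ_IDX -1

    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return false;
    }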

Thanks


> +        return false;
> +    }
>       return vhost_virtqueue_pending(&g->vhost->dev, idx);
>   }
>   
> @@ -498,7 +500,9 @@ static void
>   vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
>   {
>       VhostUserGPU *g = VHOST_USER_GPU(vdev);
> -
> +    if (idx == -1) {
> +        return;
> +    }
>       vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
>   }
>   
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 9179013ac4..78ccaa228c 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -3060,7 +3060,10 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
>       VirtIONet *n = VIRTIO_NET(vdev);
>       NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
>       assert(n->vhost_started);
> -    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> +    if (idx != -1) {
> +        return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> +    }
> +    return false;
>   }
>   
>   static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
> @@ -3069,8 +3072,9 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
>       VirtIONet *n = VIRTIO_NET(vdev);
>       NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
>       assert(n->vhost_started);
> -    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
> -                             vdev, idx, mask);
> +    if (idx != -1) {
> +        vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
> +     }
>   }
>   
>   static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
> diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
> index 1bc5d03a00..37424c2193 100644
> --- a/hw/virtio/vhost-user-fs.c
> +++ b/hw/virtio/vhost-user-fs.c
> @@ -142,18 +142,21 @@ static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
>        */
>   }
>   
> -static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
> -                                            bool mask)
> +static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
>   {
>       VHostUserFS *fs = VHOST_USER_FS(vdev);
> -
> +    if (idx == -1) {
> +        return;
> +    }
>       vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
>   }
>   
>   static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
>   {
>       VHostUserFS *fs = VHOST_USER_FS(vdev);
> -
> +    if (idx == -1) {
> +        return false;
> +    }
>       return vhost_virtqueue_pending(&fs->vhost_dev, idx);
>   }
>   
> diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> index 5b2ebf3496..0adf823d37 100644
> --- a/hw/virtio/vhost-vsock-common.c
> +++ b/hw/virtio/vhost-vsock-common.c
> @@ -100,7 +100,9 @@ static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx,
>                                               bool mask)
>   {
>       VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> -
> +    if (idx == -1) {
> +        return;
> +    }
>       vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, mask);
>   }
>   
> @@ -108,7 +110,9 @@ static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev,
>                                                  int idx)
>   {
>       VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> -
> +    if (idx == -1) {
> +        return false;
> +    }
>       return vhost_virtqueue_pending(&vvc->vhost_dev, idx);
>   }
>   
> diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
> index 54f9bbb789..c47f4ffb24 100644
> --- a/hw/virtio/virtio-crypto.c
> +++ b/hw/virtio/virtio-crypto.c
> @@ -947,7 +947,9 @@ static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
>       int queue = virtio_crypto_vq2q(idx);
>   
>       assert(vcrypto->vhost_started);
> -
> +    if (idx == -1) {
> +        return;
> +    }
>       cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask);
>   }
>   
> @@ -957,7 +959,9 @@ static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
>       int queue = virtio_crypto_vq2q(idx);
>   
>       assert(vcrypto->vhost_started);
> -
> +    if (idx == -1) {
> +        return false;
> +    }
>       return cryptodev_vhost_virtqueue_pending(vdev, queue, idx);
>   }
>   




* Re: [PATCH v6 2/9] virtio-pci:decouple virtqueue from interrupt setting process
  2021-04-27  3:39 ` [PATCH v6 2/9] virtio-pci:decouple virtqueue from interrupt setting process Cindy Lu
@ 2021-04-27  6:40   ` Jason Wang
  2021-04-27  7:17     ` Jason Wang
  0 siblings, 1 reply; 20+ messages in thread
From: Jason Wang @ 2021-04-27  6:40 UTC (permalink / raw)
  To: Cindy Lu, mst, qemu-devel


On 2021/4/27 11:39 AM, Cindy Lu wrote:
> Now the code for interrupt/vector is coupled
> with the vq number; this patch will decouple the virtqueue
> numbers from these functions.


So you need to describe why this kind of decoupling is needed.


>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
>   hw/virtio/virtio-pci.c | 51 ++++++++++++++++++++++++------------------
>   1 file changed, 29 insertions(+), 22 deletions(-)
>
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 36524a5728..2b7e6cc0d9 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -691,23 +691,17 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
>   }
>   
>   static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
> -                                 unsigned int queue_no,
> +                                 EventNotifier *n,
>                                    unsigned int vector)
>   {
>       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> -    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> -    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> -    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
>       return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
>   }
>   
>   static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
> -                                      unsigned int queue_no,
> +                                      EventNotifier *n ,
>                                         unsigned int vector)
>   {
> -    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> -    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> -    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
>       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
>       int ret;
>   
> @@ -722,7 +716,8 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
>       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
>       unsigned int vector;
>       int ret, queue_no;
> -
> +    VirtQueue *vq;
> +    EventNotifier *n;
>       for (queue_no = 0; queue_no < nvqs; queue_no++) {
>           if (!virtio_queue_get_num(vdev, queue_no)) {
>               break;
> @@ -739,7 +734,9 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
>            * Otherwise, delay until unmasked in the frontend.
>            */
>           if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> +            vq = virtio_get_queue(vdev, queue_no);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
>               if (ret < 0) {
>                   kvm_virtio_pci_vq_vector_release(proxy, vector);
>                   goto undo;
> @@ -755,7 +752,9 @@ undo:
>               continue;
>           }
>           if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> +            vq = virtio_get_queue(vdev, queue_no);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            kvm_virtio_pci_irqfd_release(proxy, n, vector);
>           }
>           kvm_virtio_pci_vq_vector_release(proxy, vector);
>       }
> @@ -769,7 +768,8 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
>       unsigned int vector;
>       int queue_no;
>       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -
> +    VirtQueue *vq;
> +    EventNotifier *n;
>       for (queue_no = 0; queue_no < nvqs; queue_no++) {
>           if (!virtio_queue_get_num(vdev, queue_no)) {
>               break;
> @@ -782,7 +782,9 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
>            * Otherwise, it was cleaned when masked in the frontend.
>            */
>           if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> +            vq = virtio_get_queue(vdev, queue_no);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            kvm_virtio_pci_irqfd_release(proxy, n, vector);
>           }
>           kvm_virtio_pci_vq_vector_release(proxy, vector);
>       }
> @@ -791,12 +793,11 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
>   static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
>                                          unsigned int queue_no,
>                                          unsigned int vector,
> -                                       MSIMessage msg)
> +                                       MSIMessage msg,
> +                                        EventNotifier *n)


So you switch to using EventNotifier but keep using queue_no/vector; this
looks like duplication.

If we can keep queue_no or virtio_queue_get_guest_notifier working as in 
the past, I don't see any reason for this effort.

Thanks


>   {
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
>       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> -    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
>       VirtIOIRQFD *irqfd;
>       int ret = 0;
>   
> @@ -823,14 +824,15 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
>               event_notifier_set(n);
>           }
>       } else {
> -        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> +        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
>       }
>       return ret;
>   }
>   
>   static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
>                                                unsigned int queue_no,
> -                                             unsigned int vector)
> +                                             unsigned int vector,
> +                                             EventNotifier *n)
>   {
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
>       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> @@ -841,7 +843,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
>       if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
>           k->guest_notifier_mask(vdev, queue_no, true);
>       } else {
> -        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> +        kvm_virtio_pci_irqfd_release(proxy, n, vector);
>       }
>   }
>   
> @@ -851,6 +853,7 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
>       VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
>       VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
> +    EventNotifier *n;
>       int ret, index, unmasked = 0;
>   
>       while (vq) {
> @@ -859,7 +862,8 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
>               break;
>           }
>           if (index < proxy->nvqs_with_notifiers) {
> -            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg, n);
>               if (ret < 0) {
>                   goto undo;
>               }
> @@ -875,7 +879,8 @@ undo:
>       while (vq && unmasked >= 0) {
>           index = virtio_get_queue_index(vq);
>           if (index < proxy->nvqs_with_notifiers) {
> -            virtio_pci_vq_vector_mask(proxy, index, vector);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            virtio_pci_vq_vector_mask(proxy, index, vector, n);
>               --unmasked;
>           }
>           vq = virtio_vector_next_queue(vq);
> @@ -888,15 +893,17 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
>       VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
>       VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
> +    EventNotifier *n;
>       int index;
>   
>       while (vq) {
>           index = virtio_get_queue_index(vq);
> +         n = virtio_queue_get_guest_notifier(vq);
>           if (!virtio_queue_get_num(vdev, index)) {
>               break;
>           }
>           if (index < proxy->nvqs_with_notifiers) {
> -            virtio_pci_vq_vector_mask(proxy, index, vector);
> +            virtio_pci_vq_vector_mask(proxy, index, vector, n);
>           }
>           vq = virtio_vector_next_queue(vq);
>       }




* Re: [PATCH v6 5/9] vhost:add support for configure interrupt
  2021-04-27  3:39 ` [PATCH v6 5/9] vhost:add support for configure interrupt Cindy Lu
@ 2021-04-27  7:04   ` Jason Wang
  0 siblings, 0 replies; 20+ messages in thread
From: Jason Wang @ 2021-04-27  7:04 UTC (permalink / raw)
  To: Cindy Lu, mst, qemu-devel


On 2021/4/27 11:39 AM, Cindy Lu wrote:
> Add configure notifier support in vhost and the related drivers.
> When the backend supports VIRTIO_NET_F_STATUS, set up the configure
> interrupt function in vhost_dev_start and release the related
> resources in vhost_dev_stop.
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
>   hw/net/vhost_net.c         |  9 +++++
>   hw/net/virtio-net.c        |  6 ++++
>   hw/virtio/vhost.c          | 70 ++++++++++++++++++++++++++++++++++++--
>   hw/virtio/virtio.c         | 22 ++++++++++++
>   include/hw/virtio/vhost.h  |  3 ++
>   include/hw/virtio/virtio.h |  4 +++
>   include/net/vhost_net.h    |  3 ++
>   7 files changed, 115 insertions(+), 2 deletions(-)
>
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 24d555e764..12e30dc25e 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -426,6 +426,15 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
>       vhost_virtqueue_mask(&net->dev, dev, idx, mask);
>   }
>   
> +bool vhost_net_config_pending(VHostNetState *net, int idx)
> +{
> +    return vhost_config_pending(&net->dev, idx);
> +}
> +void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
> +                              bool mask)
> +{
> +    vhost_config_mask(&net->dev, dev,  mask);
> +}
>   VHostNetState *get_vhost_net(NetClientState *nc)
>   {
>       VHostNetState *vhost_net = 0;
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 78ccaa228c..43b912453a 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -3063,6 +3063,9 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
>       if (idx != -1) {
>           return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
>       }
> +    if (idx == -1) {
> +        return vhost_net_config_pending(get_vhost_net(nc->peer), idx);
> +   }


This looks wrong. Have you tested the multiqueue case?

In the case of multiqueue, there could be N 1:1 mappings between nc and
vhost_dev. And more importantly, nc is tied to a network queue pair, not
to the config interrupt.
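
If the config interrupt state lives at the device level, the lookup stops
depending on an arbitrary idx. A rough sketch (names and signatures
illustrative, not a final design):

    if (idx == -1) {
        /* the config interrupt is per device, not per queue pair, so
         * pin it to one well-defined vhost_dev, e.g. queue pair 0 */
        NetClientState *nc0 = qemu_get_subqueue(n->nic, 0);
        return vhost_net_config_pending(get_vhost_net(nc0->peer));
    }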


>       return false;
>   }
>   
> @@ -3075,6 +3078,9 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
>       if (idx != -1) {
>           vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
>        }
> +    if (idx == -1) {
> +        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
> +     }
>   }
>   
>   static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 614ccc2bcb..162a5dd90c 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -21,6 +21,7 @@
>   #include "qemu/error-report.h"
>   #include "qemu/memfd.h"
>   #include "standard-headers/linux/vhost_types.h"
> +#include "standard-headers/linux/virtio_net.h"
>   #include "exec/address-spaces.h"
>   #include "hw/virtio/virtio-bus.h"
>   #include "hw/virtio/virtio-access.h"
> @@ -1313,6 +1314,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>               goto fail;
>           }
>       }
> +    r = event_notifier_init(&hdev->masked_config_notifier, 0);
> +    if (r < 0) {
> +        return r;
> +    }


Similarly, we don't need a per-hdev masked_config_notifier.


>   
>       if (busyloop_timeout) {
>           for (i = 0; i < hdev->nvqs; ++i) {
> @@ -1405,6 +1410,7 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
>       for (i = 0; i < hdev->nvqs; ++i) {
>           vhost_virtqueue_cleanup(hdev->vqs + i);
>       }
> +    event_notifier_cleanup(&hdev->masked_config_notifier);
>       if (hdev->mem) {
>           /* those are only safe after successful init */
>           memory_listener_unregister(&hdev->memory_listener);
> @@ -1498,6 +1504,16 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
>       return event_notifier_test_and_clear(&vq->masked_notifier);
>   }
>   
> +bool vhost_config_pending(struct vhost_dev *hdev, int n)
> +{
> +    assert(hdev->vhost_ops);
> +
> +    if ((hdev->started == false) ||
> +        (hdev->vhost_ops->vhost_set_config_call == NULL)) {
> +        return false;
> +    }
> +    return event_notifier_test_and_clear(&hdev->masked_config_notifier);
> +}
>   /* Mask/unmask events from this vq. */
>   void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
>                            bool mask)
> @@ -1522,6 +1538,30 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
>           VHOST_OPS_DEBUG("vhost_set_vring_call failed");
>       }
>   }
> +void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
> +                         bool mask)
> +{
> +   int fd;
> +   int r;
> +   EventNotifier *masked_config_notifier = &hdev->masked_config_notifier;
> +   EventNotifier *config_notifier = &vdev->config_notifier;
> +   assert(hdev->vhost_ops);
> +
> +   if ((hdev->started == false) ||
> +        (hdev->vhost_ops->vhost_set_config_call == NULL)) {
> +        return ;
> +    }
> +    if (mask) {
> +        assert(vdev->use_guest_notifier_mask);
> +        fd = event_notifier_get_fd(masked_config_notifier);
> +    } else {
> +        fd = event_notifier_get_fd(config_notifier);
> +    }
> +   r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
> +   if (r < 0) {
> +        error_report("vhost_set_config_call failed");
> +    }
> +}
>   
>   uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
>                               uint64_t features)
> @@ -1701,6 +1741,7 @@ int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
>   int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
>   {
>       int i, r;
> +    int fd = 0;
>   
>       /* should only be called after backend is connected */
>       assert(hdev->vhost_ops);
> @@ -1732,7 +1773,10 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
>               goto fail_vq;
>           }
>       }
> -
> +    event_notifier_test_and_clear(&hdev->masked_config_notifier);
> +    if (!vdev->use_guest_notifier_mask) {
> +        vhost_config_mask(hdev, vdev,  true);
> +    }
>       if (hdev->log_enabled) {
>           uint64_t log_base;
>   
> @@ -1749,6 +1793,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
>               goto fail_log;
>           }
>       }
> +
>       if (hdev->vhost_ops->vhost_dev_start) {
>           r = hdev->vhost_ops->vhost_dev_start(hdev, true);
>           if (r) {
> @@ -1766,6 +1811,19 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
>               vhost_device_iotlb_miss(hdev, vq->used_phys, true);
>           }
>       }
> +   if (!(hdev->features & (0x1ULL << VIRTIO_NET_F_STATUS))) {
> +        return 0;
> +    }
> +    if (hdev->vhost_ops->vhost_set_config_call) {
> +        fd = event_notifier_get_fd(&vdev->config_notifier);
> +        r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
> +        if (!r) {
> +            event_notifier_set(&vdev->config_notifier);
> +        }
> +        if (r) {
> +            goto fail_log;
> +         }
> +    }
>       return 0;
>   fail_log:
>       vhost_log_put(hdev, false);
> @@ -1788,10 +1846,18 @@ fail_features:
>   void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
>   {
>       int i;
> +    int fd;
>   
>       /* should only be called after backend is connected */
>       assert(hdev->vhost_ops);
> -
> +    event_notifier_test_and_clear(&hdev->masked_config_notifier);
> +    event_notifier_test_and_clear(&vdev->config_notifier);
> +    if ((hdev->features & (0x1ULL << VIRTIO_NET_F_STATUS))) {


Any reason for such a check? Let's try not to check per-device features in
the generic vhost core.

Note that the config interrupt is a basic facility which could be used
by various other devices (e.g. block).
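
A possible shape (sketch only): keep the generic core unconditional and,
if gating is really needed, do it at the device level. The gate below is
hypothetical:

    /* hw/virtio/vhost.c -- sketch: no device-specific feature test */
    if (hdev->vhost_ops->vhost_set_config_call) {
        fd = -1;
        hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
    }

    /* hw/net/vhost_net.c -- hypothetical gate at the device level */
    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STATUS)) {
        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, false);
    }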


> +        if (hdev->vhost_ops->vhost_set_config_call) {
> +            fd = -1;
> +            hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
> +        }
> +    }
>       if (hdev->vhost_ops->vhost_dev_start) {
>           hdev->vhost_ops->vhost_dev_start(hdev, false);
>       }
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index ceb58fda6c..5dff29c981 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -3502,6 +3502,14 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
>       }
>   }
>   
> +static void virtio_config_read(EventNotifier *n)
> +{
> +    VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
> +
> +    if (event_notifier_test_and_clear(n)) {
> +        virtio_notify_config(vdev);
> +    }
> +}
>   void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
>                                                   bool with_irqfd)
>   {
> @@ -3517,6 +3525,16 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
>           virtio_queue_guest_notifier_read(&vq->guest_notifier);
>       }
>   }
> +void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
> +                                                bool with_irqfd)
> +{
> +    if (assign && !with_irqfd) {
> +        event_notifier_set_handler(&vdev->config_notifier,
> +                                   virtio_config_read);
> +    } else {
> +       event_notifier_set_handler(&vdev->config_notifier, NULL);
> +    }
> +}
>   
>   EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
>   {
> @@ -3591,6 +3609,10 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
>       return &vq->host_notifier;
>   }
>   
> +EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev)
> +{
> +    return &vdev->config_notifier;
> +}
>   void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
>   {
>       vq->host_notifier_enabled = enabled;
> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> index 4a8bc75415..22efa7008e 100644
> --- a/include/hw/virtio/vhost.h
> +++ b/include/hw/virtio/vhost.h
> @@ -91,6 +91,7 @@ struct vhost_dev {
>       QLIST_HEAD(, vhost_iommu) iommu_list;
>       IOMMUNotifier n;
>       const VhostDevConfigOps *config_ops;
> +    EventNotifier masked_config_notifier;


So I think it's wrong to store the masked_config_notifier in vhost_dev. 
See my reply above for the multiqueue case. The correct way is to
store it somewhere else, probably VirtIODevice.
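
A minimal sketch of that (field placement illustrative):

    /* include/hw/virtio/virtio.h -- sketch */
    struct VirtIODevice {
        /* ... existing fields ... */
        EventNotifier config_notifier;
        EventNotifier masked_config_notifier; /* one per device, shared
                                               * by all its vhost_devs */
    };

vhost_config_pending()/vhost_config_mask() would then take the vdev and
use &vdev->masked_config_notifier instead of a per-hdev copy.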

Thanks


>   };
>   
>   struct vhost_net {
> @@ -108,6 +109,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
>   void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
>   int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
>   void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
> +bool vhost_config_pending(struct vhost_dev *hdev, int n);
> +void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev,  bool mask);
>   
>   /* Test and clear masked event pending status.
>    * Should be called after unmask to avoid losing events.
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index b7ece7a6a8..b0b714f6d4 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -108,6 +108,7 @@ struct VirtIODevice
>       bool use_guest_notifier_mask;
>       AddressSpace *dma_as;
>       QLIST_HEAD(, VirtQueue) *vector_queues;
> +    EventNotifier config_notifier;
>   };
>   
>   struct VirtioDeviceClass {
> @@ -310,11 +311,14 @@ uint16_t virtio_get_queue_index(VirtQueue *vq);
>   EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
>   void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
>                                                   bool with_irqfd);
> +void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
> +                                                bool with_irqfd);
>   int virtio_device_start_ioeventfd(VirtIODevice *vdev);
>   int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
>   void virtio_device_release_ioeventfd(VirtIODevice *vdev);
>   bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
>   EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
> +EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev);
>   void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
>   void virtio_queue_host_notifier_read(EventNotifier *n);
>   void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
> diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> index 172b0051d8..0d38c97c94 100644
> --- a/include/net/vhost_net.h
> +++ b/include/net/vhost_net.h
> @@ -36,6 +36,9 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
>   bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
>   void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
>                                 int idx, bool mask);
> +bool vhost_net_config_pending(VHostNetState *net, int n);
> +void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
> +                              bool mask);
>   int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
>   VHostNetState *get_vhost_net(NetClientState *nc);
>   




* Re: [PATCH v6 7/9] virtio-pci: add support for configure interrupt
  2021-04-27  3:39 ` [PATCH v6 7/9] virtio-pci: " Cindy Lu
@ 2021-04-27  7:12   ` Jason Wang
  2021-04-29  3:07     ` Cindy Lu
  0 siblings, 1 reply; 20+ messages in thread
From: Jason Wang @ 2021-04-27  7:12 UTC (permalink / raw)
  To: Cindy Lu, mst, qemu-devel


On 2021/4/27 11:39 AM, Cindy Lu wrote:
> Add support for configure interrupt: use kvm_irqfd_assign and set the
> gsi in the kernel. When the configure notifier is signaled (eventfd_signal)
> by the host kernel, this finally injects an MSI-X interrupt into the guest.
> ---
>   hw/virtio/virtio-pci.c | 186 ++++++++++++++++++++++++++---------------
>   1 file changed, 120 insertions(+), 66 deletions(-)
>
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 2b7e6cc0d9..07d28dd367 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -664,12 +664,10 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
>   }
>   
>   static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
> -                                        unsigned int queue_no,
>                                           unsigned int vector)
>   {
>       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
>       int ret;
> -


Unnecessary changes.


>       if (irqfd->users == 0) {
>           ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
>           if (ret < 0) {
> @@ -708,93 +706,120 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
>       ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
>       assert(ret == 0);
>   }
> -


Same here.


> -static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> + static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
> +                                      EventNotifier **n, unsigned int *vector)


The indentation looks incorrect.


>   {
>       PCIDevice *dev = &proxy->pci_dev;
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> -    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -    unsigned int vector;
> -    int ret, queue_no;
>       VirtQueue *vq;
> -    EventNotifier *n;
> -    for (queue_no = 0; queue_no < nvqs; queue_no++) {
> +
> +    if (queue_no == -1) {
> +        *n = virtio_get_config_notifier(vdev);
> +        *vector = vdev->config_vector;
> +    } else {
>           if (!virtio_queue_get_num(vdev, queue_no)) {
> -            break;
> -        }
> -        vector = virtio_queue_vector(vdev, queue_no);
> -        if (vector >= msix_nr_vectors_allocated(dev)) {
> -            continue;
> -        }
> -        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
> -        if (ret < 0) {
> -            goto undo;
> -        }
> -        /* If guest supports masking, set up irqfd now.
> -         * Otherwise, delay until unmasked in the frontend.
> -         */
> -        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            vq = virtio_get_queue(vdev, queue_no);
> -            n = virtio_queue_get_guest_notifier(vq);
> -            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> -            if (ret < 0) {
> -                kvm_virtio_pci_vq_vector_release(proxy, vector);
> -                goto undo;
> -            }
> +            return -1;
>           }
> +        *vector = virtio_queue_vector(vdev, queue_no);
> +        vq = virtio_get_queue(vdev, queue_no);
> +        *n = virtio_queue_get_guest_notifier(vq);
> +    }
> +    if (*vector >= msix_nr_vectors_allocated(dev)) {
> +        return -1;
>       }
>       return 0;
> +}
>   
> +static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
> +{


Let's use a separate patch to introduce
kvm_virtio_pci_vector_use/release_one().

And then do the config interrupt support on top.
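
I.e. a first patch that only factors out the per-queue helpers, with no
functional change (sketch, error handling simplified):

    static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy,
                                             int queue_no);
    static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
                                                  int queue_no);

    static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
    {
        int queue_no, ret = 0;

        for (queue_no = 0; queue_no < nvqs; queue_no++) {
            ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
            if (ret < 0) {
                goto undo;
            }
        }
        return 0;

    undo:
        while (--queue_no >= 0) {
            kvm_virtio_pci_vector_release_one(proxy, queue_no);
        }
        return ret;
    }

The config interrupt patch can then simply call the _one() helpers with
queue_no == -1.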

Thanks




* Re: [PATCH v6 8/9] virtio: decouple virtqueue from set notifier fd handler
  2021-04-27  3:39 ` [PATCH v6 8/9] virtio: decouple virtqueue from set notifier fd handler Cindy Lu
@ 2021-04-27  7:14   ` Jason Wang
  0 siblings, 0 replies; 20+ messages in thread
From: Jason Wang @ 2021-04-27  7:14 UTC (permalink / raw)
  To: Cindy Lu, mst, qemu-devel


On 2021/4/27 11:39 AM, Cindy Lu wrote:
> This patch will decouple the virtqueue from
> virtio_queue_set_guest_notifier_fd_handler;
> here queue number -1 means the configure interrupt. The function
> will set virtio_config_read as the fd handler.


Any reason that this is not done before patch 3?

Thanks


>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
>   hw/s390x/virtio-ccw.c      |  6 +++---
>   hw/virtio/virtio-mmio.c    |  8 ++++----
>   hw/virtio/virtio-pci.c     |  9 +++++----
>   hw/virtio/virtio.c         | 35 +++++++++++++++++------------------
>   include/hw/virtio/virtio.h |  4 +---
>   5 files changed, 30 insertions(+), 32 deletions(-)
>
> diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
> index 4582e94ae7..5d73c99d30 100644
> --- a/hw/s390x/virtio-ccw.c
> +++ b/hw/s390x/virtio-ccw.c
> @@ -989,11 +989,11 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
>           if (r < 0) {
>               return r;
>           }
> -        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
> +        virtio_set_notifier_fd_handler(vdev, n, true, with_irqfd);
>           if (with_irqfd) {
>               r = virtio_ccw_add_irqfd(dev, n);
>               if (r) {
> -                virtio_queue_set_guest_notifier_fd_handler(vq, false,
> +                virtio_set_notifier_fd_handler(vdev, n, false,
>                                                              with_irqfd);
>                   return r;
>               }
> @@ -1017,7 +1017,7 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
>           if (with_irqfd) {
>               virtio_ccw_remove_irqfd(dev, n);
>           }
> -        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
> +        virtio_set_notifier_fd_handler(vdev, n, false, with_irqfd);
>           event_notifier_cleanup(notifier);
>       }
>       return 0;
> diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
> index d8cb368728..4ea55001be 100644
> --- a/hw/virtio/virtio-mmio.c
> +++ b/hw/virtio/virtio-mmio.c
> @@ -620,9 +620,9 @@ static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
>           if (r < 0) {
>               return r;
>           }
> -        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
> +        virtio_set_notifier_fd_handler(vdev, n, true, with_irqfd);
>       } else {
> -        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
> +        virtio_set_notifier_fd_handler(vdev, n, false, with_irqfd);
>           event_notifier_cleanup(notifier);
>       }
>   
> @@ -642,9 +642,9 @@ static int virtio_mmio_set_config_notifier(DeviceState *d,  bool assign)
>       int r = 0;
>       if (assign) {
>           r = event_notifier_init(notifier, 0);
> -        virtio_set_config_notifier_fd_handler(vdev, true, false);
> +        virtio_set_notifier_fd_handler(vdev, -1, true, false);
>       } else {
> -        virtio_set_config_notifier_fd_handler(vdev, false, false);
> +        virtio_set_notifier_fd_handler(vdev, -1, false, false);
>           event_notifier_cleanup(notifier);
>       }
>           if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 07d28dd367..5033b3db4f 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -806,10 +806,10 @@ static int virtio_pci_set_config_notifier(DeviceState *d,  bool assign)
>       int r = 0;
>       if (assign) {
>           r = event_notifier_init(notifier, 0);
> -        virtio_set_config_notifier_fd_handler(vdev, true, true);
> +        virtio_set_notifier_fd_handler(vdev, -1, true, true);
>           kvm_virtio_pci_vector_config_use(proxy);
>       } else {
> -        virtio_set_config_notifier_fd_handler(vdev, false, true);
> +        virtio_set_notifier_fd_handler(vdev, -1, false, true);
>           kvm_virtio_pci_vector_config_release(proxy);
>           event_notifier_cleanup(notifier);
>       }
> @@ -1005,9 +1005,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
>           if (r < 0) {
>               return r;
>           }
> -        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
> +        virtio_set_notifier_fd_handler(vdev, n, true, with_irqfd);
>       } else {
> -        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
> +        virtio_set_notifier_fd_handler(vdev, n, false, with_irqfd);
>           event_notifier_cleanup(notifier);
>       }
>   
> @@ -1049,6 +1049,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
>           msix_unset_vector_notifiers(&proxy->pci_dev);
>           if (proxy->vector_irqfd) {
>               kvm_virtio_pci_vector_release(proxy, nvqs);
> +            kvm_virtio_pci_vector_config_release(proxy);
>               g_free(proxy->vector_irqfd);
>               proxy->vector_irqfd = NULL;
>           }
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index 5dff29c981..8f0087deac 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -3510,32 +3510,31 @@ static void virtio_config_read(EventNotifier *n)
>           virtio_notify_config(vdev);
>       }
>   }
> -void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
> -                                                bool with_irqfd)
> +
> +void virtio_set_notifier_fd_handler(VirtIODevice *vdev, int queue_no,
> +                               bool assign, bool with_irqfd)
>   {
> -    if (assign && !with_irqfd) {
> -        event_notifier_set_handler(&vq->guest_notifier,
> -                                   virtio_queue_guest_notifier_read);
> +    EventNotifier *e ;
> +    EventNotifierHandler *handler;
> +    if (queue_no != -1) {
> +        VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> +        e = &vq->guest_notifier;
> +        handler = virtio_queue_guest_notifier_read;
> +    } else {
> +        e = &vdev->config_notifier;
> +        handler = virtio_config_read;
> +   }
> +   if (assign && !with_irqfd) {
> +        event_notifier_set_handler(e, handler);
>       } else {
> -        event_notifier_set_handler(&vq->guest_notifier, NULL);
> +        event_notifier_set_handler(e, NULL);
>       }
>       if (!assign) {
>           /* Test and clear notifier before closing it,
>            * in case poll callback didn't have time to run. */
> -        virtio_queue_guest_notifier_read(&vq->guest_notifier);
> +        handler(e);
>       }
>   }
> -void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
> -                                                bool with_irqfd)
> -{
> -    if (assign && !with_irqfd) {
> -        event_notifier_set_handler(&vdev->config_notifier,
> -                                   virtio_config_read);
> -    } else {
> -       event_notifier_set_handler(&vdev->config_notifier, NULL);
> -    }
> -}
> -
>   EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
>   {
>       return &vq->guest_notifier;
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index b0b714f6d4..d22f5a3e7e 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -309,9 +309,7 @@ void virtio_queue_update_used_idx(VirtIODevice *vdev, int n);
>   VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
>   uint16_t virtio_get_queue_index(VirtQueue *vq);
>   EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
> -void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
> -                                                bool with_irqfd);
> -void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
> +void virtio_set_notifier_fd_handler(VirtIODevice *vdev, int n, bool assign,
>                                                   bool with_irqfd);
>   int virtio_device_start_ioeventfd(VirtIODevice *vdev);
>   int virtio_device_grab_ioeventfd(VirtIODevice *vdev);




* Re: [PATCH v6 9/9] virtio-net: add peer_deleted check in virtio_net_handle_rx
  2021-04-27  3:39 ` [PATCH v6 9/9] virtio-net: add peer_deleted check in virtio_net_handle_rx Cindy Lu
@ 2021-04-27  7:15   ` Jason Wang
  0 siblings, 0 replies; 20+ messages in thread
From: Jason Wang @ 2021-04-27  7:15 UTC (permalink / raw)
  To: Cindy Lu, mst, qemu-devel


On 2021/4/27 11:39 AM, Cindy Lu wrote:
> During testing, we found this function keeps running after the
> peer has been deleted, which causes a crash. So add a check
> for this.


Please describe how the issue is reproduced and why 
qemu_flush_queued_packets() is not a better place to fix that.
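
E.g. a guard at that single choke point would cover every caller, not just
the rx path. Sketch only; whether !nc->peer is the right condition depends
on the actual teardown ordering:

    /* net/net.c -- sketch */
    void qemu_flush_queued_packets(NetClientState *nc)
    {
        if (!nc->peer) {
            return;
        }
        /* ... existing flush logic ... */
    }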

Thanks


>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
>   hw/net/virtio-net.c | 4 +++-
>   1 file changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 43b912453a..1be3f8e76f 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -1403,7 +1403,9 @@ static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
>   {
>       VirtIONet *n = VIRTIO_NET(vdev);
>       int queue_index = vq2q(virtio_get_queue_index(vq));
> -
> +    if (n->nic->peer_deleted) {
> +        return;
> +    }
>       qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
>   }
>   




* Re: [PATCH v6 2/9] virtio-pci:decouple virtqueue from interrupt setting process
  2021-04-27  6:40   ` Jason Wang
@ 2021-04-27  7:17     ` Jason Wang
  0 siblings, 0 replies; 20+ messages in thread
From: Jason Wang @ 2021-04-27  7:17 UTC (permalink / raw)
  To: Cindy Lu, mst, qemu-devel


On 2021/4/27 2:40 PM, Jason Wang wrote:
>>   static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
>>                                          unsigned int queue_no,
>>                                          unsigned int vector,
>> -                                       MSIMessage msg)
>> +                                       MSIMessage msg,
>> +                                        EventNotifier *n)
>
>
> So you switch to using EventNotifier but keep using queue_no/vector;
> this looks like duplication.
>
> If we can keep queue_no or virtio_queue_get_guest_notifier working as 
> in the past, I don't see any reason for this effort.


OK, I see: it is because we need to support the config interrupt.

But using queue_no/vector/n at the same time is a hint that the decoupling
is not complete.
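
I.e. a fully decoupled unmask path would only need the notifier and the
vector, something like (sketch):

    static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,
                                            unsigned int vector,
                                            MSIMessage msg,
                                            EventNotifier *n);

The remaining queue_no user is the guest_notifier_mask() callback; as long
as that takes a queue index, some of the coupling has to stay.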

Thanks


>
> Thanks 




* Re: [PATCH v6 7/9] virtio-pci: add support for configure interrupt
  2021-04-27  7:12   ` Jason Wang
@ 2021-04-29  3:07     ` Cindy Lu
  0 siblings, 0 replies; 20+ messages in thread
From: Cindy Lu @ 2021-04-29  3:07 UTC (permalink / raw)
  To: Jason Wang; +Cc: QEMU Developers, Michael Tsirkin

On Tue, Apr 27, 2021 at 3:12 PM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2021/4/27 11:39 AM, Cindy Lu wrote:
> > Add support for configure interrupt: use kvm_irqfd_assign and set the
> > gsi in the kernel. When the configure notifier is signaled (eventfd_signal)
> > by the host kernel, this finally injects an MSI-X interrupt into the guest.
> > ---
> >   hw/virtio/virtio-pci.c | 186 ++++++++++++++++++++++++++---------------
> >   1 file changed, 120 insertions(+), 66 deletions(-)
> >
> > diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> > index 2b7e6cc0d9..07d28dd367 100644
> > --- a/hw/virtio/virtio-pci.c
> > +++ b/hw/virtio/virtio-pci.c
> > @@ -664,12 +664,10 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
> >   }
> >
> >   static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
> > -                                        unsigned int queue_no,
> >                                           unsigned int vector)
> >   {
> >       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> >       int ret;
> > -
>
>
> Unnecessary changes.
>
will fix this
>
> >       if (irqfd->users == 0) {
> >           ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
> >           if (ret < 0) {
> > @@ -708,93 +706,120 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
> >       ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
> >       assert(ret == 0);
> >   }
> > -
>
>
> Same here.
>
will fix this
>
> > -static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> > + static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
> > +                                      EventNotifier **n, unsigned int *vector)
>
>
> The indentation looks incorrect.
>
>
> >   {
> >       PCIDevice *dev = &proxy->pci_dev;
> >       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> > -    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> > -    unsigned int vector;
> > -    int ret, queue_no;
> >       VirtQueue *vq;
> > -    EventNotifier *n;
> > -    for (queue_no = 0; queue_no < nvqs; queue_no++) {
> > +
> > +    if (queue_no == -1) {
> > +        *n = virtio_get_config_notifier(vdev);
> > +        *vector = vdev->config_vector;
> > +    } else {
> >           if (!virtio_queue_get_num(vdev, queue_no)) {
> > -            break;
> > -        }
> > -        vector = virtio_queue_vector(vdev, queue_no);
> > -        if (vector >= msix_nr_vectors_allocated(dev)) {
> > -            continue;
> > -        }
> > -        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
> > -        if (ret < 0) {
> > -            goto undo;
> > -        }
> > -        /* If guest supports masking, set up irqfd now.
> > -         * Otherwise, delay until unmasked in the frontend.
> > -         */
> > -        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > -            vq = virtio_get_queue(vdev, queue_no);
> > -            n = virtio_queue_get_guest_notifier(vq);
> > -            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> > -            if (ret < 0) {
> > -                kvm_virtio_pci_vq_vector_release(proxy, vector);
> > -                goto undo;
> > -            }
> > +            return -1;
> >           }
> > +        *vector = virtio_queue_vector(vdev, queue_no);
> > +        vq = virtio_get_queue(vdev, queue_no);
> > +        *n = virtio_queue_get_guest_notifier(vq);
> > +    }
> > +    if (*vector >= msix_nr_vectors_allocated(dev)) {
> > +        return -1;
> >       }
> >       return 0;
> > +}
> >
> > +static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
> > +{
>
>
> Let's use a separate patch to introduce
> kvm_virtio_pci_vector_use/release_one().
>
> And then do the config interrupt support on top.
>
Sure, will fix this
> Thanks
>




* Re: [PATCH v6 1/9] hw: Add check for queue number
  2021-04-27  5:39   ` Jason Wang
@ 2021-04-29  3:08     ` Cindy Lu
  0 siblings, 0 replies; 20+ messages in thread
From: Cindy Lu @ 2021-04-29  3:08 UTC (permalink / raw)
  To: Jason Wang; +Cc: QEMU Developers, Michael Tsirkin

On Tue, Apr 27, 2021 at 1:39 PM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2021/4/27 11:39 AM, Cindy Lu wrote:
> > In order to support configure interrupt. we will use queue number -1
> > as configure interrupt
> > since all these device are not support the configure interrupt
> > So we will add an check here, if the idx is -1, the function
> > will return;
>
>
> The title is confusing since the change is specific to the guest notifiers.
>
> A better one would be "virtio: guest notifier support for config interrupt"
>
sure, will fix this
>
> >
> > Signed-off-by: Cindy Lu <lulu@redhat.com>
> > ---
> >   hw/display/vhost-user-gpu.c    |  8 ++++++--
> >   hw/net/virtio-net.c            | 10 +++++++---
> >   hw/virtio/vhost-user-fs.c      | 11 +++++++----
> >   hw/virtio/vhost-vsock-common.c |  8 ++++++--
> >   hw/virtio/virtio-crypto.c      |  8 ++++++--
> >   5 files changed, 32 insertions(+), 13 deletions(-)
> >
> > diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
> > index 51f1747c4a..d8e26cedf1 100644
> > --- a/hw/display/vhost-user-gpu.c
> > +++ b/hw/display/vhost-user-gpu.c
> > @@ -490,7 +490,9 @@ static bool
> >   vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
> >   {
> >       VhostUserGPU *g = VHOST_USER_GPU(vdev);
> > -
> > +    if (idx == -1) {
>
>
> Let's introduce a macro for this instead of the magic number.
>
> Thanks
>
>
sure will fix this
> > +        return false;
> > +    }
> >       return vhost_virtqueue_pending(&g->vhost->dev, idx);
> >   }
> >
> > @@ -498,7 +500,9 @@ static void
> >   vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
> >   {
> >       VhostUserGPU *g = VHOST_USER_GPU(vdev);
> > -
> > +    if (idx == -1) {
> > +        return;
> > +    }
> >       vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
> >   }
> >
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index 9179013ac4..78ccaa228c 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -3060,7 +3060,10 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
> >       VirtIONet *n = VIRTIO_NET(vdev);
> >       NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
> >       assert(n->vhost_started);
> > -    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> > +    if (idx != -1) {
> > +        return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> > +    }
> > +    return false;
> >   }
> >
> >   static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > @@ -3069,8 +3072,9 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
> >       VirtIONet *n = VIRTIO_NET(vdev);
> >       NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
> >       assert(n->vhost_started);
> > -    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
> > -                             vdev, idx, mask);
> > +    if (idx != -1) {
> > +        vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
> > +     }
> >   }
> >
> >   static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
> > diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
> > index 1bc5d03a00..37424c2193 100644
> > --- a/hw/virtio/vhost-user-fs.c
> > +++ b/hw/virtio/vhost-user-fs.c
> > @@ -142,18 +142,21 @@ static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> >        */
> >   }
> >
> > -static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > -                                            bool mask)
> > +static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
> >   {
> >       VHostUserFS *fs = VHOST_USER_FS(vdev);
> > -
> > +    if (idx == -1) {
> > +        return;
> > +    }
> >       vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
> >   }
> >
> >   static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
> >   {
> >       VHostUserFS *fs = VHOST_USER_FS(vdev);
> > -
> > +    if (idx == -1) {
> > +        return false;
> > +    }
> >       return vhost_virtqueue_pending(&fs->vhost_dev, idx);
> >   }
> >
> > diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> > index 5b2ebf3496..0adf823d37 100644
> > --- a/hw/virtio/vhost-vsock-common.c
> > +++ b/hw/virtio/vhost-vsock-common.c
> > @@ -100,7 +100,9 @@ static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx,
> >                                               bool mask)
> >   {
> >       VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> > -
> > +    if (idx == -1) {
> > +        return;
> > +    }
> >       vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, mask);
> >   }
> >
> > @@ -108,7 +110,9 @@ static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev,
> >                                                  int idx)
> >   {
> >       VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> > -
> > +    if (idx == -1) {
> > +        return false;
> > +    }
> >       return vhost_virtqueue_pending(&vvc->vhost_dev, idx);
> >   }
> >
> > diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
> > index 54f9bbb789..c47f4ffb24 100644
> > --- a/hw/virtio/virtio-crypto.c
> > +++ b/hw/virtio/virtio-crypto.c
> > @@ -947,7 +947,9 @@ static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
> >       int queue = virtio_crypto_vq2q(idx);
> >
> >       assert(vcrypto->vhost_started);
> > -
> > +    if (idx == -1) {
> > +        return;
> > +    }
> >       cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask);
> >   }
> >
> > @@ -957,7 +959,9 @@ static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
> >       int queue = virtio_crypto_vq2q(idx);
> >
> >       assert(vcrypto->vhost_started);
> > -
> > +    if (idx == -1) {
> > +        return false;
> > +    }
> >       return cryptodev_vhost_virtqueue_pending(vdev, queue, idx);
> >   }
> >
>




end of thread

Thread overview: 20+ messages
2021-04-27  3:39 [PATCH v6 0/9] vhost-vdpa: add support for configure interrupt Cindy Lu
2021-04-27  3:39 ` [PATCH v6 1/9] hw: Add check for queue number Cindy Lu
2021-04-27  5:39   ` Jason Wang
2021-04-29  3:08     ` Cindy Lu
2021-04-27  3:39 ` [PATCH v6 2/9] virtio-pci:decouple virtqueue from interrupt setting process Cindy Lu
2021-04-27  6:40   ` Jason Wang
2021-04-27  7:17     ` Jason Wang
2021-04-27  3:39 ` [PATCH v6 3/9] vhost: add new call back function for config interrupt Cindy Lu
2021-04-27  3:39 ` [PATCH v6 4/9] vhost-vdpa: add support for config interrupt call back Cindy Lu
2021-04-27  3:39 ` [PATCH v6 5/9] vhost:add support for configure interrupt Cindy Lu
2021-04-27  7:04   ` Jason Wang
2021-04-27  3:39 ` [PATCH v6 6/9] virtio-mmio: add " Cindy Lu
2021-04-27  3:39 ` [PATCH v6 7/9] virtio-pci: " Cindy Lu
2021-04-27  7:12   ` Jason Wang
2021-04-29  3:07     ` Cindy Lu
2021-04-27  3:39 ` [PATCH v6 8/9] virtio: decouple virtqueue from set notifier fd handler Cindy Lu
2021-04-27  7:14   ` Jason Wang
2021-04-27  3:39 ` [PATCH v6 9/9] virtio-net: add peer_deleted check in virtio_net_handle_rx Cindy Lu
2021-04-27  7:15   ` Jason Wang
2021-04-27  3:57 ` [PATCH v6 0/9] vhost-vdpa: add support for configure interrupt no-reply
