* [PATCH v4 0/4] vhost-vdpa: add support for configure interrupt
@ 2021-03-23  1:56 Cindy Lu
  2021-03-23  1:56 ` [PATCH v4 1/4] virtio: add support for " Cindy Lu
                   ` (3 more replies)
  0 siblings, 4 replies; 13+ messages in thread
From: Cindy Lu @ 2021-03-23  1:56 UTC (permalink / raw)
  To: lulu, mst, jasowang, qemu-devel

This code has been tested with vp-vdpa (which supports the configure
interrupt) and vdpa_sim (which does not support it).

Tested on both the virtio-pci and virtio-mmio buses.

Changes in v2:
Add support for the virtio-mmio bus
Activate the notifier only while the backend supports the configure interrupt
Misc fixes from v1

Changes in v3:
Fix coding style problems

Changes in v4:
Misc fixes from v3
Merge set_config_notifier into set_guest_notifiers
When vdpa starts, check for the VIRTIO_NET_F_STATUS feature bit


Cindy Lu (4):
  virtio: add support for configure interrupt
  vhost-vdpa: add callback function for configure interrupt
  virtio-mmio: add support for configure interrupt
  virtio-pci: add support for configure interrupt

 hw/display/vhost-user-gpu.c       |  14 ++-
 hw/net/vhost_net.c                |  16 ++-
 hw/net/virtio-net.c               |  24 ++++-
 hw/s390x/virtio-ccw.c             |   6 +-
 hw/virtio/trace-events            |   2 +
 hw/virtio/vhost-user-fs.c         |  12 ++-
 hw/virtio/vhost-vdpa.c            |  40 ++++++-
 hw/virtio/vhost-vsock-common.c    |  12 ++-
 hw/virtio/vhost.c                 |  44 +++++++-
 hw/virtio/virtio-crypto.c         |  13 ++-
 hw/virtio/virtio-mmio.c           |  30 +++++-
 hw/virtio/virtio-pci.c            | 171 ++++++++++++++++++++++++------
 hw/virtio/virtio.c                |  28 +++++
 include/hw/virtio/vhost-backend.h |   4 +
 include/hw/virtio/vhost.h         |   4 +
 include/hw/virtio/virtio.h        |  23 +++-
 include/net/vhost_net.h           |   3 +
 17 files changed, 378 insertions(+), 68 deletions(-)

-- 
2.21.3





* [PATCH v4 1/4] virtio: add support for configure interrupt
  2021-03-23  1:56 [PATCH v4 0/4] vhost-vdpa: add support for configure interrupt Cindy Lu
@ 2021-03-23  1:56 ` Cindy Lu
  2021-03-24  6:30   ` Jason Wang
  2021-03-23  1:56 ` [PATCH v4 2/4] vhost-vdpa: add callback function for " Cindy Lu
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 13+ messages in thread
From: Cindy Lu @ 2021-03-23  1:56 UTC (permalink / raw)
  To: lulu, mst, jasowang, qemu-devel

Add configure notifier support in virtio and the related devices.
When the peer is vhost-vdpa, set up the configure interrupt in
vhost_net_start() and release the resources in vhost_net_stop().
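
A minimal sketch (not part of the patch) of how a vhost-backed device's
callbacks are expected to dispatch on the new type argument; the enum
values come from include/hw/virtio/virtio.h below, and
example_get_vhost_dev() is a hypothetical helper standing in for each
device's own way of reaching its vhost_dev:

    static bool example_guest_notifier_pending(VirtIODevice *vdev, int idx,
                                               int type)
    {
        /* hypothetical helper; each device looks up its own vhost_dev */
        struct vhost_dev *hdev = example_get_vhost_dev(vdev);

        if (type == VIRTIO_VQ_VECTOR) {
            /* per-virtqueue masked notifier, as before */
            return vhost_virtqueue_pending(hdev, idx);
        }
        if (type == VIRTIO_CONFIG_VECTOR) {
            /* the new per-device masked config notifier */
            return vhost_config_pending(hdev, idx);
        }
        return false;
    }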

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/display/vhost-user-gpu.c    | 14 +++++++----
 hw/net/vhost_net.c             | 16 +++++++++++--
 hw/net/virtio-net.c            | 24 +++++++++++++++----
 hw/s390x/virtio-ccw.c          |  6 ++---
 hw/virtio/vhost-user-fs.c      | 12 ++++++----
 hw/virtio/vhost-vsock-common.c | 12 ++++++----
 hw/virtio/vhost.c              | 44 ++++++++++++++++++++++++++++++++--
 hw/virtio/virtio-crypto.c      | 13 ++++++----
 hw/virtio/virtio.c             | 28 ++++++++++++++++++++++
 include/hw/virtio/vhost.h      |  4 ++++
 include/hw/virtio/virtio.h     | 23 ++++++++++++++++--
 include/net/vhost_net.h        |  3 +++
 12 files changed, 169 insertions(+), 30 deletions(-)

diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
index 51f1747c4a..959ad115b6 100644
--- a/hw/display/vhost-user-gpu.c
+++ b/hw/display/vhost-user-gpu.c
@@ -487,18 +487,24 @@ vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
 }
 
 static bool
-vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
+vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx,
+                                            int type)
 {
     VhostUserGPU *g = VHOST_USER_GPU(vdev);
-
+    if (type != VIRTIO_VQ_VECTOR) {
+        return false;
+    }
     return vhost_virtqueue_pending(&g->vhost->dev, idx);
 }
 
 static void
-vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
+vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask,
+                                        int type)
 {
     VhostUserGPU *g = VHOST_USER_GPU(vdev);
-
+    if (type != VIRTIO_VQ_VECTOR) {
+        return;
+    }
     vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
 }
 
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 24d555e764..2ef8cc608e 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -339,7 +339,9 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
             dev->use_guest_notifier_mask = false;
         }
      }
-
+    if (ncs->peer && ncs->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
+        dev->use_config_notifier = VIRTIO_CONFIG_SUPPORT;
+    }
     r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
     if (r < 0) {
         error_report("Error binding guest notifier: %d", -r);
@@ -391,7 +393,6 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
     for (i = 0; i < total_queues; i++) {
         vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
     }
-
     r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
     if (r < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
@@ -426,6 +427,17 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
     vhost_virtqueue_mask(&net->dev, dev, idx, mask);
 }
 
+bool vhost_net_config_pending(VHostNetState *net, int idx)
+{
+    return vhost_config_pending(&net->dev, idx);
+}
+
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
+                              bool mask)
+{
+    vhost_config_mask(&net->dev, dev, mask);
+}
+
 VHostNetState *get_vhost_net(NetClientState *nc)
 {
     VHostNetState *vhost_net = 0;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 9179013ac4..b84427fe99 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3055,22 +3055,36 @@ static NetClientInfo net_virtio_info = {
     .announce = virtio_net_announce,
 };
 
-static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
+
+static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx,
+                                int type)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
     assert(n->vhost_started);
-    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
+
+    if (type == VIRTIO_VQ_VECTOR) {
+        return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
+    }
+    if (type == VIRTIO_CONFIG_VECTOR) {
+        return vhost_net_config_pending(get_vhost_net(nc->peer), idx);
+    }
+    return false;
 }
 
 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
-                                           bool mask)
+                                           bool mask, int type)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
     assert(n->vhost_started);
-    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
-                             vdev, idx, mask);
+
+    if (type == VIRTIO_VQ_VECTOR) {
+        vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
+    }
+    if (type == VIRTIO_CONFIG_VECTOR) {
+        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
+    }
 }
 
 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 4582e94ae7..234f749548 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -1003,16 +1003,16 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
          * need to manually trigger any guest masking callbacks here.
          */
         if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
-            k->guest_notifier_mask(vdev, n, false);
+            k->guest_notifier_mask(vdev, n, false, VIRTIO_VQ_VECTOR);
         }
         /* get lost events and re-inject */
         if (k->guest_notifier_pending &&
-            k->guest_notifier_pending(vdev, n)) {
+            k->guest_notifier_pending(vdev, n, VIRTIO_VQ_VECTOR)) {
             event_notifier_set(notifier);
         }
     } else {
         if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
-            k->guest_notifier_mask(vdev, n, true);
+            k->guest_notifier_mask(vdev, n, true, VIRTIO_VQ_VECTOR);
         }
         if (with_irqfd) {
             virtio_ccw_remove_irqfd(dev, n);
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index 1bc5d03a00..22358767f1 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -143,17 +143,21 @@ static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 }
 
 static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
-                                            bool mask)
+                                            bool mask, int type)
 {
     VHostUserFS *fs = VHOST_USER_FS(vdev);
-
+    if (type != VIRTIO_VQ_VECTOR) {
+        return;
+    }
     vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
 }
 
-static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
+static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx, int type)
 {
     VHostUserFS *fs = VHOST_USER_FS(vdev);
-
+    if (type != VIRTIO_VQ_VECTOR) {
+        return false;
+    }
     return vhost_virtqueue_pending(&fs->vhost_dev, idx);
 }
 
diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index 5b2ebf3496..92c133c54c 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -97,18 +97,22 @@ static void vhost_vsock_common_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 }
 
 static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx,
-                                            bool mask)
+                                            bool mask, int type)
 {
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
-
+    if (type != VIRTIO_VQ_VECTOR) {
+        return;
+    }
     vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, mask);
 }
 
 static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev,
-                                               int idx)
+                                               int idx, int type)
 {
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
-
+    if (type != VIRTIO_VQ_VECTOR) {
+        return false;
+    }
     return vhost_virtqueue_pending(&vvc->vhost_dev, idx);
 }
 
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 614ccc2bcb..02e4d37dc0 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1255,8 +1255,8 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
     if (r < 0) {
         return r;
     }
-
     file.fd = event_notifier_get_fd(&vq->masked_notifier);
+
     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
     if (r) {
         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
@@ -1313,6 +1313,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
             goto fail;
         }
     }
+    r = event_notifier_init(&hdev->masked_config_notifier, 0);
+    if (r < 0) {
+        goto fail;
+    }
 
     if (busyloop_timeout) {
         for (i = 0; i < hdev->nvqs; ++i) {
@@ -1405,6 +1409,8 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
     for (i = 0; i < hdev->nvqs; ++i) {
         vhost_virtqueue_cleanup(hdev->vqs + i);
     }
+    event_notifier_cleanup(&hdev->masked_config_notifier);
+
     if (hdev->mem) {
         /* those are only safe after successful init */
         memory_listener_unregister(&hdev->memory_listener);
@@ -1498,6 +1504,10 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
     return event_notifier_test_and_clear(&vq->masked_notifier);
 }
 
+bool vhost_config_pending(struct vhost_dev *hdev, int n)
+{
+    return event_notifier_test_and_clear(&hdev->masked_config_notifier);
+}
 /* Mask/unmask events from this vq. */
 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
@@ -1523,6 +1533,31 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
     }
 }
 
+/* Mask/unmask events from this config. */
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
+                         bool mask)
+{
+    int fd;
+    int r;
+    EventNotifier *masked_config_notifier = &hdev->masked_config_notifier;
+    EventNotifier *config_notifier = &vdev->config_notifier;
+    if (vdev->use_config_notifier != VIRTIO_CONFIG_WORK) {
+        return;
+    }
+    /* should only be called after backend is connected */
+    assert(hdev->vhost_ops);
+    if (mask) {
+        assert(vdev->use_guest_notifier_mask);
+        fd = event_notifier_get_fd(masked_config_notifier);
+    } else {
+        fd = event_notifier_get_fd(config_notifier);
+    }
+    r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
+    if (r < 0) {
+        error_report("vhost_set_config_call failed");
+    }
+}
+
 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                             uint64_t features)
 {
@@ -1732,7 +1767,12 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
             goto fail_vq;
         }
     }
-
+    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
+        event_notifier_test_and_clear(&hdev->masked_config_notifier);
+        if (!vdev->use_guest_notifier_mask) {
+            vhost_config_mask(hdev, vdev, false);
+        }
+    }
     if (hdev->log_enabled) {
         uint64_t log_base;
 
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index 54f9bbb789..ab7958465c 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -941,23 +941,28 @@ static void virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status)
 }
 
 static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
-                                           bool mask)
+                                           bool mask, int type)
 {
     VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
     int queue = virtio_crypto_vq2q(idx);
 
     assert(vcrypto->vhost_started);
-
+    if (type != VIRTIO_VQ_VECTOR) {
+        return;
+    }
     cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask);
 }
 
-static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
+static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx,
+                                           int type)
 {
     VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
     int queue = virtio_crypto_vq2q(idx);
 
     assert(vcrypto->vhost_started);
-
+    if (type != VIRTIO_VQ_VECTOR) {
+        return false;
+    }
     return cryptodev_vhost_virtqueue_pending(vdev, queue, idx);
 }
 
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index ceb58fda6c..7d1a68c87a 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3278,6 +3278,8 @@ void virtio_init(VirtIODevice *vdev, const char *name,
             virtio_vmstate_change, vdev);
     vdev->device_endian = virtio_default_endian();
     vdev->use_guest_notifier_mask = true;
+    vdev->use_config_notifier = VIRTIO_CONFIG_STATUS_UNKNOWN;
+
 }
 
 /*
@@ -3502,6 +3504,16 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
     }
 }
 
+static void virtio_config_read(EventNotifier *n)
+{
+    VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
+    if (vdev->use_config_notifier != VIRTIO_CONFIG_WORK) {
+        return;
+    }
+    if (event_notifier_test_and_clear(n)) {
+        virtio_notify_config(vdev);
+    }
+}
 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                 bool with_irqfd)
 {
@@ -3518,6 +3530,17 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
     }
 }
 
+void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
+                                                bool with_irqfd)
+{
+    if (assign && !with_irqfd) {
+        event_notifier_set_handler(&vdev->config_notifier,
+                                   virtio_config_read);
+    } else {
+        event_notifier_set_handler(&vdev->config_notifier, NULL);
+    }
+}
+
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
 {
     return &vq->guest_notifier;
@@ -3591,6 +3614,11 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
     return &vq->host_notifier;
 }
 
+EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev)
+{
+    return &vdev->config_notifier;
+
+}
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
 {
     vq->host_notifier_enabled = enabled;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 4a8bc75415..75bbc1a4fa 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -91,6 +91,8 @@ struct vhost_dev {
     QLIST_HEAD(, vhost_iommu) iommu_list;
     IOMMUNotifier n;
     const VhostDevConfigOps *config_ops;
+    EventNotifier masked_config_notifier;
+
 };
 
 struct vhost_net {
@@ -108,6 +110,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+bool vhost_config_pending(struct vhost_dev *hdev, int n);
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask);
 
 /* Test and clear masked event pending status.
  * Should be called after unmask to avoid losing events.
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index b7ece7a6a8..24e5bfae61 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -67,6 +67,19 @@ typedef struct VirtQueueElement
 
 #define VIRTIO_NO_VECTOR 0xffff
 
+enum virtio_vector_type {
+    VIRTIO_VQ_VECTOR,
+    VIRTIO_CONFIG_VECTOR,
+    VIRTIO_VECTOR_UNKNOWN,
+};
+
+enum virtio_config_status {
+    VIRTIO_CONFIG_SUPPORT,
+    VIRTIO_CONFIG_WORK,
+    VIRTIO_CONFIG_STOP,
+    VIRTIO_CONFIG_STATUS_UNKNOWN,
+};
+
 #define TYPE_VIRTIO_DEVICE "virtio-device"
 OBJECT_DECLARE_TYPE(VirtIODevice, VirtioDeviceClass, VIRTIO_DEVICE)
 
@@ -108,6 +121,8 @@ struct VirtIODevice
     bool use_guest_notifier_mask;
     AddressSpace *dma_as;
     QLIST_HEAD(, VirtQueue) *vector_queues;
+    EventNotifier config_notifier;
+    enum virtio_config_status use_config_notifier;
 };
 
 struct VirtioDeviceClass {
@@ -138,13 +153,13 @@ struct VirtioDeviceClass {
      * If backend does not support masking,
      * must check in frontend instead.
      */
-    bool (*guest_notifier_pending)(VirtIODevice *vdev, int n);
+    bool (*guest_notifier_pending)(VirtIODevice *vdev, int n, int type);
     /* Mask/unmask events from this vq. Any events reported
      * while masked will become pending.
      * If backend does not support masking,
      * must mask in frontend instead.
      */
-    void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
+    void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask, int type);
     int (*start_ioeventfd)(VirtIODevice *vdev);
     void (*stop_ioeventfd)(VirtIODevice *vdev);
     /* Saving and loading of a device; trying to deprecate save/load
@@ -310,11 +325,15 @@ uint16_t virtio_get_queue_index(VirtQueue *vq);
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                 bool with_irqfd);
+void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
+                                                bool with_irqfd);
+
 int virtio_device_start_ioeventfd(VirtIODevice *vdev);
 int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
 void virtio_device_release_ioeventfd(VirtIODevice *vdev);
 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev);
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
 void virtio_queue_host_notifier_read(EventNotifier *n);
 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 172b0051d8..0d38c97c94 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -36,6 +36,9 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
 bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
 void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                               int idx, bool mask);
+bool vhost_net_config_pending(VHostNetState *net, int n);
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
+                              bool mask);
 int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
 VHostNetState *get_vhost_net(NetClientState *nc);
 
-- 
2.21.3




* [PATCH v4 2/4] vhost-vdpa: add callback function for configure interrupt
  2021-03-23  1:56 [PATCH v4 0/4] vhost-vdpa: add support for configure interrupt Cindy Lu
  2021-03-23  1:56 ` [PATCH v4 1/4] virtio: add support for " Cindy Lu
@ 2021-03-23  1:56 ` Cindy Lu
  2021-03-24  6:33   ` Jason Wang
  2021-03-23  1:56 ` [PATCH v4 3/4] virtio-mmio: add support " Cindy Lu
  2021-03-23  1:56 ` [PATCH v4 4/4] virtio-pci: " Cindy Lu
  3 siblings, 1 reply; 13+ messages in thread
From: Cindy Lu @ 2021-03-23  1:56 UTC (permalink / raw)
  To: lulu, mst, jasowang, qemu-devel

Add a callback function for the configure interrupt.
Set the notifier's fd in the kernel driver when vdpa starts,
and set it to -1 when vdpa stops, so that the kernel releases
the related resources.
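
In other words, the fd lifecycle is (sketch only, error handling
omitted; vhost_set_config_call is the vhost_ops callback added below):

    /* vdpa start: route config interrupts to the notifier's eventfd */
    int fd = event_notifier_get_fd(&vdev->config_notifier);
    dev->vhost_ops->vhost_set_config_call(dev, &fd);

    /* vdpa stop: pass -1 so the kernel driver releases the call fd */
    fd = -1;
    dev->vhost_ops->vhost_set_config_call(dev, &fd);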

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/virtio/trace-events            |  2 ++
 hw/virtio/vhost-vdpa.c            | 40 +++++++++++++++++++++++++++++--
 include/hw/virtio/vhost-backend.h |  4 ++++
 3 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 2060a144a2..6710835b46 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -52,6 +52,8 @@ vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index:
 vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
 vhost_vdpa_set_owner(void *dev) "dev: %p"
 vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
+vhost_vdpa_set_config_call(void *dev, int *fd) "dev: %p fd: %p"
+
 
 # virtio.c
 virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 01d2101d09..bde32eefe7 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -467,20 +467,47 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
     }
     return ret;
  }
-
+static void vhost_vdpa_config_notify_start(struct vhost_dev *dev,
+                                struct VirtIODevice *vdev, bool start)
+{
+    int fd = 0;
+    int r = 0;
+    if (!(dev->features & (0x1ULL << VIRTIO_NET_F_STATUS))) {
+        return;
+    }
+    if (start) {
+        fd = event_notifier_get_fd(&vdev->config_notifier);
+        r = dev->vhost_ops->vhost_set_config_call(dev, &fd);
+        /* set the fd callback for the vdpa driver */
+        if (!r) {
+            vdev->use_config_notifier = VIRTIO_CONFIG_WORK;
+            event_notifier_set(&vdev->config_notifier);
+            info_report("vhost_vdpa_config_notify start!");
+        }
+    } else {
+        fd = -1;
+        vdev->use_config_notifier = VIRTIO_CONFIG_STOP;
+        r = dev->vhost_ops->vhost_set_config_call(dev, &fd);
+    }
+    return;
+}
 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
 {
     struct vhost_vdpa *v = dev->opaque;
     trace_vhost_vdpa_dev_start(dev, started);
+    VirtIODevice *vdev = dev->vdev;
+
     if (started) {
         uint8_t status = 0;
         memory_listener_register(&v->listener, &address_space_memory);
         vhost_vdpa_set_vring_ready(dev);
         vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
         vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
-
+        /*set the configure interrupt call back*/
+        vhost_vdpa_config_notify_start(dev, vdev, true);
         return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
     } else {
+        vhost_vdpa_config_notify_start(dev, vdev, false);
         vhost_vdpa_reset_device(dev);
         vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                    VIRTIO_CONFIG_S_DRIVER);
@@ -546,6 +573,14 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
 }
 
+static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
+                                       int *fd)
+{
+    trace_vhost_vdpa_set_config_call(dev, fd);
+
+    return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, fd);
+}
+
 static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                      uint64_t *features)
 {
@@ -611,4 +646,5 @@ const VhostOps vdpa_ops = {
         .vhost_get_device_id = vhost_vdpa_get_device_id,
         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
         .vhost_force_iommu = vhost_vdpa_force_iommu,
+        .vhost_set_config_call = vhost_vdpa_set_config_call,
 };
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 8a6f8e2a7a..1a2fee8994 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -125,6 +125,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
 
 typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
 
+typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
+                                       int *fd);
+
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_backend_init vhost_backend_init;
@@ -170,6 +173,7 @@ typedef struct VhostOps {
     vhost_vq_get_addr_op  vhost_vq_get_addr;
     vhost_get_device_id_op vhost_get_device_id;
     vhost_force_iommu_op vhost_force_iommu;
+    vhost_set_config_call_op vhost_set_config_call;
 } VhostOps;
 
 extern const VhostOps user_ops;
-- 
2.21.3




* [PATCH v4 3/4] virtio-mmio: add support for configure interrupt
  2021-03-23  1:56 [PATCH v4 0/4] vhost-vdpa: add support for configure interrupt Cindy Lu
  2021-03-23  1:56 ` [PATCH v4 1/4] virtio: add support for " Cindy Lu
  2021-03-23  1:56 ` [PATCH v4 2/4] vhost-vdpa: add callback function for " Cindy Lu
@ 2021-03-23  1:56 ` Cindy Lu
  2021-03-23  1:56 ` [PATCH v4 4/4] virtio-pci: " Cindy Lu
  3 siblings, 0 replies; 13+ messages in thread
From: Cindy Lu @ 2021-03-23  1:56 UTC (permalink / raw)
  To: lulu, mst, jasowang, qemu-devel

Add configure interrupt support for the virtio-mmio bus. This
interrupt works when the backend is vhost-vdpa.
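
Since virtio-mmio has no MSI-X irqfd path, the config eventfd is
handled in QEMU itself: the fd handler installed below is
virtio_config_read() from patch 1, which (roughly, omitting the
use_config_notifier state check) does:

    static void virtio_config_read(EventNotifier *n)
    {
        VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);

        if (event_notifier_test_and_clear(n)) {
            /* raises the config-change interrupt on the mmio transport */
            virtio_notify_config(vdev);
        }
    }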

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/virtio/virtio-mmio.c | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index e1b5c3b81e..beabd129ef 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -627,12 +627,30 @@ static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
     }
 
     if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
-        vdc->guest_notifier_mask(vdev, n, !assign);
+        vdc->guest_notifier_mask(vdev, n, !assign, VIRTIO_VQ_VECTOR);
     }
-
     return 0;
 }
+static int virtio_mmio_set_config_notifier(DeviceState *d, bool assign)
+{
+    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
 
+    EventNotifier *notifier = virtio_get_config_notifier(vdev);
+    int r = 0;
+    if (assign) {
+        r = event_notifier_init(notifier, 0);
+        virtio_set_config_notifier_fd_handler(vdev, true, false);
+    } else {
+        virtio_set_config_notifier_fd_handler(vdev, false, false);
+        event_notifier_cleanup(notifier);
+    }
+    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
+        vdc->guest_notifier_mask(vdev, 0, !assign, VIRTIO_CONFIG_VECTOR);
+    }
+    return r;
+}
 static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                            bool assign)
 {
@@ -654,8 +672,15 @@ static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
             goto assign_error;
         }
     }
+    r = virtio_mmio_set_config_notifier(d, assign);
+    if (r < 0) {
+        goto config_assign_error;
+    }
 
     return 0;
+config_assign_error:
+    assert(assign);
+    r = virtio_mmio_set_config_notifier(d, false);
 
 assign_error:
     /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
@@ -666,6 +691,7 @@ assign_error:
     return r;
 }
 
+
 static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
 {
     VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
-- 
2.21.3




* [PATCH v4 4/4] virtio-pci: add support for configure interrupt
  2021-03-23  1:56 [PATCH v4 0/4] vhost-vdpa: add support for configure interrupt Cindy Lu
                   ` (2 preceding siblings ...)
  2021-03-23  1:56 ` [PATCH v4 3/4] virtio-mmio: add support " Cindy Lu
@ 2021-03-23  1:56 ` Cindy Lu
  2021-03-24  6:34   ` Jason Wang
  3 siblings, 1 reply; 13+ messages in thread
From: Cindy Lu @ 2021-03-23  1:56 UTC (permalink / raw)
  To: lulu, mst, jasowang, qemu-devel

Add support for the configure interrupt: use kvm_irqfd_assign to set
the gsi in the kernel. When the host kernel signals the configure
notifier via eventfd_signal, an MSI-X interrupt is finally injected
into the guest.
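
For the config vector this follows the same two steps already used for
queue vectors (sketch only, using the helpers as refactored below):

    /* 1. allocate a KVM MSI route for the vector */
    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);

    /* 2. attach the config notifier's eventfd to that route, so an
     *    eventfd_signal from the host kernel injects MSI-X directly */
    n = virtio_get_config_notifier(vdev);
    ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);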

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/virtio/virtio-pci.c | 171 +++++++++++++++++++++++++++++++++--------
 1 file changed, 137 insertions(+), 34 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 36524a5728..b0c190caba 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -664,7 +664,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
 }
 
 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
-                                        unsigned int queue_no,
                                         unsigned int vector)
 {
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
@@ -691,23 +690,17 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
 }
 
 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
-                                 unsigned int queue_no,
+                                 EventNotifier *n,
                                  unsigned int vector)
 {
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
-    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
-    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
 }
 
 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
-                                      unsigned int queue_no,
+                                      EventNotifier *n,
                                       unsigned int vector)
 {
-    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
-    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
     int ret;
 
@@ -722,7 +715,8 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
     unsigned int vector;
     int ret, queue_no;
-
+    VirtQueue *vq;
+    EventNotifier *n;
     for (queue_no = 0; queue_no < nvqs; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
@@ -731,7 +725,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
         if (vector >= msix_nr_vectors_allocated(dev)) {
             continue;
         }
-        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
+        ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
         if (ret < 0) {
             goto undo;
         }
@@ -739,7 +733,9 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
          * Otherwise, delay until unmasked in the frontend.
          */
         if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+            vq = virtio_get_queue(vdev, queue_no);
+            n = virtio_queue_get_guest_notifier(vq);
+            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
             if (ret < 0) {
                 kvm_virtio_pci_vq_vector_release(proxy, vector);
                 goto undo;
@@ -755,13 +751,69 @@ undo:
             continue;
         }
         if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+            vq = virtio_get_queue(vdev, queue_no);
+            n = virtio_queue_get_guest_notifier(vq);
+            kvm_virtio_pci_irqfd_release(proxy, n, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
     }
     return ret;
 }
 
+static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
+{
+
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    unsigned int vector;
+    int ret;
+    EventNotifier *n = virtio_get_config_notifier(vdev);
+
+    vector = vdev->config_vector;
+    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
+    if (ret < 0) {
+        goto undo;
+    }
+    ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
+    if (ret < 0) {
+        goto undo;
+    }
+    return 0;
+undo:
+    kvm_virtio_pci_irqfd_release(proxy, n, vector);
+    return ret;
+}
+static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
+{
+    PCIDevice *dev = &proxy->pci_dev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    unsigned int vector;
+    EventNotifier *n = virtio_get_config_notifier(vdev);
+    vector = vdev->config_vector;
+    if (vector >= msix_nr_vectors_allocated(dev)) {
+        return;
+    }
+    kvm_virtio_pci_irqfd_release(proxy, n, vector);
+    kvm_virtio_pci_vq_vector_release(proxy, vector);
+}
+
+static int virtio_pci_set_config_notifier(DeviceState *d, bool assign)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    EventNotifier *notifier = virtio_get_config_notifier(vdev);
+    int r = 0;
+    if (assign) {
+        r = event_notifier_init(notifier, 0);
+        virtio_set_config_notifier_fd_handler(vdev, true, true);
+        kvm_virtio_pci_vector_config_use(proxy);
+    } else {
+        virtio_set_config_notifier_fd_handler(vdev, false, true);
+        kvm_virtio_pci_vector_config_release(proxy);
+        event_notifier_cleanup(notifier);
+    }
+    return r;
+}
+
 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
 {
     PCIDevice *dev = &proxy->pci_dev;
@@ -769,7 +821,8 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
     unsigned int vector;
     int queue_no;
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-
+    VirtQueue *vq;
+    EventNotifier *n;
     for (queue_no = 0; queue_no < nvqs; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
@@ -782,7 +835,9 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
          * Otherwise, it was cleaned when masked in the frontend.
          */
         if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+            vq = virtio_get_queue(vdev, queue_no);
+            n = virtio_queue_get_guest_notifier(vq);
+            kvm_virtio_pci_irqfd_release(proxy, n, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
     }
@@ -791,15 +846,14 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
-                                       MSIMessage msg)
+                                       MSIMessage msg,
+                                       int type,
+                                       EventNotifier *n)
 {
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
-    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     VirtIOIRQFD *irqfd;
     int ret = 0;
-
     if (proxy->vector_irqfd) {
         irqfd = &proxy->vector_irqfd[vector];
         if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
@@ -816,32 +870,33 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
      * Otherwise, set it up now.
      */
     if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-        k->guest_notifier_mask(vdev, queue_no, false);
+        k->guest_notifier_mask(vdev, queue_no, false, type);
         /* Test after unmasking to avoid losing events. */
         if (k->guest_notifier_pending &&
-            k->guest_notifier_pending(vdev, queue_no)) {
+            k->guest_notifier_pending(vdev, queue_no, type)) {
             event_notifier_set(n);
         }
     } else {
-        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
     }
     return ret;
 }
 
 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                              unsigned int queue_no,
-                                             unsigned int vector)
+                                             unsigned int vector,
+                                             int type,
+                                             EventNotifier *n)
 {
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-
     /* If guest supports masking, keep irqfd but mask it.
      * Otherwise, clean it up now.
      */ 
     if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-        k->guest_notifier_mask(vdev, queue_no, true);
+        k->guest_notifier_mask(vdev, queue_no, true, type);
     } else {
-        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+        kvm_virtio_pci_irqfd_release(proxy, n, vector);
     }
 }
 
@@ -851,15 +906,26 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+    EventNotifier *n;
     int ret, index, unmasked = 0;
 
+    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
+        n = virtio_get_config_notifier(vdev);
+        ret = virtio_pci_vq_vector_unmask(proxy, 0, vector, msg,
+                    VIRTIO_CONFIG_VECTOR, n);
+        if (ret < 0) {
+            goto config_undo;
+        }
+    }
     while (vq) {
         index = virtio_get_queue_index(vq);
         if (!virtio_queue_get_num(vdev, index)) {
             break;
         }
         if (index < proxy->nvqs_with_notifiers) {
-            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
+            n = virtio_queue_get_guest_notifier(vq);
+            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg,
+                        VIRTIO_VQ_VECTOR, n);
             if (ret < 0) {
                 goto undo;
             }
@@ -875,11 +941,17 @@ undo:
     while (vq && unmasked >= 0) {
         index = virtio_get_queue_index(vq);
         if (index < proxy->nvqs_with_notifiers) {
-            virtio_pci_vq_vector_mask(proxy, index, vector);
+            n = virtio_queue_get_guest_notifier(vq);
+            virtio_pci_vq_vector_mask(proxy, index, vector,
+                 VIRTIO_VQ_VECTOR, n);
             --unmasked;
         }
         vq = virtio_vector_next_queue(vq);
     }
+config_undo:
+    n = virtio_get_config_notifier(vdev);
+    virtio_pci_vq_vector_mask(proxy, 0, vector,
+                              VIRTIO_CONFIG_VECTOR, n);
     return ret;
 }
 
@@ -888,18 +960,26 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+    EventNotifier *n;
     int index;
 
+    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
+        n = virtio_get_config_notifier(vdev);
+        virtio_pci_vq_vector_mask(proxy, 0, vector, VIRTIO_CONFIG_VECTOR, n);
+    }
     while (vq) {
         index = virtio_get_queue_index(vq);
+        n = virtio_queue_get_guest_notifier(vq);
         if (!virtio_queue_get_num(vdev, index)) {
             break;
         }
         if (index < proxy->nvqs_with_notifiers) {
-            virtio_pci_vq_vector_mask(proxy, index, vector);
+            virtio_pci_vq_vector_mask(proxy, index, vector,
+                VIRTIO_VQ_VECTOR, n);
         }
         vq = virtio_vector_next_queue(vq);
     }
+
 }
 
 static void virtio_pci_vector_poll(PCIDevice *dev,
@@ -918,6 +998,7 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
         }
+
         vector = virtio_queue_vector(vdev, queue_no);
         if (vector < vector_start || vector >= vector_end ||
             !msix_is_masked(dev, vector)) {
@@ -926,7 +1007,22 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
         vq = virtio_get_queue(vdev, queue_no);
         notifier = virtio_queue_get_guest_notifier(vq);
         if (k->guest_notifier_pending) {
-            if (k->guest_notifier_pending(vdev, queue_no)) {
+            if (k->guest_notifier_pending(vdev, queue_no, VIRTIO_VQ_VECTOR)) {
+                msix_set_pending(dev, vector);
+            }
+        } else if (event_notifier_test_and_clear(notifier)) {
+            msix_set_pending(dev, vector);
+        }
+    }
+    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
+        vector = vdev->config_vector;
+        notifier = virtio_get_config_notifier(vdev);
+        if (vector < vector_start || vector >= vector_end ||
+            !msix_is_masked(dev, vector)) {
+            return;
+        }
+        if (k->guest_notifier_pending) {
+            if (k->guest_notifier_pending(vdev, 0, VIRTIO_CONFIG_VECTOR)) {
                 msix_set_pending(dev, vector);
             }
         } else if (event_notifier_test_and_clear(notifier)) {
@@ -958,7 +1054,7 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
     if (!msix_enabled(&proxy->pci_dev) &&
         vdev->use_guest_notifier_mask &&
         vdc->guest_notifier_mask) {
-        vdc->guest_notifier_mask(vdev, n, !assign);
+        vdc->guest_notifier_mask(vdev, n, !assign, VIRTIO_VQ_VECTOR);
     }
 
     return 0;
@@ -1008,7 +1104,6 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
             goto assign_error;
         }
     }
-
     /* Must set vector notifier after guest notifier has been assigned */
     if ((with_irqfd || k->guest_notifier_mask) && assign) {
         if (with_irqfd) {
@@ -1020,6 +1115,12 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
                 goto assign_error;
             }
         }
+        if (vdev->use_config_notifier == VIRTIO_CONFIG_SUPPORT) {
+            r = virtio_pci_set_config_notifier(d, assign);
+            if (r < 0) {
+                goto config_error;
+            }
+        }
         r = msix_set_vector_notifiers(&proxy->pci_dev,
                                       virtio_pci_vector_unmask,
                                       virtio_pci_vector_mask,
@@ -1028,7 +1129,6 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
             goto notifiers_error;
         }
     }
-
     return 0;
 
 notifiers_error:
@@ -1036,13 +1136,16 @@ notifiers_error:
         assert(assign);
         kvm_virtio_pci_vector_release(proxy, nvqs);
     }
-
+config_error:
+    /* Recover by releasing the config vector resources. */
+    kvm_virtio_pci_vector_config_release(proxy);
 assign_error:
     /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
     assert(assign);
     while (--n >= 0) {
         virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
     }
+
     return r;
 }
 
-- 
2.21.3




* Re: [PATCH v4 1/4] virtio: add support for configure interrupt
  2021-03-23  1:56 ` [PATCH v4 1/4] virtio: add support for " Cindy Lu
@ 2021-03-24  6:30   ` Jason Wang
  2021-03-25  7:15     ` Cindy Lu
  0 siblings, 1 reply; 13+ messages in thread
From: Jason Wang @ 2021-03-24  6:30 UTC (permalink / raw)
  To: Cindy Lu, mst, qemu-devel


On 2021/3/23 9:56 AM, Cindy Lu wrote:
> Add configure notifier support in virtio and the related devices.
> When the peer is vhost-vdpa, set up the configure interrupt in
> vhost_net_start() and release the resources in vhost_net_stop().


So this patch doesn't compile, please fix.


>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
>   hw/display/vhost-user-gpu.c    | 14 +++++++----
>   hw/net/vhost_net.c             | 16 +++++++++++--
>   hw/net/virtio-net.c            | 24 +++++++++++++++----
>   hw/s390x/virtio-ccw.c          |  6 ++---
>   hw/virtio/vhost-user-fs.c      | 12 ++++++----
>   hw/virtio/vhost-vsock-common.c | 12 ++++++----
>   hw/virtio/vhost.c              | 44 ++++++++++++++++++++++++++++++++--
>   hw/virtio/virtio-crypto.c      | 13 ++++++----
>   hw/virtio/virtio.c             | 28 ++++++++++++++++++++++
>   include/hw/virtio/vhost.h      |  4 ++++
>   include/hw/virtio/virtio.h     | 23 ++++++++++++++++--
>   include/net/vhost_net.h        |  3 +++
>   12 files changed, 169 insertions(+), 30 deletions(-)
>
> diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
> index 51f1747c4a..959ad115b6 100644
> --- a/hw/display/vhost-user-gpu.c
> +++ b/hw/display/vhost-user-gpu.c
> @@ -487,18 +487,24 @@ vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
>   }
>   
>   static bool
> -vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
> +vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx,
> +                                            int type)
>   {
>       VhostUserGPU *g = VHOST_USER_GPU(vdev);
> -
> +    if (type != VIRTIO_VQ_VECTOR) {
> +        return false;
> +    }
>       return vhost_virtqueue_pending(&g->vhost->dev, idx);
>   }
>   
>   static void
> -vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
> +vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask,
> +                                        int type)
>   {
>       VhostUserGPU *g = VHOST_USER_GPU(vdev);
> -
> +    if (type != VIRTIO_VQ_VECTOR) {
> +        return;
> +    }
>       vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
>   }
>   
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 24d555e764..2ef8cc608e 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -339,7 +339,9 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
>               dev->use_guest_notifier_mask = false;
>           }
>        }
> -
> +    if (ncs->peer && ncs->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
> +        dev->use_config_notifier = VIRTIO_CONFIG_SUPPORT;
> +    }
>       r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
>       if (r < 0) {
>           error_report("Error binding guest notifier: %d", -r);
> @@ -391,7 +393,6 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
>       for (i = 0; i < total_queues; i++) {
>           vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
>       }
> -
>       r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
>       if (r < 0) {
>           fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
> @@ -426,6 +427,17 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
>       vhost_virtqueue_mask(&net->dev, dev, idx, mask);
>   }
>   
> +bool vhost_net_config_pending(VHostNetState *net, int idx)
> +{
> +    return vhost_config_pending(&net->dev, idx);
> +}
> +
> +void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
> +                              bool mask)
> +{
> +    vhost_config_mask(&net->dev, dev, mask);
> +}
> +
>   VHostNetState *get_vhost_net(NetClientState *nc)
>   {
>       VHostNetState *vhost_net = 0;
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 9179013ac4..b84427fe99 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -3055,22 +3055,36 @@ static NetClientInfo net_virtio_info = {
>       .announce = virtio_net_announce,
>   };
>   
> -static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
> +
> +static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx,
> +                                int type)
>   {
>       VirtIONet *n = VIRTIO_NET(vdev);
>       NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
>       assert(n->vhost_started);
> -    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> +
> +    if (type == VIRTIO_VQ_VECTOR) {
> +        return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> +    }
> +    if (type == VIRTIO_CONFIG_VECTOR) {
> +        return vhost_net_config_pending(get_vhost_net(nc->peer), idx);
> +    }
> +    return false;
>   }
>   
>   static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
> -                                           bool mask)
> +                                           bool mask, int type)
>   {
>       VirtIONet *n = VIRTIO_NET(vdev);
>       NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
>       assert(n->vhost_started);
> -    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
> -                             vdev, idx, mask);
> +
> +    if (type == VIRTIO_VQ_VECTOR) {
> +        vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
> +    }
> +    if (type == VIRTIO_CONFIG_VECTOR) {
> +        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
> +    }
>   }
>   
>   static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
> diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
> index 4582e94ae7..234f749548 100644
> --- a/hw/s390x/virtio-ccw.c
> +++ b/hw/s390x/virtio-ccw.c
> @@ -1003,16 +1003,16 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
>            * need to manually trigger any guest masking callbacks here.
>            */
>           if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
> -            k->guest_notifier_mask(vdev, n, false);
> +            k->guest_notifier_mask(vdev, n, false, VIRTIO_VQ_VECTOR);
>           }
>           /* get lost events and re-inject */
>           if (k->guest_notifier_pending &&
> -            k->guest_notifier_pending(vdev, n)) {
> +            k->guest_notifier_pending(vdev, n, VIRTIO_VQ_VECTOR)) {
>               event_notifier_set(notifier);
>           }
>       } else {
>           if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
> -            k->guest_notifier_mask(vdev, n, true);
> +            k->guest_notifier_mask(vdev, n, true, VIRTIO_VQ_VECTOR);
>           }
>           if (with_irqfd) {
>               virtio_ccw_remove_irqfd(dev, n);
> diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
> index 1bc5d03a00..22358767f1 100644
> --- a/hw/virtio/vhost-user-fs.c
> +++ b/hw/virtio/vhost-user-fs.c
> @@ -143,17 +143,21 @@ static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
>   }
>   
>   static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
> -                                            bool mask)
> +                                            bool mask, int type)
>   {
>       VHostUserFS *fs = VHOST_USER_FS(vdev);
> -
> +    if (type != VIRTIO_VQ_VECTOR) {
> +        return;
> +    }
>       vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
>   }
>   
> -static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
> +static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx, int type)
>   {
>       VHostUserFS *fs = VHOST_USER_FS(vdev);
> -
> +    if (type != VIRTIO_VQ_VECTOR) {
> +        return false;
> +    }
>       return vhost_virtqueue_pending(&fs->vhost_dev, idx);
>   }
>   
> diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> index 5b2ebf3496..92c133c54c 100644
> --- a/hw/virtio/vhost-vsock-common.c
> +++ b/hw/virtio/vhost-vsock-common.c
> @@ -97,18 +97,22 @@ static void vhost_vsock_common_handle_output(VirtIODevice *vdev, VirtQueue *vq)
>   }
>   
>   static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx,
> -                                            bool mask)
> +                                            bool mask, int type)
>   {
>       VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> -
> +    if (type != VIRTIO_VQ_VECTOR) {
> +        return;
> +    }
>       vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, mask);
>   }
>   
>   static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev,
> -                                               int idx)
> +                                               int idx, int type)
>   {
>       VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> -
> +    if (type != VIRTIO_VQ_VECTOR) {
> +        return false;
> +    }
>       return vhost_virtqueue_pending(&vvc->vhost_dev, idx);
>   }
>   
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 614ccc2bcb..02e4d37dc0 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -1255,8 +1255,8 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
>       if (r < 0) {
>           return r;
>       }
> -
>       file.fd = event_notifier_get_fd(&vq->masked_notifier);
> +
>       r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
>       if (r) {
>           VHOST_OPS_DEBUG("vhost_set_vring_call failed");
> @@ -1313,6 +1313,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>               goto fail;
>           }
>       }
> +    r = event_notifier_init(&hdev->masked_config_notifier, 0);
> +    if (r < 0) {
> +        goto fail;
> +    }
>   
>       if (busyloop_timeout) {
>           for (i = 0; i < hdev->nvqs; ++i) {
> @@ -1405,6 +1409,8 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
>       for (i = 0; i < hdev->nvqs; ++i) {
>           vhost_virtqueue_cleanup(hdev->vqs + i);
>       }
> +    event_notifier_cleanup(&hdev->masked_config_notifier);
> +
>       if (hdev->mem) {
>           /* those are only safe after successful init */
>           memory_listener_unregister(&hdev->memory_listener);
> @@ -1498,6 +1504,10 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
>       return event_notifier_test_and_clear(&vq->masked_notifier);
>   }
>   
> +bool vhost_config_pending(struct vhost_dev *hdev, int n)
> +{
> +    return event_notifier_test_and_clear(&hdev->masked_config_notifier);
> +}
>   /* Mask/unmask events from this vq. */
>   void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
>                            bool mask)
> @@ -1523,6 +1533,31 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
>       }
>   }
>   
> +/* Mask/unmask events from this config. */
> +void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
> +                         bool mask)
> +{
> +    int fd;
> +    int r;
> +    EventNotifier *masked_config_notifier = &hdev->masked_config_notifier;
> +    EventNotifier *config_notifier = &vdev->config_notifier;
> +    if (vdev->use_config_notifier != VIRTIO_CONFIG_WORK) {
> +        return;
> +    }
> +    /* should only be called after backend is connected */
> +    assert(hdev->vhost_ops);
> +    if (mask) {
> +        assert(vdev->use_guest_notifier_mask);
> +        fd = event_notifier_get_fd(masked_config_notifier);
> +    } else {
> +        fd = event_notifier_get_fd(config_notifier);
> +    }
> +    r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
> +    if (r < 0) {
> +        error_report("vhost_set_config_call failed");
> +    }
> +}
> +
>   uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
>                               uint64_t features)
>   {
> @@ -1732,7 +1767,12 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
>               goto fail_vq;
>           }
>       }
> -
> +    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> +        event_notifier_test_and_clear(&hdev->masked_config_notifier);
> +        if (!vdev->use_guest_notifier_mask) {
> +            vhost_config_mask(hdev, vdev, false);
> +        }
> +    }
>       if (hdev->log_enabled) {
>           uint64_t log_base;
>   
> diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
> index 54f9bbb789..ab7958465c 100644
> --- a/hw/virtio/virtio-crypto.c
> +++ b/hw/virtio/virtio-crypto.c
> @@ -941,23 +941,28 @@ static void virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status)
>   }
>   
>   static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
> -                                           bool mask)
> +                                           bool mask, int type)
>   {
>       VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
>       int queue = virtio_crypto_vq2q(idx);
>   
>       assert(vcrypto->vhost_started);
> -
> +    if (type != VIRTIO_VQ_VECTOR) {
> +        return;
> +    }
>       cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask);
>   }
>   
> -static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
> +static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx,
> +                                           int type)
>   {
>       VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
>       int queue = virtio_crypto_vq2q(idx);
>   
>       assert(vcrypto->vhost_started);
> -
> +    if (type != VIRTIO_VQ_VECTOR) {
> +        return false;
> +    }
>       return cryptodev_vhost_virtqueue_pending(vdev, queue, idx);
>   }
>   
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index ceb58fda6c..7d1a68c87a 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -3278,6 +3278,8 @@ void virtio_init(VirtIODevice *vdev, const char *name,
>               virtio_vmstate_change, vdev);
>       vdev->device_endian = virtio_default_endian();
>       vdev->use_guest_notifier_mask = true;
> +    vdev->use_config_notifier = VIRTIO_CONFIG_STATUS_UNKNOWN;
> +
>   }
>   
>   /*
> @@ -3502,6 +3504,16 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
>       }
>   }
>   
> +static void virtio_config_read(EventNotifier *n)
> +{
> +    VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
> +    if (vdev->use_config_notifier != VIRTIO_CONFIG_WORK) {
> +        return;
> +    }
> +    if (event_notifier_test_and_clear(n)) {
> +        virtio_notify_config(vdev);
> +    }
> +}
>   void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
>                                                   bool with_irqfd)
>   {
> @@ -3518,6 +3530,17 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
>       }
>   }
>   
> +void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
> +                                                bool with_irqfd)
> +{
> +    if (assign && !with_irqfd) {
> +        event_notifier_set_handler(&vdev->config_notifier,
> +                                   virtio_config_read);
> +    } else {
> +        event_notifier_set_handler(&vdev->config_notifier, NULL);
> +    }
> +}
> +
>   EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
>   {
>       return &vq->guest_notifier;
> @@ -3591,6 +3614,11 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
>       return &vq->host_notifier;
>   }
>   
> +EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev)
> +{
> +    return &vdev->config_notifier;
> +
> +}
>   void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
>   {
>       vq->host_notifier_enabled = enabled;
> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> index 4a8bc75415..75bbc1a4fa 100644
> --- a/include/hw/virtio/vhost.h
> +++ b/include/hw/virtio/vhost.h
> @@ -91,6 +91,8 @@ struct vhost_dev {
>       QLIST_HEAD(, vhost_iommu) iommu_list;
>       IOMMUNotifier n;
>       const VhostDevConfigOps *config_ops;
> +    EventNotifier masked_config_notifier;
> +
>   };
>   
>   struct vhost_net {
> @@ -108,6 +110,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
>   void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
>   int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
>   void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
> +bool vhost_config_pending(struct vhost_dev *hdev, int n);
> +void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask);
>   
>   /* Test and clear masked event pending status.
>    * Should be called after unmask to avoid losing events.
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index b7ece7a6a8..24e5bfae61 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -67,6 +67,19 @@ typedef struct VirtQueueElement
>   
>   #define VIRTIO_NO_VECTOR 0xffff
>   
> +enum virtio_vector_type {
> +    VIRTIO_VQ_VECTOR,
> +    VIRTIO_CONFIG_VECTOR,
> +    VIRTIO_VECTOR_UNKNOWN,
> +};


Actually, this is the type of the notifier rather than the vector? And 
VIRTIO_VECTOR_UNKNOWN is not used in this patch.

So let's split the patch into three:

1) introduce the notifier type (see the sketch below)
2) introduce the code to do the config interrupt via the guest notifier
3) vhost support for the config interrupt
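
A minimal sketch of the rename in 1); the names below are illustrative 
assumptions, not from the patch:

    /* describes which notifier an operation refers to */
    enum virtio_notifier_type {
        VIRTIO_VQ_NOTIFIER,
        VIRTIO_CONFIG_NOTIFIER,
    };

    /* class callbacks would then take the notifier type explicitly: */
    void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask,
                                enum virtio_notifier_type type);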


> +
> +enum virtio_config_status {
> +    VIRTIO_CONFIG_SUPPORT,
> +    VIRTIO_CONFIG_WORK,
> +    VIRTIO_CONFIG_STOP,
> +    VIRTIO_CONFIG_STATUS_UNKNOWN,


Any reason for this extra state? I think we can tell whether the config 
interrupt is being used through a transport-specific method?
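
For virtio-pci, such a check could look roughly like this (a sketch; the 
helper name is hypothetical, the calls are existing helpers):

    static bool virtio_pci_config_irq_enabled(VirtIOPCIProxy *proxy)
    {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

        /* the config interrupt is in use once MSI-X is enabled and the
         * guest has programmed a valid config vector */
        return msix_enabled(&proxy->pci_dev) &&
               vdev->config_vector != VIRTIO_NO_VECTOR;
    }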

Thanks


> +};
> +
>   #define TYPE_VIRTIO_DEVICE "virtio-device"
>   OBJECT_DECLARE_TYPE(VirtIODevice, VirtioDeviceClass, VIRTIO_DEVICE)
>   
> @@ -108,6 +121,8 @@ struct VirtIODevice
>       bool use_guest_notifier_mask;
>       AddressSpace *dma_as;
>       QLIST_HEAD(, VirtQueue) *vector_queues;
> +    EventNotifier config_notifier;
> +    enum virtio_config_status use_config_notifier;
>   };
>   
>   struct VirtioDeviceClass {
> @@ -138,13 +153,13 @@ struct VirtioDeviceClass {
>        * If backend does not support masking,
>        * must check in frontend instead.
>        */
> -    bool (*guest_notifier_pending)(VirtIODevice *vdev, int n);
> +    bool (*guest_notifier_pending)(VirtIODevice *vdev, int n, int type);
>       /* Mask/unmask events from this vq. Any events reported
>        * while masked will become pending.
>        * If backend does not support masking,
>        * must mask in frontend instead.
>        */
> -    void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
> +    void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask, int type);
>       int (*start_ioeventfd)(VirtIODevice *vdev);
>       void (*stop_ioeventfd)(VirtIODevice *vdev);
>       /* Saving and loading of a device; trying to deprecate save/load
> @@ -310,11 +325,15 @@ uint16_t virtio_get_queue_index(VirtQueue *vq);
>   EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
>   void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
>                                                   bool with_irqfd);
> +void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
> +                                                bool with_irqfd);
> +
>   int virtio_device_start_ioeventfd(VirtIODevice *vdev);
>   int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
>   void virtio_device_release_ioeventfd(VirtIODevice *vdev);
>   bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
>   EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
> +EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev);
>   void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
>   void virtio_queue_host_notifier_read(EventNotifier *n);
>   void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
> diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> index 172b0051d8..0d38c97c94 100644
> --- a/include/net/vhost_net.h
> +++ b/include/net/vhost_net.h
> @@ -36,6 +36,9 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
>   bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
>   void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
>                                 int idx, bool mask);
> +bool vhost_net_config_pending(VHostNetState *net, int n);
> +void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
> +                              bool mask);
>   int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
>   VHostNetState *get_vhost_net(NetClientState *nc);
>   
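
Condensed from the virtio-net hunk of this patch (quoted later in this 
thread), the per-device pattern is: dispatch on the notifier type, keep 
VIRTIO_VQ_VECTOR on the existing virtqueue helpers, and route 
VIRTIO_CONFIG_VECTOR to the new config helpers (nc, vdev, idx, mask and 
type come from the guest_notifier_mask callback's context):

    if (type == VIRTIO_VQ_VECTOR) {
        vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
    }
    if (type == VIRTIO_CONFIG_VECTOR) {
        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
    }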



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v4 2/4] vhost-vdpa: add callback function for configure interrupt
  2021-03-23  1:56 ` [PATCH v4 2/4] vhost-vdpa: add callback function for " Cindy Lu
@ 2021-03-24  6:33   ` Jason Wang
  2021-03-25  7:17     ` Cindy Lu
  0 siblings, 1 reply; 13+ messages in thread
From: Jason Wang @ 2021-03-24  6:33 UTC (permalink / raw)
  To: qemu-devel


On 2021/3/23 9:56 AM, Cindy Lu wrote:
> Add a callback function for the configure interrupt.
> Set the notifier's fd in the kernel driver when vdpa starts,
> and set it to -1 when vdpa stops, so the kernel releases
> the related resources.
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
>   hw/virtio/trace-events            |  2 ++
>   hw/virtio/vhost-vdpa.c            | 40 +++++++++++++++++++++++++++++--
>   include/hw/virtio/vhost-backend.h |  4 ++++
>   3 files changed, 44 insertions(+), 2 deletions(-)
>
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 2060a144a2..6710835b46 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -52,6 +52,8 @@ vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index:
>   vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
>   vhost_vdpa_set_owner(void *dev) "dev: %p"
>   vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
> +vhost_vdpa_set_config_call(void *dev, int *fd) "dev: %p fd: %p"
> +
>   
>   # virtio.c
>   virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 01d2101d09..bde32eefe7 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -467,20 +467,47 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
>       }
>       return ret;
>    }
> -
> +static void vhost_vdpa_config_notify_start(struct vhost_dev *dev,
> +                                struct VirtIODevice *vdev, bool start)
> +{
> +    int fd = 0;
> +    int r = 0;
> +    if (!(dev->features & (0x1ULL << VIRTIO_NET_F_STATUS))) {
> +        return;
> +    }
> +    if (start) {
> +        fd = event_notifier_get_fd(&vdev->config_notifier);
> +        r = dev->vhost_ops->vhost_set_config_call(dev, &fd);
> +        /* set the fd callback in the vdpa driver */
> +        if (!r) {
> +            vdev->use_config_notifier = VIRTIO_CONFIG_WORK;
> +            event_notifier_set(&vdev->config_notifier);


Is this a workaround for vdpa devices without config interrupt support? I 
wonder how much we would gain from this.
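
If so, a narrower version could gate the initial kick on the backend 
capability, roughly (a sketch; backend_has_config_irq is a hypothetical 
flag the patch does not define):

    /* only force an initial config notification when the backend
     * cannot raise config interrupts by itself */
    if (!backend_has_config_irq) {
        event_notifier_set(&vdev->config_notifier);
    }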


> +            info_report("vhost_vdpa_config_notify start!");


This is debug code, I guess.


> +        }
> +    } else {
> +        fd = -1;
> +        vdev->use_config_notifier = VIRTIO_CONFIG_STOP;


Looks like duplicated state with vhost_dev->started?
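
For illustration, the stop path could derive this from the existing 
field instead of adding a new enum value (a sketch of the suggestion, 
not code from the patch; fd and dev are in scope in the helper):

    /* after vhost_dev_stop(), dev->started is already false, which
     * encodes the same thing as VIRTIO_CONFIG_STOP */
    if (!dev->started) {
        fd = -1;
        dev->vhost_ops->vhost_set_config_call(dev, &fd);
    }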


> +        r = dev->vhost_ops->vhost_set_config_call(dev, &fd);
> +    }
> +    return;
> +}
>   static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
>   {
>       struct vhost_vdpa *v = dev->opaque;
>       trace_vhost_vdpa_dev_start(dev, started);
> +    VirtIODevice *vdev = dev->vdev;
> +
>       if (started) {
>           uint8_t status = 0;
>           memory_listener_register(&v->listener, &address_space_memory);
>           vhost_vdpa_set_vring_ready(dev);
>           vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
>           vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
> -
> +        /* set the configure interrupt callback */
> +        vhost_vdpa_config_notify_start(dev, vdev, true);
>           return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
>       } else {
> +        vhost_vdpa_config_notify_start(dev, vdev, false);
>           vhost_vdpa_reset_device(dev);
>           vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
>                                      VIRTIO_CONFIG_S_DRIVER);
> @@ -546,6 +573,14 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
>       return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
>   }
>   
> +static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
> +                                       int *fd)
> +{
> +    trace_vhost_vdpa_set_config_call(dev, fd);
> +
> +    return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, fd);
> +}
> +
>   static int vhost_vdpa_get_features(struct vhost_dev *dev,
>                                        uint64_t *features)
>   {
> @@ -611,4 +646,5 @@ const VhostOps vdpa_ops = {
>           .vhost_get_device_id = vhost_vdpa_get_device_id,
>           .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
>           .vhost_force_iommu = vhost_vdpa_force_iommu,
> +        .vhost_set_config_call = vhost_vdpa_set_config_call,
>   };
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index 8a6f8e2a7a..1a2fee8994 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h


A separate patch, please.

Thanks


> @@ -125,6 +125,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
>   
>   typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
>   
> +typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
> +                                       int *fd);
> +
>   typedef struct VhostOps {
>       VhostBackendType backend_type;
>       vhost_backend_init vhost_backend_init;
> @@ -170,6 +173,7 @@ typedef struct VhostOps {
>       vhost_vq_get_addr_op  vhost_vq_get_addr;
>       vhost_get_device_id_op vhost_get_device_id;
>       vhost_force_iommu_op vhost_force_iommu;
> +    vhost_set_config_call_op vhost_set_config_call;
>   } VhostOps;
>   
>   extern const VhostOps user_ops;
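
For context, a condensed sketch of how the new op is driven from 
vhost_vdpa_dev_start() in the hunk above (started, dev and vdev come 
from that function): install the config notifier's fd on start, -1 on 
stop:

    int fd = started ? event_notifier_get_fd(&vdev->config_notifier) : -1;
    int r = dev->vhost_ops->vhost_set_config_call(dev, &fd);
    if (r < 0) {
        error_report("vhost_set_config_call failed");
    }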



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v4 4/4] virtio-pci: add support for configure interrupt
  2021-03-23  1:56 ` [PATCH v4 4/4] virtio-pci: " Cindy Lu
@ 2021-03-24  6:34   ` Jason Wang
  2021-03-25  6:07     ` Cindy Lu
  0 siblings, 1 reply; 13+ messages in thread
From: Jason Wang @ 2021-03-24  6:34 UTC (permalink / raw)
  To: Cindy Lu, mst, qemu-devel


On 2021/3/23 9:56 AM, Cindy Lu wrote:
> Add support for the configure interrupt: use kvm_irqfd_assign and set
> the gsi in the kernel. When the configure notifier is signaled via
> eventfd_signal by the host kernel, this finally injects an MSI-X
> interrupt into the guest.
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
>   hw/virtio/virtio-pci.c | 171 +++++++++++++++++++++++++++++++++--------
>   1 file changed, 137 insertions(+), 34 deletions(-)
>
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 36524a5728..b0c190caba 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -664,7 +664,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
>   }
>   
>   static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
> -                                        unsigned int queue_no,
>                                           unsigned int vector)


Let's use a separate patch for decoupling queue_no from those irqfd 
helpers.
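
The decoupling makes the helpers take the notifier directly, so the same 
helper can serve both virtqueue and config notifiers; roughly (a sketch 
mirroring the hunks below, with proxy, queue_no, vector and vdev from 
the caller's context):

    /* before: the helper resolved the notifier from queue_no itself */
    ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);

    /* after: the caller resolves and passes the EventNotifier */
    EventNotifier *n = virtio_queue_get_guest_notifier(
                           virtio_get_queue(vdev, queue_no));
    ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);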

Thanks


>   {
>       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> @@ -691,23 +690,17 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
>   }
>   
>   static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
> -                                 unsigned int queue_no,
> +                                 EventNotifier *n,
>                                    unsigned int vector)
>   {
>       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> -    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> -    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> -    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
>       return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
>   }
>   
>   static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
> -                                      unsigned int queue_no,
> +                                      EventNotifier *n,
>                                         unsigned int vector)
>   {
> -    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> -    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> -    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
>       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
>       int ret;
>   
> @@ -722,7 +715,8 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
>       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
>       unsigned int vector;
>       int ret, queue_no;
> -
> +    VirtQueue *vq;
> +    EventNotifier *n;
>       for (queue_no = 0; queue_no < nvqs; queue_no++) {
>           if (!virtio_queue_get_num(vdev, queue_no)) {
>               break;
> @@ -731,7 +725,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
>           if (vector >= msix_nr_vectors_allocated(dev)) {
>               continue;
>           }
> -        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
> +        ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
>           if (ret < 0) {
>               goto undo;
>           }
> @@ -739,7 +733,9 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
>            * Otherwise, delay until unmasked in the frontend.
>            */
>           if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> +            vq = virtio_get_queue(vdev, queue_no);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
>               if (ret < 0) {
>                   kvm_virtio_pci_vq_vector_release(proxy, vector);
>                   goto undo;
> @@ -755,13 +751,69 @@ undo:
>               continue;
>           }
>           if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> +            vq = virtio_get_queue(vdev, queue_no);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            kvm_virtio_pci_irqfd_release(proxy, n, vector);
>           }
>           kvm_virtio_pci_vq_vector_release(proxy, vector);
>       }
>       return ret;
>   }
>   
> +static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
> +{
> +
> +    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> +    unsigned int vector;
> +    int ret;
> +    EventNotifier *n = virtio_get_config_notifier(vdev);
> +
> +    vector = vdev->config_vector;
> +    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
> +    if (ret < 0) {
> +        goto undo;
> +    }
> +    ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> +    if (ret < 0) {
> +        goto undo;
> +    }
> +    return 0;
> +undo:
> +    kvm_virtio_pci_irqfd_release(proxy, n, vector);
> +    return ret;
> +}
> +static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
> +{
> +    PCIDevice *dev = &proxy->pci_dev;
> +    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> +    unsigned int vector;
> +    EventNotifier *n = virtio_get_config_notifier(vdev);
> +    vector = vdev->config_vector;
> +    if (vector >= msix_nr_vectors_allocated(dev)) {
> +        return;
> +    }
> +    kvm_virtio_pci_irqfd_release(proxy, n, vector);
> +    kvm_virtio_pci_vq_vector_release(proxy, vector);
> +}
> +
> +static int virtio_pci_set_config_notifier(DeviceState *d, bool assign)
> +{
> +    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
> +    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> +    EventNotifier *notifier = virtio_get_config_notifier(vdev);
> +    int r = 0;
> +    if (assign) {
> +        r = event_notifier_init(notifier, 0);
> +        virtio_set_config_notifier_fd_handler(vdev, true, true);
> +        kvm_virtio_pci_vector_config_use(proxy);
> +    } else {
> +        virtio_set_config_notifier_fd_handler(vdev, false, true);
> +        kvm_virtio_pci_vector_config_release(proxy);
> +        event_notifier_cleanup(notifier);
> +    }
> +    return r;
> +}
> +
>   static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
>   {
>       PCIDevice *dev = &proxy->pci_dev;
> @@ -769,7 +821,8 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
>       unsigned int vector;
>       int queue_no;
>       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -
> +    VirtQueue *vq;
> +    EventNotifier *n;
>       for (queue_no = 0; queue_no < nvqs; queue_no++) {
>           if (!virtio_queue_get_num(vdev, queue_no)) {
>               break;
> @@ -782,7 +835,9 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
>            * Otherwise, it was cleaned when masked in the frontend.
>            */
>           if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> +            vq = virtio_get_queue(vdev, queue_no);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            kvm_virtio_pci_irqfd_release(proxy, n, vector);
>           }
>           kvm_virtio_pci_vq_vector_release(proxy, vector);
>       }
> @@ -791,15 +846,14 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
>   static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
>                                          unsigned int queue_no,
>                                          unsigned int vector,
> -                                       MSIMessage msg)
> +                                       MSIMessage msg,
> +                                       int type,
> +                                       EventNotifier *n)
>   {
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
>       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> -    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
>       VirtIOIRQFD *irqfd;
>       int ret = 0;
> -
>       if (proxy->vector_irqfd) {
>           irqfd = &proxy->vector_irqfd[vector];
>           if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
> @@ -816,32 +870,33 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
>        * Otherwise, set it up now.
>        */
>       if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -        k->guest_notifier_mask(vdev, queue_no, false);
> +        k->guest_notifier_mask(vdev, queue_no, false, type);
>           /* Test after unmasking to avoid losing events. */
>           if (k->guest_notifier_pending &&
> -            k->guest_notifier_pending(vdev, queue_no)) {
> +            k->guest_notifier_pending(vdev, queue_no, type)) {
>               event_notifier_set(n);
>           }
>       } else {
> -        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> +        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
>       }
>       return ret;
>   }
>   
>   static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
>                                                unsigned int queue_no,
> -                                             unsigned int vector)
> +                                             unsigned int vector,
> +                                             int type,
> +                                             EventNotifier *n)
>   {
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
>       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> -
>       /* If guest supports masking, keep irqfd but mask it.
>        * Otherwise, clean it up now.
>        */
>       if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> -        k->guest_notifier_mask(vdev, queue_no, true);
> +        k->guest_notifier_mask(vdev, queue_no, true, type);
>       } else {
> -        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> +        kvm_virtio_pci_irqfd_release(proxy, n, vector);
>       }
>   }
>   
> @@ -851,15 +906,26 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
>       VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
>       VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
> +    EventNotifier *n;
>       int ret, index, unmasked = 0;
>   
> +    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> +        n = virtio_get_config_notifier(vdev);
> +        ret = virtio_pci_vq_vector_unmask(proxy, 0, vector, msg,
> +                    VIRTIO_CONFIG_VECTOR, n);
> +        if (ret < 0) {
> +            goto config_undo;
> +        }
> +    }
>       while (vq) {
>           index = virtio_get_queue_index(vq);
>           if (!virtio_queue_get_num(vdev, index)) {
>               break;
>           }
>           if (index < proxy->nvqs_with_notifiers) {
> -            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg,
> +                        VIRTIO_VQ_VECTOR, n);
>               if (ret < 0) {
>                   goto undo;
>               }
> @@ -875,11 +941,17 @@ undo:
>       while (vq && unmasked >= 0) {
>           index = virtio_get_queue_index(vq);
>           if (index < proxy->nvqs_with_notifiers) {
> -            virtio_pci_vq_vector_mask(proxy, index, vector);
> +            n = virtio_queue_get_guest_notifier(vq);
> +            virtio_pci_vq_vector_mask(proxy, index, vector,
> +                 VIRTIO_VQ_VECTOR, n);
>               --unmasked;
>           }
>           vq = virtio_vector_next_queue(vq);
>       }
> +config_undo:
> +    n = virtio_get_config_notifier(vdev);
> +    virtio_pci_vq_vector_mask(proxy, 0, vector,
> +                              VIRTIO_CONFIG_VECTOR, n);
>       return ret;
>   }
>   
> @@ -888,18 +960,26 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
>       VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
>       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
>       VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
> +    EventNotifier *n;
>       int index;
>   
> +    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> +        n = virtio_get_config_notifier(vdev);
> +        virtio_pci_vq_vector_mask(proxy, 0, vector, VIRTIO_CONFIG_VECTOR, n);
> +    }
>       while (vq) {
>           index = virtio_get_queue_index(vq);
> +        n = virtio_queue_get_guest_notifier(vq);
>           if (!virtio_queue_get_num(vdev, index)) {
>               break;
>           }
>           if (index < proxy->nvqs_with_notifiers) {
> -            virtio_pci_vq_vector_mask(proxy, index, vector);
> +            virtio_pci_vq_vector_mask(proxy, index, vector,
> +                VIRTIO_VQ_VECTOR, n);
>           }
>           vq = virtio_vector_next_queue(vq);
>       }
> +
>   }
>   
>   static void virtio_pci_vector_poll(PCIDevice *dev,
> @@ -918,6 +998,7 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
>           if (!virtio_queue_get_num(vdev, queue_no)) {
>               break;
>           }
> +
>           vector = virtio_queue_vector(vdev, queue_no);
>           if (vector < vector_start || vector >= vector_end ||
>               !msix_is_masked(dev, vector)) {
> @@ -926,7 +1007,22 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
>           vq = virtio_get_queue(vdev, queue_no);
>           notifier = virtio_queue_get_guest_notifier(vq);
>           if (k->guest_notifier_pending) {
> -            if (k->guest_notifier_pending(vdev, queue_no)) {
> +            if (k->guest_notifier_pending(vdev, queue_no, VIRTIO_VQ_VECTOR)) {
> +                msix_set_pending(dev, vector);
> +            }
> +        } else if (event_notifier_test_and_clear(notifier)) {
> +            msix_set_pending(dev, vector);
> +        }
> +    }
> +    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> +        vector = vdev->config_vector;
> +        notifier = virtio_get_config_notifier(vdev);
> +        if (vector < vector_start || vector >= vector_end ||
> +            !msix_is_masked(dev, vector)) {
> +            return;
> +        }
> +        if (k->guest_notifier_pending) {
> +            if (k->guest_notifier_pending(vdev, 0, VIRTIO_CONFIG_VECTOR)) {
>                   msix_set_pending(dev, vector);
>               }
>           } else if (event_notifier_test_and_clear(notifier)) {
> @@ -958,7 +1054,7 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
>       if (!msix_enabled(&proxy->pci_dev) &&
>           vdev->use_guest_notifier_mask &&
>           vdc->guest_notifier_mask) {
> -        vdc->guest_notifier_mask(vdev, n, !assign);
> +        vdc->guest_notifier_mask(vdev, n, !assign, VIRTIO_VQ_VECTOR);
>       }
>   
>       return 0;
> @@ -1008,7 +1104,6 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
>               goto assign_error;
>           }
>       }
> -
>       /* Must set vector notifier after guest notifier has been assigned */
>       if ((with_irqfd || k->guest_notifier_mask) && assign) {
>           if (with_irqfd) {
> @@ -1020,6 +1115,12 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
>                   goto assign_error;
>               }
>           }
> +        if (vdev->use_config_notifier == VIRTIO_CONFIG_SUPPORT) {
> +            r = virtio_pci_set_config_notifier(d, assign);
> +            if (r < 0) {
> +                goto config_error;
> +            }
> +        }
>           r = msix_set_vector_notifiers(&proxy->pci_dev,
>                                         virtio_pci_vector_unmask,
>                                         virtio_pci_vector_mask,
> @@ -1028,7 +1129,6 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
>               goto notifiers_error;
>           }
>       }
> -
>       return 0;
>   
>   notifiers_error:
> @@ -1036,13 +1136,16 @@ notifiers_error:
>           assert(assign);
>           kvm_virtio_pci_vector_release(proxy, nvqs);
>       }
> -
> +config_error:
> +    /* We get here on config assignment failure. Recover by releasing the config vector. */
> +    kvm_virtio_pci_vector_config_release(proxy);
>   assign_error:
>       /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
>       assert(assign);
>       while (--n >= 0) {
>           virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
>       }
> +
>       return r;
>   }
>   



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v4 4/4] virtio-pci: add support for configure interrupt
  2021-03-24  6:34   ` Jason Wang
@ 2021-03-25  6:07     ` Cindy Lu
  0 siblings, 0 replies; 13+ messages in thread
From: Cindy Lu @ 2021-03-25  6:07 UTC (permalink / raw)
  To: Jason Wang; +Cc: QEMU Developers, Michael Tsirkin

On Wed, Mar 24, 2021 at 2:34 PM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2021/3/23 9:56 AM, Cindy Lu wrote:
> > Add support for the configure interrupt: use kvm_irqfd_assign and set
> > the gsi in the kernel. When the configure notifier is signaled via
> > eventfd_signal by the host kernel, this finally injects an MSI-X
> > interrupt into the guest.
> >
> > Signed-off-by: Cindy Lu <lulu@redhat.com>
> > ---
> >   hw/virtio/virtio-pci.c | 171 +++++++++++++++++++++++++++++++++--------
> >   1 file changed, 137 insertions(+), 34 deletions(-)
> >
> > diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> > index 36524a5728..b0c190caba 100644
> > --- a/hw/virtio/virtio-pci.c
> > +++ b/hw/virtio/virtio-pci.c
> > @@ -664,7 +664,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
> >   }
> >
> >   static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
> > -                                        unsigned int queue_no,
> >                                           unsigned int vector)
>
>
> Let's use a separate patch for decoupling queue_no from those irqfd
> helpers.
>
> Thanks
>
Sure, I will split this.
>
> >   {
> >       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> > @@ -691,23 +690,17 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
> >   }
> >
> >   static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
> > -                                 unsigned int queue_no,
> > +                                 EventNotifier *n,
> >                                    unsigned int vector)
> >   {
> >       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> > -    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> > -    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> > -    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
> >       return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
> >   }
> >
> >   static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
> > -                                      unsigned int queue_no,
> > +                                      EventNotifier *n ,
> >                                         unsigned int vector)
> >   {
> > -    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> > -    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> > -    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
> >       VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
> >       int ret;
> >
> > @@ -722,7 +715,8 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> >       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> >       unsigned int vector;
> >       int ret, queue_no;
> > -
> > +    VirtQueue *vq;
> > +    EventNotifier *n;
> >       for (queue_no = 0; queue_no < nvqs; queue_no++) {
> >           if (!virtio_queue_get_num(vdev, queue_no)) {
> >               break;
> > @@ -731,7 +725,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> >           if (vector >= msix_nr_vectors_allocated(dev)) {
> >               continue;
> >           }
> > -        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
> > +        ret = kvm_virtio_pci_vq_vector_use(proxy,  vector);
> >           if (ret < 0) {
> >               goto undo;
> >           }
> > @@ -739,7 +733,9 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> >            * Otherwise, delay until unmasked in the frontend.
> >            */
> >           if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > -            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> > +            vq = virtio_get_queue(vdev, queue_no);
> > +            n = virtio_queue_get_guest_notifier(vq);
> > +            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> >               if (ret < 0) {
> >                   kvm_virtio_pci_vq_vector_release(proxy, vector);
> >                   goto undo;
> > @@ -755,13 +751,69 @@ undo:
> >               continue;
> >           }
> >           if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > -            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> > +            vq = virtio_get_queue(vdev, queue_no);
> > +            n = virtio_queue_get_guest_notifier(vq);
> > +            kvm_virtio_pci_irqfd_release(proxy, n, vector);
> >           }
> >           kvm_virtio_pci_vq_vector_release(proxy, vector);
> >       }
> >       return ret;
> >   }
> >
> > +static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
> > +{
> > +
> > +    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> > +    unsigned int vector;
> > +    int ret;
> > +    EventNotifier *n = virtio_get_config_notifier(vdev);
> > +
> > +    vector = vdev->config_vector ;
> > +    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
> > +    if (ret < 0) {
> > +        goto undo;
> > +    }
> > +    ret = kvm_virtio_pci_irqfd_use(proxy,  n, vector);
> > +    if (ret < 0) {
> > +        goto undo;
> > +    }
> > +    return 0;
> > +undo:
> > +    kvm_virtio_pci_irqfd_release(proxy, n, vector);
> > +    return ret;
> > +}
> > +static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
> > +{
> > +    PCIDevice *dev = &proxy->pci_dev;
> > +    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> > +    unsigned int vector;
> > +    EventNotifier *n = virtio_get_config_notifier(vdev);
> > +    vector = vdev->config_vector ;
> > +    if (vector >= msix_nr_vectors_allocated(dev)) {
> > +        return;
> > +    }
> > +    kvm_virtio_pci_irqfd_release(proxy, n, vector);
> > +    kvm_virtio_pci_vq_vector_release(proxy, vector);
> > +}
> > +
> > +static int virtio_pci_set_config_notifier(DeviceState *d,  bool assign)
> > +{
> > +    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
> > +    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> > +    EventNotifier *notifier = virtio_get_config_notifier(vdev);
> > +    int r = 0;
> > +    if (assign) {
> > +        r = event_notifier_init(notifier, 0);
> > +        virtio_set_config_notifier_fd_handler(vdev, true, true);
> > +        kvm_virtio_pci_vector_config_use(proxy);
> > +    } else {
> > +        virtio_set_config_notifier_fd_handler(vdev, false, true);
> > +        kvm_virtio_pci_vector_config_release(proxy);
> > +        event_notifier_cleanup(notifier);
> > +    }
> > +    return r;
> > +}
> > +
> >   static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> >   {
> >       PCIDevice *dev = &proxy->pci_dev;
> > @@ -769,7 +821,8 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> >       unsigned int vector;
> >       int queue_no;
> >       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> > -
> > +    VirtQueue *vq;
> > +    EventNotifier *n;
> >       for (queue_no = 0; queue_no < nvqs; queue_no++) {
> >           if (!virtio_queue_get_num(vdev, queue_no)) {
> >               break;
> > @@ -782,7 +835,9 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> >            * Otherwise, it was cleaned when masked in the frontend.
> >            */
> >           if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > -            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> > +            vq = virtio_get_queue(vdev, queue_no);
> > +            n = virtio_queue_get_guest_notifier(vq);
> > +            kvm_virtio_pci_irqfd_release(proxy, n, vector);
> >           }
> >           kvm_virtio_pci_vq_vector_release(proxy, vector);
> >       }
> > @@ -791,15 +846,14 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> >   static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
> >                                          unsigned int queue_no,
> >                                          unsigned int vector,
> > -                                       MSIMessage msg)
> > +                                       MSIMessage msg,
> > +                                       int type,
> > +                                        EventNotifier *n)
> >   {
> >       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> >       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> > -    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
> > -    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
> >       VirtIOIRQFD *irqfd;
> >       int ret = 0;
> > -
> >       if (proxy->vector_irqfd) {
> >           irqfd = &proxy->vector_irqfd[vector];
> >           if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
> > @@ -816,32 +870,33 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
> >        * Otherwise, set it up now.
> >        */
> >       if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > -        k->guest_notifier_mask(vdev, queue_no, false);
> > +        k->guest_notifier_mask(vdev, queue_no, false, type);
> >           /* Test after unmasking to avoid losing events. */
> >           if (k->guest_notifier_pending &&
> > -            k->guest_notifier_pending(vdev, queue_no)) {
> > +            k->guest_notifier_pending(vdev, queue_no, type)) {
> >               event_notifier_set(n);
> >           }
> >       } else {
> > -        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> > +        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
> >       }
> >       return ret;
> >   }
> >
> >   static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
> >                                                unsigned int queue_no,
> > -                                             unsigned int vector)
> > +                                             unsigned int vector,
> > +                                             int type,
> > +                                             EventNotifier *n)
> >   {
> >       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> >       VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> > -
> >       /* If guest supports masking, keep irqfd but mask it.
> >        * Otherwise, clean it up now.
> >        */
> >       if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
> > -        k->guest_notifier_mask(vdev, queue_no, true);
> > +        k->guest_notifier_mask(vdev, queue_no, true, type);
> >       } else {
> > -        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> > +        kvm_virtio_pci_irqfd_release(proxy, n, vector);
> >       }
> >   }
> >
> > @@ -851,15 +906,26 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
> >       VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
> >       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> >       VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
> > +    EventNotifier *n;
> >       int ret, index, unmasked = 0;
> >
> > +   if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> > +        n = virtio_get_config_notifier(vdev);
> > +        ret = virtio_pci_vq_vector_unmask(proxy, 0, vector, msg,
> > +                    VIRTIO_CONFIG_VECTOR, n);
> > +        if (ret < 0) {
> > +            goto config_undo;
> > +       }
> > +    }
> >       while (vq) {
> >           index = virtio_get_queue_index(vq);
> >           if (!virtio_queue_get_num(vdev, index)) {
> >               break;
> >           }
> >           if (index < proxy->nvqs_with_notifiers) {
> > -            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
> > +            n = virtio_queue_get_guest_notifier(vq);
> > +            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg,
> > +                        VIRTIO_VQ_VECTOR, n);
> >               if (ret < 0) {
> >                   goto undo;
> >               }
> > @@ -875,11 +941,17 @@ undo:
> >       while (vq && unmasked >= 0) {
> >           index = virtio_get_queue_index(vq);
> >           if (index < proxy->nvqs_with_notifiers) {
> > -            virtio_pci_vq_vector_mask(proxy, index, vector);
> > +            n = virtio_queue_get_guest_notifier(vq);
> > +            virtio_pci_vq_vector_mask(proxy, index, vector,
> > +                 VIRTIO_VQ_VECTOR, n);
> >               --unmasked;
> >           }
> >           vq = virtio_vector_next_queue(vq);
> >       }
> > + config_undo:
> > +            n = virtio_get_config_notifier(vdev);
> > +            virtio_pci_vq_vector_mask(proxy, 0, vector,
> > +                VIRTIO_CONFIG_VECTOR, n);
> >       return ret;
> >   }
> >
> > @@ -888,18 +960,26 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
> >       VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
> >       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> >       VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
> > +    EventNotifier *n;
> >       int index;
> >
> > +   if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> > +        n = virtio_get_config_notifier(vdev);
> > +        virtio_pci_vq_vector_mask(proxy, 0, vector, VIRTIO_CONFIG_VECTOR, n);
> > +   }
> >       while (vq) {
> >           index = virtio_get_queue_index(vq);
> > +         n = virtio_queue_get_guest_notifier(vq);
> >           if (!virtio_queue_get_num(vdev, index)) {
> >               break;
> >           }
> >           if (index < proxy->nvqs_with_notifiers) {
> > -            virtio_pci_vq_vector_mask(proxy, index, vector);
> > +            virtio_pci_vq_vector_mask(proxy, index, vector,
> > +                VIRTIO_VQ_VECTOR, n);
> >           }
> >           vq = virtio_vector_next_queue(vq);
> >       }
> > +
> >   }
> >
> >   static void virtio_pci_vector_poll(PCIDevice *dev,
> > @@ -918,6 +998,7 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
> >           if (!virtio_queue_get_num(vdev, queue_no)) {
> >               break;
> >           }
> > +
> >           vector = virtio_queue_vector(vdev, queue_no);
> >           if (vector < vector_start || vector >= vector_end ||
> >               !msix_is_masked(dev, vector)) {
> > @@ -926,7 +1007,22 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
> >           vq = virtio_get_queue(vdev, queue_no);
> >           notifier = virtio_queue_get_guest_notifier(vq);
> >           if (k->guest_notifier_pending) {
> > -            if (k->guest_notifier_pending(vdev, queue_no)) {
> > +            if (k->guest_notifier_pending(vdev, queue_no, VIRTIO_VQ_VECTOR)) {
> > +                msix_set_pending(dev, vector);
> > +            }
> > +        } else if (event_notifier_test_and_clear(notifier)) {
> > +            msix_set_pending(dev, vector);
> > +        }
> > +    }
> > +   if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> > +        vector = vdev->config_vector;
> > +        notifier = virtio_get_config_notifier(vdev);
> > +        if (vector < vector_start || vector >= vector_end ||
> > +            !msix_is_masked(dev, vector)) {
> > +            return;
> > +        }
> > +        if (k->guest_notifier_pending) {
> > +            if (k->guest_notifier_pending(vdev, 0,  VIRTIO_CONFIG_VECTOR)) {
> >                   msix_set_pending(dev, vector);
> >               }
> >           } else if (event_notifier_test_and_clear(notifier)) {
> > @@ -958,7 +1054,7 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
> >       if (!msix_enabled(&proxy->pci_dev) &&
> >           vdev->use_guest_notifier_mask &&
> >           vdc->guest_notifier_mask) {
> > -        vdc->guest_notifier_mask(vdev, n, !assign);
> > +        vdc->guest_notifier_mask(vdev, n, !assign, VIRTIO_VQ_VECTOR);
> >       }
> >
> >       return 0;
> > @@ -1008,7 +1104,6 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
> >               goto assign_error;
> >           }
> >       }
> > -
> >       /* Must set vector notifier after guest notifier has been assigned */
> >       if ((with_irqfd || k->guest_notifier_mask) && assign) {
> >           if (with_irqfd) {
> > @@ -1020,6 +1115,12 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
> >                   goto assign_error;
> >               }
> >           }
> > +        if (vdev->use_config_notifier == VIRTIO_CONFIG_SUPPORT) {
> > +            r = virtio_pci_set_config_notifier(d, assign);
> > +            if (r < 0) {
> > +                goto config_error;
> > +         }
> > +     }
> >           r = msix_set_vector_notifiers(&proxy->pci_dev,
> >                                         virtio_pci_vector_unmask,
> >                                         virtio_pci_vector_mask,
> > @@ -1028,7 +1129,6 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
> >               goto notifiers_error;
> >           }
> >       }
> > -
> >       return 0;
> >
> >   notifiers_error:
> > @@ -1036,13 +1136,16 @@ notifiers_error:
> >           assert(assign);
> >           kvm_virtio_pci_vector_release(proxy, nvqs);
> >       }
> > -
> > + config_error:
> > +    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
> > +        kvm_virtio_pci_vector_config_release(proxy);
> >   assign_error:
> >       /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
> >       assert(assign);
> >       while (--n >= 0) {
> >           virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
> >       }
> > +
> >       return r;
> >   }
> >
>



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v4 1/4] virtio:add support in configure interrupt
  2021-03-24  6:30   ` Jason Wang
@ 2021-03-25  7:15     ` Cindy Lu
  2021-03-26  8:29       ` Jason Wang
  0 siblings, 1 reply; 13+ messages in thread
From: Cindy Lu @ 2021-03-25  7:15 UTC (permalink / raw)
  To: Jason Wang; +Cc: QEMU Developers, Michael Tsirkin

On Wed, Mar 24, 2021 at 2:30 PM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2021/3/23 9:56 AM, Cindy Lu wrote:
> > Add configure notifier support in virtio and the related drivers.
> > When the peer is vhost-vdpa, set up the configure interrupt in
> > vhost_net_start and release the resources in vhost_net_stop.
>
>
> So this patch doesn't compile, please fix.
>
>
> >
> > Signed-off-by: Cindy Lu <lulu@redhat.com>
> > ---
> >   hw/display/vhost-user-gpu.c    | 14 +++++++----
> >   hw/net/vhost_net.c             | 16 +++++++++++--
> >   hw/net/virtio-net.c            | 24 +++++++++++++++----
> >   hw/s390x/virtio-ccw.c          |  6 ++---
> >   hw/virtio/vhost-user-fs.c      | 12 ++++++----
> >   hw/virtio/vhost-vsock-common.c | 12 ++++++----
> >   hw/virtio/vhost.c              | 44 ++++++++++++++++++++++++++++++++--
> >   hw/virtio/virtio-crypto.c      | 13 ++++++----
> >   hw/virtio/virtio.c             | 28 ++++++++++++++++++++++
> >   include/hw/virtio/vhost.h      |  4 ++++
> >   include/hw/virtio/virtio.h     | 23 ++++++++++++++++--
> >   include/net/vhost_net.h        |  3 +++
> >   12 files changed, 169 insertions(+), 30 deletions(-)
> >
> > diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
> > index 51f1747c4a..959ad115b6 100644
> > --- a/hw/display/vhost-user-gpu.c
> > +++ b/hw/display/vhost-user-gpu.c
> > @@ -487,18 +487,24 @@ vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
> >   }
> >
> >   static bool
> > -vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
> > +vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx,
> > +                                            int type)
> >   {
> >       VhostUserGPU *g = VHOST_USER_GPU(vdev);
> > -
> > +    if (type != VIRTIO_VQ_VECTOR) {
> > +        return false;
> > +    }
> >       return vhost_virtqueue_pending(&g->vhost->dev, idx);
> >   }
> >
> >   static void
> > -vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
> > +vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask,
> > +                                        int type)
> >   {
> >       VhostUserGPU *g = VHOST_USER_GPU(vdev);
> > -
> > +    if (type != VIRTIO_VQ_VECTOR) {
> > +        return;
> > +    }
> >       vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
> >   }
> >
> > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> > index 24d555e764..2ef8cc608e 100644
> > --- a/hw/net/vhost_net.c
> > +++ b/hw/net/vhost_net.c
> > @@ -339,7 +339,9 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
> >               dev->use_guest_notifier_mask = false;
> >           }
> >        }
> > -
> > +    if (ncs->peer && ncs->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
> > +        dev->use_config_notifier = VIRTIO_CONFIG_SUPPORT;
> > +    }
> >       r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
> >       if (r < 0) {
> >           error_report("Error binding guest notifier: %d", -r);
> > @@ -391,7 +393,6 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
> >       for (i = 0; i < total_queues; i++) {
> >           vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
> >       }
> > -
> >       r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
> >       if (r < 0) {
> >           fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
> > @@ -426,6 +427,17 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
> >       vhost_virtqueue_mask(&net->dev, dev, idx, mask);
> >   }
> >
> > +bool vhost_net_config_pending(VHostNetState *net, int idx)
> > +{
> > +    return vhost_config_pending(&net->dev, idx);
> > +}
> > +
> > +void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
> > +                              bool mask)
> > +{
> > +    vhost_config_mask(&net->dev, dev, mask);
> > +}
> > +
> >   VHostNetState *get_vhost_net(NetClientState *nc)
> >   {
> >       VHostNetState *vhost_net = 0;
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index 9179013ac4..b84427fe99 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -3055,22 +3055,36 @@ static NetClientInfo net_virtio_info = {
> >       .announce = virtio_net_announce,
> >   };
> >
> > -static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
> > +
> > +static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx,
> > +                                int type)
> >   {
> >       VirtIONet *n = VIRTIO_NET(vdev);
> >       NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
> >       assert(n->vhost_started);
> > -    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> > +
> > +    if (type == VIRTIO_VQ_VECTOR) {
> > +        return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> > +    }
> > +    if (type == VIRTIO_CONFIG_VECTOR) {
> > +        return vhost_net_config_pending(get_vhost_net(nc->peer), idx);
> > +    }
> > +    return false;
> >   }
> >
> >   static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > -                                           bool mask)
> > +                                           bool mask, int type)
> >   {
> >       VirtIONet *n = VIRTIO_NET(vdev);
> >       NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
> >       assert(n->vhost_started);
> > -    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
> > -                             vdev, idx, mask);
> > +
> > +    if (type == VIRTIO_VQ_VECTOR) {
> > +        vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
> > +    }
> > +    if (type == VIRTIO_CONFIG_VECTOR) {
> > +        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
> > +    }
> >   }
> >
> >   static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
> > diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
> > index 4582e94ae7..234f749548 100644
> > --- a/hw/s390x/virtio-ccw.c
> > +++ b/hw/s390x/virtio-ccw.c
> > @@ -1003,16 +1003,16 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
> >            * need to manually trigger any guest masking callbacks here.
> >            */
> >           if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
> > -            k->guest_notifier_mask(vdev, n, false);
> > +            k->guest_notifier_mask(vdev, n, false, VIRTIO_VQ_VECTOR);
> >           }
> >           /* get lost events and re-inject */
> >           if (k->guest_notifier_pending &&
> > -            k->guest_notifier_pending(vdev, n)) {
> > +            k->guest_notifier_pending(vdev, n, VIRTIO_VQ_VECTOR)) {
> >               event_notifier_set(notifier);
> >           }
> >       } else {
> >           if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
> > -            k->guest_notifier_mask(vdev, n, true);
> > +            k->guest_notifier_mask(vdev, n, true, VIRTIO_VQ_VECTOR);
> >           }
> >           if (with_irqfd) {
> >               virtio_ccw_remove_irqfd(dev, n);
> > diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
> > index 1bc5d03a00..22358767f1 100644
> > --- a/hw/virtio/vhost-user-fs.c
> > +++ b/hw/virtio/vhost-user-fs.c
> > @@ -143,17 +143,21 @@ static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> >   }
> >
> >   static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > -                                            bool mask)
> > +                                            bool mask, int type)
> >   {
> >       VHostUserFS *fs = VHOST_USER_FS(vdev);
> > -
> > +    if (type != VIRTIO_VQ_VECTOR) {
> > +        return;
> > +    }
> >       vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
> >   }
> >
> > -static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
> > +static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx, int type)
> >   {
> >       VHostUserFS *fs = VHOST_USER_FS(vdev);
> > -
> > +    if (type != VIRTIO_VQ_VECTOR) {
> > +        return false;
> > +    }
> >       return vhost_virtqueue_pending(&fs->vhost_dev, idx);
> >   }
> >
> > diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> > index 5b2ebf3496..92c133c54c 100644
> > --- a/hw/virtio/vhost-vsock-common.c
> > +++ b/hw/virtio/vhost-vsock-common.c
> > @@ -97,18 +97,22 @@ static void vhost_vsock_common_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> >   }
> >
> >   static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > -                                            bool mask)
> > +                                            bool mask, int type)
> >   {
> >       VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> > -
> > +    if (type != VIRTIO_VQ_VECTOR) {
> > +        return;
> > +    }
> >       vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, mask);
> >   }
> >
> >   static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev,
> > -                                               int idx)
> > +                                               int idx, int type)
> >   {
> >       VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> > -
> > +    if (type != VIRTIO_VQ_VECTOR) {
> > +        return false;
> > +    }
> >       return vhost_virtqueue_pending(&vvc->vhost_dev, idx);
> >   }
> >
> > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> > index 614ccc2bcb..02e4d37dc0 100644
> > --- a/hw/virtio/vhost.c
> > +++ b/hw/virtio/vhost.c
> > @@ -1255,8 +1255,8 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
> >       if (r < 0) {
> >           return r;
> >       }
> > -
> >       file.fd = event_notifier_get_fd(&vq->masked_notifier);
> > +
> >       r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
> >       if (r) {
> >           VHOST_OPS_DEBUG("vhost_set_vring_call failed");
> > @@ -1313,6 +1313,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >               goto fail;
> >           }
> >       }
> > +    r = event_notifier_init(&hdev->masked_config_notifier, 0);
> > +    if (r < 0) {
> > +        return r;
> > +    }
> >
> >       if (busyloop_timeout) {
> >           for (i = 0; i < hdev->nvqs; ++i) {
> > @@ -1405,6 +1409,8 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
> >       for (i = 0; i < hdev->nvqs; ++i) {
> >           vhost_virtqueue_cleanup(hdev->vqs + i);
> >       }
> > +    event_notifier_cleanup(&hdev->masked_config_notifier);
> > +
> >       if (hdev->mem) {
> >           /* those are only safe after successful init */
> >           memory_listener_unregister(&hdev->memory_listener);
> > @@ -1498,6 +1504,10 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
> >       return event_notifier_test_and_clear(&vq->masked_notifier);
> >   }
> >
> > +bool vhost_config_pending(struct vhost_dev *hdev, int n)
> > +{
> > +    return event_notifier_test_and_clear(&hdev->masked_config_notifier);
> > +}
> >   /* Mask/unmask events from this vq. */
> >   void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
> >                            bool mask)
> > @@ -1523,6 +1533,31 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
> >       }
> >   }
> >
> > +/* Mask/unmask events from this config. */
> > +void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
> > +                         bool mask)
> > +{
> > +    int fd;
> > +    int r;
> > +    EventNotifier *masked_config_notifier = &hdev->masked_config_notifier;
> > +    EventNotifier *config_notifier = &vdev->config_notifier;
> > +    if (vdev->use_config_notifier != VIRTIO_CONFIG_WORK) {
> > +        return;
> > +    }
> > +    /* should only be called after backend is connected */
> > +    assert(hdev->vhost_ops);
> > +    if (mask) {
> > +        assert(vdev->use_guest_notifier_mask);
> > +        fd = event_notifier_get_fd(masked_config_notifier);
> > +    } else {
> > +        fd = event_notifier_get_fd(config_notifier);
> > +    }
> > +    r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
> > +    if (r < 0) {
> > +        error_report("vhost_set_config_call failed");
> > +    }
> > +}
> > +
> >   uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
> >                               uint64_t features)
> >   {
> > @@ -1732,7 +1767,12 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
> >               goto fail_vq;
> >           }
> >       }
> > -
> > +    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
> > +        event_notifier_test_and_clear(&hdev->masked_config_notifier);
> > +        if (!vdev->use_guest_notifier_mask) {
> > +            vhost_config_mask(hdev, vdev, false);
> > +        }
> > +    }
> >       if (hdev->log_enabled) {
> >           uint64_t log_base;
> >
> > diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
> > index 54f9bbb789..ab7958465c 100644
> > --- a/hw/virtio/virtio-crypto.c
> > +++ b/hw/virtio/virtio-crypto.c
> > @@ -941,23 +941,28 @@ static void virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status)
> >   }
> >
> >   static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > -                                           bool mask)
> > +                                           bool mask, int type)
> >   {
> >       VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
> >       int queue = virtio_crypto_vq2q(idx);
> >
> >       assert(vcrypto->vhost_started);
> > -
> > +    if (type != VIRTIO_VQ_VECTOR) {
> > +        return;
> > +    }
> >       cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask);
> >   }
> >
> > -static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
> > +static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx,
> > +                                           int type)
> >   {
> >       VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
> >       int queue = virtio_crypto_vq2q(idx);
> >
> >       assert(vcrypto->vhost_started);
> > -
> > +    if (type != VIRTIO_VQ_VECTOR) {
> > +        return false;
> > +    }
> >       return cryptodev_vhost_virtqueue_pending(vdev, queue, idx);
> >   }
> >
> > diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> > index ceb58fda6c..7d1a68c87a 100644
> > --- a/hw/virtio/virtio.c
> > +++ b/hw/virtio/virtio.c
> > @@ -3278,6 +3278,8 @@ void virtio_init(VirtIODevice *vdev, const char *name,
> >               virtio_vmstate_change, vdev);
> >       vdev->device_endian = virtio_default_endian();
> >       vdev->use_guest_notifier_mask = true;
> > +    vdev->use_config_notifier = VIRTIO_CONFIG_STATUS_UNKNOWN;
> > +
> >   }
> >
> >   /*
> > @@ -3502,6 +3504,16 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
> >       }
> >   }
> >
> > +static void virtio_config_read(EventNotifier *n)
> > +{
> > +    VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
> > +    if (vdev->use_config_notifier != VIRTIO_CONFIG_WORK) {
> > +        return;
> > +    }
> > +    if (event_notifier_test_and_clear(n)) {
> > +        virtio_notify_config(vdev);
> > +    }
> > +}
> >   void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
> >                                                   bool with_irqfd)
> >   {
> > @@ -3518,6 +3530,17 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
> >       }
> >   }
> >
> > +void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
> > +                                                bool with_irqfd)
> > +{
> > +    if (assign && !with_irqfd) {
> > +        event_notifier_set_handler(&vdev->config_notifier,
> > +                                   virtio_config_read);
> > +    } else {
> > +        event_notifier_set_handler(&vdev->config_notifier, NULL);
> > +    }
> > +}
> > +
> >   EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
> >   {
> >       return &vq->guest_notifier;
> > @@ -3591,6 +3614,11 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
> >       return &vq->host_notifier;
> >   }
> >
> > +EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev)
> > +{
> > +    return &vdev->config_notifier;
> > +
> > +}
> >   void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
> >   {
> >       vq->host_notifier_enabled = enabled;
> > diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> > index 4a8bc75415..75bbc1a4fa 100644
> > --- a/include/hw/virtio/vhost.h
> > +++ b/include/hw/virtio/vhost.h
> > @@ -91,6 +91,8 @@ struct vhost_dev {
> >       QLIST_HEAD(, vhost_iommu) iommu_list;
> >       IOMMUNotifier n;
> >       const VhostDevConfigOps *config_ops;
> > +    EventNotifier masked_config_notifier;
> > +
> >   };
> >
> >   struct vhost_net {
> > @@ -108,6 +110,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
> >   void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
> >   int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
> >   void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
> > +bool vhost_config_pending(struct vhost_dev *hdev, int n);
> > +void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask);
> >
> >   /* Test and clear masked event pending status.
> >    * Should be called after unmask to avoid losing events.
> > diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> > index b7ece7a6a8..24e5bfae61 100644
> > --- a/include/hw/virtio/virtio.h
> > +++ b/include/hw/virtio/virtio.h
> > @@ -67,6 +67,19 @@ typedef struct VirtQueueElement
> >
> >   #define VIRTIO_NO_VECTOR 0xffff
> >
> > +enum virtio_vector_type {
> > +    VIRTIO_VQ_VECTOR,
> > +    VIRTIO_CONFIG_VECTOR,
> > +    VIRTIO_VECTOR_UNKNOWN,
> > +};
>
>
> Actually, it's the type of notifier instead of the vector? And
> VIRTIO_VECTOR_UNKNOWN is not used in this patch.
>
> So let's split the patch into three.
>
> 1) First patch to introduce the type of notifier
> 2) introduce the code to do config interrupt via guest notifier.
> 3) vhost support for config interrupt
>
>
Sure, I will split this patch.
> > +
> > +enum virtio_config_status {
> > +    VIRTIO_CONFIG_SUPPORT,
> > +    VIRTIO_CONFIG_WORK,
> > +    VIRTIO_CONFIG_STOP,
> > +    VIRTIO_CONFIG_STATUS_UNKNOWN,
>
>
> Any reason for this extra state? I think we can know whether the config
> interrupt is being used through a
>
> Thanks
>
The problem is that I need to split the backend devices into 3 types:
1) normal devices
2) vdpa devices that support config interrupt, where the config
   interrupt is active now
3) vdpa devices that do not support config interrupt
So I added this state. It is initialized in the vdpa/vhost modules,
and QEMU can check it to decide which behavior to take in the virtio
bus and other modules. Maybe I need to change its name to make it
clearer.
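
To make this concrete, here is a standalone sketch of the dispatch I
have in mind (just an illustration, not the patch code: fake_vdev and
notify_config() are made-up stand-ins, only the enum values mirror
the patch):

#include <stdio.h>

enum virtio_config_status {
    VIRTIO_CONFIG_SUPPORT,        /* backend supports config interrupt */
    VIRTIO_CONFIG_WORK,           /* config interrupt is active now */
    VIRTIO_CONFIG_STOP,           /* backend stopped, fall back */
    VIRTIO_CONFIG_STATUS_UNKNOWN, /* normal device, no vdpa backend */
};

struct fake_vdev {
    enum virtio_config_status use_config_notifier;
};

/* deliver a config change via the notifier only when it is active */
static void notify_config(struct fake_vdev *vdev)
{
    if (vdev->use_config_notifier == VIRTIO_CONFIG_WORK) {
        printf("signal the config notifier (eventfd/irqfd path)\n");
    } else {
        printf("legacy path: inject the config interrupt directly\n");
    }
}

int main(void)
{
    struct fake_vdev normal  = { VIRTIO_CONFIG_STATUS_UNKNOWN };
    struct fake_vdev active  = { VIRTIO_CONFIG_WORK };
    struct fake_vdev stopped = { VIRTIO_CONFIG_STOP };

    notify_config(&normal);  /* legacy path */
    notify_config(&active);  /* notifier path */
    notify_config(&stopped); /* legacy path */
    return 0;
}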

Thanks
Cindy
>
> > +};
> > +
> >   #define TYPE_VIRTIO_DEVICE "virtio-device"
> >   OBJECT_DECLARE_TYPE(VirtIODevice, VirtioDeviceClass, VIRTIO_DEVICE)
> >
> > @@ -108,6 +121,8 @@ struct VirtIODevice
> >       bool use_guest_notifier_mask;
> >       AddressSpace *dma_as;
> >       QLIST_HEAD(, VirtQueue) *vector_queues;
> > +    EventNotifier config_notifier;
> > +    enum virtio_config_status use_config_notifier;
> >   };
> >
> >   struct VirtioDeviceClass {
> > @@ -138,13 +153,13 @@ struct VirtioDeviceClass {
> >        * If backend does not support masking,
> >        * must check in frontend instead.
> >        */
> > -    bool (*guest_notifier_pending)(VirtIODevice *vdev, int n);
> > +    bool (*guest_notifier_pending)(VirtIODevice *vdev, int n, int type);
> >       /* Mask/unmask events from this vq. Any events reported
> >        * while masked will become pending.
> >        * If backend does not support masking,
> >        * must mask in frontend instead.
> >        */
> > -    void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
> > +    void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask, int type);
> >       int (*start_ioeventfd)(VirtIODevice *vdev);
> >       void (*stop_ioeventfd)(VirtIODevice *vdev);
> >       /* Saving and loading of a device; trying to deprecate save/load
> > @@ -310,11 +325,15 @@ uint16_t virtio_get_queue_index(VirtQueue *vq);
> >   EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
> >   void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
> >                                                   bool with_irqfd);
> > +void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
> > +                                                bool with_irqfd);
> > +
> >   int virtio_device_start_ioeventfd(VirtIODevice *vdev);
> >   int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
> >   void virtio_device_release_ioeventfd(VirtIODevice *vdev);
> >   bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
> >   EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
> > +EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev);
> >   void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
> >   void virtio_queue_host_notifier_read(EventNotifier *n);
> >   void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
> > diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> > index 172b0051d8..0d38c97c94 100644
> > --- a/include/net/vhost_net.h
> > +++ b/include/net/vhost_net.h
> > @@ -36,6 +36,9 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
> >   bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
> >   void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
> >                                 int idx, bool mask);
> > +bool vhost_net_config_pending(VHostNetState *net, int n);
> > +void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
> > +                              bool mask);
> >   int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
> >   VHostNetState *get_vhost_net(NetClientState *nc);
> >
>



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v4 2/4] vhost-vdpa: add callback function for configure interrupt
  2021-03-24  6:33   ` Jason Wang
@ 2021-03-25  7:17     ` Cindy Lu
  0 siblings, 0 replies; 13+ messages in thread
From: Cindy Lu @ 2021-03-25  7:17 UTC (permalink / raw)
  To: Jason Wang; +Cc: QEMU Developers

On Wed, Mar 24, 2021 at 2:35 PM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2021/3/23 9:56 AM, Cindy Lu wrote:
> > Add a callback function for the configure interrupt.
> > Set the notifier's fd in the kernel driver when vdpa starts,
> > and set it to -1 when vdpa stops, so the kernel will release
> > the related resources.
> >
> > Signed-off-by: Cindy Lu <lulu@redhat.com>
> > ---
> >   hw/virtio/trace-events            |  2 ++
> >   hw/virtio/vhost-vdpa.c            | 40 +++++++++++++++++++++++++++++--
> >   include/hw/virtio/vhost-backend.h |  4 ++++
> >   3 files changed, 44 insertions(+), 2 deletions(-)
> >
> > diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> > index 2060a144a2..6710835b46 100644
> > --- a/hw/virtio/trace-events
> > +++ b/hw/virtio/trace-events
> > @@ -52,6 +52,8 @@ vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index:
> >   vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
> >   vhost_vdpa_set_owner(void *dev) "dev: %p"
> >   vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
> > +vhost_vdpa_set_config_call(void *dev, int *fd) "dev: %p fd: %p"
> > +
> >
> >   # virtio.c
> >   virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
> > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > index 01d2101d09..bde32eefe7 100644
> > --- a/hw/virtio/vhost-vdpa.c
> > +++ b/hw/virtio/vhost-vdpa.c
> > @@ -467,20 +467,47 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
> >       }
> >       return ret;
> >    }
> > -
> > +static void vhost_vdpa_config_notify_start(struct vhost_dev *dev,
> > +                                struct VirtIODevice *vdev, bool start)
> > +{
> > +    int fd = 0;
> > +    int r = 0;
> > +    if (!(dev->features & (0x1ULL << VIRTIO_NET_F_STATUS))) {
> > +        return;
> > +    }
> > +    if (start) {
> > +        fd = event_notifier_get_fd(&vdev->config_notifier);
> > +        r = dev->vhost_ops->vhost_set_config_call(dev, &fd);
> > +        /* set the fd callback in the vdpa driver */
> > +        if (!r) {
> > +            vdev->use_config_notifier = VIRTIO_CONFIG_WORK;
> > +            event_notifier_set(&vdev->config_notifier);
>
>
> Is this a workaround for the vdpa device without config interrupt? I
> wonder how much we could gain from this.
>
This is a state that identifies whether the config notifier is working
now; the virtio bus checks it to choose between different behaviors.
I will rename it to make it clearer.
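
For reference, the start/stop handoff here boils down to the pattern
below (standalone sketch; backend_set_config_call() is a made-up
stand-in for the VHOST_VDPA_SET_CONFIG_CALL ioctl used in the patch):

#include <stdbool.h>
#include <stdio.h>

/* made-up stand-in for the VHOST_VDPA_SET_CONFIG_CALL ioctl */
static int backend_set_config_call(int *fd)
{
    printf("kernel driver told to signal fd %d\n", *fd);
    return 0;
}

static void config_notify_start(int event_fd, bool start)
{
    /* on start: hand the eventfd to the kernel driver;
     * on stop: pass -1 so the kernel releases the resource */
    int fd = start ? event_fd : -1;
    if (backend_set_config_call(&fd) < 0) {
        fprintf(stderr, "set_config_call failed\n");
    }
}

int main(void)
{
    config_notify_start(42, true);  /* vdpa start */
    config_notify_start(42, false); /* vdpa stop */
    return 0;
}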

>
> > +            info_report("vhost_vdpa_config_notify start!");
>
>
> This is a debug code I guess.
>
Sure, I will remove this.
>
> > +        }
> > +    } else {
> > +        fd = -1;
> > +        vdev->use_config_notifier = VIRTIO_CONFIG_STOP;
>
>
> Looks like a duplicated state with vhost_dev->started?
>
>
> > +        r = dev->vhost_ops->vhost_set_config_call(dev, &fd);
> > +    }
> > +    return;
> > +}
> >   static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> >   {
> >       struct vhost_vdpa *v = dev->opaque;
> >       trace_vhost_vdpa_dev_start(dev, started);
> > +    VirtIODevice *vdev = dev->vdev;
> > +
> >       if (started) {
> >           uint8_t status = 0;
> >           memory_listener_register(&v->listener, &address_space_memory);
> >           vhost_vdpa_set_vring_ready(dev);
> >           vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
> >           vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
> > -
> > +        /* set the configure interrupt callback */
> > +        vhost_vdpa_config_notify_start(dev, vdev, true);
> >           return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
> >       } else {
> > +        vhost_vdpa_config_notify_start(dev, vdev, false);
> >           vhost_vdpa_reset_device(dev);
> >           vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> >                                      VIRTIO_CONFIG_S_DRIVER);
> > @@ -546,6 +573,14 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
> >       return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
> >   }
> >
> > +static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
> > +                                       int *fd)
> > +{
> > +    trace_vhost_vdpa_set_config_call(dev, fd);
> > +
> > +    return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, fd);
> > +}
> > +
> >   static int vhost_vdpa_get_features(struct vhost_dev *dev,
> >                                        uint64_t *features)
> >   {
> > @@ -611,4 +646,5 @@ const VhostOps vdpa_ops = {
> >           .vhost_get_device_id = vhost_vdpa_get_device_id,
> >           .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
> >           .vhost_force_iommu = vhost_vdpa_force_iommu,
> > +        .vhost_set_config_call = vhost_vdpa_set_config_call,
> >   };
> > diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> > index 8a6f8e2a7a..1a2fee8994 100644
> > --- a/include/hw/virtio/vhost-backend.h
> > +++ b/include/hw/virtio/vhost-backend.h
>
>
> A separated patch please.
>
> Thanks
>
Sure, I will split this.
>
> > @@ -125,6 +125,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
> >
> >   typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
> >
> > +typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
> > +                                       int *fd);
> > +
> >   typedef struct VhostOps {
> >       VhostBackendType backend_type;
> >       vhost_backend_init vhost_backend_init;
> > @@ -170,6 +173,7 @@ typedef struct VhostOps {
> >       vhost_vq_get_addr_op  vhost_vq_get_addr;
> >       vhost_get_device_id_op vhost_get_device_id;
> >       vhost_force_iommu_op vhost_force_iommu;
> > +    vhost_set_config_call_op vhost_set_config_call;
> >   } VhostOps;
> >
> >   extern const VhostOps user_ops;
>
>



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v4 1/4] virtio:add support in configure interrupt
  2021-03-25  7:15     ` Cindy Lu
@ 2021-03-26  8:29       ` Jason Wang
  2021-03-29  6:05         ` Cindy Lu
  0 siblings, 1 reply; 13+ messages in thread
From: Jason Wang @ 2021-03-26  8:29 UTC (permalink / raw)
  To: Cindy Lu; +Cc: QEMU Developers, Michael Tsirkin



On 2021/3/25 3:15 PM, Cindy Lu wrote:
>>> +enum virtio_config_status {
>>> +    VIRTIO_CONFIG_SUPPORT,
>>> +    VIRTIO_CONFIG_WORK,
>>> +    VIRTIO_CONFIG_STOP,
>>> +    VIRTIO_CONFIG_STATUS_UNKNOWN,
>> Any reason for this extra state? I think we can know whether the config
>> interrupt is being used through a
>>
>> Thanks
>>
> The problem is that I need to split the backend devices into 3 types:
> 1) normal devices
> 2) vdpa devices that support config interrupt, where the config
>    interrupt is active now
> 3) vdpa devices that do not support config interrupt
> So I added this state. It is initialized in the vdpa/vhost modules,
> and QEMU can check it to decide which behavior to take in the virtio
> bus and other modules.


I wonder whether it's a must. We can set up guest notifiers
unconditionally, so if it's a vhost backend without config interrupt
support, such notifiers simply won't be used.
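
As a standalone illustration of why that is safe (plain eventfd(2)
here rather than QEMU's EventNotifier), a notifier that no backend
ever signals simply stays idle:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    /* create the config notifier unconditionally */
    int cfg_fd = eventfd(0, EFD_NONBLOCK);
    if (cfg_fd < 0) {
        perror("eventfd");
        return 1;
    }

    /* a backend with config interrupt support would be handed cfg_fd
     * here; one without support simply never learns about it */

    uint64_t cnt;
    if (read(cfg_fd, &cnt, sizeof(cnt)) < 0) {
        /* EAGAIN: never signaled -- the harmless idle case */
        printf("config notifier idle, nothing to do\n");
    }

    close(cfg_fd);
    return 0;
}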

Thanks


>   Maybe I need to change its name
> to make it clearer.
>
> Thanks
> Cindy





^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v4 1/4] virtio:add support in configure interrupt
  2021-03-26  8:29       ` Jason Wang
@ 2021-03-29  6:05         ` Cindy Lu
  0 siblings, 0 replies; 13+ messages in thread
From: Cindy Lu @ 2021-03-29  6:05 UTC (permalink / raw)
  To: Jason Wang; +Cc: QEMU Developers, Michael Tsirkin

On Fri, Mar 26, 2021 at 4:29 PM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2021/3/25 3:15 PM, Cindy Lu wrote:
>
> +enum virtio_config_status {
> +    VIRTIO_CONFIG_SUPPORT,
> +    VIRTIO_CONFIG_WORK,
> +    VIRTIO_CONFIG_STOP,
> +    VIRTIO_CONFIG_STATUS_UNKNOWN,
>
> Any reason for this extra state? I think we can know whether the config
> interrupt is being used through a
>
> Thanks
>
> The problem is that I need to split the backend devices into 3 types:
> 1) normal devices
> 2) vdpa devices that support config interrupt, where the config
>    interrupt is active now
> 3) vdpa devices that do not support config interrupt
> So I added this state. It is initialized in the vdpa/vhost modules,
> and QEMU can check it to decide which behavior to take in the virtio
> bus and other modules.
>
>
> I wonder whether it's a must. We can set up guest notifiers unconditionally, so if it's a vhost backend without config interrupt support, such notifiers simply won't be used.
>
> Thanks
>
Sure, that makes sense. I will post a new version.
>
>  Maybe I need to change its name
> to make it clearer.
>
> Thanks
> Cindy
>
>
>



^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2021-03-29  6:07 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-03-23  1:56 [PATCH v4 0/4] vhost-vdpa: add support for configure interrupt Cindy Lu
2021-03-23  1:56 ` [PATCH v4 1/4] virtio:add support in " Cindy Lu
2021-03-24  6:30   ` Jason Wang
2021-03-25  7:15     ` Cindy Lu
2021-03-26  8:29       ` Jason Wang
2021-03-29  6:05         ` Cindy Lu
2021-03-23  1:56 ` [PATCH v4 2/4] vhost-vdpa: add callback function for " Cindy Lu
2021-03-24  6:33   ` Jason Wang
2021-03-25  7:17     ` Cindy Lu
2021-03-23  1:56 ` [PATCH v4 3/4] virtio-mmio: add support " Cindy Lu
2021-03-23  1:56 ` [PATCH v4 4/4] virtio-pci: " Cindy Lu
2021-03-24  6:34   ` Jason Wang
2021-03-25  6:07     ` Cindy Lu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).