* [PATCH 1/3] vhost: robustify virtqueue access lock asserts
@ 2023-10-23  9:55 David Marchand
  2023-10-23  9:55 ` [PATCH 2/3] vhost: fix virtqueue access lock in datapath David Marchand
                   ` (4 more replies)
  0 siblings, 5 replies; 27+ messages in thread
From: David Marchand @ 2023-10-23  9:55 UTC (permalink / raw)
  To: dev; +Cc: Maxime Coquelin, Chenbo Xia

A simple comment in vhost_user_msg_handler() is not that robust.

Add a lock_all_qps property to message handlers so that their
implementation can add a build check and assert a vq is locked.
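
For illustration, a minimal sketch of how a handler is expected to use the
new macro (the handler and its message id below are hypothetical, they are
not part of this patch):

    /* Hypothetical handler, registered in VHOST_MESSAGE_HANDLERS with
     * lock_all_qps = true; the signature matches the callback typedef.
     */
    static int
    vhost_user_do_something(struct virtio_net **pdev,
            struct vhu_msg_context *ctx __rte_unused,
            int main_fd __rte_unused)
    {
            struct virtio_net *dev = *pdev;
            struct vhost_virtqueue *vq = dev->virtqueue[0];

            /* Build time: RTE_BUILD_BUG_ON() breaks the build if
             * VHOST_USER_DO_SOMETHING is not flagged lock_all_qps in the
             * handler table.
             * Run time: vq_assert_lock() checks the access_lock is held.
             */
            VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_DO_SOMETHING);

            /* ... code that relies on vq->access_lock being held ... */
            return RTE_VHOST_MSG_RESULT_OK;
    }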

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/vhost/vhost_user.c | 110 +++++++++++++++++++----------------------
 1 file changed, 51 insertions(+), 59 deletions(-)

diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 901a80bbaa..5bbdbd54d8 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -56,14 +56,24 @@
 #define INFLIGHT_ALIGNMENT	64
 #define INFLIGHT_VERSION	0x1
 
-typedef struct vhost_message_handler {
+typedef const struct vhost_message_handler {
 	const char *description;
 	int (*callback)(struct virtio_net **pdev, struct vhu_msg_context *ctx,
 		int main_fd);
 	bool accepts_fd;
+	bool lock_all_qps;
 } vhost_message_handler_t;
 static vhost_message_handler_t vhost_message_handlers[];
 
+/* vhost_user_msg_handler() locks all qps based on a handler's lock_all_qps.
+ * Later, a handler may need to ensure the vq has been locked (for example,
+ * when calling lock annotated helpers).
+ */
+#define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
+	RTE_BUILD_BUG_ON(!vhost_message_handlers[id].lock_all_qps); \
+	vq_assert_lock(dev, vq); \
+} while (0)
+
 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 
@@ -400,7 +410,7 @@ vhost_user_set_features(struct virtio_net **pdev,
 			cleanup_vq(vq, 1);
 			cleanup_vq_inflight(dev, vq);
 			/* vhost_user_lock_all_queue_pairs locked all qps */
-			vq_assert_lock(dev, vq);
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_FEATURES);
 			rte_rwlock_write_unlock(&vq->access_lock);
 			free_vq(dev, vq);
 		}
@@ -2224,7 +2234,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 	vq = dev->virtqueue[index];
 	if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 		/* vhost_user_lock_all_queue_pairs locked all qps */
-		vq_assert_lock(dev, vq);
+		VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ENABLE);
 		if (enable && vq->async && vq->async->pkts_inflight_n) {
 			VHOST_LOG_CONFIG(dev->ifname, ERR,
 				"failed to enable vring. Inflight packets must be completed first\n");
@@ -2829,41 +2839,43 @@ vhost_user_set_status(struct virtio_net **pdev,
 }
 
 #define VHOST_MESSAGE_HANDLERS \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false)
-
-#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd) \
-	[id] = { #id, handler, accepts_fd },
+VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, \
+	false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, \
+	false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false, false)
+
+#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd, lock_all_qps) \
+	[id] = { #id, handler, accepts_fd, lock_all_qps },
 static vhost_message_handler_t vhost_message_handlers[] = {
 	VHOST_MESSAGE_HANDLERS
 };
@@ -3131,31 +3143,11 @@ vhost_user_msg_handler(int vid, int fd)
 	 * inactive, so it is safe. Otherwise taking the access_lock
 	 * would cause a dead lock.
 	 */
-	switch (request) {
-	case VHOST_USER_SET_FEATURES:
-	case VHOST_USER_SET_PROTOCOL_FEATURES:
-	case VHOST_USER_SET_OWNER:
-	case VHOST_USER_SET_MEM_TABLE:
-	case VHOST_USER_SET_LOG_BASE:
-	case VHOST_USER_SET_LOG_FD:
-	case VHOST_USER_SET_VRING_NUM:
-	case VHOST_USER_SET_VRING_ADDR:
-	case VHOST_USER_SET_VRING_BASE:
-	case VHOST_USER_SET_VRING_KICK:
-	case VHOST_USER_SET_VRING_CALL:
-	case VHOST_USER_SET_VRING_ERR:
-	case VHOST_USER_SET_VRING_ENABLE:
-	case VHOST_USER_SEND_RARP:
-	case VHOST_USER_NET_SET_MTU:
-	case VHOST_USER_SET_BACKEND_REQ_FD:
+	if (msg_handler->lock_all_qps) {
 		if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 			vhost_user_lock_all_queue_pairs(dev);
 			unlock_required = 1;
 		}
-		break;
-	default:
-		break;
-
 	}
 
 	handled = false;
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH 2/3] vhost: fix virtqueue access lock in datapath
  2023-10-23  9:55 [PATCH 1/3] vhost: robustify virtqueue access lock asserts David Marchand
@ 2023-10-23  9:55 ` David Marchand
  2023-10-27  9:03   ` Eelco Chaudron
  2023-12-05  9:10   ` Maxime Coquelin
  2023-10-23  9:55 ` [PATCH 3/3] vhost: annotate virtqueue access checks David Marchand
                   ` (3 subsequent siblings)
  4 siblings, 2 replies; 27+ messages in thread
From: David Marchand @ 2023-10-23  9:55 UTC (permalink / raw)
  To: dev; +Cc: stable, Maxime Coquelin, Chenbo Xia, Eelco Chaudron

Now that a r/w lock is used, the access_ok field should only be updated
under a write lock.

Since the datapath code only takes a read lock on the virtqueue to check
access_ok, this lock must be released and a write lock taken before
calling vring_translate().
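
As an illustration of the pattern (a simplified sketch with stand-in names,
not the patch itself): rte_rwlock has no upgrade primitive, so a reader that
finds access_ok unset has to fully drop the read lock, take the write lock,
and re-check the flag before translating, since another thread may already
have done the work in between.

    #include <stdbool.h>
    #include <rte_rwlock.h>

    /* Simplified stand-in for the virtqueue state touched by this fix. */
    struct ring_state {
            rte_rwlock_t access_lock;
            bool access_ok;         /* only written under the write lock */
    };

    static void
    ensure_translated(struct ring_state *s)
    {
            rte_rwlock_read_lock(&s->access_lock);
            if (!s->access_ok) {
                    /* No in-place upgrade: release the read lock first. */
                    rte_rwlock_read_unlock(&s->access_lock);

                    rte_rwlock_write_lock(&s->access_lock);
                    /* Re-check: a writer may have translated meanwhile. */
                    if (!s->access_ok)
                            s->access_ok = true; /* actual translation here */
                    rte_rwlock_write_unlock(&s->access_lock);
                    return;
            }
            /* Fast path: rings already translated; the real code does its
             * work here while still holding the read lock.
             */
            rte_rwlock_read_unlock(&s->access_lock);
    }

In the patch, the datapath simply returns for the current burst after the
write-locked translation (out_no_unlock) rather than retrying under the
read lock.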

Fixes: 03f77d66d966 ("vhost: change virtqueue access lock to a read/write one")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/vhost/virtio_net.c | 60 +++++++++++++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 16 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 759a78e3e3..4116f79d4f 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1694,6 +1694,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
 	return pkt_idx;
 }
 
+static void
+virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	rte_rwlock_write_lock(&vq->access_lock);
+	vhost_user_iotlb_rd_lock(vq);
+	if (!vq->access_ok)
+		vring_translate(dev, vq);
+	vhost_user_iotlb_rd_unlock(vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
+}
+
 static __rte_always_inline uint32_t
 virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
@@ -1708,9 +1719,13 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}
 
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 	if (count == 0)
@@ -1729,6 +1744,7 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 out_access_unlock:
 	rte_rwlock_read_unlock(&vq->access_lock);
 
+out_no_unlock:
 	return nb_tx;
 }
 
@@ -2523,9 +2539,13 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}
 
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 	if (count == 0)
@@ -2546,6 +2566,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
 out_access_unlock:
 	rte_rwlock_write_unlock(&vq->access_lock);
 
+out_no_unlock:
 	return nb_tx;
 }
 
@@ -3576,11 +3597,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0)) {
-			count = 0;
-			goto out;
-		}
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}
 
 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
@@ -3641,6 +3664,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(rarp_mbuf != NULL))
 		count += 1;
 
+out_no_unlock:
 	return count;
 }
 
@@ -4190,11 +4214,14 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(vq->access_ok == 0))
-		if (unlikely(vring_translate(dev, vq) < 0)) {
-			count = 0;
-			goto out;
-		}
+	if (unlikely(vq->access_ok == 0)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		count = 0;
+		goto out_no_unlock;
+	}
 
 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
@@ -4260,5 +4287,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(rarp_mbuf != NULL))
 		count += 1;
 
+out_no_unlock:
 	return count;
 }
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH 3/3] vhost: annotate virtqueue access checks
  2023-10-23  9:55 [PATCH 1/3] vhost: robustify virtqueue access lock asserts David Marchand
  2023-10-23  9:55 ` [PATCH 2/3] vhost: fix virtqueue access lock in datapath David Marchand
@ 2023-10-23  9:55 ` David Marchand
  2023-10-27  9:04   ` Eelco Chaudron
  2023-10-27  8:59 ` [PATCH 1/3] vhost: robustify virtqueue access lock asserts Eelco Chaudron
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 27+ messages in thread
From: David Marchand @ 2023-10-23  9:55 UTC (permalink / raw)
  To: dev; +Cc: Maxime Coquelin, Chenbo Xia

Modifying vq->access_ok should be done with a write lock taken.
Annotate vring_translate() and vring_invalidate() and add missing locks.
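
For context, a short sketch of what these annotations enable when building
with clang's thread safety analysis (the structure and helpers below are
illustrative stand-ins, not the actual vhost code):

    #include <stdbool.h>
    #include <rte_lock_annotations.h>
    #include <rte_rwlock.h>

    struct ring_state {
            rte_rwlock_t access_lock;
            /* The analysis checks every access happens under some lock. */
            bool access_ok __rte_guarded_var;
    };

    /* Callers must hold the lock exclusively; checked at compile time. */
    static void
    ring_invalidate(struct ring_state *s)
            __rte_exclusive_locks_required(&s->access_lock)
    {
            s->access_ok = false;
    }

    static void
    buggy_caller(struct ring_state *s)
    {
            /* clang -Wthread-safety reports that calling 'ring_invalidate'
             * requires holding 's->access_lock' exclusively.
             */
            ring_invalidate(s);
    }

This is why the patch both annotates the helpers and adds the missing
access_lock acquisitions in the VDUSE and vhost-user paths.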

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/vhost/vduse.c      |  4 ++++
 lib/vhost/vhost.h      |  7 +++++--
 lib/vhost/vhost_user.c | 10 ++++++++++
 3 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c
index 080b58f7de..e198eeef64 100644
--- a/lib/vhost/vduse.c
+++ b/lib/vhost/vduse.c
@@ -196,6 +196,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 				vq->size * sizeof(struct batch_copy_elem),
 				RTE_CACHE_LINE_SIZE, 0);
 
+	rte_rwlock_write_lock(&vq->access_lock);
 	vhost_user_iotlb_rd_lock(vq);
 	if (vring_translate(dev, vq))
 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to translate vring %d addresses\n",
@@ -206,6 +207,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 				"Failed to disable guest notifications on vring %d\n",
 				index);
 	vhost_user_iotlb_rd_unlock(vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
 
 	vq_efd.index = index;
 	vq_efd.fd = vq->kickfd;
@@ -259,7 +261,9 @@ vduse_vring_cleanup(struct virtio_net *dev, unsigned int index)
 	close(vq->kickfd);
 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 
+	rte_rwlock_write_lock(&vq->access_lock);
 	vring_invalidate(dev, vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
 
 	rte_free(vq->batch_copy_elems);
 	vq->batch_copy_elems = NULL;
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 5fc9035a1f..70d18bdfbf 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -295,7 +295,8 @@ struct vhost_virtqueue {
 #define VIRTIO_UNINITIALIZED_EVENTFD	(-2)
 
 	bool			enabled;
-	bool			access_ok;
+	/* Protected by vq->access_lock */
+	bool			access_ok __rte_guarded_var;
 	bool			ready;
 
 	rte_rwlock_t		access_lock;
@@ -874,11 +875,13 @@ void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
 			uint64_t desc_addr, uint64_t desc_len)
 	__rte_shared_locks_required(&vq->iotlb_lock);
 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_exclusive_locks_required(&vq->access_lock)
 	__rte_shared_locks_required(&vq->iotlb_lock);
 uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		uint64_t log_addr)
 	__rte_shared_locks_required(&vq->iotlb_lock);
-void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_exclusive_locks_required(&vq->access_lock);
 
 static __rte_always_inline uint64_t
 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 5bbdbd54d8..cbe2222ef3 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -797,6 +797,8 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	dev = *pdev;
 	vq = *pvq;
 
+	vq_assert_lock(dev, vq);
+
 	if (vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG)) {
 		vq->log_guest_addr =
 			log_addr_to_gpa(dev, vq);
@@ -934,6 +936,9 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,
 	/* addr->index refers to the queue index. The txq 1, rxq is 0. */
 	vq = dev->virtqueue[ctx->msg.payload.addr.index];
 
+	/* vhost_user_lock_all_queue_pairs locked all qps */
+	VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ADDR);
+
 	access_ok = vq->access_ok;
 
 	/*
@@ -1446,6 +1451,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
 			continue;
 
 		if (vq->desc || vq->avail || vq->used) {
+			/* vhost_user_lock_all_queue_pairs locked all qps */
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_MEM_TABLE);
+
 			/*
 			 * If the memory table got updated, the ring addresses
 			 * need to be translated again as virtual addresses have
@@ -2208,7 +2216,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 
 	vhost_user_iotlb_flush_all(dev);
 
+	rte_rwlock_write_lock(&vq->access_lock);
 	vring_invalidate(dev, vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
 
 	return RTE_VHOST_MSG_RESULT_REPLY;
 }
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* Re: [PATCH 1/3] vhost: robustify virtqueue access lock asserts
  2023-10-23  9:55 [PATCH 1/3] vhost: robustify virtqueue access lock asserts David Marchand
  2023-10-23  9:55 ` [PATCH 2/3] vhost: fix virtqueue access lock in datapath David Marchand
  2023-10-23  9:55 ` [PATCH 3/3] vhost: annotate virtqueue access checks David Marchand
@ 2023-10-27  8:59 ` Eelco Chaudron
  2023-12-05  9:02 ` Maxime Coquelin
  2023-12-05  9:45 ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath David Marchand
  4 siblings, 0 replies; 27+ messages in thread
From: Eelco Chaudron @ 2023-10-27  8:59 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, Maxime Coquelin, Chenbo Xia



On 23 Oct 2023, at 11:55, David Marchand wrote:

> A simple comment in vhost_user_msg_handler() is not that robust.
>
> Add a lock_all_qps property to message handlers so that their
> implementation can add a build check and assert a vq is locked.
>
> Signed-off-by: David Marchand <david.marchand@redhat.com>

This change looks good to me.

Acked-by: Eelco Chaudron <echaudro@redhat.com>


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH 2/3] vhost: fix virtqueue access lock in datapath
  2023-10-23  9:55 ` [PATCH 2/3] vhost: fix virtqueue access lock in datapath David Marchand
@ 2023-10-27  9:03   ` Eelco Chaudron
  2023-10-27  9:22     ` David Marchand
  2023-12-05  9:10   ` Maxime Coquelin
  1 sibling, 1 reply; 27+ messages in thread
From: Eelco Chaudron @ 2023-10-27  9:03 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, stable, Maxime Coquelin, Chenbo Xia



On 23 Oct 2023, at 11:55, David Marchand wrote:

> Now that a r/w lock is used, the access_ok field should only be updated
> under a write lock.
>
> Since the datapath code only takes a read lock on the virtqueue to check
> access_ok, this lock must be released and a write lock taken before
> calling vring_translate().
>
> Fixes: 03f77d66d966 ("vhost: change virtqueue access lock to a read/write one")
> Cc: stable@dpdk.org
>
> Signed-off-by: David Marchand <david.marchand@redhat.com>

Only one question, but whatever the outcome is, the change looks good to me.

Acked-by: Eelco Chaudron <echaudro@redhat.com>

> ---
>  lib/vhost/virtio_net.c | 60 +++++++++++++++++++++++++++++++-----------
>  1 file changed, 44 insertions(+), 16 deletions(-)
>
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 759a78e3e3..4116f79d4f 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -1694,6 +1694,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
>  	return pkt_idx;
>  }
>
> +static void
> +virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
> +{

Would it be an idea to annotate this function that it needs to be called with the “read locks” (and that it will free them) to avoid the duplicate:

+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);

> +	rte_rwlock_write_lock(&vq->access_lock);
> +	vhost_user_iotlb_rd_lock(vq);
> +	if (!vq->access_ok)
> +		vring_translate(dev, vq);
> +	vhost_user_iotlb_rd_unlock(vq);
> +	rte_rwlock_write_unlock(&vq->access_lock);
> +}
> +
>  static __rte_always_inline uint32_t
>  virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  	struct rte_mbuf **pkts, uint32_t count)
> @@ -1708,9 +1719,13 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
>
>  	vhost_user_iotlb_rd_lock(vq);
>
> -	if (unlikely(!vq->access_ok))
> -		if (unlikely(vring_translate(dev, vq) < 0))
> -			goto out;
> +	if (unlikely(!vq->access_ok)) {
> +		vhost_user_iotlb_rd_unlock(vq);
> +		rte_rwlock_read_unlock(&vq->access_lock);
> +
> +		virtio_dev_vring_translate(dev, vq);
> +		goto out_no_unlock;
> +	}
>
>  	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
>  	if (count == 0)
> @@ -1729,6 +1744,7 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  out_access_unlock:
>  	rte_rwlock_read_unlock(&vq->access_lock);
>
> +out_no_unlock:
>  	return nb_tx;
>  }
>
> @@ -2523,9 +2539,13 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
>
>  	vhost_user_iotlb_rd_lock(vq);
>
> -	if (unlikely(!vq->access_ok))
> -		if (unlikely(vring_translate(dev, vq) < 0))
> -			goto out;
> +	if (unlikely(!vq->access_ok)) {
> +		vhost_user_iotlb_rd_unlock(vq);
> +		rte_rwlock_read_unlock(&vq->access_lock);
> +
> +		virtio_dev_vring_translate(dev, vq);
> +		goto out_no_unlock;
> +	}
>
>  	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
>  	if (count == 0)
> @@ -2546,6 +2566,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  out_access_unlock:
>  	rte_rwlock_write_unlock(&vq->access_lock);
>
> +out_no_unlock:
>  	return nb_tx;
>  }
>
> @@ -3576,11 +3597,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>
>  	vhost_user_iotlb_rd_lock(vq);
>
> -	if (unlikely(!vq->access_ok))
> -		if (unlikely(vring_translate(dev, vq) < 0)) {
> -			count = 0;
> -			goto out;
> -		}
> +	if (unlikely(!vq->access_ok)) {
> +		vhost_user_iotlb_rd_unlock(vq);
> +		rte_rwlock_read_unlock(&vq->access_lock);
> +
> +		virtio_dev_vring_translate(dev, vq);
> +		goto out_no_unlock;
> +	}
>
>  	/*
>  	 * Construct a RARP broadcast packet, and inject it to the "pkts"
> @@ -3641,6 +3664,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>  	if (unlikely(rarp_mbuf != NULL))
>  		count += 1;
>
> +out_no_unlock:
>  	return count;
>  }
>
> @@ -4190,11 +4214,14 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
>
>  	vhost_user_iotlb_rd_lock(vq);
>
> -	if (unlikely(vq->access_ok == 0))
> -		if (unlikely(vring_translate(dev, vq) < 0)) {
> -			count = 0;
> -			goto out;
> -		}
> +	if (unlikely(vq->access_ok == 0)) {
> +		vhost_user_iotlb_rd_unlock(vq);
> +		rte_rwlock_read_unlock(&vq->access_lock);
> +
> +		virtio_dev_vring_translate(dev, vq);
> +		count = 0;
> +		goto out_no_unlock;
> +	}
>
>  	/*
>  	 * Construct a RARP broadcast packet, and inject it to the "pkts"
> @@ -4260,5 +4287,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
>  	if (unlikely(rarp_mbuf != NULL))
>  		count += 1;
>
> +out_no_unlock:
>  	return count;
>  }
> -- 
> 2.41.0


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH 3/3] vhost: annotate virtqueue access checks
  2023-10-23  9:55 ` [PATCH 3/3] vhost: annotate virtqueue access checks David Marchand
@ 2023-10-27  9:04   ` Eelco Chaudron
  0 siblings, 0 replies; 27+ messages in thread
From: Eelco Chaudron @ 2023-10-27  9:04 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, Maxime Coquelin, Chenbo Xia



On 23 Oct 2023, at 11:55, David Marchand wrote:

> Modifying vq->access_ok should be done with a write lock taken.
> Annotate vring_translate() and vring_invalidate() and add missing locks.
>
> Signed-off-by: David Marchand <david.marchand@redhat.com>

The changes look good to me.

Acked-by: Eelco Chaudron <echaudro@redhat.com>


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH 2/3] vhost: fix virtqueue access lock in datapath
  2023-10-27  9:03   ` Eelco Chaudron
@ 2023-10-27  9:22     ` David Marchand
  2023-10-27 10:11       ` Eelco Chaudron
  0 siblings, 1 reply; 27+ messages in thread
From: David Marchand @ 2023-10-27  9:22 UTC (permalink / raw)
  To: Eelco Chaudron; +Cc: dev, stable, Maxime Coquelin, Chenbo Xia

On Fri, Oct 27, 2023 at 11:05 AM Eelco Chaudron <echaudro@redhat.com> wrote:
> > diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> > index 759a78e3e3..4116f79d4f 100644
> > --- a/lib/vhost/virtio_net.c
> > +++ b/lib/vhost/virtio_net.c
> > @@ -1694,6 +1694,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
> >       return pkt_idx;
> >  }
> >
> > +static void
> > +virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
> > +{
>
> Would it be an idea to annotate this function that it needs to be called with the “read locks” (and that it will free them) to avoid the duplicate:
>
> +               vhost_user_iotlb_rd_unlock(vq);
> +               rte_rwlock_read_unlock(&vq->access_lock);

The "unlock" annotations do not express read/write concerns for locks.
So that would make the code less readable and potentially hide some issues.

I prefer to keep as is, with clear calls to rd_lock / rd_unlock in
those functions.

>
> > +     rte_rwlock_write_lock(&vq->access_lock);
> > +     vhost_user_iotlb_rd_lock(vq);
> > +     if (!vq->access_ok)
> > +             vring_translate(dev, vq);
> > +     vhost_user_iotlb_rd_unlock(vq);
> > +     rte_rwlock_write_unlock(&vq->access_lock);
> > +}
> > +

-- 
David Marchand


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH 2/3] vhost: fix virtqueue access lock in datapath
  2023-10-27  9:22     ` David Marchand
@ 2023-10-27 10:11       ` Eelco Chaudron
  0 siblings, 0 replies; 27+ messages in thread
From: Eelco Chaudron @ 2023-10-27 10:11 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, stable, Maxime Coquelin, Chenbo Xia



On 27 Oct 2023, at 11:22, David Marchand wrote:

> On Fri, Oct 27, 2023 at 11:05 AM Eelco Chaudron <echaudro@redhat.com> wrote:
>>> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
>>> index 759a78e3e3..4116f79d4f 100644
>>> --- a/lib/vhost/virtio_net.c
>>> +++ b/lib/vhost/virtio_net.c
>>> @@ -1694,6 +1694,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
>>>       return pkt_idx;
>>>  }
>>>
>>> +static void
>>> +virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
>>> +{
>>
>> Would it be an idea to annotate this function that it needs to be called with the “read locks” (and that it will free them) to avoid the duplicate:
>>
>> +               vhost_user_iotlb_rd_unlock(vq);
>> +               rte_rwlock_read_unlock(&vq->access_lock);
>
> The "unlock" annotations do not express read/write concerns for locks.
> So that would make the code less readable and potentially hide some issues.
>
> I prefer to keep as is, with clear calls to rd_lock / rd_unlock in
> those functions.

ACK, keeping this as is fine by me.

Acked-by: Eelco Chaudron <echaudro@redhat.com>

>>> +     rte_rwlock_write_lock(&vq->access_lock);
>>> +     vhost_user_iotlb_rd_lock(vq);
>>> +     if (!vq->access_ok)
>>> +             vring_translate(dev, vq);
>>> +     vhost_user_iotlb_rd_unlock(vq);
>>> +     rte_rwlock_write_unlock(&vq->access_lock);
>>> +}
>>> +
>
> -- 
> David Marchand


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH 1/3] vhost: robustify virtqueue access lock asserts
  2023-10-23  9:55 [PATCH 1/3] vhost: robustify virtqueue access lock asserts David Marchand
                   ` (2 preceding siblings ...)
  2023-10-27  8:59 ` [PATCH 1/3] vhost: robustify virtqueue access lock asserts Eelco Chaudron
@ 2023-12-05  9:02 ` Maxime Coquelin
  2023-12-05  9:45 ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath David Marchand
  4 siblings, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2023-12-05  9:02 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: Chenbo Xia

s/robustify/enhance/ ?

On 10/23/23 11:55, David Marchand wrote:
> A simple comment in vhost_user_msg_handler() is not that robust.
> 
> Add a lock_all_qps property to message handlers so that their
> implementation can add a build check and assert a vq is locked.
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> ---
>   lib/vhost/vhost_user.c | 110 +++++++++++++++++++----------------------
>   1 file changed, 51 insertions(+), 59 deletions(-)
> 

I can reword the commit title while applying:

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH 2/3] vhost: fix virtqueue access lock in datapath
  2023-10-23  9:55 ` [PATCH 2/3] vhost: fix virtqueue access lock in datapath David Marchand
  2023-10-27  9:03   ` Eelco Chaudron
@ 2023-12-05  9:10   ` Maxime Coquelin
  1 sibling, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2023-12-05  9:10 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: stable, Chenbo Xia, Eelco Chaudron



On 10/23/23 11:55, David Marchand wrote:
> Now that a r/w lock is used, the access_ok field should only be updated
> under a write lock.
> 
> Since the datapath code only takes a read lock on the virtqueue to check
> access_ok, this lock must be released and a write lock taken before
> calling vring_translate().
> 
> Fixes: 03f77d66d966 ("vhost: change virtqueue access lock to a read/write one")
> Cc: stable@dpdk.org
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> ---
>   lib/vhost/virtio_net.c | 60 +++++++++++++++++++++++++++++++-----------
>   1 file changed, 44 insertions(+), 16 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v2 1/5] vhost: fix virtqueue access check in datapath
  2023-10-23  9:55 [PATCH 1/3] vhost: robustify virtqueue access lock asserts David Marchand
                   ` (3 preceding siblings ...)
  2023-12-05  9:02 ` Maxime Coquelin
@ 2023-12-05  9:45 ` David Marchand
  2023-12-05  9:45   ` [PATCH v2 2/5] vhost: fix virtqueue access check in VDUSE setup David Marchand
                     ` (4 more replies)
  4 siblings, 5 replies; 27+ messages in thread
From: David Marchand @ 2023-12-05  9:45 UTC (permalink / raw)
  To: dev; +Cc: stable, Eelco Chaudron, Maxime Coquelin, Chenbo Xia

Now that a r/w lock is used, the access_ok field should only be updated
under a write lock.

Since the datapath code only takes a read lock on the virtqueue to check
access_ok, this lock must be released and a write lock taken before
calling vring_translate().

Fixes: 03f77d66d966 ("vhost: change virtqueue access lock to a read/write one")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/virtio_net.c | 60 +++++++++++++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 16 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8af20f1487..d00f4b03aa 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1696,6 +1696,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
 	return pkt_idx;
 }
 
+static void
+virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	rte_rwlock_write_lock(&vq->access_lock);
+	vhost_user_iotlb_rd_lock(vq);
+	if (!vq->access_ok)
+		vring_translate(dev, vq);
+	vhost_user_iotlb_rd_unlock(vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
+}
+
 static __rte_always_inline uint32_t
 virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
@@ -1710,9 +1721,13 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}
 
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 	if (count == 0)
@@ -1731,6 +1746,7 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 out_access_unlock:
 	rte_rwlock_read_unlock(&vq->access_lock);
 
+out_no_unlock:
 	return nb_tx;
 }
 
@@ -2528,9 +2544,13 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}
 
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 	if (count == 0)
@@ -2551,6 +2571,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
 out_access_unlock:
 	rte_rwlock_write_unlock(&vq->access_lock);
 
+out_no_unlock:
 	return nb_tx;
 }
 
@@ -3581,11 +3602,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0)) {
-			count = 0;
-			goto out;
-		}
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}
 
 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
@@ -3646,6 +3669,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(rarp_mbuf != NULL))
 		count += 1;
 
+out_no_unlock:
 	return count;
 }
 
@@ -4196,11 +4220,14 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(vq->access_ok == 0))
-		if (unlikely(vring_translate(dev, vq) < 0)) {
-			count = 0;
-			goto out;
-		}
+	if (unlikely(vq->access_ok == 0)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		count = 0;
+		goto out_no_unlock;
+	}
 
 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
@@ -4266,5 +4293,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(rarp_mbuf != NULL))
 		count += 1;
 
+out_no_unlock:
 	return count;
 }
-- 
2.42.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 2/5] vhost: fix virtqueue access check in VDUSE setup
  2023-12-05  9:45 ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath David Marchand
@ 2023-12-05  9:45   ` David Marchand
  2023-12-05  9:57     ` Maxime Coquelin
  2023-12-05  9:45   ` [PATCH v2 3/5] vhost: fix virtqueue access check in vhost-user setup David Marchand
                     ` (3 subsequent siblings)
  4 siblings, 1 reply; 27+ messages in thread
From: David Marchand @ 2023-12-05  9:45 UTC (permalink / raw)
  To: dev; +Cc: stable, Eelco Chaudron, Maxime Coquelin, Chenbo Xia

vring_translate and vring_invalidate change the vq access_ok field.
The access_ok field should only be updated under a (write) lock.

Fixes: a9120db8b98b ("vhost: add VDUSE device startup")
Fixes: ad67c65efda1 ("vhost: add VDUSE device stop")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
---
Changes since v1:
- moved fix out of patch 3,

---
 lib/vhost/vduse.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c
index 080b58f7de..e198eeef64 100644
--- a/lib/vhost/vduse.c
+++ b/lib/vhost/vduse.c
@@ -196,6 +196,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 				vq->size * sizeof(struct batch_copy_elem),
 				RTE_CACHE_LINE_SIZE, 0);
 
+	rte_rwlock_write_lock(&vq->access_lock);
 	vhost_user_iotlb_rd_lock(vq);
 	if (vring_translate(dev, vq))
 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to translate vring %d addresses\n",
@@ -206,6 +207,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 				"Failed to disable guest notifications on vring %d\n",
 				index);
 	vhost_user_iotlb_rd_unlock(vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
 
 	vq_efd.index = index;
 	vq_efd.fd = vq->kickfd;
@@ -259,7 +261,9 @@ vduse_vring_cleanup(struct virtio_net *dev, unsigned int index)
 	close(vq->kickfd);
 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 
+	rte_rwlock_write_lock(&vq->access_lock);
 	vring_invalidate(dev, vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
 
 	rte_free(vq->batch_copy_elems);
 	vq->batch_copy_elems = NULL;
-- 
2.42.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 3/5] vhost: fix virtqueue access check in vhost-user setup
  2023-12-05  9:45 ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath David Marchand
  2023-12-05  9:45   ` [PATCH v2 2/5] vhost: fix virtqueue access check in VDUSE setup David Marchand
@ 2023-12-05  9:45   ` David Marchand
  2023-12-05  9:59     ` Maxime Coquelin
  2023-12-05  9:45   ` [PATCH v2 4/5] vhost: annotate virtqueue access checks David Marchand
                     ` (2 subsequent siblings)
  4 siblings, 1 reply; 27+ messages in thread
From: David Marchand @ 2023-12-05  9:45 UTC (permalink / raw)
  To: dev; +Cc: stable, Eelco Chaudron, Maxime Coquelin, Chenbo Xia, Tiwei Bie

Calling vring_invalidate must be done with a (write) lock taken on the
virtqueue.

Fixes: 72d002b3ebda ("vhost: fix vring address handling during live migration")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
---
Changes since v1:
- moved fix out of patch 3,

---
 lib/vhost/vhost_user.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index e36312181a..a323ce5fbf 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2198,7 +2198,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 
 	vhost_user_iotlb_flush_all(dev);
 
+	rte_rwlock_write_lock(&vq->access_lock);
 	vring_invalidate(dev, vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
 
 	return RTE_VHOST_MSG_RESULT_REPLY;
 }
-- 
2.42.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 4/5] vhost: annotate virtqueue access checks
  2023-12-05  9:45 ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath David Marchand
  2023-12-05  9:45   ` [PATCH v2 2/5] vhost: fix virtqueue access check in VDUSE setup David Marchand
  2023-12-05  9:45   ` [PATCH v2 3/5] vhost: fix virtqueue access check in vhost-user setup David Marchand
@ 2023-12-05  9:45   ` David Marchand
  2023-12-05 11:04     ` Maxime Coquelin
  2023-12-05  9:45   ` [PATCH v2 5/5] vhost: enhance virtqueue access lock asserts David Marchand
  2023-12-12 11:37   ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath Maxime Coquelin
  4 siblings, 1 reply; 27+ messages in thread
From: David Marchand @ 2023-12-05  9:45 UTC (permalink / raw)
  To: dev; +Cc: Eelco Chaudron, Maxime Coquelin, Chenbo Xia

Modifying vq->access_ok should be done with a write lock taken.
Annotate vring_translate() and vring_invalidate().

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
---
Changes since v1:
- moved fixes in separate patches,

---
 lib/vhost/vhost.h      | 7 +++++--
 lib/vhost/vhost_user.c | 8 ++++++++
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index f8624fba3d..6767246656 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -295,7 +295,8 @@ struct vhost_virtqueue {
 #define VIRTIO_UNINITIALIZED_EVENTFD	(-2)
 
 	bool			enabled;
-	bool			access_ok;
+	/* Protected by vq->access_lock */
+	bool			access_ok __rte_guarded_var;
 	bool			ready;
 
 	rte_rwlock_t		access_lock;
@@ -875,11 +876,13 @@ void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
 			uint64_t desc_addr, uint64_t desc_len)
 	__rte_shared_locks_required(&vq->iotlb_lock);
 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_exclusive_locks_required(&vq->access_lock)
 	__rte_shared_locks_required(&vq->iotlb_lock);
 uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		uint64_t log_addr)
 	__rte_shared_locks_required(&vq->iotlb_lock);
-void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_exclusive_locks_required(&vq->access_lock);
 
 static __rte_always_inline uint64_t
 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index a323ce5fbf..651ea5854b 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -787,6 +787,8 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	dev = *pdev;
 	vq = *pvq;
 
+	vq_assert_lock(dev, vq);
+
 	if (vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG)) {
 		vq->log_guest_addr =
 			log_addr_to_gpa(dev, vq);
@@ -924,6 +926,9 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,
 	/* addr->index refers to the queue index. The txq 1, rxq is 0. */
 	vq = dev->virtqueue[ctx->msg.payload.addr.index];
 
+	/* vhost_user_lock_all_queue_pairs locked all qps */
+	vq_assert_lock(dev, vq);
+
 	access_ok = vq->access_ok;
 
 	/*
@@ -1436,6 +1441,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
 			continue;
 
 		if (vq->desc || vq->avail || vq->used) {
+			/* vhost_user_lock_all_queue_pairs locked all qps */
+			vq_assert_lock(dev, vq);
+
 			/*
 			 * If the memory table got updated, the ring addresses
 			 * need to be translated again as virtual addresses have
-- 
2.42.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 5/5] vhost: enhance virtqueue access lock asserts
  2023-12-05  9:45 ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath David Marchand
                     ` (2 preceding siblings ...)
  2023-12-05  9:45   ` [PATCH v2 4/5] vhost: annotate virtqueue access checks David Marchand
@ 2023-12-05  9:45   ` David Marchand
  2024-02-19 10:52     ` Thomas Monjalon
                       ` (2 more replies)
  2023-12-12 11:37   ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath Maxime Coquelin
  4 siblings, 3 replies; 27+ messages in thread
From: David Marchand @ 2023-12-05  9:45 UTC (permalink / raw)
  To: dev; +Cc: Eelco Chaudron, Maxime Coquelin, Chenbo Xia

A simple comment in vhost_user_msg_handler() is not that robust.

Add a lock_all_qps property to message handlers so that their
implementation can add a build check and assert a vq is locked.

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
Changes since v1:
- moved this patch as the last of the series,

---
 lib/vhost/vhost_user.c | 114 +++++++++++++++++++----------------------
 1 file changed, 53 insertions(+), 61 deletions(-)

diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 651ea5854b..e8020ccfd3 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -56,14 +56,24 @@
 #define INFLIGHT_ALIGNMENT	64
 #define INFLIGHT_VERSION	0x1
 
-typedef struct vhost_message_handler {
+typedef const struct vhost_message_handler {
 	const char *description;
 	int (*callback)(struct virtio_net **pdev, struct vhu_msg_context *ctx,
 		int main_fd);
 	bool accepts_fd;
+	bool lock_all_qps;
 } vhost_message_handler_t;
 static vhost_message_handler_t vhost_message_handlers[];
 
+/* vhost_user_msg_handler() locks all qps based on a handler's lock_all_qps.
+ * Later, a handler may need to ensure the vq has been locked (for example,
+ * when calling lock annotated helpers).
+ */
+#define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
+	RTE_BUILD_BUG_ON(!vhost_message_handlers[id].lock_all_qps); \
+	vq_assert_lock(dev, vq); \
+} while (0)
+
 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 
@@ -400,7 +410,7 @@ vhost_user_set_features(struct virtio_net **pdev,
 			cleanup_vq(vq, 1);
 			cleanup_vq_inflight(dev, vq);
 			/* vhost_user_lock_all_queue_pairs locked all qps */
-			vq_assert_lock(dev, vq);
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_FEATURES);
 			rte_rwlock_write_unlock(&vq->access_lock);
 			free_vq(dev, vq);
 		}
@@ -927,7 +937,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,
 	vq = dev->virtqueue[ctx->msg.payload.addr.index];
 
 	/* vhost_user_lock_all_queue_pairs locked all qps */
-	vq_assert_lock(dev, vq);
+	VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ADDR);
 
 	access_ok = vq->access_ok;
 
@@ -1442,7 +1452,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
 
 		if (vq->desc || vq->avail || vq->used) {
 			/* vhost_user_lock_all_queue_pairs locked all qps */
-			vq_assert_lock(dev, vq);
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_MEM_TABLE);
 
 			/*
 			 * If the memory table got updated, the ring addresses
@@ -2234,7 +2244,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 	vq = dev->virtqueue[index];
 	if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 		/* vhost_user_lock_all_queue_pairs locked all qps */
-		vq_assert_lock(dev, vq);
+		VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ENABLE);
 		if (enable && vq->async && vq->async->pkts_inflight_n) {
 			VHOST_LOG_CONFIG(dev->ifname, ERR,
 				"failed to enable vring. Inflight packets must be completed first\n");
@@ -2839,41 +2849,43 @@ vhost_user_set_status(struct virtio_net **pdev,
 }
 
 #define VHOST_MESSAGE_HANDLERS \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false)
-
-#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd) \
-	[id] = { #id, handler, accepts_fd },
+VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, \
+	false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, \
+	false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false, false)
+
+#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd, lock_all_qps) \
+	[id] = { #id, handler, accepts_fd, lock_all_qps },
 static vhost_message_handler_t vhost_message_handlers[] = {
 	VHOST_MESSAGE_HANDLERS
 };
@@ -3141,31 +3153,11 @@ vhost_user_msg_handler(int vid, int fd)
 	 * inactive, so it is safe. Otherwise taking the access_lock
 	 * would cause a dead lock.
 	 */
-	switch (request) {
-	case VHOST_USER_SET_FEATURES:
-	case VHOST_USER_SET_PROTOCOL_FEATURES:
-	case VHOST_USER_SET_OWNER:
-	case VHOST_USER_SET_MEM_TABLE:
-	case VHOST_USER_SET_LOG_BASE:
-	case VHOST_USER_SET_LOG_FD:
-	case VHOST_USER_SET_VRING_NUM:
-	case VHOST_USER_SET_VRING_ADDR:
-	case VHOST_USER_SET_VRING_BASE:
-	case VHOST_USER_SET_VRING_KICK:
-	case VHOST_USER_SET_VRING_CALL:
-	case VHOST_USER_SET_VRING_ERR:
-	case VHOST_USER_SET_VRING_ENABLE:
-	case VHOST_USER_SEND_RARP:
-	case VHOST_USER_NET_SET_MTU:
-	case VHOST_USER_SET_BACKEND_REQ_FD:
+	if (msg_handler->lock_all_qps) {
 		if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 			vhost_user_lock_all_queue_pairs(dev);
 			unlock_required = 1;
 		}
-		break;
-	default:
-		break;
-
 	}
 
 	handled = false;
-- 
2.42.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* Re: [PATCH v2 2/5] vhost: fix virtqueue access check in VDUSE setup
  2023-12-05  9:45   ` [PATCH v2 2/5] vhost: fix virtqueue access check in VDUSE setup David Marchand
@ 2023-12-05  9:57     ` Maxime Coquelin
  0 siblings, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2023-12-05  9:57 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: stable, Eelco Chaudron, Chenbo Xia



On 12/5/23 10:45, David Marchand wrote:
> vring_translate and vring_invalidate change the vq access_ok field.
> The access_ok field should only be updated under a (write) lock.
> 
> Fixes: a9120db8b98b ("vhost: add VDUSE device startup")
> Fixes: ad67c65efda1 ("vhost: add VDUSE device stop")
> Cc: stable@dpdk.org
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> Acked-by: Eelco Chaudron <echaudro@redhat.com>
> ---
> Changes since v1:
> - moved fix out of patch 3,
> 
> ---
>   lib/vhost/vduse.c | 4 ++++
>   1 file changed, 4 insertions(+)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH v2 3/5] vhost: fix virtqueue access check in vhost-user setup
  2023-12-05  9:45   ` [PATCH v2 3/5] vhost: fix virtqueue access check in vhost-user setup David Marchand
@ 2023-12-05  9:59     ` Maxime Coquelin
  0 siblings, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2023-12-05  9:59 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: stable, Eelco Chaudron, Chenbo Xia, Tiwei Bie



On 12/5/23 10:45, David Marchand wrote:
> Calling vring_invalidate must be done with a (write) lock taken on the
> virtqueue.
> 
> Fixes: 72d002b3ebda ("vhost: fix vring address handling during live migration")
> Cc: stable@dpdk.org
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> Acked-by: Eelco Chaudron <echaudro@redhat.com>
> ---
> Changes since v1:
> - moved fix out of patch 3,
> 
> ---
>   lib/vhost/vhost_user.c | 2 ++
>   1 file changed, 2 insertions(+)

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH v2 4/5] vhost: annotate virtqueue access checks
  2023-12-05  9:45   ` [PATCH v2 4/5] vhost: annotate virtqueue access checks David Marchand
@ 2023-12-05 11:04     ` Maxime Coquelin
  0 siblings, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2023-12-05 11:04 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: Eelco Chaudron, Chenbo Xia



On 12/5/23 10:45, David Marchand wrote:
> Modifying vq->access_ok should be done with a write lock taken.
> Annotate vring_translate() and vring_invalidate().
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> Acked-by: Eelco Chaudron <echaudro@redhat.com>
> ---
> Changes since v1:
> - moved fixes in separate patches,
> 
> ---
>   lib/vhost/vhost.h      | 7 +++++--
>   lib/vhost/vhost_user.c | 8 ++++++++
>   2 files changed, 13 insertions(+), 2 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH v2 1/5] vhost: fix virtqueue access check in datapath
  2023-12-05  9:45 ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath David Marchand
                     ` (3 preceding siblings ...)
  2023-12-05  9:45   ` [PATCH v2 5/5] vhost: enhance virtqueue access lock asserts David Marchand
@ 2023-12-12 11:37   ` Maxime Coquelin
  4 siblings, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2023-12-12 11:37 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: stable, Eelco Chaudron, Chenbo Xia



On 12/5/23 10:45, David Marchand wrote:
> Now that a r/w lock is used, the access_ok field should only be updated
> under a write lock.
> 
> Since the datapath code only takes a read lock on the virtqueue to check
> access_ok, this lock must be released and a write lock taken before
> calling vring_translate().
> 
> Fixes: 03f77d66d966 ("vhost: change virtqueue access lock to a read/write one")
> Cc: stable@dpdk.org
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> Acked-by: Eelco Chaudron <echaudro@redhat.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>   lib/vhost/virtio_net.c | 60 +++++++++++++++++++++++++++++++-----------
>   1 file changed, 44 insertions(+), 16 deletions(-)
> 
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 8af20f1487..d00f4b03aa 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -1696,6 +1696,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
>   	return pkt_idx;
>   }
>   
> +static void
> +virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
> +{
> +	rte_rwlock_write_lock(&vq->access_lock);
> +	vhost_user_iotlb_rd_lock(vq);
> +	if (!vq->access_ok)
> +		vring_translate(dev, vq);
> +	vhost_user_iotlb_rd_unlock(vq);
> +	rte_rwlock_write_unlock(&vq->access_lock);
> +}
> +
>   static __rte_always_inline uint32_t
>   virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   	struct rte_mbuf **pkts, uint32_t count)
> @@ -1710,9 +1721,13 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   
>   	vhost_user_iotlb_rd_lock(vq);
>   
> -	if (unlikely(!vq->access_ok))
> -		if (unlikely(vring_translate(dev, vq) < 0))
> -			goto out;
> +	if (unlikely(!vq->access_ok)) {
> +		vhost_user_iotlb_rd_unlock(vq);
> +		rte_rwlock_read_unlock(&vq->access_lock);
> +
> +		virtio_dev_vring_translate(dev, vq);
> +		goto out_no_unlock;
> +	}
>   
>   	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
>   	if (count == 0)
> @@ -1731,6 +1746,7 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   out_access_unlock:
>   	rte_rwlock_read_unlock(&vq->access_lock);
>   
> +out_no_unlock:
>   	return nb_tx;
>   }
>   
> @@ -2528,9 +2544,13 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   
>   	vhost_user_iotlb_rd_lock(vq);
>   
> -	if (unlikely(!vq->access_ok))
> -		if (unlikely(vring_translate(dev, vq) < 0))
> -			goto out;
> +	if (unlikely(!vq->access_ok)) {
> +		vhost_user_iotlb_rd_unlock(vq);
> +		rte_rwlock_read_unlock(&vq->access_lock);
> +
> +		virtio_dev_vring_translate(dev, vq);
> +		goto out_no_unlock;
> +	}
>   
>   	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
>   	if (count == 0)
> @@ -2551,6 +2571,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   out_access_unlock:
>   	rte_rwlock_write_unlock(&vq->access_lock);
>   
> +out_no_unlock:
>   	return nb_tx;
>   }
>   
> @@ -3581,11 +3602,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>   
>   	vhost_user_iotlb_rd_lock(vq);
>   
> -	if (unlikely(!vq->access_ok))
> -		if (unlikely(vring_translate(dev, vq) < 0)) {
> -			count = 0;
> -			goto out;
> -		}
> +	if (unlikely(!vq->access_ok)) {
> +		vhost_user_iotlb_rd_unlock(vq);
> +		rte_rwlock_read_unlock(&vq->access_lock);
> +
> +		virtio_dev_vring_translate(dev, vq);
> +		goto out_no_unlock;
> +	}
>   
>   	/*
>   	 * Construct a RARP broadcast packet, and inject it to the "pkts"
> @@ -3646,6 +3669,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>   	if (unlikely(rarp_mbuf != NULL))
>   		count += 1;
>   
> +out_no_unlock:
>   	return count;
>   }
>   
> @@ -4196,11 +4220,14 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
>   
>   	vhost_user_iotlb_rd_lock(vq);
>   
> -	if (unlikely(vq->access_ok == 0))
> -		if (unlikely(vring_translate(dev, vq) < 0)) {
> -			count = 0;
> -			goto out;
> -		}
> +	if (unlikely(vq->access_ok == 0)) {
> +		vhost_user_iotlb_rd_unlock(vq);
> +		rte_rwlock_read_unlock(&vq->access_lock);
> +
> +		virtio_dev_vring_translate(dev, vq);
> +		count = 0;
> +		goto out_no_unlock;
> +	}
>   
>   	/*
>   	 * Construct a RARP broadcast packet, and inject it to the "pkts"
> @@ -4266,5 +4293,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
>   	if (unlikely(rarp_mbuf != NULL))
>   		count += 1;
>   
> +out_no_unlock:
>   	return count;
>   }

Series applied to next-virtio/for-next-net

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH v2 5/5] vhost: enhance virtqueue access lock asserts
  2023-12-05  9:45   ` [PATCH v2 5/5] vhost: enhance virtqueue access lock asserts David Marchand
@ 2024-02-19 10:52     ` Thomas Monjalon
  2024-02-26 15:46       ` David Marchand
  2024-02-26 17:05     ` [PATCH v3] " David Marchand
  2024-02-27 10:39     ` [PATCH v4] " David Marchand
  2 siblings, 1 reply; 27+ messages in thread
From: Thomas Monjalon @ 2024-02-19 10:52 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, Eelco Chaudron, Maxime Coquelin, Chenbo Xia

05/12/2023 10:45, David Marchand:
> +#define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
> +	RTE_BUILD_BUG_ON(!vhost_message_handlers[id].lock_all_qps); \
> +	vq_assert_lock(dev, vq); \
> +} while (0)

Since "eal: enhance compile-time checks using C11 assert",
it is not allowed to have a non-constant check in RTE_BUILD_BUG_ON:

lib/vhost/vhost_user.c:413:25: note: in expansion of macro 'VHOST_USER_ASSERT_LOCK'
lib/vhost/vhost_user.c: In function 'vhost_user_set_vring_addr':
lib/eal/include/rte_common.h:518:56: error: expression in static assertion is not constant
  #define RTE_BUILD_BUG_ON(condition) do { static_assert(!(condition), #condition); } while (0)

I suppose we can make this check at compile time with a few adjustments.
For -rc1, I am dropping this patch.
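
As a minimal, standalone illustration (not DPDK code, names invented) of the
C11 rule being hit: reading a field of a const array element is not an
integer constant expression, even with the initializer in plain sight, so
static_assert() rejects it, while a literal or enum constant passes.

#include <assert.h>
#include <stdbool.h>

struct handler {
	bool lock_all_qps;
};

static const struct handler handlers[] = {
	{ .lock_all_qps = true },
};

void check(void)
{
	/* Uncommenting this reproduces the error quoted above:
	 * "expression in static assertion is not constant"
	 */
	/* static_assert(handlers[0].lock_all_qps, "vq must be locked"); */

	/* Literals (and enum constants) are accepted. */
	static_assert(1, "always holds");
}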




^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH v2 5/5] vhost: enhance virtqueue access lock asserts
  2024-02-19 10:52     ` Thomas Monjalon
@ 2024-02-26 15:46       ` David Marchand
  0 siblings, 0 replies; 27+ messages in thread
From: David Marchand @ 2024-02-26 15:46 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: dev, Eelco Chaudron, Maxime Coquelin, Chenbo Xia, Tyler Retzlaff,
	Stephen Hemminger

On Mon, Feb 19, 2024 at 11:52 AM Thomas Monjalon <thomas@monjalon.net> wrote:
>
> 05/12/2023 10:45, David Marchand:
> > +#define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
> > +     RTE_BUILD_BUG_ON(!vhost_message_handlers[id].lock_all_qps); \
> > +     vq_assert_lock(dev, vq); \
> > +} while (0)
>
> Since "eal: enhance compile-time checks using C11 assert",
> it is not allowed to have non-constant check in RTE_BUILD_BUG_ON:
>
> lib/vhost/vhost_user.c:413:25: note: in expansion of macro 'VHOST_USER_ASSERT_LOCK'
> lib/vhost/vhost_user.c: In function 'vhost_user_set_vring_addr':
> lib/eal/include/rte_common.h:518:56: error: expression in static assertion is not constant
>   #define RTE_BUILD_BUG_ON(condition) do { static_assert(!(condition), #condition); } while (0)
>
> I suppose we can make this check at compile-time with few adjustments.
> For -rc1, I am dropping this patch.

IIUC, an array's content is not constant (from a compiler's point of view)
unless the elements are integers.

I suspect there is some gcc-specific construct that could work, since this
check passed with both gcc and clang under the previous implementation of
RTE_BUILD_BUG_ON... but I could not find one.

I have an alternate solution using enums with the existing macros; I'll post
a new revision.
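
A rough sketch of that enum idea (illustrative names only; the actual patch
follows later in this thread): expanding the handler list a second time into
enumerators gives each message type a genuine constant expression that a
build-time assertion can evaluate.

#include <assert.h>

#define MESSAGE_LIST \
	MESSAGE(MSG_SET_FEATURES, /* lock_all_qps */ 1) \
	MESSAGE(MSG_GET_FEATURES, /* lock_all_qps */ 0)

/* Expansion #1: one enumerator per message, carrying the flag as a constant. */
#define MESSAGE(id, lock_all_qps) id ## _LOCK_ALL_QPS = (lock_all_qps),
enum { MESSAGE_LIST };
#undef MESSAGE

/* The flag is now usable in a build-time assertion. */
#define ASSERT_LOCKS_ALL_QPS(id) \
	static_assert(id ## _LOCK_ALL_QPS, #id " does not lock all queue pairs")

void set_features_handler(void)
{
	ASSERT_LOCKS_ALL_QPS(MSG_SET_FEATURES);	/* builds fine */
	/* ASSERT_LOCKS_ALL_QPS(MSG_GET_FEATURES); would fail to compile */
}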


-- 
David Marchand


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v3] vhost: enhance virtqueue access lock asserts
  2023-12-05  9:45   ` [PATCH v2 5/5] vhost: enhance virtqueue access lock asserts David Marchand
  2024-02-19 10:52     ` Thomas Monjalon
@ 2024-02-26 17:05     ` David Marchand
  2024-02-26 19:40       ` Maxime Coquelin
  2024-02-27 10:39     ` [PATCH v4] " David Marchand
  2 siblings, 1 reply; 27+ messages in thread
From: David Marchand @ 2024-02-26 17:05 UTC (permalink / raw)
  To: dev; +Cc: echaudro, Maxime Coquelin, Chenbo Xia

A simple comment in vhost_user_msg_handler() is not that robust.

Add a lock_all_qps property to message handlers so that their
implementation can add a build check and assert a vq is locked.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
Changes since v2:
- dropped review tags,
- following use of static_assert() in RTE_BUILD_BUG_ON, reworked build
  check by using enums (one enum is now defined per message type),
- as the added enums must be defined early, moved the definitions of
  handlers at the top of the file,

Changes since v1:
- moved this patch as the last of the series,

---
 lib/vhost/vhost_user.c | 128 +++++++++++++++++++++--------------------
 1 file changed, 66 insertions(+), 62 deletions(-)

diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 3e8a87e517..84dae612c7 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -56,14 +56,72 @@
 #define INFLIGHT_ALIGNMENT	64
 #define INFLIGHT_VERSION	0x1
 
-typedef struct vhost_message_handler {
+typedef const struct vhost_message_handler {
 	const char *description;
 	int (*callback)(struct virtio_net **pdev, struct vhu_msg_context *ctx,
 		int main_fd);
 	bool accepts_fd;
+	bool lock_all_qps;
 } vhost_message_handler_t;
 static vhost_message_handler_t vhost_message_handlers[];
 
+#define VHOST_MESSAGE_HANDLERS \
+VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, \
+	false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, \
+	false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false, false)
+
+#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd, lock_all_qps) \
+	id ## _LOCK_ALL_QPS = lock_all_qps,
+enum {
+	VHOST_MESSAGE_HANDLERS
+};
+#undef VHOST_MESSAGE_HANDLER
+
+/* vhost_user_msg_handler() locks all qps based on a handler's lock_all_qps.
+ * Later, a handler may need to ensure the vq has been locked (for example,
+ * when calling lock annotated helpers).
+ *
+ * Note: unfortunately, static_assert() does not see an array content as a
+ * constant expression. Because of this, we can't simply check for
+ * vhost_user_msg_handler[].lock_all_qps.
+ * Instead, define an enum for each handler.
+ */
+#define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
+	RTE_BUILD_BUG_ON(id ## _LOCK_ALL_QPS == false); \
+	vq_assert_lock(dev, vq); \
+} while (0)
+
 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 
@@ -400,7 +458,7 @@ vhost_user_set_features(struct virtio_net **pdev,
 			cleanup_vq(vq, 1);
 			cleanup_vq_inflight(dev, vq);
 			/* vhost_user_lock_all_queue_pairs locked all qps */
-			vq_assert_lock(dev, vq);
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_FEATURES);
 			rte_rwlock_write_unlock(&vq->access_lock);
 			free_vq(dev, vq);
 		}
@@ -927,7 +985,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,
 	vq = dev->virtqueue[ctx->msg.payload.addr.index];
 
 	/* vhost_user_lock_all_queue_pairs locked all qps */
-	vq_assert_lock(dev, vq);
+	VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ADDR);
 
 	access_ok = vq->access_ok;
 
@@ -1442,7 +1500,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
 
 		if (vq->desc || vq->avail || vq->used) {
 			/* vhost_user_lock_all_queue_pairs locked all qps */
-			vq_assert_lock(dev, vq);
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_MEM_TABLE);
 
 			/*
 			 * If the memory table got updated, the ring addresses
@@ -2234,7 +2292,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 	vq = dev->virtqueue[index];
 	if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 		/* vhost_user_lock_all_queue_pairs locked all qps */
-		vq_assert_lock(dev, vq);
+		VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ENABLE);
 		if (enable && vq->async && vq->async->pkts_inflight_n) {
 			VHOST_CONFIG_LOG(dev->ifname, ERR,
 				"failed to enable vring. Inflight packets must be completed first");
@@ -2838,42 +2896,8 @@ vhost_user_set_status(struct virtio_net **pdev,
 	return RTE_VHOST_MSG_RESULT_OK;
 }
 
-#define VHOST_MESSAGE_HANDLERS \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false)
-
-#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd) \
-	[id] = { #id, handler, accepts_fd },
+#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd, lock_all_qps) \
+	[id] = { #id, handler, accepts_fd, id ## _LOCK_ALL_QPS },
 static vhost_message_handler_t vhost_message_handlers[] = {
 	VHOST_MESSAGE_HANDLERS
 };
@@ -3141,31 +3165,11 @@ vhost_user_msg_handler(int vid, int fd)
 	 * inactive, so it is safe. Otherwise taking the access_lock
 	 * would cause a dead lock.
 	 */
-	switch (request) {
-	case VHOST_USER_SET_FEATURES:
-	case VHOST_USER_SET_PROTOCOL_FEATURES:
-	case VHOST_USER_SET_OWNER:
-	case VHOST_USER_SET_MEM_TABLE:
-	case VHOST_USER_SET_LOG_BASE:
-	case VHOST_USER_SET_LOG_FD:
-	case VHOST_USER_SET_VRING_NUM:
-	case VHOST_USER_SET_VRING_ADDR:
-	case VHOST_USER_SET_VRING_BASE:
-	case VHOST_USER_SET_VRING_KICK:
-	case VHOST_USER_SET_VRING_CALL:
-	case VHOST_USER_SET_VRING_ERR:
-	case VHOST_USER_SET_VRING_ENABLE:
-	case VHOST_USER_SEND_RARP:
-	case VHOST_USER_NET_SET_MTU:
-	case VHOST_USER_SET_BACKEND_REQ_FD:
+	if (msg_handler->lock_all_qps) {
 		if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 			vhost_user_lock_all_queue_pairs(dev);
 			unlock_required = 1;
 		}
-		break;
-	default:
-		break;
-
 	}
 
 	handled = false;
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* Re: [PATCH v3] vhost: enhance virtqueue access lock asserts
  2024-02-26 17:05     ` [PATCH v3] " David Marchand
@ 2024-02-26 19:40       ` Maxime Coquelin
  2024-02-26 20:56         ` Stephen Hemminger
  0 siblings, 1 reply; 27+ messages in thread
From: Maxime Coquelin @ 2024-02-26 19:40 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: echaudro, Chenbo Xia



On 2/26/24 18:05, David Marchand wrote:
> A simple comment in vhost_user_msg_handler() is not that robust.
> 
> Add a lock_all_qps property to message handlers so that their
> implementation can add a build check and assert a vq is locked.
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> ---
> Changes since v2:
> - dropped review tags,
> - following use of static_assert() in RTE_BUILD_BUG_ON, reworked build
>    check by using enums (one enum is now defined per message type),

Thanks for working on an alternative, it looks good to me:

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Maxime

> - as the added enums must be defined early, moved the definitions of
>    handlers at the top of the file,
> 
> Changes since v1:
> - moved this patch as the last of the series,
> 
> ---
>   lib/vhost/vhost_user.c | 128 +++++++++++++++++++++--------------------
>   1 file changed, 66 insertions(+), 62 deletions(-)
> 
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 3e8a87e517..84dae612c7 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -56,14 +56,72 @@
>   #define INFLIGHT_ALIGNMENT	64
>   #define INFLIGHT_VERSION	0x1
>   
> -typedef struct vhost_message_handler {
> +typedef const struct vhost_message_handler {
>   	const char *description;
>   	int (*callback)(struct virtio_net **pdev, struct vhu_msg_context *ctx,
>   		int main_fd);
>   	bool accepts_fd;
> +	bool lock_all_qps;
>   } vhost_message_handler_t;
>   static vhost_message_handler_t vhost_message_handlers[];
>   
> +#define VHOST_MESSAGE_HANDLERS \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, \
> +	false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, \
> +	false, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true, true) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false, false) \
> +VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false, false)
> +
> +#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd, lock_all_qps) \
> +	id ## _LOCK_ALL_QPS = lock_all_qps,
> +enum {
> +	VHOST_MESSAGE_HANDLERS
> +};
> +#undef VHOST_MESSAGE_HANDLER
> +
> +/* vhost_user_msg_handler() locks all qps based on a handler's lock_all_qps.
> + * Later, a handler may need to ensure the vq has been locked (for example,
> + * when calling lock annotated helpers).
> + *
> + * Note: unfortunately, static_assert() does not see an array content as a
> + * constant expression. Because of this, we can't simply check for
> + * vhost_user_msg_handler[].lock_all_qps.
> + * Instead, define an enum for each handler.
> + */
> +#define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
> +	RTE_BUILD_BUG_ON(id ## _LOCK_ALL_QPS == false); \
> +	vq_assert_lock(dev, vq); \
> +} while (0)
> +
>   static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
>   static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
>   
> @@ -400,7 +458,7 @@ vhost_user_set_features(struct virtio_net **pdev,
>   			cleanup_vq(vq, 1);
>   			cleanup_vq_inflight(dev, vq);
>   			/* vhost_user_lock_all_queue_pairs locked all qps */
> -			vq_assert_lock(dev, vq);
> +			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_FEATURES);
>   			rte_rwlock_write_unlock(&vq->access_lock);
>   			free_vq(dev, vq);
>   		}
> @@ -927,7 +985,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,
>   	vq = dev->virtqueue[ctx->msg.payload.addr.index];
>   
>   	/* vhost_user_lock_all_queue_pairs locked all qps */
> -	vq_assert_lock(dev, vq);
> +	VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ADDR);
>   
>   	access_ok = vq->access_ok;
>   
> @@ -1442,7 +1500,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
>   
>   		if (vq->desc || vq->avail || vq->used) {
>   			/* vhost_user_lock_all_queue_pairs locked all qps */
> -			vq_assert_lock(dev, vq);
> +			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_MEM_TABLE);
>   
>   			/*
>   			 * If the memory table got updated, the ring addresses
> @@ -2234,7 +2292,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
>   	vq = dev->virtqueue[index];
>   	if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
>   		/* vhost_user_lock_all_queue_pairs locked all qps */
> -		vq_assert_lock(dev, vq);
> +		VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ENABLE);
>   		if (enable && vq->async && vq->async->pkts_inflight_n) {
>   			VHOST_CONFIG_LOG(dev->ifname, ERR,
>   				"failed to enable vring. Inflight packets must be completed first");
> @@ -2838,42 +2896,8 @@ vhost_user_set_status(struct virtio_net **pdev,
>   	return RTE_VHOST_MSG_RESULT_OK;
>   }
>   
> -#define VHOST_MESSAGE_HANDLERS \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false) \
> -VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false)
> -
> -#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd) \
> -	[id] = { #id, handler, accepts_fd },
> +#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd, lock_all_qps) \
> +	[id] = { #id, handler, accepts_fd, id ## _LOCK_ALL_QPS },
>   static vhost_message_handler_t vhost_message_handlers[] = {
>   	VHOST_MESSAGE_HANDLERS
>   };
> @@ -3141,31 +3165,11 @@ vhost_user_msg_handler(int vid, int fd)
>   	 * inactive, so it is safe. Otherwise taking the access_lock
>   	 * would cause a dead lock.
>   	 */
> -	switch (request) {
> -	case VHOST_USER_SET_FEATURES:
> -	case VHOST_USER_SET_PROTOCOL_FEATURES:
> -	case VHOST_USER_SET_OWNER:
> -	case VHOST_USER_SET_MEM_TABLE:
> -	case VHOST_USER_SET_LOG_BASE:
> -	case VHOST_USER_SET_LOG_FD:
> -	case VHOST_USER_SET_VRING_NUM:
> -	case VHOST_USER_SET_VRING_ADDR:
> -	case VHOST_USER_SET_VRING_BASE:
> -	case VHOST_USER_SET_VRING_KICK:
> -	case VHOST_USER_SET_VRING_CALL:
> -	case VHOST_USER_SET_VRING_ERR:
> -	case VHOST_USER_SET_VRING_ENABLE:
> -	case VHOST_USER_SEND_RARP:
> -	case VHOST_USER_NET_SET_MTU:
> -	case VHOST_USER_SET_BACKEND_REQ_FD:
> +	if (msg_handler->lock_all_qps) {
>   		if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
>   			vhost_user_lock_all_queue_pairs(dev);
>   			unlock_required = 1;
>   		}
> -		break;
> -	default:
> -		break;
> -
>   	}
>   
>   	handled = false;


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH v3] vhost: enhance virtqueue access lock asserts
  2024-02-26 19:40       ` Maxime Coquelin
@ 2024-02-26 20:56         ` Stephen Hemminger
  0 siblings, 0 replies; 27+ messages in thread
From: Stephen Hemminger @ 2024-02-26 20:56 UTC (permalink / raw)
  To: Maxime Coquelin; +Cc: David Marchand, dev, echaudro, Chenbo Xia

On Mon, 26 Feb 2024 20:40:57 +0100
Maxime Coquelin <maxime.coquelin@redhat.com> wrote:

> > +
> > +/* vhost_user_msg_handler() locks all qps based on a handler's lock_all_qps.
> > + * Later, a handler may need to ensure the vq has been locked (for example,
> > + * when calling lock annotated helpers).
> > + *
> > + * Note: unfortunately, static_assert() does not see an array content as a
> > + * constant expression. Because of this, we can't simply check for
> > + * vhost_user_msg_handler[].lock_all_qps.
> > + * Instead, define an enum for each handler.
> > + */
> > +#define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
> > +	RTE_BUILD_BUG_ON(id ## _LOCK_ALL_QPS == false); \

Rather than the opaque output of RTE_BUILD_BUG_ON, please use
static_assert() directly.
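
To make the difference concrete, a small sketch (the macro definition is as
quoted earlier in the thread; the assert message is only an example):

#include <assert.h>

#define RTE_BUILD_BUG_ON(condition) \
	do { static_assert(!(condition), #condition); } while (0)

void example(void)
{
	/* On failure, the wrapper can only echo the stringified condition,
	 * e.g. "VHOST_USER_GET_STATUS_LOCK_ALL_QPS == false".
	 */
	RTE_BUILD_BUG_ON(0);

	/* A direct static_assert() can carry a human-readable explanation. */
	static_assert(1, "handler is not declared as locking all queue pairs");
}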

^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v4] vhost: enhance virtqueue access lock asserts
  2023-12-05  9:45   ` [PATCH v2 5/5] vhost: enhance virtqueue access lock asserts David Marchand
  2024-02-19 10:52     ` Thomas Monjalon
  2024-02-26 17:05     ` [PATCH v3] " David Marchand
@ 2024-02-27 10:39     ` David Marchand
  2024-03-01 15:22       ` Maxime Coquelin
  2024-03-05  9:04       ` David Marchand
  2 siblings, 2 replies; 27+ messages in thread
From: David Marchand @ 2024-02-27 10:39 UTC (permalink / raw)
  To: dev; +Cc: echaudro, Maxime Coquelin, Chenbo Xia

A simple comment in vhost_user_msg_handler() is not that robust.

Add a lock_all_qps property to message handlers so that their
implementation can add a build check and assert a vq is locked.

Signed-off-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
Changes since v3:
- directly called static_assert() with improved message,

Changes since v2:
- dropped review tags,
- following use of static_assert() in RTE_BUILD_BUG_ON, reworked build
  check by using enums (one enum is now defined per message type),
- as the added enums must be defined early, moved the definitions of
  handlers at the top of the file,

Changes since v1:
- moved this patch as the last of the series,

---
 lib/vhost/vhost_user.c | 130 +++++++++++++++++++++--------------------
 1 file changed, 68 insertions(+), 62 deletions(-)

diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 3e8a87e517..3aba32c95a 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -19,6 +19,7 @@
  * Do not assume received VhostUserMsg fields contain sensible values!
  */
 
+#include <assert.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -56,14 +57,73 @@
 #define INFLIGHT_ALIGNMENT	64
 #define INFLIGHT_VERSION	0x1
 
-typedef struct vhost_message_handler {
+typedef const struct vhost_message_handler {
 	const char *description;
 	int (*callback)(struct virtio_net **pdev, struct vhu_msg_context *ctx,
 		int main_fd);
 	bool accepts_fd;
+	bool lock_all_qps;
 } vhost_message_handler_t;
 static vhost_message_handler_t vhost_message_handlers[];
 
+#define VHOST_MESSAGE_HANDLERS \
+VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, \
+	false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, \
+	false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false, false)
+
+#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd, lock_all_qps) \
+	id ## _LOCK_ALL_QPS = lock_all_qps,
+enum {
+	VHOST_MESSAGE_HANDLERS
+};
+#undef VHOST_MESSAGE_HANDLER
+
+/* vhost_user_msg_handler() locks all qps based on a handler's lock_all_qps.
+ * Later, a handler may need to ensure the vq has been locked (for example,
+ * when calling lock annotated helpers).
+ *
+ * Note: unfortunately, static_assert() does not see an array content as a
+ * constant expression. Because of this, we can't simply check for
+ * vhost_user_msg_handler[].lock_all_qps.
+ * Instead, define an enum for each handler.
+ */
+#define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
+	static_assert(id ## _LOCK_ALL_QPS == true, \
+		#id " handler is not declared as locking all queue pairs"); \
+	vq_assert_lock(dev, vq); \
+} while (0)
+
 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 
@@ -400,7 +460,7 @@ vhost_user_set_features(struct virtio_net **pdev,
 			cleanup_vq(vq, 1);
 			cleanup_vq_inflight(dev, vq);
 			/* vhost_user_lock_all_queue_pairs locked all qps */
-			vq_assert_lock(dev, vq);
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_FEATURES);
 			rte_rwlock_write_unlock(&vq->access_lock);
 			free_vq(dev, vq);
 		}
@@ -927,7 +987,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,
 	vq = dev->virtqueue[ctx->msg.payload.addr.index];
 
 	/* vhost_user_lock_all_queue_pairs locked all qps */
-	vq_assert_lock(dev, vq);
+	VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ADDR);
 
 	access_ok = vq->access_ok;
 
@@ -1442,7 +1502,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
 
 		if (vq->desc || vq->avail || vq->used) {
 			/* vhost_user_lock_all_queue_pairs locked all qps */
-			vq_assert_lock(dev, vq);
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_MEM_TABLE);
 
 			/*
 			 * If the memory table got updated, the ring addresses
@@ -2234,7 +2294,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 	vq = dev->virtqueue[index];
 	if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 		/* vhost_user_lock_all_queue_pairs locked all qps */
-		vq_assert_lock(dev, vq);
+		VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ENABLE);
 		if (enable && vq->async && vq->async->pkts_inflight_n) {
 			VHOST_CONFIG_LOG(dev->ifname, ERR,
 				"failed to enable vring. Inflight packets must be completed first");
@@ -2838,42 +2898,8 @@ vhost_user_set_status(struct virtio_net **pdev,
 	return RTE_VHOST_MSG_RESULT_OK;
 }
 
-#define VHOST_MESSAGE_HANDLERS \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false)
-
-#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd) \
-	[id] = { #id, handler, accepts_fd },
+#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd, lock_all_qps) \
+	[id] = { #id, handler, accepts_fd, id ## _LOCK_ALL_QPS },
 static vhost_message_handler_t vhost_message_handlers[] = {
 	VHOST_MESSAGE_HANDLERS
 };
@@ -3141,31 +3167,11 @@ vhost_user_msg_handler(int vid, int fd)
 	 * inactive, so it is safe. Otherwise taking the access_lock
 	 * would cause a dead lock.
 	 */
-	switch (request) {
-	case VHOST_USER_SET_FEATURES:
-	case VHOST_USER_SET_PROTOCOL_FEATURES:
-	case VHOST_USER_SET_OWNER:
-	case VHOST_USER_SET_MEM_TABLE:
-	case VHOST_USER_SET_LOG_BASE:
-	case VHOST_USER_SET_LOG_FD:
-	case VHOST_USER_SET_VRING_NUM:
-	case VHOST_USER_SET_VRING_ADDR:
-	case VHOST_USER_SET_VRING_BASE:
-	case VHOST_USER_SET_VRING_KICK:
-	case VHOST_USER_SET_VRING_CALL:
-	case VHOST_USER_SET_VRING_ERR:
-	case VHOST_USER_SET_VRING_ENABLE:
-	case VHOST_USER_SEND_RARP:
-	case VHOST_USER_NET_SET_MTU:
-	case VHOST_USER_SET_BACKEND_REQ_FD:
+	if (msg_handler->lock_all_qps) {
 		if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 			vhost_user_lock_all_queue_pairs(dev);
 			unlock_required = 1;
 		}
-		break;
-	default:
-		break;
-
 	}
 
 	handled = false;
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* Re: [PATCH v4] vhost: enhance virtqueue access lock asserts
  2024-02-27 10:39     ` [PATCH v4] " David Marchand
@ 2024-03-01 15:22       ` Maxime Coquelin
  2024-03-05  9:04       ` David Marchand
  1 sibling, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2024-03-01 15:22 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: echaudro, Chenbo Xia



On 2/27/24 11:39, David Marchand wrote:
> A simple comment in vhost_user_msg_handler() is not that robust.
> 
> Add a lock_all_qps property to message handlers so that their
> implementation can add a build check and assert a vq is locked.
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> Changes since v3:
> - directly called static_assert() with improved message,
> 
> Changes since v2:
> - dropped review tags,
> - following use of static_assert() in RTE_BUILD_BUG_ON, reworked build
>    check by using enums (one enum is now defined per message type),
> - as the added enums must be defined early, moved the definitions of
>    handlers at the top of the file,
> 
> Changes since v1:
> - moved this patch as the last of the series,
> 
> ---
>   lib/vhost/vhost_user.c | 130 +++++++++++++++++++++--------------------
>   1 file changed, 68 insertions(+), 62 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH v4] vhost: enhance virtqueue access lock asserts
  2024-02-27 10:39     ` [PATCH v4] " David Marchand
  2024-03-01 15:22       ` Maxime Coquelin
@ 2024-03-05  9:04       ` David Marchand
  1 sibling, 0 replies; 27+ messages in thread
From: David Marchand @ 2024-03-05  9:04 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, echaudro, Maxime Coquelin, Chenbo Xia

On Tue, Feb 27, 2024 at 11:39 AM David Marchand
<david.marchand@redhat.com> wrote:
>
> A simple comment in vhost_user_msg_handler() is not that robust.
>
> Add a lock_all_qps property to message handlers so that their
> implementation can add a build check and assert a vq is locked.
>
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> Changes since v3:
> - directly called static_assert() with improved message,
>
> Changes since v2:
> - dropped review tags,
> - following use of static_assert() in RTE_BUILD_BUG_ON, reworked build
>   check by using enums (one enum is now defined per message type),
> - as the added enums must be defined early, moved the definitions of
>   handlers at the top of the file,
>
> Changes since v1:
> - moved this patch as the last of the series,

Applied, thanks.


-- 
David Marchand


^ permalink raw reply	[flat|nested] 27+ messages in thread

end of thread, other threads:[~2024-03-05  9:04 UTC | newest]

Thread overview: 27+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-10-23  9:55 [PATCH 1/3] vhost: robustify virtqueue access lock asserts David Marchand
2023-10-23  9:55 ` [PATCH 2/3] vhost: fix virtqueue access lock in datapath David Marchand
2023-10-27  9:03   ` Eelco Chaudron
2023-10-27  9:22     ` David Marchand
2023-10-27 10:11       ` Eelco Chaudron
2023-12-05  9:10   ` Maxime Coquelin
2023-10-23  9:55 ` [PATCH 3/3] vhost: annotate virtqueue access checks David Marchand
2023-10-27  9:04   ` Eelco Chaudron
2023-10-27  8:59 ` [PATCH 1/3] vhost: robustify virtqueue access lock asserts Eelco Chaudron
2023-12-05  9:02 ` Maxime Coquelin
2023-12-05  9:45 ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath David Marchand
2023-12-05  9:45   ` [PATCH v2 2/5] vhost: fix virtqueue access check in VDUSE setup David Marchand
2023-12-05  9:57     ` Maxime Coquelin
2023-12-05  9:45   ` [PATCH v2 3/5] vhost: fix virtqueue access check in vhost-user setup David Marchand
2023-12-05  9:59     ` Maxime Coquelin
2023-12-05  9:45   ` [PATCH v2 4/5] vhost: annotate virtqueue access checks David Marchand
2023-12-05 11:04     ` Maxime Coquelin
2023-12-05  9:45   ` [PATCH v2 5/5] vhost: enhance virtqueue access lock asserts David Marchand
2024-02-19 10:52     ` Thomas Monjalon
2024-02-26 15:46       ` David Marchand
2024-02-26 17:05     ` [PATCH v3] " David Marchand
2024-02-26 19:40       ` Maxime Coquelin
2024-02-26 20:56         ` Stephen Hemminger
2024-02-27 10:39     ` [PATCH v4] " David Marchand
2024-03-01 15:22       ` Maxime Coquelin
2024-03-05  9:04       ` David Marchand
2023-12-12 11:37   ` [PATCH v2 1/5] vhost: fix virtqueue access check in datapath Maxime Coquelin
