From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Subject: [RFC PATCH 5/5] vhost: annotate IOTLB locks
Date: Mon, 28 Mar 2022 14:17:58 +0200
Message-ID: <20220328121758.26632-6-david.marchand@redhat.com>
In-Reply-To: <20220328121758.26632-1-david.marchand@redhat.com>

The IOTLB code uses r/w locks.

Introduce a wrapper around this type of lock and annotate iotlb_lock and
iotlb_pending_lock.
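
As an illustration, here is a minimal sketch of how the wrapper and the
annotations are meant to be consumed. The functions below are made up;
the vhost_rwlock_* helpers and the VHOST_RDLOCK_REQUIRES macro are the
ones added to vhost.h further down in this patch:

	/* hypothetical consumer, assuming the vhost.h additions below */
	static void
	dump_iotlb_entries(struct vhost_virtqueue *vq)
		VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
	{
		/* safe to walk vq->iotlb_list here */
	}

	static void
	caller(struct vhost_virtqueue *vq)
	{
		vhost_rwlock_read_lock(&vq->iotlb_lock);
		dump_iotlb_entries(vq);	/* ok: lock is held */
		vhost_rwlock_read_unlock(&vq->iotlb_lock);

		dump_iotlb_entries(vq);	/* flagged by clang -Wthread-safety */
	}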

clang does not support conditionally held locks, so always take the IOTLB
locks, regardless of the VIRTIO_F_IOMMU_PLATFORM feature.
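
A standalone sketch of that limitation, using clang's thread safety
attributes directly (hypothetical names; the comments paraphrase the
warnings clang -Wthread-safety emits for conditional locking):

	/* compile with: clang -Wthread-safety -c sketch.c */
	struct __attribute__((lockable)) rwlock { int dummy; };

	static void rd_lock(struct rwlock *l)
		__attribute__((shared_lock_function(l)))
		__attribute__((no_thread_safety_analysis)) { (void)l; }
	static void rd_unlock(struct rwlock *l)
		__attribute__((unlock_function(l)))
		__attribute__((no_thread_safety_analysis)) { (void)l; }

	static struct rwlock iotlb_lock;

	void conditional(int iommu)
	{
		if (iommu)
			rd_lock(&iotlb_lock);
		/* work on the IOTLB */
		if (iommu)
			rd_unlock(&iotlb_lock);
		/* clang warns: the lock is not held on every path above and
		 * may be released without having been taken. */
	}

	void unconditional(void)
	{
		rd_lock(&iotlb_lock);
		/* work on the IOTLB */
		rd_unlock(&iotlb_lock);	/* accepted */
	}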

The vdpa and vhost_crypto code is annotated even though it ends up not
taking an IOTLB lock; those places have been marked with a FIXME.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/vhost/iotlb.c        | 36 +++++++--------
 lib/vhost/iotlb.h        | 24 ----------
 lib/vhost/vdpa.c         |  1 +
 lib/vhost/vhost.c        | 16 +++----
 lib/vhost/vhost.h        | 94 +++++++++++++++++++++++++++++++++++-----
 lib/vhost/vhost_crypto.c |  7 +++
 lib/vhost/vhost_user.c   | 16 +++++--
 lib/vhost/virtio_net.c   | 49 ++++++++++++++++-----
 8 files changed, 164 insertions(+), 79 deletions(-)

diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index 5a5ba8b82a..e5367d3af9 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -30,14 +30,14 @@ vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
 {
 	struct vhost_iotlb_entry *node, *temp_node;
 
-	rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+	vhost_rwlock_write_lock(&vq->iotlb_pending_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
 		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
 		rte_mempool_put(vq->iotlb_pool, node);
 	}
 
-	rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+	vhost_rwlock_write_unlock(&vq->iotlb_pending_lock);
 }
 
 bool
@@ -47,7 +47,7 @@ vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
 	struct vhost_iotlb_entry *node;
 	bool found = false;
 
-	rte_rwlock_read_lock(&vq->iotlb_pending_lock);
+	vhost_rwlock_read_lock(&vq->iotlb_pending_lock);
 
 	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
 		if ((node->iova == iova) && (node->perm == perm)) {
@@ -56,7 +56,7 @@ vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
 		}
 	}
 
-	rte_rwlock_read_unlock(&vq->iotlb_pending_lock);
+	vhost_rwlock_read_unlock(&vq->iotlb_pending_lock);
 
 	return found;
 }
@@ -89,11 +89,11 @@ vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqueue *
 	node->iova = iova;
 	node->perm = perm;
 
-	rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+	vhost_rwlock_write_lock(&vq->iotlb_pending_lock);
 
 	TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
 
-	rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+	vhost_rwlock_write_unlock(&vq->iotlb_pending_lock);
 }
 
 void
@@ -102,7 +102,7 @@ vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
 {
 	struct vhost_iotlb_entry *node, *temp_node;
 
-	rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+	vhost_rwlock_write_lock(&vq->iotlb_pending_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next,
 				temp_node) {
@@ -116,7 +116,7 @@ vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
 		rte_mempool_put(vq->iotlb_pool, node);
 	}
 
-	rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+	vhost_rwlock_write_unlock(&vq->iotlb_pending_lock);
 }
 
 static void
@@ -124,7 +124,7 @@ vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
 {
 	struct vhost_iotlb_entry *node, *temp_node;
 
-	rte_rwlock_write_lock(&vq->iotlb_lock);
+	vhost_rwlock_write_lock(&vq->iotlb_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
 		TAILQ_REMOVE(&vq->iotlb_list, node, next);
@@ -133,7 +133,7 @@ vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
 
 	vq->iotlb_cache_nr = 0;
 
-	rte_rwlock_write_unlock(&vq->iotlb_lock);
+	vhost_rwlock_write_unlock(&vq->iotlb_lock);
 }
 
 static void
@@ -142,7 +142,7 @@ vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
 	struct vhost_iotlb_entry *node, *temp_node;
 	int entry_idx;
 
-	rte_rwlock_write_lock(&vq->iotlb_lock);
+	vhost_rwlock_write_lock(&vq->iotlb_lock);
 
 	entry_idx = rte_rand() % vq->iotlb_cache_nr;
 
@@ -156,7 +156,7 @@ vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
 		entry_idx--;
 	}
 
-	rte_rwlock_write_unlock(&vq->iotlb_lock);
+	vhost_rwlock_write_unlock(&vq->iotlb_lock);
 }
 
 void
@@ -190,7 +190,7 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq
 	new_node->size = size;
 	new_node->perm = perm;
 
-	rte_rwlock_write_lock(&vq->iotlb_lock);
+	vhost_rwlock_write_lock(&vq->iotlb_lock);
 
 	TAILQ_FOREACH(node, &vq->iotlb_list, next) {
 		/*
@@ -213,7 +213,7 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq
 unlock:
 	vhost_user_iotlb_pending_remove(vq, iova, size, perm);
 
-	rte_rwlock_write_unlock(&vq->iotlb_lock);
+	vhost_rwlock_write_unlock(&vq->iotlb_lock);
 
 }
 
@@ -226,7 +226,7 @@ vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
 	if (unlikely(!size))
 		return;
 
-	rte_rwlock_write_lock(&vq->iotlb_lock);
+	vhost_rwlock_write_lock(&vq->iotlb_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
 		/* Sorted list */
@@ -240,7 +240,7 @@ vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
 		}
 	}
 
-	rte_rwlock_write_unlock(&vq->iotlb_lock);
+	vhost_rwlock_write_unlock(&vq->iotlb_lock);
 }
 
 uint64_t
@@ -312,8 +312,8 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
 		socket = 0;
 #endif
 
-	rte_rwlock_init(&vq->iotlb_lock);
-	rte_rwlock_init(&vq->iotlb_pending_lock);
+	vhost_rwlock_init(&vq->iotlb_lock);
+	vhost_rwlock_init(&vq->iotlb_pending_lock);
 
 	TAILQ_INIT(&vq->iotlb_list);
 	TAILQ_INIT(&vq->iotlb_pending_list);
diff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h
index 8d0ff7473b..96ec4d608f 100644
--- a/lib/vhost/iotlb.h
+++ b/lib/vhost/iotlb.h
@@ -9,30 +9,6 @@
 
 #include "vhost.h"
 
-static __rte_always_inline void
-vhost_user_iotlb_rd_lock(struct vhost_virtqueue *vq)
-{
-	rte_rwlock_read_lock(&vq->iotlb_lock);
-}
-
-static __rte_always_inline void
-vhost_user_iotlb_rd_unlock(struct vhost_virtqueue *vq)
-{
-	rte_rwlock_read_unlock(&vq->iotlb_lock);
-}
-
-static __rte_always_inline void
-vhost_user_iotlb_wr_lock(struct vhost_virtqueue *vq)
-{
-	rte_rwlock_write_lock(&vq->iotlb_lock);
-}
-
-static __rte_always_inline void
-vhost_user_iotlb_wr_unlock(struct vhost_virtqueue *vq)
-{
-	rte_rwlock_write_unlock(&vq->iotlb_lock);
-}
-
 void vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
 					uint64_t iova, uint64_t uaddr,
 					uint64_t size, uint8_t perm);
diff --git a/lib/vhost/vdpa.c b/lib/vhost/vdpa.c
index 8fa2153023..406f13288b 100644
--- a/lib/vhost/vdpa.c
+++ b/lib/vhost/vdpa.c
@@ -130,6 +130,7 @@ rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
 
 int
 rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
+	VHOST_NO_THREAD_SAFETY_ANALYSIS /* FIXME: requires iotlb_lock? */
 {
 	struct virtio_net *dev = get_device(vid);
 	uint16_t idx, idx_m, desc_id;
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index ea28323367..466a24dabe 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -50,7 +50,7 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		 * which could cause a deadlock with QEMU if an IOTLB update
 		 * is being handled. We can safely unlock here to avoid it.
 		 */
-		vhost_user_iotlb_rd_unlock(vq);
+		vhost_rwlock_read_unlock(&vq->iotlb_lock);
 
 		vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
 		if (vhost_user_iotlb_miss(dev, iova, perm)) {
@@ -59,7 +59,7 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
 		}
 
-		vhost_user_iotlb_rd_lock(vq);
+		vhost_rwlock_read_lock(&vq->iotlb_lock);
 	}
 
 	return 0;
@@ -383,6 +383,7 @@ free_device(struct virtio_net *dev)
 
 static __rte_always_inline int
 log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
 		return 0;
@@ -434,6 +435,7 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 /* Caller should have iotlb_lock read-locked */
 static int
 vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint64_t req_size, size;
 
@@ -473,6 +475,7 @@ vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 /* Caller should have iotlb_lock read-locked */
 static int
 vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint64_t req_size, size;
 
@@ -506,7 +509,6 @@ vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 int
 vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-
 	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
 		return -1;
 
@@ -527,19 +529,13 @@ vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
 }
 
 void
-vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+vring_invalidate(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq)
 {
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_wr_lock(vq);
-
 	vq->access_ok = false;
 	vq->desc = NULL;
 	vq->avail = NULL;
 	vq->used = NULL;
 	vq->log_guest_addr = 0;
-
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_wr_unlock(vq);
 }
 
 static void
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 4b301ec152..08ace9994a 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -108,6 +108,19 @@
 #define VHOST_SPINLOCK_RELEASES(...) \
 	__attribute__((unlock_function(__VA_ARGS__)))
 
+#define VHOST_RDLOCK_REQUIRES(...) \
+	__attribute__((shared_locks_required(__VA_ARGS__)))
+#define VHOST_RDLOCK_ACQUIRES(...) \
+	__attribute__((shared_lock_function(__VA_ARGS__)))
+#define VHOST_RDLOCK_RELEASES(...) \
+	__attribute__((unlock_function(__VA_ARGS__)))
+#define VHOST_WRLOCK_REQUIRES(...) \
+	__attribute__((exclusive_locks_required(__VA_ARGS__)))
+#define VHOST_WRLOCK_ACQUIRES(...) \
+	__attribute__((exclusive_lock_function(__VA_ARGS__)))
+#define VHOST_WRLOCK_RELEASES(...) \
+	__attribute__((unlock_function(__VA_ARGS__)))
+
 #else
 #define VHOST_NO_THREAD_SAFETY_ANALYSIS
 #define VHOST_LOCKABLE
@@ -117,6 +130,13 @@
 #define VHOST_SPINLOCK_ACQUIRES(...)
 #define VHOST_SPINLOCK_TRYLOCK(...)
 #define VHOST_SPINLOCK_RELEASES(...)
+
+#define VHOST_RDLOCK_ACQUIRES(...)
+#define VHOST_RDLOCK_REQUIRES(...)
+#define VHOST_RDLOCK_RELEASES(...)
+#define VHOST_WRLOCK_REQUIRES(...)
+#define VHOST_WRLOCK_ACQUIRES(...)
+#define VHOST_WRLOCK_RELEASES(...)
 #endif
 
 typedef struct VHOST_LOCKABLE {
@@ -153,6 +173,48 @@ vhost_spinlock_unlock(vhost_spinlock_t *l)
 	rte_spinlock_unlock(&l->l);
 }
 
+typedef struct VHOST_LOCKABLE {
+	rte_rwlock_t l;
+} vhost_rwlock_t;
+
+static __rte_always_inline void
+vhost_rwlock_init(vhost_rwlock_t *l)
+{
+	rte_rwlock_init(&l->l);
+}
+
+static __rte_always_inline void
+vhost_rwlock_read_lock(vhost_rwlock_t *l)
+	VHOST_RDLOCK_ACQUIRES(l)
+	VHOST_NO_THREAD_SAFETY_ANALYSIS
+{
+	rte_rwlock_read_lock(&l->l);
+}
+
+static __rte_always_inline void
+vhost_rwlock_read_unlock(vhost_rwlock_t *l)
+	VHOST_RDLOCK_RELEASES(l)
+	VHOST_NO_THREAD_SAFETY_ANALYSIS
+{
+	rte_rwlock_read_unlock(&l->l);
+}
+
+static __rte_always_inline void
+vhost_rwlock_write_lock(vhost_rwlock_t *l)
+	VHOST_WRLOCK_ACQUIRES(l)
+	VHOST_NO_THREAD_SAFETY_ANALYSIS
+{
+	rte_rwlock_write_lock(&l->l);
+}
+
+static __rte_always_inline void
+vhost_rwlock_write_unlock(vhost_rwlock_t *l)
+	VHOST_WRLOCK_RELEASES(l)
+	VHOST_NO_THREAD_SAFETY_ANALYSIS
+{
+	rte_rwlock_write_unlock(&l->l);
+}
+
 /**
  * Structure contains buffer address, length and descriptor index
  * from vring to do scatter RX.
@@ -346,8 +408,8 @@ struct vhost_virtqueue {
 	uint64_t		log_guest_addr;
 	struct log_cache_entry	*log_cache;
 
-	rte_rwlock_t	iotlb_lock;
-	rte_rwlock_t	iotlb_pending_lock;
+	vhost_rwlock_t	iotlb_lock;
+	vhost_rwlock_t	iotlb_pending_lock;
 	struct rte_mempool *iotlb_pool;
 	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
 	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
@@ -592,12 +654,15 @@ void __vhost_log_cache_write(struct virtio_net *dev,
 		uint64_t addr, uint64_t len);
 void __vhost_log_cache_write_iova(struct virtio_net *dev,
 		struct vhost_virtqueue *vq,
-		uint64_t iova, uint64_t len);
+		uint64_t iova, uint64_t len)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock);
 void __vhost_log_cache_sync(struct virtio_net *dev,
 		struct vhost_virtqueue *vq);
+
 void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
 void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			    uint64_t iova, uint64_t len);
+		uint64_t iova, uint64_t len)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock);
 
 static __rte_always_inline void
 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
@@ -647,6 +712,7 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline void
 vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			   uint64_t iova, uint64_t len)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
 		return;
@@ -660,6 +726,7 @@ vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline void
 vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			   uint64_t iova, uint64_t len)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
 		return;
@@ -863,18 +930,23 @@ struct rte_vhost_device_ops const *vhost_driver_callback_get(const char *path);
 void vhost_backend_cleanup(struct virtio_net *dev);
 
 uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			uint64_t iova, uint64_t *len, uint8_t perm);
-void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
-			struct vhost_virtqueue *vq,
-			uint64_t desc_addr, uint64_t desc_len);
-int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+		uint64_t iova, uint64_t *len, uint8_t perm)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock);
+void *vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		uint64_t desc_addr, uint64_t desc_len)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock);
+int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock);
 uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		uint64_t log_addr);
-void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+		uint64_t log_addr)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock);
+void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	VHOST_WRLOCK_REQUIRES(vq->iotlb_lock);
 
 static __rte_always_inline uint64_t
 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			uint64_t iova, uint64_t *len, uint8_t perm)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
 		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);
diff --git a/lib/vhost/vhost_crypto.c b/lib/vhost/vhost_crypto.c
index b1c0eb6a0f..2d0a1250fb 100644
--- a/lib/vhost/vhost_crypto.c
+++ b/lib/vhost/vhost_crypto.c
@@ -506,6 +506,7 @@ static __rte_always_inline struct virtio_crypto_inhdr *
 reach_inhdr(struct vhost_crypto_data_req *vc_req,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
+	VHOST_RDLOCK_REQUIRES(vc_req->vq->iotlb_lock)
 {
 	struct virtio_crypto_inhdr *inhdr;
 	struct vhost_crypto_desc *last = head + (max_n_descs - 1);
@@ -552,6 +553,7 @@ static __rte_always_inline void *
 get_data_ptr(struct vhost_crypto_data_req *vc_req,
 		struct vhost_crypto_desc *cur_desc,
 		uint8_t perm)
+	VHOST_RDLOCK_REQUIRES(vc_req->vq->iotlb_lock)
 {
 	void *data;
 	uint64_t dlen = cur_desc->len;
@@ -570,6 +572,7 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 		struct vhost_crypto_desc *head,
 		struct vhost_crypto_desc **cur_desc,
 		uint32_t size, uint32_t max_n_descs)
+	VHOST_RDLOCK_REQUIRES(vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = *cur_desc;
 	uint64_t remain, addr, dlen, len;
@@ -718,6 +721,7 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		uint32_t offset,
 		uint64_t write_back_len,
 		uint32_t max_n_descs)
+	VHOST_RDLOCK_REQUIRES(vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_writeback_data *wb_data, *head;
 	struct vhost_crypto_desc *desc = *cur_desc;
@@ -838,6 +842,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct virtio_crypto_cipher_data_req *cipher,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
+	VHOST_RDLOCK_REQUIRES(vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = head;
 	struct vhost_crypto_writeback_data *ewb = NULL;
@@ -990,6 +995,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct virtio_crypto_alg_chain_data_req *chain,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
+	VHOST_RDLOCK_REQUIRES(vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = head, *digest_desc;
 	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
@@ -1172,6 +1178,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
 		struct vring_desc *head, struct vhost_crypto_desc *descs,
 		uint16_t desc_idx)
+	VHOST_NO_THREAD_SAFETY_ANALYSIS /* FIXME: requires iotlb_lock? */
 {
 	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
 	struct rte_cryptodev_sym_session *session;
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 35f1e23995..0482c00801 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -754,10 +754,10 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
 		uint64_t vva;
 
-		vhost_user_iotlb_rd_lock(vq);
+		vhost_rwlock_read_lock(&vq->iotlb_lock);
 		vva = vhost_iova_to_vva(dev, vq, ra,
 					size, VHOST_ACCESS_RW);
-		vhost_user_iotlb_rd_unlock(vq);
+		vhost_rwlock_read_unlock(&vq->iotlb_lock);
 
 		return vva;
 	}
@@ -770,9 +770,9 @@ log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	uint64_t log_gpa;
 
-	vhost_user_iotlb_rd_lock(vq);
+	vhost_rwlock_read_lock(&vq->iotlb_lock);
 	log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
-	vhost_user_iotlb_rd_unlock(vq);
+	vhost_rwlock_read_unlock(&vq->iotlb_lock);
 
 	return log_gpa;
 }
@@ -927,7 +927,9 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,
 	 */
 	memcpy(&vq->ring_addrs, addr, sizeof(*addr));
 
+	vhost_rwlock_write_lock(&vq->iotlb_lock);
 	vring_invalidate(dev, vq);
+	vhost_rwlock_write_unlock(&vq->iotlb_lock);
 
 	if ((vq->enabled && (dev->features &
 				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
@@ -1437,7 +1439,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
 			 * need to be translated again as virtual addresses have
 			 * changed.
 			 */
+			vhost_rwlock_write_lock(&vq->iotlb_lock);
 			vring_invalidate(dev, vq);
+			vhost_rwlock_write_unlock(&vq->iotlb_lock);
 
 			dev = translate_ring_addresses(dev, i);
 			if (!dev) {
@@ -2186,7 +2190,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 
 	vhost_user_iotlb_flush_all(vq);
 
+	vhost_rwlock_write_lock(&vq->iotlb_lock);
 	vring_invalidate(dev, vq);
+	vhost_rwlock_write_unlock(&vq->iotlb_lock);
 
 	return RTE_VHOST_MSG_RESULT_REPLY;
 }
@@ -2591,7 +2597,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
 
 			if (is_vring_iotlb(dev, vq, imsg)) {
 				vhost_spinlock_lock(&vq->access_lock);
+				vhost_rwlock_write_lock(&vq->iotlb_lock);
 				vring_invalidate(dev, vq);
+				vhost_rwlock_write_unlock(&vq->iotlb_lock);
 				vhost_spinlock_unlock(&vq->access_lock);
 			}
 		}
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 9bdba992dd..ec342e772d 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -180,6 +180,7 @@ vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t
 
 static inline void
 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	struct batch_copy_elem *elem = vq->batch_copy_elems;
 	uint16_t count = vq->batch_copy_nb_elems;
@@ -526,6 +527,7 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
 				   uint16_t *id,
 				   uint16_t *count,
 				   uint16_t num_buffers)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
 
@@ -607,6 +609,7 @@ static __rte_always_inline int
 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct buf_vector *buf_vec, uint16_t *vec_idx,
 		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint16_t vec_id = *vec_idx;
 
@@ -644,6 +647,7 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			 uint32_t avail_idx, uint16_t *vec_idx,
 			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
 			 uint32_t *desc_chain_len, uint8_t perm)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
 	uint16_t vec_id = *vec_idx;
@@ -727,6 +731,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				uint32_t size, struct buf_vector *buf_vec,
 				uint16_t *num_buffers, uint16_t avail_head,
 				uint16_t *nr_vec)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint16_t cur_idx;
 	uint16_t vec_idx = 0;
@@ -777,6 +782,7 @@ fill_vec_buf_packed_indirect(struct virtio_net *dev,
 			struct vhost_virtqueue *vq,
 			struct vring_packed_desc *desc, uint16_t *vec_idx,
 			struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint16_t i;
 	uint32_t nr_descs;
@@ -835,6 +841,7 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				uint16_t avail_idx, uint16_t *desc_count,
 				struct buf_vector *buf_vec, uint16_t *vec_idx,
 				uint16_t *buf_id, uint32_t *len, uint8_t perm)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	bool wrap_counter = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -900,6 +907,7 @@ static __rte_noinline void
 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct buf_vector *buf_vec,
 		struct virtio_net_hdr_mrg_rxbuf *hdr)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint64_t len;
 	uint64_t remain = dev->vhost_hlen;
@@ -1036,6 +1044,7 @@ static __rte_always_inline void
 sync_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, uint32_t mbuf_offset,
 		uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
 
@@ -1061,6 +1070,7 @@ mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, struct buf_vector *buf_vec,
 		uint16_t nr_vec, uint16_t num_buffers, bool is_async)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint32_t vec_idx = 0;
 	uint32_t mbuf_offset, mbuf_avail;
@@ -1191,6 +1201,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
 			    struct buf_vector *buf_vec,
 			    uint16_t *nr_descs)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint16_t nr_vec = 0;
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -1252,6 +1263,7 @@ static __rte_noinline uint32_t
 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 	uint16_t num_buffers;
@@ -1309,6 +1321,7 @@ virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
 			   struct rte_mbuf **pkts,
 			   uint64_t *desc_addrs,
 			   uint64_t *lens)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	bool wrap_counter = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -1360,6 +1373,7 @@ virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
 			   struct rte_mbuf **pkts,
 			   uint64_t *desc_addrs,
 			   uint64_t *lens)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
@@ -1401,6 +1415,7 @@ static __rte_always_inline int
 virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
 			   struct rte_mbuf **pkts)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint64_t desc_addrs[PACKED_BATCH_SIZE];
 	uint64_t lens[PACKED_BATCH_SIZE];
@@ -1423,6 +1438,7 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mbuf *pkt)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint16_t nr_descs = 0;
@@ -1449,6 +1465,7 @@ virtio_dev_rx_packed(struct virtio_net *dev,
 		     struct rte_mbuf **__rte_restrict pkts,
 		     uint32_t count)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 
@@ -1501,8 +1518,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	if (unlikely(!vq->enabled))
 		goto out_access_unlock;
 
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_rd_lock(vq);
+	vhost_rwlock_read_lock(&vq->iotlb_lock);
 
 	if (unlikely(!vq->access_ok))
 		if (unlikely(vring_translate(dev, vq) < 0))
@@ -1518,8 +1534,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 		nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
 
 out:
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_rd_unlock(vq);
+	vhost_rwlock_read_unlock(&vq->iotlb_lock);
 
 out_access_unlock:
 	vhost_spinlock_unlock(&vq->access_lock);
@@ -1595,6 +1610,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
 		int16_t dma_id, uint16_t vchan_id)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint32_t pkt_idx = 0;
@@ -1700,6 +1716,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
 			    uint16_t *nr_descs,
 			    uint16_t *nr_buffers)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint16_t nr_vec = 0;
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -1758,6 +1775,7 @@ static __rte_always_inline int16_t
 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			    struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 
@@ -1805,6 +1823,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue
 		uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
 		int16_t dma_id, uint16_t vchan_id)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
@@ -2154,8 +2173,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	if (unlikely(!vq->enabled || !vq->async))
 		goto out_access_unlock;
 
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_rd_lock(vq);
+	vhost_rwlock_read_lock(&vq->iotlb_lock);
 
 	if (unlikely(!vq->access_ok))
 		if (unlikely(vring_translate(dev, vq) < 0))
@@ -2173,8 +2191,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 				pkts, count, dma_id, vchan_id);
 
 out:
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_rd_unlock(vq);
+	vhost_rwlock_read_unlock(&vq->iotlb_lock);
 
 out_access_unlock:
 	vhost_spinlock_unlock(&vq->access_lock);
@@ -2697,6 +2714,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
 	bool legacy_ol_flags)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint16_t i;
 	uint16_t free_entries;
@@ -2793,6 +2811,7 @@ virtio_dev_tx_split_legacy(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **pkts, uint16_t count)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
 }
@@ -2803,6 +2822,7 @@ virtio_dev_tx_split_compliant(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **pkts, uint16_t count)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
 }
@@ -2814,6 +2834,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
 				 uint16_t *ids)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	bool wrap = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -2883,6 +2904,7 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
 			   struct rte_mbuf **pkts,
 			   bool legacy_ol_flags)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
@@ -2929,6 +2951,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count,
 			    bool legacy_ol_flags)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint32_t buf_len;
@@ -2972,6 +2995,7 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct rte_mempool *mbuf_pool,
 			    struct rte_mbuf *pkts,
 			    bool legacy_ol_flags)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -3003,6 +3027,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 		     uint32_t count,
 		     bool legacy_ol_flags)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 
@@ -3047,6 +3072,7 @@ virtio_dev_tx_packed_legacy(struct virtio_net *dev,
 	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
 }
@@ -3057,6 +3083,7 @@ virtio_dev_tx_packed_compliant(struct virtio_net *dev,
 	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
 	VHOST_SPINLOCK_REQUIRES(vq->access_lock)
+	VHOST_RDLOCK_REQUIRES(vq->iotlb_lock)
 {
 	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
 }
@@ -3096,8 +3123,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 		goto out_access_unlock;
 	}
 
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_rd_lock(vq);
+	vhost_rwlock_read_lock(&vq->iotlb_lock);
 
 	if (unlikely(!vq->access_ok))
 		if (unlikely(vring_translate(dev, vq) < 0)) {
@@ -3153,8 +3179,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	}
 
 out:
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_rd_unlock(vq);
+	vhost_rwlock_read_unlock(&vq->iotlb_lock);
 
 out_access_unlock:
 	vhost_spinlock_unlock(&vq->access_lock);
-- 
2.23.0

