From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, stephen@networkplumber.org,
	chenbo.xia@intel.com, jiayu.hu@intel.com, yuanx.wang@intel.com,
	xuan.ding@intel.com, mb@smartsharesystems.com
Subject: [PATCH v6 7/9] vhost: annotate IOTLB lock
Date: Tue,  7 Feb 2023 11:45:30 +0100
Message-ID: <20230207104532.2370869-8-david.marchand@redhat.com>
In-Reply-To: <20230207104532.2370869-1-david.marchand@redhat.com>

The starting point for this is __vhost_iova_to_vva(), which requires the
IOTLB lock to be held. Annotate all the code paths leading to a call to it.
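
For context, here is a minimal sketch (not part of this patch) of how the
annotated pieces compose once the series is applied: the lock helpers in
iotlb.h tell the analysis when iotlb_lock is acquired and released, and
vhost_iova_to_vva() declares that it must be called with the lock held, so
clang's -Wthread-safety flags any call path that does not hold it.

	static uint64_t
	example_translate(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len)
	{
		uint64_t vva;

		/* Acquires vq->iotlb_lock in shared (read) mode. */
		vhost_user_iotlb_rd_lock(vq);
		vva = vhost_iova_to_vva(dev, vq, iova, len, VHOST_ACCESS_RO);
		/* Releases it. Dropping either lock call, or moving the
		 * vhost_iova_to_vva() call outside the locked region,
		 * produces a -Wthread-safety warning at build time. */
		vhost_user_iotlb_rd_unlock(vq);

		return vva;
	}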

The vdpa and vhost_crypto code paths are annotated too, but they end up not
taking an IOTLB lock and have been marked with a FIXME at the top level.
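
The opt-out used for those entry points looks as follows. This is a
hypothetical helper, shown only to illustrate the pattern:

	/* The attribute disables the analysis for the whole function;
	 * the FIXME records the suspected missing lock for later work. */
	static void
	example_unchecked(struct virtio_net *dev, struct vhost_virtqueue *vq)
		__rte_no_thread_safety_analysis /* FIXME: requires iotlb_lock? */
	{
		uint64_t len = sizeof(uint64_t);

		/* No warning is emitted here even though iotlb_lock is not
		 * held, hence the FIXME as a reminder. */
		(void)vhost_iova_to_vva(dev, vq, 0, &len, VHOST_ACCESS_RO);
	}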

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
Changes since v5:
- rebased after packed support was added to async code,

Changes since RFC v3:
- rebased,
- moved the unconditional lock acquisition to a separate patch,
- fixed annotations,
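
Note: these annotations are inert until the thread safety analysis is
enabled for the library, which the last patch of this series ("vhost:
enable lock check") does. As a sketch, assuming the per-library meson
variable this series relies on, the enabling change boils down to:

	# lib/vhost/meson.build, set by the final patch of the series
	annotate_locks = true

With that set, the DPDK build adds clang's -Wthread-safety to this
library's compile flags when the compiler supports it.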

---
 lib/vhost/iotlb.h        |  4 ++++
 lib/vhost/vdpa.c         |  1 +
 lib/vhost/vhost.c        |  8 +++-----
 lib/vhost/vhost.h        | 22 ++++++++++++++------
 lib/vhost/vhost_crypto.c |  8 ++++++++
 lib/vhost/virtio_net.c   | 43 ++++++++++++++++++++++++++++++++++++++++
 6 files changed, 75 insertions(+), 11 deletions(-)

diff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h
index e27ebebcf5..be079a8aa7 100644
--- a/lib/vhost/iotlb.h
+++ b/lib/vhost/iotlb.h
@@ -11,24 +11,28 @@
 
 static __rte_always_inline void
 vhost_user_iotlb_rd_lock(struct vhost_virtqueue *vq)
+	__rte_shared_lock_function(&vq->iotlb_lock)
 {
 	rte_rwlock_read_lock(&vq->iotlb_lock);
 }
 
 static __rte_always_inline void
 vhost_user_iotlb_rd_unlock(struct vhost_virtqueue *vq)
+	__rte_unlock_function(&vq->iotlb_lock)
 {
 	rte_rwlock_read_unlock(&vq->iotlb_lock);
 }
 
 static __rte_always_inline void
 vhost_user_iotlb_wr_lock(struct vhost_virtqueue *vq)
+	__rte_exclusive_lock_function(&vq->iotlb_lock)
 {
 	rte_rwlock_write_lock(&vq->iotlb_lock);
 }
 
 static __rte_always_inline void
 vhost_user_iotlb_wr_unlock(struct vhost_virtqueue *vq)
+	__rte_unlock_function(&vq->iotlb_lock)
 {
 	rte_rwlock_write_unlock(&vq->iotlb_lock);
 }
diff --git a/lib/vhost/vdpa.c b/lib/vhost/vdpa.c
index 577cb00a43..a430a66970 100644
--- a/lib/vhost/vdpa.c
+++ b/lib/vhost/vdpa.c
@@ -146,6 +146,7 @@ rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
 
 int
 rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
+	__rte_no_thread_safety_analysis /* FIXME: requires iotlb_lock? */
 {
 	struct virtio_net *dev = get_device(vid);
 	uint16_t idx, idx_m, desc_id;
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index 1e0c30791e..c36dc06a0a 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -52,7 +52,6 @@ static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = {
 
 #define VHOST_NB_VQ_STATS RTE_DIM(vhost_vq_stat_strings)
 
-/* Called with iotlb_lock read-locked */
 uint64_t
 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		    uint64_t iova, uint64_t *size, uint8_t perm)
@@ -419,6 +418,7 @@ free_device(struct virtio_net *dev)
 
 static __rte_always_inline int
 log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
 		return 0;
@@ -435,8 +435,6 @@ log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
  * Converts vring log address to GPA
  * If IOMMU is enabled, the log address is IOVA
  * If IOMMU not enabled, the log address is already GPA
- *
- * Caller should have iotlb_lock read-locked
  */
 uint64_t
 translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
@@ -467,9 +465,9 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		return log_addr;
 }
 
-/* Caller should have iotlb_lock read-locked */
 static int
 vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint64_t req_size, size;
 
@@ -506,9 +504,9 @@ vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	return 0;
 }
 
-/* Caller should have iotlb_lock read-locked */
 static int
 vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint64_t req_size, size;
 
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index c05313cf37..5750f0c005 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -563,12 +563,15 @@ void __vhost_log_cache_write(struct virtio_net *dev,
 		uint64_t addr, uint64_t len);
 void __vhost_log_cache_write_iova(struct virtio_net *dev,
 		struct vhost_virtqueue *vq,
-		uint64_t iova, uint64_t len);
+		uint64_t iova, uint64_t len)
+	__rte_shared_locks_required(&vq->iotlb_lock);
 void __vhost_log_cache_sync(struct virtio_net *dev,
 		struct vhost_virtqueue *vq);
+
 void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
 void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			    uint64_t iova, uint64_t len);
+			    uint64_t iova, uint64_t len)
+	__rte_shared_locks_required(&vq->iotlb_lock);
 
 static __rte_always_inline void
 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
@@ -618,6 +621,7 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline void
 vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			   uint64_t iova, uint64_t len)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
 		return;
@@ -631,6 +635,7 @@ vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline void
 vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			   uint64_t iova, uint64_t len)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
 		return;
@@ -834,18 +839,23 @@ struct rte_vhost_device_ops const *vhost_driver_callback_get(const char *path);
 void vhost_backend_cleanup(struct virtio_net *dev);
 
 uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			uint64_t iova, uint64_t *len, uint8_t perm);
+			uint64_t iova, uint64_t *len, uint8_t perm)
+	__rte_shared_locks_required(&vq->iotlb_lock);
 void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
 			struct vhost_virtqueue *vq,
-			uint64_t desc_addr, uint64_t desc_len);
-int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+			uint64_t desc_addr, uint64_t desc_len)
+	__rte_shared_locks_required(&vq->iotlb_lock);
+int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_shared_locks_required(&vq->iotlb_lock);
 uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		uint64_t log_addr);
+		uint64_t log_addr)
+	__rte_shared_locks_required(&vq->iotlb_lock);
 void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 static __rte_always_inline uint64_t
 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			uint64_t iova, uint64_t *len, uint8_t perm)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
 		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);
diff --git a/lib/vhost/vhost_crypto.c b/lib/vhost/vhost_crypto.c
index b448b6685d..f02bf865c3 100644
--- a/lib/vhost/vhost_crypto.c
+++ b/lib/vhost/vhost_crypto.c
@@ -490,6 +490,7 @@ static __rte_always_inline struct virtio_crypto_inhdr *
 reach_inhdr(struct vhost_crypto_data_req *vc_req,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
+	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
 {
 	struct virtio_crypto_inhdr *inhdr;
 	struct vhost_crypto_desc *last = head + (max_n_descs - 1);
@@ -536,6 +537,7 @@ static __rte_always_inline void *
 get_data_ptr(struct vhost_crypto_data_req *vc_req,
 		struct vhost_crypto_desc *cur_desc,
 		uint8_t perm)
+	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
 {
 	void *data;
 	uint64_t dlen = cur_desc->len;
@@ -552,6 +554,7 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req,
 static __rte_always_inline uint32_t
 copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
 	struct vhost_crypto_desc *desc, uint32_t size)
+	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
 {
 	uint64_t remain;
 	uint64_t addr;
@@ -582,6 +585,7 @@ static __rte_always_inline int
 copy_data(void *data, struct vhost_crypto_data_req *vc_req,
 	struct vhost_crypto_desc *head, struct vhost_crypto_desc **cur_desc,
 	uint32_t size, uint32_t max_n_descs)
+	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = *cur_desc;
 	uint32_t left = size;
@@ -665,6 +669,7 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		uint32_t offset,
 		uint64_t write_back_len,
 		uint32_t max_n_descs)
+	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_writeback_data *wb_data, *head;
 	struct vhost_crypto_desc *desc = *cur_desc;
@@ -785,6 +790,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct virtio_crypto_cipher_data_req *cipher,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
+	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = head;
 	struct vhost_crypto_writeback_data *ewb = NULL;
@@ -938,6 +944,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct virtio_crypto_alg_chain_data_req *chain,
 		struct vhost_crypto_desc *head,
 		uint32_t max_n_descs)
+	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
 {
 	struct vhost_crypto_desc *desc = head, *digest_desc;
 	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
@@ -1123,6 +1130,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
 		struct vring_desc *head, struct vhost_crypto_desc *descs,
 		uint16_t desc_idx)
+	__rte_no_thread_safety_analysis /* FIXME: requires iotlb_lock? */
 {
 	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
 	struct rte_cryptodev_sym_session *session;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 49fc46e127..51dc3c96d3 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -233,6 +233,7 @@ vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t
 
 static inline void
 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	struct batch_copy_elem *elem = vq->batch_copy_elems;
 	uint16_t count = vq->batch_copy_nb_elems;
@@ -637,6 +638,7 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
 				   uint16_t *id,
 				   uint16_t *count,
 				   uint16_t num_buffers)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
 
@@ -728,6 +730,7 @@ static __rte_always_inline int
 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct buf_vector *buf_vec, uint16_t *vec_idx,
 		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint16_t vec_id = *vec_idx;
 
@@ -765,6 +768,7 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			 uint32_t avail_idx, uint16_t *vec_idx,
 			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
 			 uint32_t *desc_chain_len, uint8_t perm)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
 	uint16_t vec_id = *vec_idx;
@@ -848,6 +852,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				uint64_t size, struct buf_vector *buf_vec,
 				uint16_t *num_buffers, uint16_t avail_head,
 				uint16_t *nr_vec)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint16_t cur_idx;
 	uint16_t vec_idx = 0;
@@ -898,6 +903,7 @@ fill_vec_buf_packed_indirect(struct virtio_net *dev,
 			struct vhost_virtqueue *vq,
 			struct vring_packed_desc *desc, uint16_t *vec_idx,
 			struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint16_t i;
 	uint32_t nr_descs;
@@ -956,6 +962,7 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				uint16_t avail_idx, uint16_t *desc_count,
 				struct buf_vector *buf_vec, uint16_t *vec_idx,
 				uint16_t *buf_id, uint32_t *len, uint8_t perm)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	bool wrap_counter = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -1021,6 +1028,7 @@ static __rte_noinline void
 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct buf_vector *buf_vec,
 		struct virtio_net_hdr_mrg_rxbuf *hdr)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint64_t len;
 	uint64_t remain = dev->vhost_hlen;
@@ -1124,6 +1132,7 @@ async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, uint32_t mbuf_offset,
 		uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	struct vhost_async *async = vq->async;
 	uint64_t mapped_len;
@@ -1164,6 +1173,7 @@ static __rte_always_inline void
 sync_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, uint32_t mbuf_offset,
 		uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
 
@@ -1202,6 +1212,7 @@ mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, struct buf_vector *buf_vec,
 		uint16_t nr_vec, uint16_t num_buffers, bool is_async)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint32_t vec_idx = 0;
 	uint32_t mbuf_offset, mbuf_avail;
@@ -1331,6 +1342,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
 			    struct buf_vector *buf_vec,
 			    uint16_t *nr_descs)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint16_t nr_vec = 0;
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -1392,6 +1404,7 @@ static __rte_noinline uint32_t
 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 	uint16_t num_buffers;
@@ -1448,6 +1461,7 @@ virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
 			   struct rte_mbuf **pkts,
 			   uint64_t *desc_addrs,
 			   uint64_t *lens)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	bool wrap_counter = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -1551,6 +1565,7 @@ virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
 			   struct rte_mbuf **pkts,
 			   uint64_t *desc_addrs,
 			   uint64_t *lens)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
@@ -1598,6 +1613,7 @@ static __rte_always_inline int
 virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
 			   struct rte_mbuf **pkts)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint64_t desc_addrs[PACKED_BATCH_SIZE];
 	uint64_t lens[PACKED_BATCH_SIZE];
@@ -1620,6 +1636,7 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mbuf *pkt)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint16_t nr_descs = 0;
@@ -1645,6 +1662,7 @@ virtio_dev_rx_packed(struct virtio_net *dev,
 		     struct rte_mbuf **__rte_restrict pkts,
 		     uint32_t count)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 
@@ -1772,6 +1790,7 @@ static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint32_t pkt_idx = 0;
@@ -1879,6 +1898,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
 			    uint16_t *nr_descs,
 			    uint16_t *nr_buffers)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint16_t nr_vec = 0;
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -1938,6 +1958,7 @@ static __rte_always_inline int16_t
 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			    struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 
@@ -1961,6 +1982,7 @@ virtio_dev_rx_async_packed_batch_enqueue(struct virtio_net *dev,
 			   uint64_t *desc_addrs,
 			   uint64_t *lens)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
@@ -2022,6 +2044,7 @@ virtio_dev_rx_async_packed_batch(struct virtio_net *dev,
 			   struct rte_mbuf **pkts,
 			   int16_t dma_id, uint16_t vchan_id)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint64_t desc_addrs[PACKED_BATCH_SIZE];
 	uint64_t lens[PACKED_BATCH_SIZE];
@@ -2069,6 +2092,7 @@ static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 	uint16_t n_xfer;
@@ -2843,6 +2867,7 @@ desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		  struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
 		  bool legacy_ol_flags, uint16_t slot_idx, bool is_async)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint32_t buf_avail, buf_offset, buf_len;
 	uint64_t buf_addr, buf_iova;
@@ -3049,6 +3074,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
 	bool legacy_ol_flags)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint16_t i;
 	uint16_t avail_entries;
@@ -3153,6 +3179,7 @@ virtio_dev_tx_split_legacy(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **pkts, uint16_t count)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
 }
@@ -3163,6 +3190,7 @@ virtio_dev_tx_split_compliant(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **pkts, uint16_t count)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
 }
@@ -3174,6 +3202,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
 				 uint16_t *ids)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	bool wrap = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -3317,6 +3346,7 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
 			   struct rte_mbuf **pkts,
 			   bool legacy_ol_flags)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
@@ -3364,6 +3394,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 			    uint16_t *desc_count,
 			    bool legacy_ol_flags)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint32_t buf_len;
@@ -3413,6 +3444,7 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct rte_mbuf *pkts,
 			    bool legacy_ol_flags)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -3444,6 +3476,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 		     uint32_t count,
 		     bool legacy_ol_flags)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 
@@ -3488,6 +3521,7 @@ virtio_dev_tx_packed_legacy(struct virtio_net *dev,
 	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
 }
@@ -3498,6 +3532,7 @@ virtio_dev_tx_packed_compliant(struct virtio_net *dev,
 	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
 }
@@ -3661,6 +3696,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
 		int16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	static bool allocerr_warned;
 	bool dropped = false;
@@ -3808,6 +3844,7 @@ virtio_dev_tx_async_split_legacy(struct virtio_net *dev,
 		struct rte_mbuf **pkts, uint16_t count,
 		int16_t dma_id, uint16_t vchan_id)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
 				pkts, count, dma_id, vchan_id, true);
@@ -3820,6 +3857,7 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
 		struct rte_mbuf **pkts, uint16_t count,
 		int16_t dma_id, uint16_t vchan_id)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
 				pkts, count, dma_id, vchan_id, false);
@@ -3851,6 +3889,7 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
 			uint16_t slot_idx,
 			bool legacy_ol_flags)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	int err;
 	uint16_t buf_id, desc_count = 0;
@@ -3903,6 +3942,7 @@ virtio_dev_tx_async_packed_batch(struct virtio_net *dev,
 			   struct rte_mbuf **pkts, uint16_t slot_idx,
 			   uint16_t dma_id, uint16_t vchan_id)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
@@ -3960,6 +4000,7 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
 		uint16_t count, uint16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	uint32_t pkt_idx = 0;
 	uint16_t slot_idx = 0;
@@ -4070,6 +4111,7 @@ virtio_dev_tx_async_packed_legacy(struct virtio_net *dev, struct vhost_virtqueue
 		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
 		uint16_t count, uint16_t dma_id, uint16_t vchan_id)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
 				pkts, count, dma_id, vchan_id, true);
@@ -4081,6 +4123,7 @@ virtio_dev_tx_async_packed_compliant(struct virtio_net *dev, struct vhost_virtqu
 		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
 		uint16_t count, uint16_t dma_id, uint16_t vchan_id)
 	__rte_exclusive_locks_required(&vq->access_lock)
+	__rte_shared_locks_required(&vq->iotlb_lock)
 {
 	return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
 				pkts, count, dma_id, vchan_id, false);
-- 
2.39.1

