From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com, zhihong.wang@intel.com
Cc: dev@dpdk.org, Marvin Liu <yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v1 5/5] vhost: add packed ring vectorized enqueue
Date: Wed, 19 Aug 2020 11:24:14 +0800
Message-ID: <20200819032414.51430-6-yong.liu@intel.com>
In-Reply-To: <20200819032414.51430-1-yong.liu@intel.com>

Optimize the vhost packed ring enqueue path with SIMD instructions. The
status and length of four descriptors are handled in one batch with
AVX512 instructions, and address translation is accelerated by AVX512
as well. A sketch of the batched flag check is given in the notes below.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
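---
Note: the batch works because four 16-byte packed ring descriptors fit
exactly into one 512-bit register, so the flags of the whole batch can
be tested with a single masked compare instead of four scalar loads. A
minimal sketch of that idea (illustration only, not part of the patch;
the desc struct and batch_is_avail helper are hypothetical, and AVX512F
plus AVX512BW support is assumed):

	/* sketch only: hypothetical helper, not the patch's code */
	#include <immintrin.h>
	#include <stdint.h>

	struct desc {		/* same layout as struct vring_packed_desc */
		uint64_t addr;
		uint32_t len;
		uint16_t id;
		uint16_t flags;
	};

	/* flags is the last 16-bit word of each 16-byte descriptor, i.e.
	 * lanes 7, 15, 23 and 31 of the 512-bit register */
	#define FLAG_LANES 0x80808080

	static int
	batch_is_avail(const struct desc *d, __m512i expected_flags)
	{
		/* one unaligned 64-byte load picks up four descriptors */
		__m512i v = _mm512_loadu_si512((const void *)d);

		/* compare only the flag lanes; any mismatch rejects the
		 * whole batch so the caller falls back to scalar handling */
		return _mm512_mask_cmp_epu16_mask(FLAG_LANES, v,
				expected_flags, _MM_CMPINT_NE) == 0;
	}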

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index fc7daf2145..b78b2c5c1b 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -1132,4 +1132,10 @@ vhost_reserve_avail_batch_packed_avx(struct virtio_net *dev,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
 				 uint16_t *ids);
+
+int
+virtio_dev_rx_batch_packed_avx(struct virtio_net *dev,
+			       struct vhost_virtqueue *vq,
+			       struct rte_mbuf **pkts);
+
 #endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/librte_vhost/vhost_vec_avx.c b/lib/librte_vhost/vhost_vec_avx.c
index e8361d18fa..12b902253a 100644
--- a/lib/librte_vhost/vhost_vec_avx.c
+++ b/lib/librte_vhost/vhost_vec_avx.c
@@ -35,9 +35,19 @@
 #define PACKED_AVAIL_FLAG ((0ULL | VRING_DESC_F_AVAIL) << FLAGS_BITS_OFFSET)
 #define PACKED_AVAIL_FLAG_WRAP ((0ULL | VRING_DESC_F_USED) << \
 	FLAGS_BITS_OFFSET)
+#define PACKED_WRITE_AVAIL_FLAG (PACKED_AVAIL_FLAG | \
+	((0ULL | VRING_DESC_F_WRITE) << FLAGS_BITS_OFFSET))
+#define PACKED_WRITE_AVAIL_FLAG_WRAP (PACKED_AVAIL_FLAG_WRAP | \
+	((0ULL | VRING_DESC_F_WRITE) << FLAGS_BITS_OFFSET))
 
 #define DESC_FLAGS_POS 0xaa
 #define MBUF_LENS_POS 0x6666
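+/* Viewing four descriptors as one 512-bit vector, DESC_LENS_POS below
+ * selects the 32-bit len lane of each descriptor, and
+ * DESC_LENS_FLAGS_POS selects its 16-bit len and flags lanes.
+ */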
+#define DESC_LENS_POS 0x4444
+#define DESC_LENS_FLAGS_POS 0xB0B0B0B0
 
 int
 vhost_reserve_avail_batch_packed_avx(struct virtio_net *dev,
@@ -150,3 +160,146 @@ vhost_reserve_avail_batch_packed_avx(struct virtio_net *dev,
 
 	return -1;
 }
+
+int
+virtio_dev_rx_batch_packed_avx(struct virtio_net *dev,
+			       struct vhost_virtqueue *vq,
+			       struct rte_mbuf **pkts)
+{
+	struct vring_packed_desc *descs = vq->desc_packed;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint64_t desc_addrs[PACKED_BATCH_SIZE];
+	uint32_t buf_offset = dev->vhost_hlen;
+	uint32_t desc_status;
+	uint64_t lens[PACKED_BATCH_SIZE];
+	uint16_t i;
+	void *desc_addr;
+	uint8_t cmp_low, cmp_high, cmp_result;
+
+	if (unlikely(avail_idx & PACKED_BATCH_MASK))
+		return -1;
+
+	/* check refcnt and nb_segs */
+	__m256i mbuf_ref = _mm256_set1_epi64x(DEFAULT_REARM_DATA);
+
+	/* load four mbufs rearm data */
+	__m256i mbufs = _mm256_set_epi64x(
+				*pkts[3]->rearm_data,
+				*pkts[2]->rearm_data,
+				*pkts[1]->rearm_data,
+				*pkts[0]->rearm_data);
+
+	uint16_t cmp = _mm256_cmpneq_epu16_mask(mbufs, mbuf_ref);
+	if (cmp & MBUF_LENS_POS)
+		return -1;
+
+	/* check desc status */
+	desc_addr = &vq->desc_packed[avail_idx];
+	__m512i desc_vec = _mm512_loadu_si512(desc_addr);
+
+	__m512i avail_flag_vec;
+	__m512i used_flag_vec;
+	if (vq->avail_wrap_counter) {
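+		/* 32-bit builds cannot broadcast a 64-bit scalar with
+		 * maskz_set1_epi64, so the vector is built with set4 */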
+#if defined(RTE_ARCH_I686)
+		avail_flag_vec = _mm512_set4_epi64(PACKED_WRITE_AVAIL_FLAG,
+					0x0, PACKED_WRITE_AVAIL_FLAG, 0x0);
+		used_flag_vec = _mm512_set4_epi64(PACKED_FLAGS_MASK, 0x0,
+					PACKED_FLAGS_MASK, 0x0);
+#else
+		avail_flag_vec = _mm512_maskz_set1_epi64(DESC_FLAGS_POS,
+					PACKED_WRITE_AVAIL_FLAG);
+		used_flag_vec = _mm512_maskz_set1_epi64(DESC_FLAGS_POS,
+					PACKED_FLAGS_MASK);
+#endif
+	} else {
+#if defined(RTE_ARCH_I686)
+		avail_flag_vec = _mm512_set4_epi64(
+					PACKED_WRITE_AVAIL_FLAG_WRAP, 0x0,
+					PACKED_WRITE_AVAIL_FLAG_WRAP, 0x0);
+		used_flag_vec = _mm512_set4_epi64(0x0, 0x0, 0x0, 0x0);
+#else
+		avail_flag_vec = _mm512_maskz_set1_epi64(DESC_FLAGS_POS,
+					PACKED_WRITE_AVAIL_FLAG_WRAP);
+		used_flag_vec = _mm512_setzero_epi32();
+#endif
+	}
+
+	desc_status = _mm512_mask_cmp_epu16_mask(BATCH_FLAGS_MASK, desc_vec,
+				avail_flag_vec, _MM_CMPINT_NE);
+	if (desc_status)
+		return -1;
+
+	/* check each buffer fits in one region and translate its address */
+	__m512i regions_low_addrs =
+		_mm512_loadu_si512((void *)&dev->regions_low_addrs);
+	__m512i regions_high_addrs =
+		_mm512_loadu_si512((void *)&dev->regions_high_addrs);
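+	/* each 512-bit load carries the boundaries of up to eight memory
+	 * regions, pre-packed when the device memory table was set */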
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		uint64_t addr_low = descs[avail_idx + i].addr;
+		uint64_t addr_high = addr_low + descs[avail_idx + i].len;
+		__m512i low_addr_vec = _mm512_set1_epi64(addr_low);
+		__m512i high_addr_vec = _mm512_set1_epi64(addr_high);
+
+		cmp_low = _mm512_cmp_epi64_mask(low_addr_vec,
+				regions_low_addrs, _MM_CMPINT_NLT);
+		cmp_high = _mm512_cmp_epi64_mask(high_addr_vec,
+				regions_high_addrs, _MM_CMPINT_LT);
+		cmp_result = cmp_low & cmp_high;
+		/* bail out early: __builtin_ctz(0) is undefined */
+		if (unlikely(cmp_result == 0))
+			return -1;
+		int index = __builtin_ctz(cmp_result);
+		if (unlikely((uint32_t)index >= dev->mem->nregions))
+			return -1;
+
+		desc_addrs[i] = addr_low +
+			dev->mem->regions[index].host_user_addr -
+			dev->mem->regions[index].guest_phys_addr;
+		rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[i], void *, 0));
+	}
+
+	/* check descriptors have room for the header plus packet data */
+	__m512i pkt_lens = _mm512_set_epi32(
+			0, pkts[3]->pkt_len, 0, 0,
+			0, pkts[2]->pkt_len, 0, 0,
+			0, pkts[1]->pkt_len, 0, 0,
+			0, pkts[0]->pkt_len, 0, 0);
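+	/* each pkt_len lands in the same 32-bit lane as the matching
+	 * descriptor len field, so room is checked lane by lane */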
+
+	__m512i mbuf_len_offset = _mm512_maskz_set1_epi32(DESC_LENS_POS,
+					dev->vhost_hlen);
+	__m512i buf_len_vec = _mm512_add_epi32(pkt_lens, mbuf_len_offset);
+	uint16_t lens_cmp = _mm512_mask_cmp_epu32_mask(DESC_LENS_POS,
+				desc_vec, buf_len_vec, _MM_CMPINT_LT);
+	if (lens_cmp)
+		return -1;
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
+			   rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
+			   pkts[i]->pkt_len);
+	}
+
+	if (unlikely((dev->features & (1ULL << VHOST_F_LOG_ALL)))) {
+		vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+			lens[i] = descs[avail_idx + i].len;
+			vhost_log_cache_write_iova(dev, vq,
+				descs[avail_idx + i].addr, lens[i]);
+		}
+	}
+
+	vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
+	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
+	/* save len and flags, skip addr and id */
+	__m512i desc_updated = _mm512_mask_add_epi16(desc_vec,
+					DESC_LENS_FLAGS_POS, buf_len_vec,
+					used_flag_vec);
+	_mm512_storeu_si512(desc_addr, desc_updated);
+
+	return 0;
+}
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index e4d2e2e7d6..5c56a8d6ff 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1354,6 +1354,22 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
 	return 0;
 }
 
+static __rte_always_inline int
+virtio_dev_rx_handle_batch_packed(struct virtio_net *dev,
+			   struct vhost_virtqueue *vq,
+			   struct rte_mbuf **pkts)
+{
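+	/* take the AVX512 batch path only when it was compiled in and
+	 * the device requested the vectorized datapath */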
+	if (unlikely(dev->vectorized))
+#ifdef CC_AVX512_SUPPORT
+		return virtio_dev_rx_batch_packed_avx(dev, vq, pkts);
+#else
+		return virtio_dev_rx_batch_packed(dev, vq, pkts);
+#endif
+	return virtio_dev_rx_batch_packed(dev, vq, pkts);
+}
+
 static __rte_noinline uint32_t
 virtio_dev_rx_packed(struct virtio_net *dev,
 		     struct vhost_virtqueue *__rte_restrict vq,
@@ -1367,8 +1383,8 @@ virtio_dev_rx_packed(struct virtio_net *dev,
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_rx_batch_packed(dev, vq,
-							&pkts[pkt_idx])) {
+			if (!virtio_dev_rx_handle_batch_packed(dev, vq,
+				&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				remained -= PACKED_BATCH_SIZE;
 				continue;
-- 
2.17.1

