From: Cheng Jiang <Cheng1.jiang@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, Jiayu.Hu@intel.com, YvonneX.Yang@intel.com,
	Cheng Jiang <Cheng1.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v1 3/3] examples/vhost: refactor vhost async data path
Date: Fri, 18 Dec 2020 11:33:27 +0000	[thread overview]
Message-ID: <20201218113327.70528-4-Cheng1.jiang@intel.com> (raw)
In-Reply-To: <20201218113327.70528-1-Cheng1.jiang@intel.com>

Support the latest async vhost API, refactor the vhost async data path,
and clean up some code.
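
For reference, the enqueue flow this patch switches to can be sketched as
below. This is only a simplified illustration under a few assumptions: the
helper name async_enqueue_sketch is invented for this description, it relies
on the example's own symbols from main.c (struct vhost_dev,
complete_async_pkts(), free_pkts(), MAX_PKT_BURST, VIRTIO_RXQ), and count is
assumed to be at most MAX_PKT_BURST.

    /*
     * Sketch: submit a burst through the updated async enqueue API and
     * track only the packets still owned by the DMA engine.
     */
    static void
    async_enqueue_sketch(struct vhost_dev *vdev, struct rte_mbuf **pkts,
            uint16_t count)
    {
        struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];
        uint32_t cpu_cpl_nr = 0;
        uint16_t enqueued;

        /* Poll finished copies first so in-flight packets stay bounded. */
        complete_async_pkts(vdev);

        enqueued = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                    pkts, count, m_cpu_cpl, &cpu_cpl_nr);

        /* Only packets offloaded to the DMA device remain in flight. */
        rte_atomic16_add(&vdev->nr_async_pkts, enqueued - cpu_cpl_nr);

        /* Packets the CPU copied synchronously can be freed right away. */
        if (cpu_cpl_nr)
            free_pkts(m_cpu_cpl, cpu_cpl_nr);
    }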

Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
 examples/vhost/main.c | 88 ++++++++++++++++++++-----------------------
 examples/vhost/main.h |  2 +-
 2 files changed, 42 insertions(+), 48 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 28226a4ff7..0113147876 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -817,26 +817,26 @@ free_pkts(struct rte_mbuf **pkts, uint16_t n)
 }
 
 static __rte_always_inline void
-virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
+complete_async_pkts(struct vhost_dev *vdev)
+{
+	struct rte_mbuf *p_cpl[MAX_PKT_BURST];
+	uint16_t complete_count;
+
+	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
+					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
+	rte_atomic16_sub(&vdev->nr_async_pkts, complete_count);
+	if (complete_count)
+		free_pkts(p_cpl, complete_count);
+}
+
+static __rte_always_inline void
+sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
 	    struct rte_mbuf *m)
 {
 	uint16_t ret;
-	struct rte_mbuf *m_cpl[1];
 
 	if (builtin_net_driver) {
 		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
-	} else if (async_vhost_driver) {
-		ret = rte_vhost_submit_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ,
-						&m, 1);
-
-		if (likely(ret))
-			dst_vdev->nr_async_pkts++;
-
-		while (likely(dst_vdev->nr_async_pkts)) {
-			if (rte_vhost_poll_enqueue_completed(dst_vdev->vid,
-					VIRTIO_RXQ, m_cpl, 1))
-				dst_vdev->nr_async_pkts--;
-		}
 	} else {
 		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
 	}
@@ -850,25 +850,25 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
 }
 
 static __rte_always_inline void
-drain_vhost(struct vhost_dev *dst_vdev, struct rte_mbuf **m, uint16_t nr_xmit)
+drain_vhost(struct vhost_dev *dst_vdev)
 {
-	uint16_t ret, nr_cpl;
-	struct rte_mbuf *m_cpl[MAX_PKT_BURST];
+	uint16_t ret;
+	uint16_t nr_xmit = vhost_m_table[dst_vdev->vid].len;
+	struct rte_mbuf **m = vhost_m_table[dst_vdev->vid].m_table;
 
 	if (builtin_net_driver) {
 		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, m, nr_xmit);
 	} else if (async_vhost_driver) {
+		uint32_t cpu_cpl_nr;
+		struct rte_mbuf *m_cpu_cpl[nr_xmit];
+		complete_async_pkts(dst_vdev);
+		while (rte_atomic16_read(&dst_vdev->nr_async_pkts) >= 128)
+			complete_async_pkts(dst_vdev);
+
 		ret = rte_vhost_submit_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ,
-						m, nr_xmit);
-		dst_vdev->nr_async_pkts += ret;
+					m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+		rte_atomic16_add(&dst_vdev->nr_async_pkts, ret - cpu_cpl_nr);
 		free_pkts(&m[ret], nr_xmit - ret);
-
-		while (likely(dst_vdev->nr_async_pkts)) {
-			nr_cpl = rte_vhost_poll_enqueue_completed(dst_vdev->vid,
-					VIRTIO_RXQ, m_cpl, MAX_PKT_BURST);
-			dst_vdev->nr_async_pkts -= nr_cpl;
-			free_pkts(m_cpl, nr_cpl);
-		}
 	} else {
 		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ,
 						m, nr_xmit);
@@ -925,7 +925,7 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 	}
 
 	if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
-		drain_vhost(dst_vdev, vhost_txq->m_table, MAX_PKT_BURST);
+		drain_vhost(dst_vdev);
 		vhost_txq->len = 0;
 		vhost_tsc[dst_vdev->vid] = rte_rdtsc();
 	}
@@ -1031,7 +1031,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 
 		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
 			if (vdev2 != vdev)
-				virtio_xmit(vdev2, vdev, m);
+				sync_virtio_xmit(vdev2, vdev, m);
 		}
 		goto queue2nic;
 	}
@@ -1124,31 +1124,17 @@ drain_mbuf_table(struct mbuf_table *tx_q)
 	}
 }
 
-static __rte_always_inline void
-complete_async_pkts(struct vhost_dev *vdev, uint16_t qid)
-{
-	struct rte_mbuf *p_cpl[MAX_PKT_BURST];
-	uint16_t complete_count;
-
-	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
-						qid, p_cpl, MAX_PKT_BURST);
-	vdev->nr_async_pkts -= complete_count;
-	if (complete_count)
-		free_pkts(p_cpl, complete_count);
-}
-
 static __rte_always_inline void
 drain_eth_rx(struct vhost_dev *vdev)
 {
 	uint16_t rx_count, enqueue_count;
+	uint32_t cpu_cpl_nr;
 	struct rte_mbuf *pkts[MAX_PKT_BURST];
+	struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];
 
 	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
 				    pkts, MAX_PKT_BURST);
 
-	while (likely(vdev->nr_async_pkts))
-		complete_async_pkts(vdev, VIRTIO_RXQ);
-
 	if (!rx_count)
 		return;
 
@@ -1170,13 +1156,21 @@ drain_eth_rx(struct vhost_dev *vdev)
 		}
 	}
 
+	complete_async_pkts(vdev);
+	while (rte_atomic16_read(&vdev->nr_async_pkts) >= 128)
+		complete_async_pkts(vdev);
+
 	if (builtin_net_driver) {
 		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
 						pkts, rx_count);
 	} else if (async_vhost_driver) {
 		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
-					VIRTIO_RXQ, pkts, rx_count);
-		vdev->nr_async_pkts += enqueue_count;
+					VIRTIO_RXQ, pkts, rx_count,
+					m_cpu_cpl, &cpu_cpl_nr);
+		rte_atomic16_add(&vdev->nr_async_pkts,
+					enqueue_count - cpu_cpl_nr);
+		if (cpu_cpl_nr)
+			free_pkts(m_cpu_cpl, cpu_cpl_nr);
 	} else {
 		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
 						pkts, rx_count);
@@ -1224,7 +1218,7 @@ drain_virtio_tx(struct vhost_dev *vdev)
 			RTE_LOG_DP(DEBUG, VHOST_DATA,
 				"Vhost tX queue drained after timeout with burst size %u\n",
 				vhost_txq->len);
-			drain_vhost(vdev, vhost_txq->m_table, vhost_txq->len);
+			drain_vhost(vdev);
 			vhost_txq->len = 0;
 			vhost_tsc[vdev->vid] = cur_tsc;
 		}
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 4317b6ae81..d33ddb411b 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,7 +51,7 @@ struct vhost_dev {
 	uint64_t features;
 	size_t hdr_len;
 	uint16_t nr_vrings;
-	uint16_t nr_async_pkts;
+	rte_atomic16_t nr_async_pkts;
 	struct rte_vhost_memory *mem;
 	struct device_statistics stats;
 	TAILQ_ENTRY(vhost_dev) global_vdev_entry;
-- 
2.29.2


