From: Magnus Karlsson <magnus.karlsson@gmail.com>
To: magnus.karlsson@intel.com, bjorn@kernel.org, ast@kernel.org,
	daniel@iogearbox.net, netdev@vger.kernel.org,
	maciej.fijalkowski@intel.com, ciara.loftus@intel.com
Cc: jonathan.lemon@gmail.com, bpf@vger.kernel.org,
	anthony.l.nguyen@intel.com
Subject: [PATCH bpf-next 10/13] selftests: xsk: introduce pacing of traffic
Date: Wed, 22 Sep 2021 09:56:10 +0200
Message-ID: <20210922075613.12186-11-magnus.karlsson@gmail.com>
In-Reply-To: <20210922075613.12186-1-magnus.karlsson@gmail.com>

From: Magnus Karlsson <magnus.karlsson@intel.com>

Introduce pacing of traffic so that the Tx thread can never send more
packets than the receiver has processed plus the number of packets it
can have in its umem. Thus, at any point in time, the number of
in-flight packets (those not yet processed by the Rx thread) is less
than or equal to the number of packets that can be held in the Rx
thread's umem.

The batch size is also increased to improve running time.
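
For reference, here is a minimal standalone sketch of the pacing scheme
described above (not the selftest code itself): a shared in-flight
counter protected by a mutex, with the sender blocking on a condition
variable once the counter approaches the receiver's umem size and the
receiver signalling it after releasing buffers. The names NUM_FRAMES,
SKETCH_BATCH, pace_tx() and pace_rx() are illustrative only.

#include <pthread.h>

#define NUM_FRAMES   1024 /* capacity of the receiver's umem (illustrative) */
#define SKETCH_BATCH 64   /* packets queued per Tx iteration (illustrative) */

static pthread_mutex_t sketch_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sketch_cond = PTHREAD_COND_INITIALIZER;
static unsigned int sketch_pkts_in_flight;

/* Tx side: account for the packets just queued and, if the receiver's
 * umem could overflow, wait until the receiver has freed up room. The
 * patch additionally kicks the Tx ring before waiting so that
 * completions keep flowing.
 */
static void pace_tx(unsigned int sent)
{
	pthread_mutex_lock(&sketch_mutex);
	sketch_pkts_in_flight += sent;
	while (sketch_pkts_in_flight >= NUM_FRAMES - SKETCH_BATCH)
		pthread_cond_wait(&sketch_cond, &sketch_mutex);
	pthread_mutex_unlock(&sketch_mutex);
}

/* Rx side: account for the packets just consumed and wake the sender
 * once there is room in the umem again.
 */
static void pace_rx(unsigned int rcvd)
{
	pthread_mutex_lock(&sketch_mutex);
	sketch_pkts_in_flight -= rcvd;
	if (sketch_pkts_in_flight < NUM_FRAMES)
		pthread_cond_signal(&sketch_cond);
	pthread_mutex_unlock(&sketch_mutex);
}

The sketch wraps pthread_cond_wait() in a while loop to guard against
spurious wakeups; the patch below uses a single wait since the receiver
only signals when there is room in its umem.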

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
---
 tools/testing/selftests/bpf/xdpxceiver.c | 29 +++++++++++++++++++-----
 tools/testing/selftests/bpf/xdpxceiver.h |  7 +++++-
 2 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
index aa5660dc0699..597fbe206026 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.c
+++ b/tools/testing/selftests/bpf/xdpxceiver.c
@@ -384,6 +384,7 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
 		ifobj->umem = &ifobj->umem_arr[0];
 		ifobj->xsk = &ifobj->xsk_arr[0];
 		ifobj->use_poll = false;
+		ifobj->pacing_on = true;
 		ifobj->pkt_stream = test->pkt_stream_default;
 
 		if (i == 0) {
@@ -724,6 +725,7 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
 {
 	struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
 	u32 idx_rx = 0, idx_fq = 0, rcvd, i;
+	u32 total = 0;
 	int ret;
 
 	while (pkt) {
@@ -772,6 +774,13 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
 
 		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
 		xsk_ring_cons__release(&xsk->rx, rcvd);
+
+		pthread_mutex_lock(&pacing_mutex);
+		pkts_in_flight -= rcvd;
+		total += rcvd;
+		if (pkts_in_flight < umem->num_frames)
+			pthread_cond_signal(&pacing_cond);
+		pthread_mutex_unlock(&pacing_mutex);
 	}
 }
 
@@ -797,10 +806,19 @@ static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb)
 			valid_pkts++;
 	}
 
+	pthread_mutex_lock(&pacing_mutex);
+	pkts_in_flight += valid_pkts;
+	if (ifobject->pacing_on && pkts_in_flight >= ifobject->umem->num_frames - BATCH_SIZE) {
+		kick_tx(xsk);
+		pthread_cond_wait(&pacing_cond, &pacing_mutex);
+	}
+	pthread_mutex_unlock(&pacing_mutex);
+
 	xsk_ring_prod__submit(&xsk->tx, i);
 	xsk->outstanding_tx += valid_pkts;
-	complete_pkts(xsk, BATCH_SIZE);
+	complete_pkts(xsk, i);
 
+	usleep(10);
 	return i;
 }
 
@@ -819,8 +837,6 @@ static void send_pkts(struct ifobject *ifobject)
 	fds.events = POLLOUT;
 
 	while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
-		u32 sent;
-
 		if (ifobject->use_poll) {
 			int ret;
 
@@ -832,9 +848,7 @@ static void send_pkts(struct ifobject *ifobject)
 				continue;
 		}
 
-		sent = __send_pkts(ifobject, pkt_cnt);
-		pkt_cnt += sent;
-		usleep(10);
+		pkt_cnt += __send_pkts(ifobject, pkt_cnt);
 	}
 
 	wait_for_tx_completion(ifobject->xsk);
@@ -1043,6 +1057,7 @@ static void testapp_validate_traffic(struct test_spec *test)
 
 	test->current_step++;
 	pkt_stream_reset(ifobj_rx->pkt_stream);
+	pkts_in_flight = 0;
 
 	/*Spawn RX thread */
 	pthread_create(&t0, NULL, ifobj_rx->func_ptr, test);
@@ -1126,6 +1141,8 @@ static void testapp_stats(struct test_spec *test)
 	for (i = 0; i < STAT_TEST_TYPE_MAX; i++) {
 		test_spec_reset(test);
 		stat_test_type = i;
+		/* No or few packets will be received, so packet pacing cannot be used */
+		test->ifobj_tx->pacing_on = false;
 
 		switch (stat_test_type) {
 		case STAT_TEST_RX_DROPPED:
diff --git a/tools/testing/selftests/bpf/xdpxceiver.h b/tools/testing/selftests/bpf/xdpxceiver.h
index 5ac4a5e64744..00790c976f4f 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.h
+++ b/tools/testing/selftests/bpf/xdpxceiver.h
@@ -35,7 +35,7 @@
 #define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr))
 #define USLEEP_MAX 10000
 #define SOCK_RECONF_CTR 10
-#define BATCH_SIZE 8
+#define BATCH_SIZE 64
 #define POLL_TMOUT 1000
 #define DEFAULT_PKT_CNT (4 * 1024)
 #define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
@@ -136,6 +136,7 @@ struct ifobject {
 	bool tx_on;
 	bool rx_on;
 	bool use_poll;
+	bool pacing_on;
 	u8 dst_mac[ETH_ALEN];
 	u8 src_mac[ETH_ALEN];
 };
@@ -151,5 +152,9 @@ struct test_spec {
 };
 
 pthread_barrier_t barr;
+pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t pacing_cond = PTHREAD_COND_INITIALIZER;
+
+u32 pkts_in_flight;
 
 #endif				/* XDPXCEIVER_H */
-- 
2.29.0

