From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: Jerin Jacob <jerinj@marvell.com>,
	Pavan Nikhilesh <pbhagavatula@marvell.com>,
	Liang Ma <liang.j.ma@intel.com>,
	Peter Mccarthy <peter.mccarthy@intel.com>,
	Harry van Haaren <harry.van.haaren@intel.com>,
	Ray Kinsella <mdr@ashroe.eu>, Neil Horman <nhorman@tuxdriver.com>
Subject: [dpdk-dev] [PATCH 7/8] event: switch sequence number to dynamic field
Date: Tue, 27 Oct 2020 23:13:42 +0100
Message-ID: <20201027221343.28551-8-david.marchand@redhat.com>
In-Reply-To: <20201027221343.28551-1-david.marchand@redhat.com>

The eventdev drivers have been hacking the deprecated mbuf field seqn
for internal test usage.
Move it to a dynamic mbuf field so that seqn itself can be removed.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
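Note for reviewers: the conversion follows the usual dynamic mbuf field
pattern, the same one rte_event_test_seqn_dynfield_register() below is
built on. As a reference, here is a minimal standalone sketch of that
pattern (illustration only, not part of this patch; the "demo_" names
are hypothetical and an initialized EAL is assumed):

  #include <rte_mbuf.h>
  #include <rte_mbuf_dyn.h>

  /* Offset of the illustrative field, -1 until registered. */
  static int demo_seqn_offset = -1;

  static int
  demo_seqn_register(void)
  {
  	static const struct rte_mbuf_dynfield desc = {
  		.name = "demo_seqn_dynfield",
  		.size = sizeof(uint32_t),
  		.align = __alignof__(uint32_t),
  	};

  	/* Returns the field offset, or a negative value on error.
  	 * Registering again with identical parameters returns the same
  	 * offset, so each test entry point may call this unconditionally.
  	 */
  	demo_seqn_offset = rte_mbuf_dynfield_register(&desc);
  	return demo_seqn_offset;
  }

  static inline uint32_t *
  demo_seqn(const struct rte_mbuf *m)
  {
  	/* Valid only once demo_seqn_register() has succeeded. */
  	return RTE_MBUF_DYNFIELD(m, demo_seqn_offset, uint32_t *);
  }

Once registered, tests access the sequence number through the returned
pointer, e.g. *demo_seqn(m) = i; where they previously wrote m->seqn = i;.
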
 app/test-eventdev/evt_main.c                  |  3 ++
 app/test-eventdev/test_order_common.c         |  2 +-
 app/test-eventdev/test_order_common.h         |  5 ++-
 drivers/event/octeontx/ssovf_evdev_selftest.c | 32 ++++++++--------
 drivers/event/octeontx2/otx2_evdev_selftest.c | 31 +++++++--------
 drivers/event/opdl/opdl_test.c                |  8 ++--
 drivers/event/sw/sw_evdev_selftest.c          | 34 +++++++++--------
 lib/librte_eventdev/rte_eventdev.c            | 21 +++++++++-
 lib/librte_eventdev/rte_eventdev.h            | 38 ++++++++++++++++---
 lib/librte_eventdev/version.map               |  2 +
 10 files changed, 121 insertions(+), 55 deletions(-)

diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c
index a8d304bab3..832bb21d7c 100644
--- a/app/test-eventdev/evt_main.c
+++ b/app/test-eventdev/evt_main.c
@@ -89,6 +89,9 @@ main(int argc, char **argv)
 	if (!evdevs)
 		rte_panic("no eventdev devices found\n");
 
+	if (rte_event_test_seqn_dynfield_register() < 0)
+		rte_panic("failed to register event dev sequence number\n");
+
 	/* Populate the default values of the options */
 	evt_options_default(&opt);
 
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index c5f7317440..d15ff80273 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -50,7 +50,7 @@ order_producer(void *arg)
 
 		const flow_id_t flow = (uintptr_t)m % nb_flows;
 		/* Maintain seq number per flow */
-		m->seqn = producer_flow_seq[flow]++;
+		*rte_event_test_seqn(m) = producer_flow_seq[flow]++;
 		flow_id_save(flow, m, &ev);
 
 		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index 9e3415e421..d4ad31da46 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -89,9 +89,10 @@ order_process_stage_1(struct test_order *const t,
 {
 	const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
 	/* compare the seqn against expected value */
-	if (ev->mbuf->seqn != expected_flow_seq[flow]) {
+	if (*rte_event_test_seqn(ev->mbuf) != expected_flow_seq[flow]) {
 		evt_err("flow=%x seqn mismatch got=%x expected=%x",
-			flow, ev->mbuf->seqn, expected_flow_seq[flow]);
+			flow, *rte_event_test_seqn(ev->mbuf),
+			expected_flow_seq[flow]);
 		t->err = true;
 		rte_smp_wmb();
 	}
diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c
index 7a2b7ded25..b99889e2cc 100644
--- a/drivers/event/octeontx/ssovf_evdev_selftest.c
+++ b/drivers/event/octeontx/ssovf_evdev_selftest.c
@@ -300,7 +300,7 @@ inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
 		m = rte_pktmbuf_alloc(eventdev_test_mempool);
 		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-		m->seqn = i;
+		*rte_event_test_seqn(m) = i;
 		update_event_and_validation_attr(m, &ev, flow_id, event_type,
 			sub_event_type, sched_type, queue, port);
 		rte_event_enqueue_burst(evdev, port, &ev, 1);
@@ -320,7 +320,8 @@ check_excess_events(uint8_t port)
 		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
 
 		RTE_TEST_ASSERT_SUCCESS(valid_event,
-				"Unexpected valid event=%d", ev.mbuf->seqn);
+			"Unexpected valid event=%d",
+			*rte_event_test_seqn(ev.mbuf));
 	}
 	return 0;
 }
@@ -425,8 +426,9 @@ static int
 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
 {
 	RTE_SET_USED(port);
-	RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
-			index, ev->mbuf->seqn);
+	RTE_TEST_ASSERT_EQUAL(index, *rte_event_test_seqn(ev->mbuf),
+		"index=%d != seqn=%d", index,
+		*rte_event_test_seqn(ev->mbuf));
 	return 0;
 }
 
@@ -509,10 +511,10 @@ validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
 
 	expected_val += ev->queue_id;
 	RTE_SET_USED(port);
-	RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
-	"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
-			ev->mbuf->seqn, index, expected_val, range,
-			queue_count, MAX_EVENTS);
+	RTE_TEST_ASSERT_EQUAL(*rte_event_test_seqn(ev->mbuf), expected_val,
+		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+		*rte_event_test_seqn(ev->mbuf), index, expected_val, range,
+		queue_count, MAX_EVENTS);
 	return 0;
 }
 
@@ -537,7 +539,7 @@ test_multi_queue_priority(void)
 		m = rte_pktmbuf_alloc(eventdev_test_mempool);
 		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-		m->seqn = i;
+		*rte_event_test_seqn(m) = i;
 		queue = i % queue_count;
 		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
 			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
@@ -904,7 +906,7 @@ worker_flow_based_pipeline(void *arg)
 			ev.op = RTE_EVENT_OP_FORWARD;
 			rte_event_enqueue_burst(evdev, port, &ev, 1);
 		} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
-			if (seqn_list_update(ev.mbuf->seqn) == 0) {
+			if (seqn_list_update(*rte_event_test_seqn(ev.mbuf)) == 0) {
 				rte_pktmbuf_free(ev.mbuf);
 				rte_atomic32_sub(total_events, 1);
 			} else {
@@ -939,7 +941,7 @@ test_multiport_flow_sched_type_test(uint8_t in_sched_type,
 		return 0;
 	}
 
-	/* Injects events with m->seqn=0 to total_events */
+	/* Injects events with sequence numbers from 0 to total_events */
 	ret = inject_events(
 		0x1 /*flow_id */,
 		RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1059,7 +1061,7 @@ worker_group_based_pipeline(void *arg)
 			ev.op = RTE_EVENT_OP_FORWARD;
 			rte_event_enqueue_burst(evdev, port, &ev, 1);
 		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
-			if (seqn_list_update(ev.mbuf->seqn) == 0) {
+			if (seqn_list_update(*rte_event_test_seqn(ev.mbuf)) == 0) {
 				rte_pktmbuf_free(ev.mbuf);
 				rte_atomic32_sub(total_events, 1);
 			} else {
@@ -1101,7 +1103,7 @@ test_multiport_queue_sched_type_test(uint8_t in_sched_type,
 		return 0;
 	}
 
-	/* Injects events with m->seqn=0 to total_events */
+	/* Injects events with sequence numbers from 0 to total_events */
 	ret = inject_events(
 		0x1 /*flow_id */,
 		RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1238,7 +1240,7 @@ launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
 		return 0;
 	}
 
-	/* Injects events with m->seqn=0 to total_events */
+	/* Injects events with sequence numbers from 0 to total_events */
 	ret = inject_events(
 		0x1 /*flow_id */,
 		RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1360,7 +1362,7 @@ worker_ordered_flow_producer(void *arg)
 		if (m == NULL)
 			continue;
 
-		m->seqn = counter++;
+		*rte_event_test_seqn(m) = counter++;
 
 		struct rte_event ev = {.event = 0, .u64 = 0};
 
diff --git a/drivers/event/octeontx2/otx2_evdev_selftest.c b/drivers/event/octeontx2/otx2_evdev_selftest.c
index 334a9ccb7c..c6381ac785 100644
--- a/drivers/event/octeontx2/otx2_evdev_selftest.c
+++ b/drivers/event/octeontx2/otx2_evdev_selftest.c
@@ -279,7 +279,7 @@ inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
 		m = rte_pktmbuf_alloc(eventdev_test_mempool);
 		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-		m->seqn = i;
+		*rte_event_test_seqn(m) = i;
 		update_event_and_validation_attr(m, &ev, flow_id, event_type,
 						 sub_event_type, sched_type,
 						 queue, port);
@@ -301,7 +301,7 @@ check_excess_events(uint8_t port)
 
 		RTE_TEST_ASSERT_SUCCESS(valid_event,
 					"Unexpected valid event=%d",
-					ev.mbuf->seqn);
+					*rte_event_test_seqn(ev.mbuf));
 	}
 	return 0;
 }
@@ -406,8 +406,9 @@ static int
 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
 {
 	RTE_SET_USED(port);
-	RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
-			      index, ev->mbuf->seqn);
+	RTE_TEST_ASSERT_EQUAL(index, *rte_event_test_seqn(ev->mbuf),
+		"index=%d != seqn=%d",
+		index, *rte_event_test_seqn(ev->mbuf));
 	return 0;
 }
 
@@ -493,10 +494,10 @@ validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
 
 	expected_val += ev->queue_id;
 	RTE_SET_USED(port);
-	RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
-	"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
-			      ev->mbuf->seqn, index, expected_val, range,
-			      queue_count, MAX_EVENTS);
+	RTE_TEST_ASSERT_EQUAL(*rte_event_test_seqn(ev->mbuf), expected_val,
+		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+		*rte_event_test_seqn(ev->mbuf), index, expected_val, range,
+		queue_count, MAX_EVENTS);
 	return 0;
 }
 
@@ -523,7 +524,7 @@ test_multi_queue_priority(void)
 		m = rte_pktmbuf_alloc(eventdev_test_mempool);
 		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-		m->seqn = i;
+		*rte_event_test_seqn(m) = i;
 		queue = i % queue_count;
 		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
 						 0, RTE_SCHED_TYPE_PARALLEL,
@@ -888,7 +889,7 @@ worker_flow_based_pipeline(void *arg)
 			ev.op = RTE_EVENT_OP_FORWARD;
 			rte_event_enqueue_burst(evdev, port, &ev, 1);
 		} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
-			if (seqn_list_update(ev.mbuf->seqn) == 0) {
+			if (seqn_list_update(*rte_event_test_seqn(ev.mbuf)) == 0) {
 				rte_pktmbuf_free(ev.mbuf);
 				rte_atomic32_sub(total_events, 1);
 			} else {
@@ -923,7 +924,7 @@ test_multiport_flow_sched_type_test(uint8_t in_sched_type,
 		return 0;
 	}
 
-	/* Injects events with m->seqn=0 to total_events */
+	/* Injects events with sequence numbers from 0 to total_events */
 	ret = inject_events(0x1 /*flow_id */,
 			    RTE_EVENT_TYPE_CPU /* event_type */,
 			    0 /* sub_event_type (stage 0) */,
@@ -1043,7 +1044,7 @@ worker_group_based_pipeline(void *arg)
 			ev.op = RTE_EVENT_OP_FORWARD;
 			rte_event_enqueue_burst(evdev, port, &ev, 1);
 		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
-			if (seqn_list_update(ev.mbuf->seqn) == 0) {
+			if (seqn_list_update(*rte_event_test_seqn(ev.mbuf)) == 0) {
 				rte_pktmbuf_free(ev.mbuf);
 				rte_atomic32_sub(total_events, 1);
 			} else {
@@ -1084,7 +1085,7 @@ test_multiport_queue_sched_type_test(uint8_t in_sched_type,
 		return 0;
 	}
 
-	/* Injects events with m->seqn=0 to total_events */
+	/* Injects events with sequence numbers from 0 to total_events */
 	ret = inject_events(0x1 /*flow_id */,
 			    RTE_EVENT_TYPE_CPU /* event_type */,
 			    0 /* sub_event_type (stage 0) */,
@@ -1222,7 +1223,7 @@ launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
 		return 0;
 	}
 
-	/* Injects events with m->seqn=0 to total_events */
+	/* Injects events with sequence numbers from 0 to total_events */
 	ret = inject_events(0x1 /*flow_id */,
 			    RTE_EVENT_TYPE_CPU /* event_type */,
 			    0 /* sub_event_type (stage 0) */,
@@ -1348,7 +1349,7 @@ worker_ordered_flow_producer(void *arg)
 		if (m == NULL)
 			continue;
 
-		m->seqn = counter++;
+		*rte_event_test_seqn(m) = counter++;
 
 		struct rte_event ev = {.event = 0, .u64 = 0};
 
diff --git a/drivers/event/opdl/opdl_test.c b/drivers/event/opdl/opdl_test.c
index e7a32fbd31..cbf33d38f7 100644
--- a/drivers/event/opdl/opdl_test.c
+++ b/drivers/event/opdl/opdl_test.c
@@ -256,7 +256,7 @@ ordered_basic(struct test *t)
 		ev.queue_id = t->qid[0];
 		ev.op = RTE_EVENT_OP_NEW;
 		ev.mbuf = mbufs[i];
-		mbufs[i]->seqn = MAGIC_SEQN + i;
+		*rte_event_test_seqn(mbufs[i]) = MAGIC_SEQN + i;
 
 		/* generate pkt and enqueue */
 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -281,7 +281,7 @@ ordered_basic(struct test *t)
 			rte_event_dev_dump(evdev, stdout);
 			return -1;
 		}
-		seq = deq_ev[i].mbuf->seqn  - MAGIC_SEQN;
+		seq = *rte_event_test_seqn(deq_ev[i].mbuf) - MAGIC_SEQN;
 
 		if (seq != (i-1)) {
 			PMD_DRV_LOG(ERR, " seq test failed ! eq is %d , "
@@ -396,7 +396,7 @@ atomic_basic(struct test *t)
 		ev.op = RTE_EVENT_OP_NEW;
 		ev.flow_id = 1;
 		ev.mbuf = mbufs[i];
-		mbufs[i]->seqn = MAGIC_SEQN + i;
+		*rte_event_test_seqn(mbufs[i]) = MAGIC_SEQN + i;
 
 		/* generate pkt and enqueue */
 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -625,7 +625,7 @@ single_link_w_stats(struct test *t)
 		ev.queue_id = t->qid[0];
 		ev.op = RTE_EVENT_OP_NEW;
 		ev.mbuf = mbufs[i];
-		mbufs[i]->seqn = 1234 + i;
+		*rte_event_test_seqn(mbufs[i]) = 1234 + i;
 
 		/* generate pkt and enqueue */
 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index ad4fc0eed7..47f5b55651 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -380,7 +380,7 @@ run_prio_packet_test(struct test *t)
 			printf("%d: gen of pkt failed\n", __LINE__);
 			return -1;
 		}
-		arp->seqn = MAGIC_SEQN[i];
+		*rte_event_test_seqn(arp) = MAGIC_SEQN[i];
 
 		ev = (struct rte_event){
 			.priority = PRIORITY[i],
@@ -419,7 +419,7 @@ run_prio_packet_test(struct test *t)
 		rte_event_dev_dump(evdev, stdout);
 		return -1;
 	}
-	if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
+	if (*rte_event_test_seqn(ev.mbuf) != MAGIC_SEQN[1]) {
 		printf("%d: first packet out not highest priority\n",
 				__LINE__);
 		rte_event_dev_dump(evdev, stdout);
@@ -433,7 +433,7 @@ run_prio_packet_test(struct test *t)
 		rte_event_dev_dump(evdev, stdout);
 		return -1;
 	}
-	if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
+	if (*rte_event_test_seqn(ev2.mbuf) != MAGIC_SEQN[0]) {
 		printf("%d: second packet out not lower priority\n",
 				__LINE__);
 		rte_event_dev_dump(evdev, stdout);
@@ -477,7 +477,7 @@ test_single_directed_packet(struct test *t)
 	}
 
 	const uint32_t MAGIC_SEQN = 4711;
-	arp->seqn = MAGIC_SEQN;
+	*rte_event_test_seqn(arp) = MAGIC_SEQN;
 
 	/* generate pkt and enqueue */
 	err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
@@ -516,7 +516,7 @@ test_single_directed_packet(struct test *t)
 		return -1;
 	}
 
-	if (ev.mbuf->seqn != MAGIC_SEQN) {
+	if (*rte_event_test_seqn(ev.mbuf) != MAGIC_SEQN) {
 		printf("%d: error magic sequence number not dequeued\n",
 				__LINE__);
 		return -1;
@@ -934,7 +934,7 @@ xstats_tests(struct test *t)
 		ev.op = RTE_EVENT_OP_NEW;
 		ev.mbuf = arp;
 		ev.flow_id = 7;
-		arp->seqn = i;
+		*rte_event_test_seqn(arp) = i;
 
 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
 		if (err != 1) {
@@ -1485,7 +1485,7 @@ xstats_id_reset_tests(struct test *t)
 		ev.queue_id = t->qid[i];
 		ev.op = RTE_EVENT_OP_NEW;
 		ev.mbuf = arp;
-		arp->seqn = i;
+		*rte_event_test_seqn(arp) = i;
 
 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
 		if (err != 1) {
@@ -1873,7 +1873,7 @@ qid_priorities(struct test *t)
 		ev.queue_id = t->qid[i];
 		ev.op = RTE_EVENT_OP_NEW;
 		ev.mbuf = arp;
-		arp->seqn = i;
+		*rte_event_test_seqn(arp) = i;
 
 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
 		if (err != 1) {
@@ -1894,7 +1894,7 @@ qid_priorities(struct test *t)
 		return -1;
 	}
 	for (i = 0; i < 3; i++) {
-		if (ev[i].mbuf->seqn != 2-i) {
+		if (*rte_event_test_seqn(ev[i].mbuf) != 2-i) {
 			printf(
 				"%d: qid priority test: seqn %d incorrectly prioritized\n",
 					__LINE__, i);
@@ -2371,7 +2371,7 @@ single_packet(struct test *t)
 	ev.mbuf = arp;
 	ev.queue_id = 0;
 	ev.flow_id = 3;
-	arp->seqn = MAGIC_SEQN;
+	*rte_event_test_seqn(arp) = MAGIC_SEQN;
 
 	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
 	if (err != 1) {
@@ -2411,7 +2411,7 @@ single_packet(struct test *t)
 	}
 
 	err = test_event_dev_stats_get(evdev, &stats);
-	if (ev.mbuf->seqn != MAGIC_SEQN) {
+	if (*rte_event_test_seqn(ev.mbuf) != MAGIC_SEQN) {
 		printf("%d: magic sequence number not dequeued\n", __LINE__);
 		return -1;
 	}
@@ -2684,7 +2684,7 @@ parallel_basic(struct test *t, int check_order)
 		ev.queue_id = t->qid[0];
 		ev.op = RTE_EVENT_OP_NEW;
 		ev.mbuf = mbufs[i];
-		mbufs[i]->seqn = MAGIC_SEQN + i;
+		*rte_event_test_seqn(mbufs[i]) = MAGIC_SEQN + i;
 
 		/* generate pkt and enqueue */
 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -2739,10 +2739,12 @@ parallel_basic(struct test *t, int check_order)
 	/* Check to see if the sequence numbers are in expected order */
 	if (check_order) {
 		for (j = 0 ; j < deq_pkts ; j++) {
-			if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
-				printf(
-					"%d: Incorrect sequence number(%d) from port %d\n",
-					__LINE__, mbufs_out[j]->seqn, tx_port);
+			if (*rte_event_test_seqn(deq_ev[j].mbuf) !=
+					MAGIC_SEQN + j) {
+				printf("%d: Incorrect sequence number(%d) from port %d\n",
+					__LINE__,
+					*rte_event_test_seqn(mbufs_out[j]),
+					tx_port);
 				return -1;
 			}
 		}
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index 322453c532..61ff6d3404 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -109,6 +109,22 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
 	return 0;
 }
 
+#define RTE_EVENT_TEST_SEQN_DYNFIELD_NAME "rte_event_test_seqn_dynfield"
+int rte_event_test_seqn_dynfield_offset = -1;
+
+int
+rte_event_test_seqn_dynfield_register(void)
+{
+	static const struct rte_mbuf_dynfield event_test_seqn_dynfield_desc = {
+		.name = RTE_EVENT_TEST_SEQN_DYNFIELD_NAME,
+		.size = sizeof(rte_event_test_seqn_t),
+		.align = __alignof__(rte_event_test_seqn_t),
+	};
+	rte_event_test_seqn_dynfield_offset =
+		rte_mbuf_dynfield_register(&event_test_seqn_dynfield_desc);
+	return rte_event_test_seqn_dynfield_offset;
+}
+
 int
 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 				uint32_t *caps)
@@ -1247,8 +1263,11 @@ int rte_event_dev_selftest(uint8_t dev_id)
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
 
-	if (dev->dev_ops->dev_selftest != NULL)
+	if (dev->dev_ops->dev_selftest != NULL) {
+		if (rte_event_test_seqn_dynfield_register() < 0)
+			return -ENOMEM;
 		return (*dev->dev_ops->dev_selftest)();
+	}
 	return -ENOTSUP;
 }
 
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index ce1fc2ce0f..1656ff8dce 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -211,13 +211,15 @@ extern "C" {
 #endif
 
 #include <rte_common.h>
+#include <rte_compat.h>
 #include <rte_config.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
 #include <rte_memory.h>
 #include <rte_errno.h>
 
 #include "rte_eventdev_trace_fp.h"
 
-struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
 struct rte_event;
 
 /* Event device capability bitmap flags */
@@ -1111,6 +1113,40 @@ struct rte_event {
 	};
 };
 
+typedef uint32_t rte_event_test_seqn_t;
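+/** Offset of the test sequence number dynamic field, -1 until registered. */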
+extern int rte_event_test_seqn_dynfield_offset;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Read test sequence number from mbuf.
+ *
+ * @param mbuf Structure to read from.
+ * @return pointer to test sequence number.
+ */
+__rte_experimental
+static inline rte_event_test_seqn_t *
+rte_event_test_seqn(const struct rte_mbuf *mbuf)
+{
+	return RTE_MBUF_DYNFIELD(mbuf, rte_event_test_seqn_dynfield_offset,
+		rte_event_test_seqn_t *);
+}
+
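+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register the mbuf dynamic field used for the test sequence number.
+ *
+ * @return
+ *   The offset of the registered field on success, a negative value on error.
+ */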
+__rte_experimental
+int
+rte_event_test_seqn_dynfield_register(void);
+
 /* Ethdev Rx adapter capability bitmap flags */
 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
 /**< This flag is sent when the packet transfer mechanism is in HW.
diff --git a/lib/librte_eventdev/version.map b/lib/librte_eventdev/version.map
index 8ae8420f9b..e49382ba99 100644
--- a/lib/librte_eventdev/version.map
+++ b/lib/librte_eventdev/version.map
@@ -138,4 +138,6 @@ EXPERIMENTAL {
 	__rte_eventdev_trace_port_setup;
 	# added in 20.11
 	rte_event_pmd_pci_probe_named;
+	rte_event_test_seqn_dynfield_offset;
+	rte_event_test_seqn_dynfield_register;
 };
-- 
2.23.0


Thread overview: 29+ messages
2020-10-27 22:13 [dpdk-dev] [PATCH 0/8] remove mbuf seqn David Marchand
2020-10-27 22:13 ` [dpdk-dev] [PATCH 1/8] event/dpaa2: remove dead code David Marchand
2020-10-27 22:13 ` [dpdk-dev] [PATCH 2/8] crypto/scheduler: remove unused internal seqn David Marchand
2020-10-27 22:13 ` [dpdk-dev] [PATCH 3/8] net/ark: remove use of seqn for debug David Marchand
2020-10-28 12:19   ` Ed Czeck
2020-10-27 22:13 ` [dpdk-dev] [PATCH 4/8] reorder: switch sequence number to dynamic mbuf field David Marchand
2020-10-27 22:13 ` [dpdk-dev] [PATCH 5/8] dpaa: switch sequence number to dynamic field David Marchand
2020-10-27 22:13 ` [dpdk-dev] [PATCH 6/8] fslmc: " David Marchand
2020-10-27 22:13 ` David Marchand [this message]
2020-10-27 22:18   ` [dpdk-dev] [PATCH 7/8] event: " David Marchand
2020-10-28  7:27   ` Jerin Jacob
2020-10-28  8:55     ` David Marchand
2020-10-28  9:09       ` Jerin Jacob
2020-10-27 22:13 ` [dpdk-dev] [PATCH 8/8] mbuf: remove seqn field David Marchand
2020-10-28 10:27   ` Andrew Rybchenko
2020-10-28 12:20 ` [dpdk-dev] [PATCH v2 0/9] remove mbuf seqn David Marchand
2020-10-28 12:20   ` [dpdk-dev] [PATCH v2 1/9] event/dpaa2: remove dead code David Marchand
2020-10-31 18:28     ` Nipun Gupta
2020-10-28 12:20   ` [dpdk-dev] [PATCH v2 2/9] crypto/scheduler: remove unused internal seqn David Marchand
2020-10-28 12:20   ` [dpdk-dev] [PATCH v2 3/9] net/ark: remove use of seqn for debug David Marchand
2020-10-28 12:20   ` [dpdk-dev] [PATCH v2 4/9] reorder: switch sequence number to dynamic mbuf field David Marchand
2020-10-28 12:54     ` Andrew Rybchenko
2020-10-28 12:20   ` [dpdk-dev] [PATCH v2 5/9] dpaa: " David Marchand
2020-10-28 12:20   ` [dpdk-dev] [PATCH v2 6/9] fslmc: " David Marchand
2020-10-28 12:20   ` [dpdk-dev] [PATCH v2 7/9] eventdev: " David Marchand
2020-10-28 12:20   ` [dpdk-dev] [PATCH v2 8/9] app/eventdev: " David Marchand
2020-10-28 12:20   ` [dpdk-dev] [PATCH v2 9/9] mbuf: remove seqn field David Marchand
2020-10-31 21:09     ` Thomas Monjalon
2020-10-31 21:11   ` [dpdk-dev] [PATCH v2 0/9] remove mbuf seqn Thomas Monjalon
