From: "Mattias Rönnblom" <mattias.ronnblom@ericsson.com>
To: <jerinj@marvell.com>
Cc: "Jerin Jacob" <jerinjacobk@gmail.com>,
	hofors@lysator.liu.se, dev@dpdk.org, harry.van.haaren@intel.com,
	peter.j.nilsson@ericsson.com,
	"Mattias Rönnblom" <mattias.ronnblom@ericsson.com>
Subject: [RFC v4 1/3] eventdev: introduce event dispatcher
Date: Fri, 9 Jun 2023 09:08:24 +0200
Message-ID: <20230609070826.149336-2-mattias.ronnblom@ericsson.com>
In-Reply-To: <20230609070826.149336-1-mattias.ronnblom@ericsson.com>

The purpose of the event dispatcher is to help reduce coupling in an
Eventdev-based DPDK application.

In addition, the event dispatcher provides a convenient and flexible
way for the application to use service cores for application-level
processing.
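
As a rough sketch of the intended usage (error handling and service
lcore setup are omitted; DISPATCHER_ID, match_module_a and
process_module_a are application-defined placeholders):

static void
dispatcher_setup(uint8_t event_dev_id, uint8_t event_port_id,
		 unsigned int lcore_id)
{
	uint32_t service_id;

	rte_event_dispatcher_create(DISPATCHER_ID, event_dev_id);
	rte_event_dispatcher_register(DISPATCHER_ID, match_module_a, NULL,
				      process_module_a, NULL);
	rte_event_dispatcher_bind_port_to_lcore(DISPATCHER_ID, event_port_id,
						32, 0, lcore_id);
	rte_event_dispatcher_service_id_get(DISPATCHER_ID, &service_id);
	rte_service_map_lcore_set(service_id, lcore_id, 1);
	rte_service_runstate_set(service_id, 1);
	rte_event_dispatcher_start(DISPATCHER_ID);
}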

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Tested-by: Peter Nilsson <peter.j.nilsson@ericsson.com>

--

RFC v4:
 o Move handlers to per-lcore data structures.
 o Introduce a mechanism which rearranges handlers so that often-used
   handlers tend to be tried first.
 o Terminate dispatch loop in case all events are delivered.
 o To avoid the dispatcher's service function hogging the CPU, process
   only one batch per call.
 o Have service function return -EAGAIN if no work is performed.
 o Events delivered in the process function are no longer marked 'const',
   since modifying them may be useful for the application and causes
   no difficulties for the dispatcher.
 o Various minor API documentation improvements.

RFC v3:
 o Add stats_get() function to the version.map file.
---
 lib/eventdev/meson.build            |   2 +
 lib/eventdev/rte_event_dispatcher.c | 770 ++++++++++++++++++++++++++++
 lib/eventdev/rte_event_dispatcher.h | 448 ++++++++++++++++
 lib/eventdev/version.map            |  13 +
 4 files changed, 1233 insertions(+)
 create mode 100644 lib/eventdev/rte_event_dispatcher.c
 create mode 100644 lib/eventdev/rte_event_dispatcher.h

diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index 6edf98dfa5..c0edc744fe 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'rte_event_crypto_adapter.c',
         'rte_event_eth_rx_adapter.c',
         'rte_event_eth_tx_adapter.c',
+        'rte_event_dispatcher.c',
         'rte_event_ring.c',
         'rte_event_timer_adapter.c',
         'rte_eventdev.c',
@@ -27,6 +28,7 @@ headers = files(
         'rte_event_crypto_adapter.h',
         'rte_event_eth_rx_adapter.h',
         'rte_event_eth_tx_adapter.h',
+        'rte_event_dispatcher.h',
         'rte_event_ring.h',
         'rte_event_timer_adapter.h',
         'rte_eventdev.h',
diff --git a/lib/eventdev/rte_event_dispatcher.c b/lib/eventdev/rte_event_dispatcher.c
new file mode 100644
index 0000000000..5e5096e2cc
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.c
@@ -0,0 +1,770 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_random.h>
+#include <rte_service_component.h>
+
+#include "eventdev_pmd.h"
+
+#include <rte_event_dispatcher.h>
+
+#define RED_MAX_PORTS_PER_LCORE 4
+#define RED_MAX_HANDLERS 32
+#define RED_MAX_FINALIZERS 16
+#define RED_AVG_PRIO_INTERVAL 2000
+
+struct rte_event_dispatcher_lcore_port {
+	uint8_t port_id;
+	uint16_t batch_size;
+	uint64_t timeout;
+};
+
+struct rte_event_dispatcher_handler {
+	int id;
+	rte_event_dispatcher_match_t match_fun;
+	void *match_data;
+	rte_event_dispatcher_process_t process_fun;
+	void *process_data;
+};
+
+struct rte_event_dispatcher_finalizer {
+	int id;
+	rte_event_dispatcher_finalize_t finalize_fun;
+	void *finalize_data;
+};
+
+struct rte_event_dispatcher_lcore {
+	uint8_t num_ports;
+	uint16_t num_handlers;
+	int32_t prio_count;
+	struct rte_event_dispatcher_lcore_port ports[RED_MAX_PORTS_PER_LCORE];
+	struct rte_event_dispatcher_handler handlers[RED_MAX_HANDLERS];
+	struct rte_event_dispatcher_stats stats;
+} __rte_cache_aligned;
+
+struct rte_event_dispatcher {
+	uint8_t id;
+	uint8_t event_dev_id;
+	int socket_id;
+	uint32_t service_id;
+	struct rte_event_dispatcher_lcore lcores[RTE_MAX_LCORE];
+	uint16_t num_finalizers;
+	struct rte_event_dispatcher_finalizer finalizers[RED_MAX_FINALIZERS];
+};
+
+static struct rte_event_dispatcher *dispatchers[UINT8_MAX];
+
+static bool
+red_has_dispatcher(uint8_t id)
+{
+	return dispatchers[id] != NULL;
+}
+
+static struct rte_event_dispatcher *
+red_get_dispatcher(uint8_t id)
+{
+	return dispatchers[id];
+}
+
+static void
+red_set_dispatcher(uint8_t id, struct rte_event_dispatcher *dispatcher)
+{
+	dispatchers[id] = dispatcher;
+}
+
+#define RED_VALID_ID_OR_RET_EINVAL(id)					\
+	do {								\
+		if (unlikely(!red_has_dispatcher(id))) {		\
+			RTE_EDEV_LOG_ERR("Invalid dispatcher id %d\n", id); \
+			return -EINVAL;					\
+		}							\
+	} while (0)
+
+static int
+red_lookup_handler_idx(struct rte_event_dispatcher_lcore *lcore,
+		       const struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->match_fun(event, handler->match_data))
+			return i;
+	}
+
+	return -1;
+}
+
+static void
+red_prioritize_handler(struct rte_event_dispatcher_lcore *lcore,
+		       int handler_idx)
+{
+	struct rte_event_dispatcher_handler tmp;
+
+	if (handler_idx == 0)
+		return;
+
+	/* Let the lucky handler "bubble" up the list */
+
+	tmp = lcore->handlers[handler_idx - 1];
+
+	lcore->handlers[handler_idx - 1] = lcore->handlers[handler_idx];
+
+	lcore->handlers[handler_idx] = tmp;
+}
+
+static inline void
+red_consider_prioritize_handler(struct rte_event_dispatcher_lcore *lcore,
+				int handler_idx, uint16_t handler_events)
+{
+	lcore->prio_count -= handler_events;
+
+	if (unlikely(lcore->prio_count <= 0)) {
+		red_prioritize_handler(lcore, handler_idx);
+
+		/*
+		 * Randomize the interval in the unlikely case
+		 * the traffic follows some very strict pattern.
+		 */
+		lcore->prio_count =
+			rte_rand_max(RED_AVG_PRIO_INTERVAL) +
+			RED_AVG_PRIO_INTERVAL / 2;
+	}
+}
+
+static inline void
+red_dispatch_events(struct rte_event_dispatcher *dispatcher,
+		    struct rte_event_dispatcher_lcore *lcore,
+		    struct rte_event_dispatcher_lcore_port *port,
+		    struct rte_event *events, uint16_t num_events)
+{
+	int i;
+	struct rte_event bursts[RED_MAX_HANDLERS][num_events];
+	uint16_t burst_lens[RED_MAX_HANDLERS] = { 0 };
+	uint16_t drop_count = 0;
+	uint16_t dispatch_count;
+	uint16_t dispatched = 0;
+
+	for (i = 0; i < num_events; i++) {
+		struct rte_event *event = &events[i];
+		int handler_idx;
+
+		handler_idx = red_lookup_handler_idx(lcore, event);
+
+		if (unlikely(handler_idx < 0)) {
+			drop_count++;
+			continue;
+		}
+
+		bursts[handler_idx][burst_lens[handler_idx]] = *event;
+		burst_lens[handler_idx]++;
+	}
+
+	dispatch_count = num_events - drop_count;
+
+	for (i = 0; i < lcore->num_handlers &&
+		 dispatched < dispatch_count; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+		uint16_t len = burst_lens[i];
+
+		if (len == 0)
+			continue;
+
+		handler->process_fun(dispatcher->event_dev_id, port->port_id,
+				     bursts[i], len, handler->process_data);
+
+		dispatched += len;
+
+		/*
+		 * Safe, since any reshuffling will only involve
+		 * already-processed handlers.
+		 */
+		red_consider_prioritize_handler(lcore, i, len);
+	}
+
+	lcore->stats.ev_dispatch_count += dispatch_count;
+	lcore->stats.ev_drop_count += drop_count;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		finalizer->finalize_fun(dispatcher->event_dev_id,
+					port->port_id,
+					finalizer->finalize_data);
+	}
+}
+
+static __rte_always_inline uint16_t
+red_port_dequeue(struct rte_event_dispatcher *dispatcher,
+		 struct rte_event_dispatcher_lcore *lcore,
+		 struct rte_event_dispatcher_lcore_port *port)
+{
+	uint16_t batch_size = port->batch_size;
+	struct rte_event events[batch_size];
+	uint16_t n;
+
+	n = rte_event_dequeue_burst(dispatcher->event_dev_id, port->port_id,
+				    events, batch_size, port->timeout);
+
+	if (likely(n > 0))
+		red_dispatch_events(dispatcher, lcore, port, events, n);
+
+	lcore->stats.poll_count++;
+
+	return n;
+}
+
+static __rte_always_inline uint16_t
+red_lcore_process(struct rte_event_dispatcher *dispatcher,
+		  struct rte_event_dispatcher_lcore *lcore)
+{
+	uint16_t i;
+	uint16_t event_count = 0;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		event_count += red_port_dequeue(dispatcher, lcore, port);
+	}
+
+	return event_count;
+}
+
+static int32_t
+red_process(void *userdata)
+{
+	struct rte_event_dispatcher *dispatcher = userdata;
+	unsigned int lcore_id = rte_lcore_id();
+	struct rte_event_dispatcher_lcore *lcore =
+		&dispatcher->lcores[lcore_id];
+	uint64_t event_count;
+
+	event_count = red_lcore_process(dispatcher, lcore);
+
+	if (unlikely(event_count == 0))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+red_service_register(struct rte_event_dispatcher *dispatcher)
+{
+	struct rte_service_spec service = {
+		.callback = red_process,
+		.callback_userdata = dispatcher,
+		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
+		.socket_id = dispatcher->socket_id
+	};
+	int rc;
+
+	snprintf(service.name, RTE_SERVICE_NAME_MAX - 1, "red_%d",
+		 dispatcher->id);
+
+	rc = rte_service_component_register(&service, &dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Registration of event dispatcher service "
+				 "%s failed with error code %d\n",
+				 service.name, rc);
+
+	return rc;
+}
+
+static int
+red_service_unregister(struct rte_event_dispatcher *dispatcher)
+{
+	int rc;
+
+	rc = rte_service_component_unregister(dispatcher->service_id);
+
+	if (rc)
+		RTE_EDEV_LOG_ERR("Unregistration of event dispatcher service "
+				 "failed with error code %d\n", rc);
+
+	return rc;
+}
+
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id)
+{
+	int socket_id;
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	if (red_has_dispatcher(id)) {
+		RTE_EDEV_LOG_ERR("Dispatcher with id %d already exists\n",
+				 id);
+		return -EEXIST;
+	}
+
+	socket_id = rte_event_dev_socket_id(event_dev_id);
+
+	dispatcher =
+		rte_malloc_socket("event dispatcher",
+				  sizeof(struct rte_event_dispatcher),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (dispatcher == NULL) {
+		RTE_EDEV_LOG_ERR("Unable to allocate memory for event "
+				 "dispatcher\n");
+		return -ENOMEM;
+	}
+
+	*dispatcher = (struct rte_event_dispatcher) {
+		.id = id,
+		.event_dev_id = event_dev_id,
+		.socket_id = socket_id
+	};
+
+	rc = red_service_register(dispatcher);
+
+	if (rc < 0) {
+		rte_free(dispatcher);
+		return rc;
+	}
+
+	red_set_dispatcher(id, dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_free(uint8_t id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = red_service_unregister(dispatcher);
+
+	if (rc)
+		return rc;
+
+	red_set_dispatcher(id, NULL);
+
+	rte_free(dispatcher);
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	*service_id = dispatcher->service_id;
+
+	return 0;
+}
+
+static int
+lcore_port_index(struct rte_event_dispatcher_lcore *lcore,
+		 uint8_t event_port_id)
+{
+	uint16_t i;
+
+	for (i = 0; i < lcore->num_ports; i++) {
+		struct rte_event_dispatcher_lcore_port *port =
+			&lcore->ports[i];
+
+		if (port->port_id == event_port_id)
+			return i;
+	}
+
+	return -1;
+}
+
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	struct rte_event_dispatcher_lcore_port *port;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	if (lcore->num_ports == RED_MAX_PORTS_PER_LCORE)
+		return -ENOMEM;
+
+	if (lcore_port_index(lcore, event_port_id) >= 0)
+		return -EEXIST;
+
+	port = &lcore->ports[lcore->num_ports];
+
+	*port = (struct rte_event_dispatcher_lcore_port) {
+		.port_id = event_port_id,
+		.batch_size = batch_size,
+		.timeout = timeout
+	};
+
+	lcore->num_ports++;
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_lcore *lcore;
+	int port_idx;
+	struct rte_event_dispatcher_lcore_port *port;
+	struct rte_event_dispatcher_lcore_port *last;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	lcore =	&dispatcher->lcores[lcore_id];
+
+	port_idx = lcore_port_index(lcore, event_port_id);
+
+	if (port_idx < 0)
+		return -ENOENT;
+
+	port = &lcore->ports[port_idx];
+	last = &lcore->ports[lcore->num_ports - 1];
+
+	if (port != last)
+		*port = *last;
+
+	lcore->num_ports--;
+
+	return 0;
+}
+
+static struct rte_event_dispatcher_handler*
+red_lcore_get_handler_by_id(struct rte_event_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	int i;
+
+	for (i = 0; i < lcore->num_handlers; i++) {
+		struct rte_event_dispatcher_handler *handler =
+			&lcore->handlers[i];
+
+		if (handler->id == handler_id)
+			return handler;
+	}
+
+	return NULL;
+}
+
+static int
+red_alloc_handler_id(struct rte_event_dispatcher *dispatcher)
+{
+	int handler_id = 0;
+	struct rte_event_dispatcher_lcore *reference_lcore =
+		&dispatcher->lcores[0];
+
+	if (reference_lcore->num_handlers == RED_MAX_HANDLERS)
+		return -1;
+
+	while (red_lcore_get_handler_by_id(reference_lcore, handler_id) != NULL)
+		handler_id++;
+
+	return handler_id;
+}
+
+static void
+red_lcore_install_handler(struct rte_event_dispatcher_lcore *lcore,
+		    const struct rte_event_dispatcher_handler *handler)
+{
+	int handler_idx = lcore->num_handlers;
+
+	lcore->handlers[handler_idx] = *handler;
+	lcore->num_handlers++;
+}
+
+static void
+red_install_handler(struct rte_event_dispatcher *dispatcher,
+		    const struct rte_event_dispatcher_handler *handler)
+{
+	int i;
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[i];
+		red_lcore_install_handler(lcore, handler);
+	}
+}
+
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_handler handler = {
+		.match_fun = match_fun,
+		.match_data = match_data,
+		.process_fun = process_fun,
+		.process_data = process_data
+	};
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	handler.id = red_alloc_handler_id(dispatcher);
+
+	if (handler.id < 0)
+		return -ENOMEM;
+
+	red_install_handler(dispatcher, &handler);
+
+	return handler.id;
+}
+
+static int
+red_lcore_uninstall_handler(struct rte_event_dispatcher_lcore *lcore,
+			    int handler_id)
+{
+	struct rte_event_dispatcher_handler *unreg_handler;
+	int handler_idx;
+	uint16_t last_idx;
+
+	unreg_handler = red_lcore_get_handler_by_id(lcore, handler_id);
+
+	if (unreg_handler == NULL)
+		return -EINVAL;
+
+	handler_idx = unreg_handler - &lcore->handlers[0];
+
+	last_idx = lcore->num_handlers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all handlers to maintain handler order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_handler, unreg_handler + 1,
+			sizeof(struct rte_event_dispatcher_handler) * n);
+	}
+
+	lcore->num_handlers--;
+
+	return 0;
+}
+
+static int
+red_uninstall_handler(struct rte_event_dispatcher *dispatcher,
+		      int handler_id)
+{
+	unsigned int lcore_id;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+		int rc;
+
+		rc = red_lcore_uninstall_handler(lcore, handler_id);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = red_uninstall_handler(dispatcher, handler_id);
+
+	return rc;
+}
+
+static struct rte_event_dispatcher_finalizer*
+red_get_finalizer_by_id(struct rte_event_dispatcher *dispatcher,
+		       int handler_id)
+{
+	int i;
+
+	for (i = 0; i < dispatcher->num_finalizers; i++) {
+		struct rte_event_dispatcher_finalizer *finalizer =
+			&dispatcher->finalizers[i];
+
+		if (finalizer->id == handler_id)
+			return finalizer;
+	}
+
+	return NULL;
+}
+
+static int
+red_alloc_finalizer_id(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_id = 0;
+
+	while (red_get_finalizer_by_id(dispatcher, finalizer_id) != NULL)
+		finalizer_id++;
+
+	return finalizer_id;
+}
+
+static struct rte_event_dispatcher_finalizer *
+red_alloc_finalizer(struct rte_event_dispatcher *dispatcher)
+{
+	int finalizer_idx;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	if (dispatcher->num_finalizers == RED_MAX_FINALIZERS)
+		return NULL;
+
+	finalizer_idx = dispatcher->num_finalizers;
+	finalizer = &dispatcher->finalizers[finalizer_idx];
+
+	finalizer->id = red_alloc_finalizer_id(dispatcher);
+
+	dispatcher->num_finalizers++;
+
+	return finalizer;
+}
+
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			      rte_event_dispatcher_finalize_t finalize_fun,
+			      void *finalize_data)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *finalizer;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	finalizer = red_alloc_finalizer(dispatcher);
+
+	if (finalizer == NULL)
+		return -ENOMEM;
+
+	finalizer->finalize_fun = finalize_fun;
+	finalizer->finalize_data = finalize_data;
+
+	return finalizer->id;
+}
+
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int handler_id)
+{
+	struct rte_event_dispatcher *dispatcher;
+	struct rte_event_dispatcher_finalizer *unreg_finalizer;
+	int handler_idx;
+	uint16_t last_idx;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	unreg_finalizer = red_get_finalizer_by_id(dispatcher, handler_id);
+
+	if (unreg_finalizer == NULL)
+		return -EINVAL;
+
+	handler_idx = unreg_finalizer - &dispatcher->finalizers[0];
+
+	last_idx = dispatcher->num_finalizers - 1;
+
+	if (handler_idx != last_idx) {
+		/* move all finalizers to maintain finalizer order */
+		int n = last_idx - handler_idx;
+		memmove(unreg_finalizer, unreg_finalizer + 1,
+			sizeof(struct rte_event_dispatcher_finalizer) * n);
+	}
+
+	dispatcher->num_finalizers--;
+
+	return 0;
+}
+
+static void
+red_aggregate_stats(struct rte_event_dispatcher_stats *result,
+		    const struct rte_event_dispatcher_stats *part)
+{
+	result->poll_count += part->poll_count;
+	result->ev_dispatch_count += part->ev_dispatch_count;
+	result->ev_drop_count += part->ev_drop_count;
+}
+
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats)
+{
+	struct rte_event_dispatcher *dispatcher;
+	unsigned int lcore_id;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	*stats = (struct rte_event_dispatcher_stats) {};
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_event_dispatcher_lcore *lcore =
+			&dispatcher->lcores[lcore_id];
+
+		red_aggregate_stats(stats, &lcore->stats);
+	}
+
+	return 0;
+}
+
+static int
+red_set_service_runstate(uint8_t id, int state)
+{
+	struct rte_event_dispatcher *dispatcher;
+	int rc;
+
+	RED_VALID_ID_OR_RET_EINVAL(id);
+	dispatcher = red_get_dispatcher(id);
+
+	rc = rte_service_component_runstate_set(dispatcher->service_id,
+						state);
+
+	if (rc != 0) {
+		RTE_EDEV_LOG_ERR("Unexpected error %d occurred while setting "
+				 "service component run state to %d\n", rc,
+				 state);
+		RTE_ASSERT(0);
+	}
+
+	return 0;
+}
+
+int
+rte_event_dispatcher_start(uint8_t id)
+{
+	return red_set_service_runstate(id, 1);
+}
+
+int
+rte_event_dispatcher_stop(uint8_t id)
+{
+	return red_set_service_runstate(id, 0);
+}
diff --git a/lib/eventdev/rte_event_dispatcher.h b/lib/eventdev/rte_event_dispatcher.h
new file mode 100644
index 0000000000..927e7e0b3c
--- /dev/null
+++ b/lib/eventdev/rte_event_dispatcher.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Ericsson AB
+ */
+
+#ifndef __RTE_EVENT_DISPATCHER_H__
+#define __RTE_EVENT_DISPATCHER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Dispatcher
+ *
+ * The purpose of the event dispatcher is to help decouple different parts
+ * of an application (e.g., modules), sharing the same underlying
+ * event device.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_eventdev.h>
+
+/**
+ * Function prototype for match callbacks.
+ *
+ * Match callbacks are used by an application to decide how the
+ * event dispatcher distributes events to different parts of the
+ * application.
+ *
+ * The application is not expected to process the event at the point
+ * of the match call. Such matters should be deferred to the process
+ * callback invocation.
+ *
+ * The match callback may be used as an opportunity to prefetch data.
+ *
+ * @param event
+ *  Pointer to event
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ *
+ * @return
+ *   Returns true if this event should be delivered (via
+ *   the process callback), and false otherwise.
+ */
+typedef bool
+(*rte_event_dispatcher_match_t)(const struct rte_event *event, void *cb_data);
+
+/**
+ * Function prototype for process callbacks.
+ *
+ * The process callbacks are used by the event dispatcher to deliver
+ * events for processing.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param events
+ *  Pointer to an array of events.
+ *
+ * @param num
+ *  The number of events in the @p events array.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_register().
+ */
+
+typedef void
+(*rte_event_dispatcher_process_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				  struct rte_event *events, uint16_t num,
+				  void *cb_data);
+
+/**
+ * Function prototype for finalize callbacks.
+ *
+ * The finalize callbacks are used by the event dispatcher to notify
+ * the application it has delivered all events from a particular batch
+ * dequeued from the event device.
+ *
+ * @param event_dev_id
+ *  The originating event device id.
+ *
+ * @param event_port_id
+ *  The originating event port.
+ *
+ * @param cb_data
+ *  The pointer supplied by the application in
+ *  rte_event_dispatcher_finalize_register().
+ */
+
+typedef void
+(*rte_event_dispatcher_finalize_t)(uint8_t event_dev_id, uint8_t event_port_id,
+				   void *cb_data);
+
+/**
+ * Event dispatcher statistics
+ */
+struct rte_event_dispatcher_stats {
+	uint64_t poll_count;
+	/**< Number of event dequeue calls made toward the event device. */
+	uint64_t ev_dispatch_count;
+	/**< Number of events dispatched to a handler. */
+	uint64_t ev_drop_count;
+	/**< Number of events dropped because no handler was found. */
+};
+
+/**
+ * Create an event dispatcher with the specified id.
+ *
+ * @param id
+ *  An application-specified, unique (across all event dispatcher
+ *  instances) identifier.
+ *
+ * @param event_dev_id
+ *  The identifier of the event device from which this event dispatcher
+ *  will dequeue events.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_create(uint8_t id, uint8_t event_dev_id);
+
+/**
+ * Free an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure
+ */
+__rte_experimental
+int
+rte_event_dispatcher_free(uint8_t id);
+
+/**
+ * Retrieve the service identifier of an event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param [out] service_id
+ *  A pointer to a caller-supplied buffer where the event dispatcher's
+ *  service id will be stored.
+ *
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * Binds an event device port to a specific lcore on the specified
+ * event dispatcher.
+ *
+ * This function configures the event port id to be used by the event
+ * dispatcher service, if run on the specified lcore.
+ *
+ * Multiple event device ports may be bound to the same lcore. A
+ * particular port must not be bound to more than one lcore.
+ *
+ * If the event dispatcher service is mapped (with
+ * rte_service_map_lcore_set()) to an lcore for which no ports are
+ * bound, the service function will be a no-operation.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param batch_size
+ *  The batch size to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param timeout
+ *  The timeout parameter to use in rte_event_dequeue_burst(), for the
+ *  configured event device port and lcore.
+ *
+ * @param lcore_id
+ *  The lcore by which this event port will be used.
+ *
+ * @return
+ *  - 0: Success
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ *  - -EEXIST: Event port is already configured.
+ *  - -EINVAL: Invalid arguments.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_bind_port_to_lcore(uint8_t id, uint8_t event_port_id,
+					uint16_t batch_size, uint64_t timeout,
+					unsigned int lcore_id);
+
+/**
+ * Unbind an event device port from a specific lcore.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param event_port_id
+ *  The event device port identifier.
+ *
+ * @param lcore_id
+ *  The lcore which was using this event port.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ *  - -ENOENT: Event port id not bound to this @c lcore_id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unbind_port_from_lcore(uint8_t id, uint8_t event_port_id,
+					    unsigned int lcore_id);
+
+/**
+ * Register an event handler.
+ *
+ * The match callback function is used to select if a particular event
+ * should be delivered, using the corresponding process callback
+ * function.
+ *
+ * The reason for having two distinct steps is to allow the dispatcher
+ * to deliver all events as a batch. This in turn causes events of a
+ * particular kind to be processed back-to-back, improving cache
+ * locality.
+ *
+ * The list of handler callback functions is shared among all lcores,
+ * but will only be executed on lcores which have an eventdev port
+ * bound to them, and which are running the event dispatcher service.
+ *
+ * An event is delivered to at most one handler. Events where no
+ * handler is found are dropped.
+ *
+ * The application must not depend on the order in which the match
+ * functions are invoked.
+ *
+ * Ordering of events is not guaranteed to be maintained between
+ * different process callbacks. For example, suppose there are two
+ * callbacks registered, matching different subsets of events arriving
+ * on an atomic queue. A batch of events [ev0, ev1, ev2] is dequeued
+ * on a particular port, all pertaining to the same flow. The match
+ * callback for registration A returns true for ev0 and ev2, and the
+ * match callback for registration B returns true for ev1. In that
+ * scenario, the event dispatcher may choose to deliver first [ev0,
+ * ev2] using A's process callback, and then [ev1] to B - or vice
+ * versa.
+ *
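+ * As an illustration only (the callback and helper names below are
+ * hypothetical), a match/process pair selecting events from a
+ * particular queue might look like:
+ *
+ * @code{.c}
+ * static bool
+ * match_rx(const struct rte_event *event, void *cb_data)
+ * {
+ *     const uint8_t *queue_id = cb_data;
+ *
+ *     return event->queue_id == *queue_id;
+ * }
+ *
+ * static void
+ * process_rx(uint8_t event_dev_id, uint8_t event_port_id,
+ *            struct rte_event *events, uint16_t num, void *cb_data)
+ * {
+ *     uint16_t i;
+ *
+ *     for (i = 0; i < num; i++)
+ *             app_handle_rx_event(&events[i]);
+ * }
+ * @endcode
+ *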
+ * rte_event_dispatcher_register() is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param match_fun
+ *  The match callback function.
+ *
+ * @param match_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when match_fun is
+ *  called.
+ *
+ * @param process_fun
+ *  The process callback function.
+ *
+ * @param process_cb_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when process_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_register(uint8_t id,
+			      rte_event_dispatcher_match_t match_fun,
+			      void *match_cb_data,
+			      rte_event_dispatcher_process_t process_fun,
+			      void *process_cb_data);
+
+/**
+ * Unregister an event handler.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param handler_id
+ *  The handler registration id returned by the original
+ *  rte_event_dispatcher_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c handler_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_unregister(uint8_t id, int handler_id);
+
+/**
+ * Register a finalize callback function.
+ *
+ * An application may optionally install one or more finalize
+ * callbacks.
+ *
+ * All finalize callbacks are invoked by the event dispatcher when a
+ * complete batch of events (retrieved using rte_event_dequeue_burst())
+ * has been delivered to the application (or dropped).
+ *
+ * The finalize callback is not tied to any particular handler.
+ *
+ * The finalize callback provides an opportunity for the application
+ * to do per-batch processing. One case where this may be useful is if
+ * an event output buffer is used, and is shared among several
+ * handlers. In such a case, proper output buffer flushing may be
+ * assured using a finalize callback.
+ *
+ * rte_event_dispatcher_finalize_register() is not MT safe.
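+ * As an illustration only (the buffer type and flush helper are
+ * hypothetical), such a finalize callback might look like:
+ *
+ * @code{.c}
+ * static void
+ * flush_tx_buffer(uint8_t event_dev_id, uint8_t event_port_id,
+ *                 void *cb_data)
+ * {
+ *     struct app_event_buffer *buffer = cb_data;
+ *
+ *     app_event_buffer_flush(buffer, event_dev_id, event_port_id);
+ * }
+ * @endcode
+ *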
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param finalize_fun
+ *  The function called after completing the processing of a
+ *  dequeue batch.
+ *
+ * @param finalize_data
+ *  A pointer to some application-specific opaque data (or NULL),
+ *  which is supplied back to the application when @c finalize_fun is
+ *  called.
+ *
+ * @return
+ *  - >= 0: The identifier for this registration.
+ *  - -ENOMEM: Unable to allocate sufficient resources.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_register(uint8_t id,
+			    rte_event_dispatcher_finalize_t finalize_fun,
+			    void *finalize_data);
+
+/**
+ * Unregister a finalize callback.
+ *
+ * This function is not MT safe.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @param reg_id
+ *  The finalize registration id returned by the original
+ *  rte_event_dispatcher_finalize_register() call.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id and/or the @c reg_id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_finalize_unregister(uint8_t id, int reg_id);
+
+/**
+ * Start an event dispatcher instance.
+ *
+ * Enables the event dispatcher service.
+ *
+ * The underlying event device must have been started prior to calling
+ * rte_event_dispatcher_start().
+ *
+ * For the event dispatcher to actually perform work (i.e., dispatch
+ * events), its service must have been mapped to one or more service
+ * lcores, and its service run state set to '1'. An event dispatcher's
+ * service id is retrieved using rte_event_dispatcher_service_id_get().
+ *
+ * Each service lcore to which the event dispatcher is mapped should
+ * have at least one event port configured. Such configuration is
+ * performed by calling rte_event_dispatcher_bind_port_to_lcore(),
+ * prior to starting the event dispatcher.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_start(uint8_t id);
+
+/**
+ * Stop a running event dispatcher instance.
+ *
+ * Disables the event dispatcher service.
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ *
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: Invalid @c id.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stop(uint8_t id);
+
+/**
+ * Retrieve statistics for an event dispatcher instance.
+ *
+ * This function is MT safe and may be called from any thread
+ * (including unregistered non-EAL threads).
+ *
+ * @param id
+ *  The event dispatcher identifier.
+ * @param[out] stats
+ *   A pointer to a structure to fill with statistics.
+ * @return
+ *  - 0: Success
+ *  - -EINVAL: The @c id parameter was invalid.
+ */
+__rte_experimental
+int
+rte_event_dispatcher_stats_get(uint8_t id,
+			       struct rte_event_dispatcher_stats *stats);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_DISPATCHER_H__ */
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 89068a5713..d3aa878686 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -131,6 +131,19 @@ EXPERIMENTAL {
 	rte_event_eth_tx_adapter_runtime_params_init;
 	rte_event_eth_tx_adapter_runtime_params_set;
 	rte_event_timer_remaining_ticks_get;
+
+	rte_event_dispatcher_create;
+	rte_event_dispatcher_free;
+	rte_event_dispatcher_service_id_get;
+	rte_event_dispatcher_bind_port_to_lcore;
+	rte_event_dispatcher_unbind_port_from_lcore;
+	rte_event_dispatcher_register;
+	rte_event_dispatcher_unregister;
+	rte_event_dispatcher_finalize_register;
+	rte_event_dispatcher_finalize_unregister;
+	rte_event_dispatcher_start;
+	rte_event_dispatcher_stop;
+	rte_event_dispatcher_stats_get;
 };
 
 INTERNAL {
-- 
2.34.1

