[RFC 1/2] sched: new APIs for per-subport queue sizes
From: alangordondewar @ 2018-02-28 13:39 UTC
  To: cristian.dumitrescu; +Cc: dev, Alan Dewar

From: Alan Dewar <alan.dewar@att.com>

Added new APIs to allow the maximum queue sizes for each traffic class
to be configured on a per-subport basis, rather than all subports
inheriting their maximum queue sizes from their parent port.

Added a new sched unit test to exercise the new APIs.

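A minimal usage sketch (illustrative only; the setup_port() helper and
the queue sizes below are invented for the example, not part of this
patch) showing how an application might drive the new v2 APIs:

  static uint16_t qsize[2][RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {
          { 64, 64, 64, 64 },     /* subport 0 TC queue sizes */
          { 128, 128, 256, 512 }, /* subport 1 TC queue sizes */
  };

  static struct rte_sched_port *
  setup_port(struct rte_sched_port_params *pp,
             struct rte_sched_subport_params *sp)
  {
          struct rte_sched_port *port;
          uint32_t bytes = 0;
          uint32_t s, tc;

          /* Pre-calculate the queue-array size from per-subport qsizes */
          for (s = 0; s < 2; s++)
                  for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++)
                          bytes += qsize[s][tc] *
                                  RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS *
                                  pp->n_pipes_per_subport *
                                  sizeof(struct rte_mbuf *);

          port = rte_sched_port_config_v2(pp, bytes);
          if (port == NULL)
                  return NULL;

          /* A NULL qsize would make a subport inherit the port's qsizes */
          for (s = 0; s < 2; s++)
                  if (rte_sched_subport_config_v2(port, s, &sp[s],
                                                  &qsize[s][0]) != 0) {
                          rte_sched_port_free(port);
                          return NULL;
                  }
          return port;
  }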
Signed-off-by: Alan Dewar <alan.dewar@att.com>
---
 lib/librte_sched/rte_sched.c           | 243 +++++++++++++++++++++---------
 lib/librte_sched/rte_sched.h           |  48 ++++++
 lib/librte_sched/rte_sched_version.map |   8 +
 test/test/test_sched.c                 | 260 ++++++++++++++++++++++++++++++++-
 4 files changed, 485 insertions(+), 74 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 634486c..9436ba5 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -58,6 +58,7 @@ struct rte_sched_subport {
 	uint64_t tc_time; /* time of next update */
 	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 	uint32_t tc_period;
 
 	/* TC oversubscription */
@@ -71,6 +72,11 @@ struct rte_sched_subport {
 
 	/* Statistics */
 	struct rte_sched_subport_stats stats;
+
+	/* Queue base calculation */
+	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
+	uint32_t qsize_sum;
+	uint32_t qoffset;
 };
 
 struct rte_sched_pipe_profile {
@@ -215,10 +221,6 @@ struct rte_sched_port {
 	struct rte_mbuf **pkts_out;
 	uint32_t n_pkts_out;
 
-	/* Queue base calculation */
-	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
-	uint32_t qsize_sum;
-
 	/* Large data structures */
 	struct rte_sched_subport *subport;
 	struct rte_sched_pipe *pipe;
@@ -241,16 +243,12 @@ enum rte_sched_port_array {
 	e_RTE_SCHED_PORT_ARRAY_TOTAL,
 };
 
-#ifdef RTE_SCHED_COLLECT_STATS
-
 static inline uint32_t
 rte_sched_port_queues_per_subport(struct rte_sched_port *port)
 {
 	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
 }
 
-#endif
-
 static inline uint32_t
 rte_sched_port_queues_per_port(struct rte_sched_port *port)
 {
@@ -260,19 +258,27 @@ rte_sched_port_queues_per_port(struct rte_sched_port *port)
 static inline struct rte_mbuf **
 rte_sched_port_qbase(struct rte_sched_port *port, uint32_t qindex)
 {
-	uint32_t pindex = qindex >> 4;
 	uint32_t qpos = qindex & 0xF;
+	uint32_t subport_id = qindex / rte_sched_port_queues_per_subport(port);
+	struct rte_sched_subport *subport = port->subport + subport_id;
+	uint32_t subport_pipe_offset;
+
+	/* Offset of this pipe's queue storage within the subport's region */
+	subport_pipe_offset = qindex % rte_sched_port_queues_per_subport(port);
+	subport_pipe_offset /= RTE_SCHED_QUEUES_PER_PIPE;
+	subport_pipe_offset *= subport->qsize_sum;
 
-	return (port->queue_array + pindex *
-		port->qsize_sum + port->qsize_add[qpos]);
+	return (port->queue_array + subport->qoffset + subport_pipe_offset +
+		subport->qsize_add[qpos]);
 }
 
 static inline uint16_t
 rte_sched_port_qsize(struct rte_sched_port *port, uint32_t qindex)
 {
 	uint32_t tc = (qindex >> 2) & 0x3;
+	uint32_t subport_id = qindex / rte_sched_port_queues_per_subport(port);
+	struct rte_sched_subport *subport = port->subport + subport_id;
 
-	return port->qsize[tc];
+	return subport->qsize[tc];
 }
 
 static int
@@ -360,7 +366,9 @@ rte_sched_port_check_params(struct rte_sched_port_params *params)
 }
 
 static uint32_t
-rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sched_port_array array)
+rte_sched_port_get_array_base(struct rte_sched_port_params *params,
+			      enum rte_sched_port_array array,
+			      uint32_t size_queue_array)
 {
 	uint32_t n_subports_per_port = params->n_subports_per_port;
 	uint32_t n_pipes_per_subport = params->n_pipes_per_subport;
@@ -375,16 +383,19 @@ rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sch
 	uint32_t size_pipe_profiles
 		= RTE_SCHED_PIPE_PROFILES_PER_PORT * sizeof(struct rte_sched_pipe_profile);
 	uint32_t size_bmp_array = rte_bitmap_get_memory_footprint(n_queues_per_port);
-	uint32_t size_per_pipe_queue_array, size_queue_array;
+	uint32_t size_per_pipe_queue_array;
 
 	uint32_t base, i;
 
-	size_per_pipe_queue_array = 0;
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-		size_per_pipe_queue_array += RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
-			* params->qsize[i] * sizeof(struct rte_mbuf *);
+	if (size_queue_array == 0) {
+		size_per_pipe_queue_array = 0;
+		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+			size_per_pipe_queue_array +=
+				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
+				* params->qsize[i] * sizeof(struct rte_mbuf *);
+		}
+		size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;
 	}
-	size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;
 
 	base = 0;
 
@@ -419,8 +430,9 @@ rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sch
 	return base;
 }
 
-uint32_t
-rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+static uint32_t
+rte_sched_port_get_memory_footprint_common(struct rte_sched_port_params *params,
+					   uint32_t size_queue_array)
 {
 	uint32_t size0, size1;
 	int status;
@@ -434,39 +446,93 @@ rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
 	}
 
 	size0 = sizeof(struct rte_sched_port);
-	size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);
+	size1 = rte_sched_port_get_array_base(params,
+					      e_RTE_SCHED_PORT_ARRAY_TOTAL,
+					      size_queue_array);
 
 	return size0 + size1;
 }
 
+uint32_t
+rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+{
+	return rte_sched_port_get_memory_footprint_common(params, 0);
+}
+
+uint32_t
+rte_sched_port_get_memory_footprint_v2(struct rte_sched_port_params *params,
+				       uint32_t size_queue_array)
+{
+	return rte_sched_port_get_memory_footprint_common(params,
+							  size_queue_array);
+}
+
 static void
-rte_sched_port_config_qsize(struct rte_sched_port *port)
+rte_sched_subport_config_qsize(struct rte_sched_port *port,
+			       uint32_t subport_id,
+			       uint16_t *qsize)
 {
+	struct rte_sched_subport *subport = port->subport + subport_id;
+	uint32_t tc;
+
+	for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
+		if (qsize == NULL)
+			/* The subport inherits its qsizes from the port */
+			subport->qsize[tc] = port->qsize[tc];
+		else
+			/* The subport has explicitly configured qsizes */
+			subport->qsize[tc] = qsize[tc];
+	}
+
 	/* TC 0 */
-	port->qsize_add[0] = 0;
-	port->qsize_add[1] = port->qsize_add[0] + port->qsize[0];
-	port->qsize_add[2] = port->qsize_add[1] + port->qsize[0];
-	port->qsize_add[3] = port->qsize_add[2] + port->qsize[0];
+	subport->qsize_add[0] = 0;
+	subport->qsize_add[1] = subport->qsize_add[0] + subport->qsize[0];
+	subport->qsize_add[2] = subport->qsize_add[1] + subport->qsize[0];
+	subport->qsize_add[3] = subport->qsize_add[2] + subport->qsize[0];
 
 	/* TC 1 */
-	port->qsize_add[4] = port->qsize_add[3] + port->qsize[0];
-	port->qsize_add[5] = port->qsize_add[4] + port->qsize[1];
-	port->qsize_add[6] = port->qsize_add[5] + port->qsize[1];
-	port->qsize_add[7] = port->qsize_add[6] + port->qsize[1];
+	subport->qsize_add[4] = subport->qsize_add[3] + subport->qsize[0];
+	subport->qsize_add[5] = subport->qsize_add[4] + subport->qsize[1];
+	subport->qsize_add[6] = subport->qsize_add[5] + subport->qsize[1];
+	subport->qsize_add[7] = subport->qsize_add[6] + subport->qsize[1];
 
 	/* TC 2 */
-	port->qsize_add[8] = port->qsize_add[7] + port->qsize[1];
-	port->qsize_add[9] = port->qsize_add[8] + port->qsize[2];
-	port->qsize_add[10] = port->qsize_add[9] + port->qsize[2];
-	port->qsize_add[11] = port->qsize_add[10] + port->qsize[2];
+	subport->qsize_add[8] = subport->qsize_add[7] + subport->qsize[1];
+	subport->qsize_add[9] = subport->qsize_add[8] + subport->qsize[2];
+	subport->qsize_add[10] = subport->qsize_add[9] + subport->qsize[2];
+	subport->qsize_add[11] = subport->qsize_add[10] + subport->qsize[2];
 
 	/* TC 3 */
-	port->qsize_add[12] = port->qsize_add[11] + port->qsize[2];
-	port->qsize_add[13] = port->qsize_add[12] + port->qsize[3];
-	port->qsize_add[14] = port->qsize_add[13] + port->qsize[3];
-	port->qsize_add[15] = port->qsize_add[14] + port->qsize[3];
+	subport->qsize_add[12] = subport->qsize_add[11] + subport->qsize[2];
+	subport->qsize_add[13] = subport->qsize_add[12] + subport->qsize[3];
+	subport->qsize_add[14] = subport->qsize_add[13] + subport->qsize[3];
+	subport->qsize_add[15] = subport->qsize_add[14] + subport->qsize[3];
+
+	subport->qsize_sum = subport->qsize_add[15] + subport->qsize[3];
 
-	port->qsize_sum = port->qsize_add[15] + port->qsize[3];
+	if (subport_id != 0) {
+		struct rte_sched_subport *prev = port->subport +
+			(subport_id - 1);
+
+		subport->qoffset = prev->qoffset +
+			prev->qsize_sum * port->n_pipes_per_subport;
+	}
+}
+
+static char *
+rte_sched_build_queue_size_string(uint16_t *qsize, char *output_str)
+{
+	uint32_t tc;
+	int str_len;
+
+	str_len = sprintf(output_str, "[");
+	for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
+		str_len += sprintf(output_str + str_len, "%u",
+				   qsize[tc]);
+		if (tc != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
+			str_len += sprintf(output_str + str_len, ", ");
+	}
+	sprintf(output_str + str_len, "]");
+	return output_str;
 }
 
 static void
@@ -590,16 +656,12 @@ rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte
 	}
 }
 
-struct rte_sched_port *
-rte_sched_port_config(struct rte_sched_port_params *params)
+static struct rte_sched_port *
+rte_sched_port_config_common(struct rte_sched_port_params *params,
+			     uint32_t mem_size)
 {
 	struct rte_sched_port *port = NULL;
-	uint32_t mem_size, bmp_mem_size, n_queues_per_port, i, cycles_per_byte;
-
-	/* Check user parameters. Determine the amount of memory to allocate */
-	mem_size = rte_sched_port_get_memory_footprint(params);
-	if (mem_size == 0)
-		return NULL;
+	uint32_t bmp_mem_size, n_queues_per_port, i, cycles_per_byte;
 
 	/* Allocate memory to store the data structures */
 	port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
@@ -659,30 +721,28 @@ rte_sched_port_config(struct rte_sched_port_params *params)
 	port->pkts_out = NULL;
 	port->n_pkts_out = 0;
 
-	/* Queue base calculation */
-	rte_sched_port_config_qsize(port);
-
 	/* Large data structures */
 	port->subport = (struct rte_sched_subport *)
 		(port->memory + rte_sched_port_get_array_base(params,
-							      e_RTE_SCHED_PORT_ARRAY_SUBPORT));
+			e_RTE_SCHED_PORT_ARRAY_SUBPORT, 0));
 	port->pipe = (struct rte_sched_pipe *)
 		(port->memory + rte_sched_port_get_array_base(params,
-							      e_RTE_SCHED_PORT_ARRAY_PIPE));
+			e_RTE_SCHED_PORT_ARRAY_PIPE, 0));
 	port->queue = (struct rte_sched_queue *)
 		(port->memory + rte_sched_port_get_array_base(params,
-							      e_RTE_SCHED_PORT_ARRAY_QUEUE));
+			e_RTE_SCHED_PORT_ARRAY_QUEUE, 0));
 	port->queue_extra = (struct rte_sched_queue_extra *)
 		(port->memory + rte_sched_port_get_array_base(params,
-							      e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
+			e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA, 0));
 	port->pipe_profiles = (struct rte_sched_pipe_profile *)
 		(port->memory + rte_sched_port_get_array_base(params,
-							      e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
+			e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES, 0));
 	port->bmp_array =  port->memory
-		+ rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
+		+ rte_sched_port_get_array_base(params,
+			e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY, 0);
 	port->queue_array = (struct rte_mbuf **)
 		(port->memory + rte_sched_port_get_array_base(params,
-							      e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));
+			e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY, 0));
 
 	/* Pipe profile table */
 	rte_sched_port_config_pipe_profile_table(port, params);
@@ -704,6 +764,35 @@ rte_sched_port_config(struct rte_sched_port_params *params)
 	return port;
 }
 
+
+struct rte_sched_port *
+rte_sched_port_config(struct rte_sched_port_params *params)
+{
+	uint32_t mem_size;
+
+	/* Check user parameters. Determine the amount of memory to allocate */
+	mem_size = rte_sched_port_get_memory_footprint(params);
+	if (mem_size == 0)
+		return NULL;
+
+	return rte_sched_port_config_common(params, mem_size);
+}
+
+struct rte_sched_port *
+rte_sched_port_config_v2(struct rte_sched_port_params *params,
+			 uint32_t queue_array_size)
+{
+	uint32_t mem_size;
+
+	/* Check user parameters. Determine the amount of memory to allocate */
+	mem_size = rte_sched_port_get_memory_footprint_common(params,
+							      queue_array_size);
+	if (mem_size == 0)
+		return NULL;
+
+	return rte_sched_port_config_common(params, mem_size);
+}
+
 void
 rte_sched_port_free(struct rte_sched_port *port)
 {
@@ -736,10 +825,13 @@ static void
 rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
 {
 	struct rte_sched_subport *s = port->subport + i;
+	char queue_size_str[(7 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + 3];
 
+	rte_sched_build_queue_size_string(s->qsize, queue_size_str);
 	RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
 		"    Token bucket: period = %u, credits per period = %u, size = %u\n"
 		"    Traffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
+		"    Traffic class queue-sizes: %s\n"
 		"    Traffic class 3 oversubscription: wm min = %u, wm max = %u\n",
 		i,
 
@@ -754,16 +846,18 @@ rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
 		s->tc_credits_per_period[1],
 		s->tc_credits_per_period[2],
 		s->tc_credits_per_period[3],
+		queue_size_str,
 
 		/* Traffic class 3 oversubscription */
 		s->tc_ov_wm_min,
 		s->tc_ov_wm_max);
 }
 
-int
-rte_sched_subport_config(struct rte_sched_port *port,
-	uint32_t subport_id,
-	struct rte_sched_subport_params *params)
+static int
+rte_sched_subport_config_common(struct rte_sched_port *port,
+				uint32_t subport_id,
+				struct rte_sched_subport_params *params,
+				uint16_t *qsize)
 {
 	struct rte_sched_subport *s;
 	uint32_t i;
@@ -808,15 +902,13 @@ rte_sched_subport_config(struct rte_sched_port *port,
 
 	/* Traffic Classes (TCs) */
 	s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
+	s->tc_time = port->time + s->tc_period;
 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
 		s->tc_credits_per_period[i]
 			= rte_sched_time_ms_to_bytes(params->tc_period,
 						     params->tc_rate[i]);
-	}
-	s->tc_time = port->time + s->tc_period;
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
 		s->tc_credits[i] = s->tc_credits_per_period[i];
-
+	}
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 	/* TC oversubscription */
 	s->tc_ov_wm_min = port->mtu;
@@ -829,12 +921,31 @@ rte_sched_subport_config(struct rte_sched_port *port,
 	s->tc_ov_rate = 0;
 #endif
 
+	rte_sched_subport_config_qsize(port, subport_id, qsize);
 	rte_sched_port_log_subport_config(port, subport_id);
 
 	return 0;
 }
 
 int
+rte_sched_subport_config(struct rte_sched_port *port,
+			 uint32_t subport_id,
+			 struct rte_sched_subport_params *params)
+{
+	return rte_sched_subport_config_common(port, subport_id, params, NULL);
+}
+
+int
+rte_sched_subport_config_v2(struct rte_sched_port *port,
+			    uint32_t subport_id,
+			    struct rte_sched_subport_params *params,
+			    uint16_t *qsize)
+{
+	return rte_sched_subport_config_common(port, subport_id, params,
+					       qsize);
+}
+
+int
 rte_sched_pipe_config(struct rte_sched_port *port,
 	uint32_t subport_id,
 	uint32_t pipe_id,
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 5d2a688..1e1d618 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -225,6 +225,20 @@ struct rte_sched_port *
 rte_sched_port_config(struct rte_sched_port_params *params);
 
 /**
+ * Hierarchical scheduler port configuration
+ *
+ * @param params
+ *   Port scheduler configuration parameter structure
+ * @param size_queue_array
+ *   Pre-calculated size of the port's queue-array
+ * @return
+ *   Handle to port scheduler instance upon success or NULL otherwise.
+ */
+struct rte_sched_port *
+rte_sched_port_config_v2(struct rte_sched_port_params *params,
+			 uint32_t size_queue_array);
+
+/**
  * Hierarchical scheduler port free
  *
  * @param port
@@ -251,6 +265,26 @@ rte_sched_subport_config(struct rte_sched_port *port,
 	struct rte_sched_subport_params *params);
 
 /**
+ * Hierarchical scheduler subport configuration
+ *
+ * @param port
+ *   Handle to port scheduler instance
+ * @param subport_id
+ *   Subport ID
+ * @param params
+ *   Subport configuration parameters
+ * @param qsize
+ *   Array of traffic-class maximum queue-lengths
+ * @return
+ *   0 upon success, error code otherwise
+ */
+int
+rte_sched_subport_config_v2(struct rte_sched_port *port,
+			    uint32_t subport_id,
+			    struct rte_sched_subport_params *params,
+			    uint16_t *qsize);
+
+/**
  * Hierarchical scheduler pipe configuration
  *
  * @param port
@@ -281,6 +315,20 @@ rte_sched_pipe_config(struct rte_sched_port *port,
 uint32_t
 rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params);
 
+/**
+ * Hierarchical scheduler memory footprint size per port
+ *
+ * @param params
+ *   Port scheduler configuration parameter structure
+ * @param size_queue_array
+ *   The required size of the port's queue-array
+ * @return
+ *   Memory footprint size in bytes upon success, 0 otherwise
+ */
+uint32_t
+rte_sched_port_get_memory_footprint_v2(struct rte_sched_port_params *params,
+				       uint32_t size_queue_array);
+
 /*
  * Statistics
  *
diff --git a/lib/librte_sched/rte_sched_version.map b/lib/librte_sched/rte_sched_version.map
index 3aa159a..ce92b82 100644
--- a/lib/librte_sched/rte_sched_version.map
+++ b/lib/librte_sched/rte_sched_version.map
@@ -29,3 +29,11 @@ DPDK_2.1 {
 	rte_sched_port_pkt_read_color;
 
 } DPDK_2.0;
+
+DPDK_18.05 {
+	global;
+
+	rte_sched_port_config_v2;
+	rte_sched_subport_config_v2;
+	rte_sched_port_get_memory_footprint_v2;
+} DPDK_2.1;
diff --git a/test/test/test_sched.c b/test/test/test_sched.c
index 32e500b..2b22ebe 100644
--- a/test/test/test_sched.c
+++ b/test/test/test_sched.c
@@ -15,7 +15,7 @@
 #include <rte_ip.h>
 #include <rte_byteorder.h>
 #include <rte_sched.h>
-
+#include <rte_malloc.h>
 
 #define SUBPORT         0
 #define PIPE            1
@@ -56,7 +56,7 @@ static struct rte_sched_port_params port_param = {
 	.n_pipe_profiles = 1,
 };
 
-#define NB_MBUF          32
+#define NB_MBUF          8192
 #define MBUF_DATA_SZ     (2048 + RTE_PKTMBUF_HEADROOM)
 #define MEMPOOL_CACHE_SZ 0
 #define SOCKET           0
@@ -76,7 +76,8 @@ create_mempool(void)
 }
 
 static void
-prepare_pkt(struct rte_mbuf *mbuf)
+prepare_pkt(struct rte_mbuf *mbuf, uint32_t subport, uint32_t pipe, uint32_t tc,
+	    uint32_t queue)
 {
 	struct ether_hdr *eth_hdr;
 	struct vlan_hdr *vlan1, *vlan2;
@@ -89,13 +90,14 @@ prepare_pkt(struct rte_mbuf *mbuf)
 	eth_hdr = (struct ether_hdr *)((uintptr_t)&eth_hdr->ether_type + 2 *sizeof(struct vlan_hdr));
 	ip_hdr = (struct ipv4_hdr *)((uintptr_t)eth_hdr +  sizeof(eth_hdr->ether_type));
 
-	vlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);
-	vlan2->vlan_tci = rte_cpu_to_be_16(PIPE);
+	vlan1->vlan_tci = rte_cpu_to_be_16(subport);
+	vlan2->vlan_tci = rte_cpu_to_be_16(pipe);
 	eth_hdr->ether_type =  rte_cpu_to_be_16(ETHER_TYPE_IPv4);
-	ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);
+	ip_hdr->dst_addr = IPv4(0, 0, tc, queue);
 
 
-	rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
+	rte_sched_port_pkt_write(mbuf, subport, pipe, tc, queue,
+				 e_RTE_METER_YELLOW);
 
 	/* 64 byte packet */
 	mbuf->pkt_len  = 60;
@@ -138,7 +140,7 @@ test_sched(void)
 	for (i = 0; i < 10; i++) {
 		in_mbufs[i] = rte_pktmbuf_alloc(mp);
 		TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
-		prepare_pkt(in_mbufs[i]);
+		prepare_pkt(in_mbufs[i], SUBPORT, PIPE, TC, QUEUE);
 	}
 
 
@@ -185,3 +187,245 @@ test_sched(void)
 }
 
 REGISTER_TEST_COMMAND(sched_autotest, test_sched);
+
+#define NB_SUBPORTS 2
+
+static struct rte_sched_subport_params subport_param_v2[] = {
+	{
+		.tb_rate = 1250000000,
+		.tb_size = 1000000,
+
+		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+		.tc_period = 10,
+	},
+	{
+		.tb_rate = 1250000000,
+		.tb_size = 1000000,
+
+		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+		.tc_period = 10,
+	},
+};
+
+static struct rte_sched_pipe_params pipe_profile_v2[] = {
+	{ /* Profile #0 */
+		.tb_rate = 1250000000,
+		.tb_size = 1000000,
+
+		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+		.tc_period = 10,
+
+		.wrr_weights = {1, 1, 1, 1,
+				1, 1, 1, 1,
+				1, 1, 1, 1,
+				1, 1, 1, 1},
+	},
+};
+
+static uint16_t subport_qsize[][RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {
+	{ 16, 32, 64, 128 },
+	{ 256, 512, 1024, 2048 },
+};
+
+static struct rte_sched_port_params port_param_v2 = {
+	.socket = 0, /* computed */
+	.rate = 0, /* computed */
+	.mtu = 1522,
+	.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
+	.n_subports_per_port = 2,
+	.n_pipes_per_subport = 128,
+	.qsize = {32, 32, 32, 32},
+	.pipe_profiles = pipe_profile_v2,
+	.n_pipe_profiles = 1,
+};
+
+static uint32_t subport_total_qsize(struct rte_sched_port_params *pp,
+				    uint16_t *qsize)
+{
+	uint32_t queue_array_size = 0;
+	uint32_t tc;
+
+	for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++)
+		queue_array_size += qsize[tc];
+
+	return (queue_array_size * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS *
+		pp->n_pipes_per_subport * sizeof(struct rte_mbuf *));
+}
+
+static int fill_queue_to_drop(struct rte_mempool *mp,
+			      struct rte_sched_port *port,
+			      uint32_t in_subport, uint32_t in_pipe,
+			      uint32_t in_tc, uint16_t qsize)
+{
+	struct rte_mbuf **in_mbufs;
+	struct rte_mbuf **out_mbufs;
+	uint32_t in_queue = 0;
+	uint32_t i;
+	int err;
+
+	in_mbufs = rte_malloc(NULL, ((qsize + 1) * sizeof(struct rte_mbuf *)),
+			      RTE_CACHE_LINE_SIZE);
+	TEST_ASSERT_NOT_NULL(in_mbufs, "Buffer array allocation failed\n");
+
+	out_mbufs = rte_malloc(NULL, ((qsize + 1) * sizeof(struct rte_mbuf *)),
+			       RTE_CACHE_LINE_SIZE);
+	TEST_ASSERT_NOT_NULL(out_mbufs, "Buffer array allocation failed\n");
+
+	/*
+	 * Allocate qsize + 1 buffers so that we can completely fill the
+	 * queue, then try to enqueue one more packet so that it will be tail
+	 * dropped.
+	 */
+	for (i = 0; i <= qsize; i++) {
+		in_mbufs[i] = rte_pktmbuf_alloc(mp);
+		TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
+		prepare_pkt(in_mbufs[i], in_subport, in_pipe, in_tc, in_queue);
+	}
+
+	/*
+	 * All these packets should be queued correctly.
+	 */
+	err = rte_sched_port_enqueue(port, in_mbufs, qsize);
+	TEST_ASSERT_EQUAL(err, qsize, "Wrong enqueue, err=%d\n", err);
+
+	/*
+	 * This packet should fail to be queued, it will be freed when dropped.
+	 */
+	err = rte_sched_port_enqueue(port, &in_mbufs[qsize], 1);
+	TEST_ASSERT_EQUAL(err, 0, "Enqueue didn't fail, but should have\n");
+	in_mbufs[qsize] = NULL;
+
+	/*
+	 * With small queues we should be able to dequeue a full queue's worth
+	 * of packets with a single call to rte_sched_port_dequeue.  With
+	 * larger queues we will probably need to make multiple calls as we
+	 * could run out of credit to dequeue all the packets in one attempt.
+	 */
+	i = 0;
+	err = 1;
+	while (i < qsize && err != 0) {
+		err = rte_sched_port_dequeue(port, out_mbufs, qsize);
+		i += err;
+	}
+	TEST_ASSERT_EQUAL(i, qsize,
+			  "Wrong dequeue, err=%d, i: %u, qsize: %u\n",
+			  err, i, qsize);
+
+	/*
+	 * Check that all the dequeued packets have the right numbers in them.
+	 */
+	for (i = 0; i < qsize; i++) {
+		enum rte_meter_color color;
+		uint32_t out_subport, out_pipe, out_tc, out_queue;
+
+		color = rte_sched_port_pkt_read_color(out_mbufs[i]);
+		TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
+
+		rte_sched_port_pkt_read_tree_path(out_mbufs[i],
+				&out_subport, &out_pipe, &out_tc, &out_queue);
+
+		TEST_ASSERT_EQUAL(in_subport, out_subport, "Wrong subport\n");
+		TEST_ASSERT_EQUAL(in_pipe, out_pipe, "Wrong pipe\n");
+		TEST_ASSERT_EQUAL(in_tc, out_tc, "Wrong traffic_class\n");
+		TEST_ASSERT_EQUAL(in_queue, out_queue, "Wrong queue\n");
+		rte_pktmbuf_free(out_mbufs[i]);
+	}
+
+#ifdef RTE_SCHED_COLLECT_STATS
+	struct rte_sched_subport_stats subport_stats;
+	uint32_t tc_ov;
+
+	/*
+	 * Did the subport stats see a packet dropped in this traffic-class?
+	 */
+	rte_sched_subport_read_stats(port, in_subport, &subport_stats, &tc_ov);
+	TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc_dropped[in_tc], 1,
+			  "Wrong subport stats\n");
+#endif
+
+	rte_free(in_mbufs);
+	rte_free(out_mbufs);
+
+	return 0;
+}
+
+static int
+subport_fill_queues(struct rte_mempool *mp, struct rte_sched_port *port,
+		    uint32_t subport)
+{
+	uint32_t pipe;
+	uint32_t tc;
+	int err;
+
+	for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport; pipe++) {
+		for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
+			err = fill_queue_to_drop(mp, port, subport, pipe, tc,
+						 subport_qsize[subport][tc]);
+			TEST_ASSERT_SUCCESS(err, "fill-queue-to-drop failed, "
+					  "err=%d\n", err);
+		}
+	}
+	return 0;
+}
+
+/**
+ * test main entrance for library sched using the v2 APIs that
+ * allow queue-size and WRED configurations on a per-subport basis.
+ */
+static int
+test_sched_v2(void)
+{
+	struct rte_mempool *mp = NULL;
+	struct rte_sched_port *port = NULL;
+	uint32_t subport;
+	uint32_t pipe;
+	uint32_t queue_array_size;
+	int err;
+
+	rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);
+
+	mp = create_mempool();
+	TEST_ASSERT_NOT_NULL(mp, "Error creating mempool\n");
+
+	port_param_v2.socket = 0;
+	port_param_v2.rate = (uint64_t) 10000 * 1000 * 1000 / 8;
+
+	queue_array_size = 0;
+	for (subport = 0; subport < NB_SUBPORTS; subport++)
+		queue_array_size +=
+			subport_total_qsize(&port_param_v2,
+					    &subport_qsize[subport][0]);
+
+	port = rte_sched_port_config_v2(&port_param_v2,
+					queue_array_size);
+	TEST_ASSERT_NOT_NULL(port, "Error config sched port\n");
+
+	for (subport = 0; subport < NB_SUBPORTS; subport++) {
+		err = rte_sched_subport_config_v2(port, subport,
+						  &subport_param_v2[subport],
+						  &subport_qsize[subport][0]);
+		TEST_ASSERT_SUCCESS(err,
+				    "Error config sched subport %u, err=%d\n",
+				    subport, err);
+		for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport;
+		     pipe++) {
+			err = rte_sched_pipe_config(port, subport, pipe, 0);
+			TEST_ASSERT_SUCCESS(err,
+					    "Error config sched subport %u "
+					    "pipe %u, err=%d\n",
+					    subport, pipe, err);
+		}
+	}
+
+	for (subport = 0; subport < NB_SUBPORTS; subport++) {
+		err = subport_fill_queues(mp, port, subport);
+		TEST_ASSERT_SUCCESS(err, "subport-fill-queue failed, err=%d\n",
+				    err);
+	}
+
+	rte_sched_port_free(port);
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(sched_autotest_v2, test_sched_v2);
-- 
2.7.4

[RFC 2/2] sched: support per-subport wred configurations
From: alangordondewar @ 2018-02-28 13:39 UTC
  To: cristian.dumitrescu; +Cc: dev, Alan Dewar

From: Alan Dewar <alan.dewar@att.com>

Allow the WRED queue configuration parameters, previously fixed per-port
through rte_sched_port_params, to be supplied on a per-subport basis so
that each subport can have its own WRED configuration.

Updated the sched unit test to exercise the new functionality.

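A minimal sketch (illustrative only; the threshold values are invented)
of supplying a per-subport WRED configuration through the extended
rte_sched_subport_config_v2() signature.  Passing NULL for red_params
keeps the previous behaviour of inheriting the port-level WRED
configuration:

  /*
   * One {min_th, max_th, maxp_inv, wq_log2} entry per traffic class and
   * colour (green, yellow, red).  An entry whose min_th and max_th are
   * both zero leaves WRED disabled for that queue/colour.
   */
  struct rte_red_params red[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
                          [e_RTE_METER_COLORS] = {
          { { 32, 63, 10, 9 },
            { 16, 63, 10, 9 },
            {  8, 63, 10, 9 } }, /* TC-0 */
          /* TC-1..TC-3 left zeroed => WRED disabled on those queues */
  };

  err = rte_sched_subport_config_v2(port, subport_id, &subport_params,
                                    &qsize[0], red);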
Signed-off-by: Alan Dewar <alan.dewar@att.com>
---
 lib/librte_sched/rte_sched.c |  54 +++++-
 lib/librte_sched/rte_sched.h |   6 +-
 test/test/test_sched.c       | 402 +++++++++++++++++++++++++++++++++----------
 3 files changed, 363 insertions(+), 99 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 9436ba5..087d7fc 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -77,6 +77,11 @@ struct rte_sched_subport {
 	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
 	uint32_t qsize_sum;
 	uint32_t qoffset;
+
+#ifdef RTE_SCHED_RED
+	struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
+		[e_RTE_METER_COLORS];
+#endif
 };
 
 struct rte_sched_pipe_profile {
@@ -857,7 +862,9 @@ static int
 rte_sched_subport_config_common(struct rte_sched_port *port,
 				uint32_t subport_id,
 				struct rte_sched_subport_params *params,
-				uint16_t *qsize)
+				uint16_t *qsize,
+				struct rte_red_params red_params[]
+					[e_RTE_METER_COLORS])
 {
 	struct rte_sched_subport *s;
 	uint32_t i;
@@ -909,6 +916,38 @@ rte_sched_subport_config_common(struct rte_sched_port *port,
 						     params->tc_rate[i]);
 		s->tc_credits[i] = s->tc_credits_per_period[i];
 	}
+
+#ifdef RTE_SCHED_RED
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+		uint32_t j;
+
+		if (!red_params) {
+			/* Copy the red configuration from port */
+			for (j = 0; j < e_RTE_METER_COLORS; j++)
+				s->red_config[i][j] = port->red_config[i][j];
+		} else {
+			/* Subport has an individual red configuration */
+			for (j = 0; j < e_RTE_METER_COLORS; j++) {
+				/* if min/max are both zero, then RED is
+				 * disabled
+				 */
+				if ((red_params[i][j].min_th |
+				     red_params[i][j].max_th) == 0) {
+					continue;
+				}
+
+				if (rte_red_config_init(&s->red_config[i][j],
+					red_params[i][j].wq_log2,
+					red_params[i][j].min_th,
+					red_params[i][j].max_th,
+					red_params[i][j].maxp_inv) != 0) {
+					return -6;
+				}
+			}
+		}
+	}
+#endif
+
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 	/* TC oversubscription */
 	s->tc_ov_wm_min = port->mtu;
@@ -932,17 +971,20 @@ rte_sched_subport_config(struct rte_sched_port *port,
 			 uint32_t subport_id,
 			 struct rte_sched_subport_params *params)
 {
-	return rte_sched_subport_config_common(port, subport_id, params, NULL);
+	return rte_sched_subport_config_common(port, subport_id, params, NULL,
+					       NULL);
 }
 
 int
 rte_sched_subport_config_v2(struct rte_sched_port *port,
 			    uint32_t subport_id,
 			    struct rte_sched_subport_params *params,
-			    uint16_t *qsize)
+			    uint16_t *qsize,
+			    struct rte_red_params red_params[]
+				[e_RTE_METER_COLORS])
 {
 	return rte_sched_subport_config_common(port, subport_id, params,
-					       qsize);
+					       qsize, red_params);
 }
 
 int
@@ -1236,6 +1278,8 @@ rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
 static inline int
 rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)
 {
+	struct rte_sched_subport *subport = port->subport +
+		(qindex / rte_sched_port_queues_per_subport(port));
 	struct rte_sched_queue_extra *qe;
 	struct rte_red_config *red_cfg;
 	struct rte_red *red;
@@ -1244,7 +1288,7 @@ rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint3
 
 	tc_index = (qindex >> 2) & 0x3;
 	color = rte_sched_port_pkt_read_color(pkt);
-	red_cfg = &port->red_config[tc_index][color];
+	red_cfg = &subport->red_config[tc_index][color];
 
 	if ((red_cfg->min_th | red_cfg->max_th) == 0)
 		return 0;
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 1e1d618..fe41ae4 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -275,6 +275,8 @@ rte_sched_subport_config(struct rte_sched_port *port,
  *   Subport configuration parameters
  * @param qsize
  *   Array of traffic-class maximum queue-lengths
+ * @param red_params
+ *   Subport WRED queue configuration parameters
  * @return
  *   0 upon success, error code otherwise
  */
@@ -282,7 +284,9 @@ int
 rte_sched_subport_config_v2(struct rte_sched_port *port,
 			    uint32_t subport_id,
 			    struct rte_sched_subport_params *params,
-			    uint16_t *qsize);
+			    uint16_t *qsize,
+			    struct rte_red_params red_params[]
+				[e_RTE_METER_COLORS]);
 
 /**
  * Hierarchical scheduler pipe configuration
diff --git a/test/test/test_sched.c b/test/test/test_sched.c
index 2b22ebe..bc25d34 100644
--- a/test/test/test_sched.c
+++ b/test/test/test_sched.c
@@ -104,6 +104,32 @@ prepare_pkt(struct rte_mbuf *mbuf, uint32_t subport, uint32_t pipe, uint32_t tc,
 	mbuf->data_len = 60;
 }
 
+static int
+pkt_check(struct rte_mbuf **mbufs, uint32_t nb_pkts, uint32_t in_subport,
+	  uint32_t in_pipe, uint32_t in_tc, uint32_t in_queue)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_pkts; i++) {
+		enum rte_meter_color color;
+		uint32_t out_subport, out_pipe, out_tc, out_queue;
+
+		color = rte_sched_port_pkt_read_color(mbufs[i]);
+		TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
+
+		rte_sched_port_pkt_read_tree_path(mbufs[i], &out_subport,
+						  &out_pipe, &out_tc,
+						  &out_queue);
+
+		TEST_ASSERT_EQUAL(in_subport, out_subport, "Wrong subport\n");
+		TEST_ASSERT_EQUAL(in_pipe, out_pipe, "Wrong pipe\n");
+		TEST_ASSERT_EQUAL(in_tc, out_tc, "Wrong traffic_class\n");
+		TEST_ASSERT_EQUAL(in_queue, out_queue, "Wrong queue\n");
+		rte_pktmbuf_free(mbufs[i]);
+	}
+
+	return 0;
+}
 
 /**
  * test main entrance for library sched
@@ -143,41 +169,33 @@ test_sched(void)
 		prepare_pkt(in_mbufs[i], SUBPORT, PIPE, TC, QUEUE);
 	}
 
-
 	err = rte_sched_port_enqueue(port, in_mbufs, 10);
 	TEST_ASSERT_EQUAL(err, 10, "Wrong enqueue, err=%d\n", err);
 
 	err = rte_sched_port_dequeue(port, out_mbufs, 10);
 	TEST_ASSERT_EQUAL(err, 10, "Wrong dequeue, err=%d\n", err);
 
-	for (i = 0; i < 10; i++) {
-		enum rte_meter_color color;
-		uint32_t subport, traffic_class, queue;
-
-		color = rte_sched_port_pkt_read_color(out_mbufs[i]);
-		TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
-
-		rte_sched_port_pkt_read_tree_path(out_mbufs[i],
-				&subport, &pipe, &traffic_class, &queue);
-
-		TEST_ASSERT_EQUAL(subport, SUBPORT, "Wrong subport\n");
-		TEST_ASSERT_EQUAL(pipe, PIPE, "Wrong pipe\n");
-		TEST_ASSERT_EQUAL(traffic_class, TC, "Wrong traffic_class\n");
-		TEST_ASSERT_EQUAL(queue, QUEUE, "Wrong queue\n");
-
-	}
-
+	err = pkt_check(out_mbufs, err, SUBPORT, PIPE, TC, QUEUE);
+	TEST_ASSERT_SUCCESS(err, "Packet checking failed\n");
 
 	struct rte_sched_subport_stats subport_stats;
 	uint32_t tc_ov;
 	rte_sched_subport_read_stats(port, SUBPORT, &subport_stats, &tc_ov);
-#if 0
-	TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc[TC-1], 10, "Wrong subport stats\n");
+#ifdef RTE_SCHED_COLLECT_STATS
+	TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc[TC], 10,
+			  "Wrong subport stats\n");
 #endif
 	struct rte_sched_queue_stats queue_stats;
 	uint16_t qlen;
 	rte_sched_queue_read_stats(port, QUEUE, &queue_stats, &qlen);
 #if 0
+	/*
+	 * This assert fails because the wrong queue_id is passed into
+	 * rte_sched_queue_read_stats.  To calculate the correct queue_id
+	 * we really need to call rte_sched_port_qindex passing in port,
+	 * subport, pipe, traffic-class and queue-number.  Unfortunately
+	 * rte_sched_port_qindex is a static function.
+	 */
 	TEST_ASSERT_EQUAL(queue_stats.n_pkts, 10, "Wrong queue stats\n");
 #endif
 
@@ -222,7 +240,7 @@ static struct rte_sched_pipe_params pipe_profile_v2[] = {
 	},
 };
 
-static uint16_t subport_qsize[][RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {
+static uint16_t config_subport_qsize[][RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {
 	{ 16, 32, 64, 128 },
 	{ 256, 512, 1024, 2048 },
 };
@@ -239,6 +257,64 @@ static struct rte_sched_port_params port_param_v2 = {
 	.n_pipe_profiles = 1,
 };
 
+/*
+ * Note that currently all the packets are coloured yellow.
+ */
+struct rte_red_params subport_0_redparams[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
+						[e_RTE_METER_COLORS] = {
+	{ /* TC-0 queue-size 16 */
+		/* min_th  max_th  maxp_inv  wq_log2 */
+		{       0,      0,        0,       0 }, /* Green */
+		{       1,     15,      255,      12 }, /* Yellow */
+		{       1,     15,        1,       1 }  /* Red */
+	},
+	{ /* TC-1 queue-size 32 */
+		{      24,     31,        1,       1 }, /* Green */
+		{      16,     31,       10,       1 }, /* Yellow */
+		{       8,     31,      100,       1 }  /* Red */
+	},
+	{ /* TC-2 queue-size 64 */
+		{      32,     63,        1,       1 }, /* Green */
+		{      16,     31,        1,       1 }, /* Yellow */
+		{       8,     15,        1,       1 }  /* Red */
+	},
+	{ /* TC-3 queue-size 128 */
+		{      64,    127,      255,      12 }, /* Green */
+		{      32,     63,      255,      12 }, /* Yellow */
+		{      16,     31,      255,      12 }  /* Red */
+	}
+};
+
+struct rte_red_params subport_1_redparams[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
+						[e_RTE_METER_COLORS] = {
+	{ /* TC-0 queue-size 256 */
+		/* min_th  max_th  maxp_inv  wq_log2 */
+		{       0,      0,        0,       0 }, /* Green */
+		{     128,    255,      100,       1 }, /* Yellow */
+		{     128,    255,        2,      10 }  /* Red */
+	},
+	{ /* TC-1 queue-size 512 */
+		{     256,    511,        2,       1 }, /* Green */
+		{     128,    511,       20,       1 }, /* Yellow */
+		{      64,    511,      200,       1 }  /* Red */
+	},
+	{ /* TC-2 queue-size 1024 */
+		{     512,   1023,        6,       4 }, /* Green */
+		{     256,   1023,        6,       4 }, /* Yellow */
+		{     128,   1023,        6,       4 }  /* Red */
+	},
+	{ /* TC-3 queue-size 2048 - RTE_RED_MAX_TH_MAX = 1023 */
+		{    1022,   1023,      128,       9 }, /* Green */
+		{     512,   1023,       64,       6 }, /* Yellow */
+		{     256,   1023,       32,       3 }  /* Red */
+	}
+};
+
+struct rte_red_params *config_subport_redparams[] = {
+	&subport_0_redparams[0][0],
+	&subport_1_redparams[0][0]
+};
+
 static uint32_t subport_total_qsize(struct rte_sched_port_params *pp,
 				    uint16_t *qsize)
 {
@@ -252,14 +328,81 @@ static uint32_t subport_total_qsize(struct rte_sched_port_params *pp,
 		pp->n_pipes_per_subport * sizeof(struct rte_mbuf *));
 }
 
-static int fill_queue_to_drop(struct rte_mempool *mp,
-			      struct rte_sched_port *port,
-			      uint32_t in_subport, uint32_t in_pipe,
-			      uint32_t in_tc, uint16_t qsize)
+static int
+test_dequeue_pkts(struct rte_sched_port *port, struct rte_mbuf **mbufs,
+		  uint16_t nb_pkts)
+{
+	uint16_t total_dequeued;
+	int err;
+
+	total_dequeued = 0;
+	err = 1;
+
+	/*
+	 * With small queues we should be able to dequeue a full queue's worth
+	 * of packets with a single call to rte_sched_port_dequeue.  With
+	 * larger queues we will probably need to make multiple calls as we
+	 * could run out of credit to dequeue all the packets in one attempt.
+	 */
+	while (total_dequeued < nb_pkts && err != 0) {
+		err = rte_sched_port_dequeue(port, mbufs, nb_pkts);
+		total_dequeued += err;
+	}
+	return total_dequeued;
+}
+
+static int
+test_sched_v2_setup(uint16_t qsize[][RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE],
+		    struct rte_red_params **subport_redparams,
+		    struct rte_sched_port **port)
+{
+	uint32_t queue_array_size;
+	uint32_t subport;
+	uint32_t pipe;
+	int err;
+
+	queue_array_size = 0;
+	for (subport = 0; subport < NB_SUBPORTS; subport++)
+		queue_array_size +=
+			subport_total_qsize(&port_param_v2, &qsize[subport][0]);
+
+	*port = rte_sched_port_config_v2(&port_param_v2,
+					 queue_array_size);
+	TEST_ASSERT_NOT_NULL(*port, "Error config sched port\n");
+
+	for (subport = 0; subport < NB_SUBPORTS; subport++) {
+		void *redparams = NULL;
+
+		if (subport_redparams)
+			redparams = subport_redparams[subport];
+
+		err = rte_sched_subport_config_v2(*port, subport,
+						  &subport_param_v2[subport],
+						  &qsize[subport][0],
+						  redparams);
+		TEST_ASSERT_SUCCESS(err,
+				    "Error config sched subport %u, err=%d\n",
+				    subport, err);
+		for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport;
+		     pipe++) {
+			err = rte_sched_pipe_config(*port, subport, pipe, 0);
+			TEST_ASSERT_SUCCESS(err,
+					    "Error config sched subport %u "
+					    "pipe %u, err=%d\n",
+					    subport, pipe, err);
+		}
+	}
+	return 0;
+}
+
+static int
+test_queue_size_drop(struct rte_mempool *mp, struct rte_sched_port *port,
+		     uint32_t subport, uint32_t pipe, uint32_t tc,
+		     uint16_t qsize)
 {
 	struct rte_mbuf **in_mbufs;
 	struct rte_mbuf **out_mbufs;
-	uint32_t in_queue = 0;
+	uint32_t queue = 0;
 	uint32_t i;
 	int err;
 
@@ -279,7 +422,7 @@ static int fill_queue_to_drop(struct rte_mempool *mp,
 	for (i = 0; i <= qsize; i++) {
 		in_mbufs[i] = rte_pktmbuf_alloc(mp);
 		TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
-		prepare_pkt(in_mbufs[i], in_subport, in_pipe, in_tc, in_queue);
+		prepare_pkt(in_mbufs[i], subport, pipe, tc, queue);
 	}
 
 	/*
@@ -296,53 +439,117 @@ static int fill_queue_to_drop(struct rte_mempool *mp,
 	in_mbufs[qsize] = NULL;
 
 	/*
-	 * With small queues we should be able to dequeue a full queue's worth
-	 * of packets with a single call to rte_sched_port_dequeue.  With
-	 * larger queues we will probably need to make multiple calls as we
-	 * could run out of credit to dequeue all the packets in one attempt.
+	 * Dequeue all the packets off the queue.
 	 */
-	i = 0;
-	err = 1;
-	while (i < qsize && err != 0) {
-		err = rte_sched_port_dequeue(port, out_mbufs, qsize);
-		i += err;
-	}
-	TEST_ASSERT_EQUAL(i, qsize,
-			  "Wrong dequeue, err=%d, i: %u, qsize: %u\n",
-			  err, i, qsize);
+	i = test_dequeue_pkts(port, out_mbufs, qsize);
+	TEST_ASSERT_EQUAL(i, qsize, "Failed to dequeue all pkts\n");
 
 	/*
 	 * Check that all the dequeued packets have the right numbers in them.
 	 */
-	for (i = 0; i < qsize; i++) {
-		enum rte_meter_color color;
-		uint32_t out_subport, out_pipe, out_tc, out_queue;
+	err = pkt_check(out_mbufs, qsize, subport, pipe, tc, queue);
+	TEST_ASSERT_SUCCESS(err, "Packet checking failed\n");
 
-		color = rte_sched_port_pkt_read_color(out_mbufs[i]);
-		TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
+#ifdef RTE_SCHED_COLLECT_STATS
+	struct rte_sched_subport_stats subport_stats;
+	uint32_t tc_ov;
 
-		rte_sched_port_pkt_read_tree_path(out_mbufs[i],
-				&out_subport, &out_pipe, &out_tc, &out_queue);
+	/*
+	 * Did the subport stats see a packet dropped in this traffic-class?
+	 */
+	rte_sched_subport_read_stats(port, subport, &subport_stats, &tc_ov);
+	TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc_dropped[tc], 1,
+			  "Wrong subport stats\n");
+#endif
 
-		TEST_ASSERT_EQUAL(in_subport, out_subport, "Wrong subport\n");
-		TEST_ASSERT_EQUAL(in_pipe, out_pipe, "Wrong pipe\n");
-		TEST_ASSERT_EQUAL(in_tc, out_tc, "Wrong traffic_class\n");
-		TEST_ASSERT_EQUAL(in_queue, out_queue, "Wrong queue\n");
-		rte_pktmbuf_free(out_mbufs[i]);
+	rte_free(in_mbufs);
+	rte_free(out_mbufs);
+
+	return 0;
+}
+
+static int
+test_queue_size(struct rte_mempool *mp, struct rte_sched_port *port,
+		uint32_t subport)
+{
+	uint32_t pipe;
+	uint32_t tc;
+	int err;
+
+	for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport; pipe++) {
+		for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
+			err = test_queue_size_drop(mp, port, subport, pipe, tc,
+					config_subport_qsize[subport][tc]);
+			TEST_ASSERT_SUCCESS(err, "test_queue_size_drop "
+					    "failed\n");
+		}
+	}
+	return 0;
+}
+
+static int test_red(struct rte_mempool *mp, struct rte_sched_port *port,
+		    uint32_t subport, uint32_t pipe, uint32_t tc,
+		    uint16_t qsize)
+{
+	struct rte_mbuf **in_mbufs;
+	struct rte_mbuf **out_mbufs;
+	uint32_t queue = 0;
+	uint32_t i;
+	int err;
+	int queued;
+
+	in_mbufs = rte_malloc(NULL, ((qsize + 1) * sizeof(struct rte_mbuf *)),
+			      RTE_CACHE_LINE_SIZE);
+	TEST_ASSERT_NOT_NULL(in_mbufs, "Buffer array allocation failed\n");
+
+	out_mbufs = rte_malloc(NULL, ((qsize + 1) * sizeof(struct rte_mbuf *)),
+			       RTE_CACHE_LINE_SIZE);
+	TEST_ASSERT_NOT_NULL(out_mbufs, "Buffer array allocation failed\n");
+
+	/*
+	 * Allocate qsize buffers so that we can attempt to completely fill the
+	 * queue, then check the subport stats to see if any packets were
+	 * red-dropped.
+	 */
+	for (i = 0; i < qsize; i++) {
+		in_mbufs[i] = rte_pktmbuf_alloc(mp);
+		TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
+		prepare_pkt(in_mbufs[i], subport, pipe, tc, queue);
 	}
 
+	/*
+	 * Some of these packets might not get queued correctly due to
+	 * red-drops.
+	 */
+	queued = rte_sched_port_enqueue(port, in_mbufs, qsize);
+
 #ifdef RTE_SCHED_COLLECT_STATS
 	struct rte_sched_subport_stats subport_stats;
 	uint32_t tc_ov;
+	uint32_t red_drops;
 
 	/*
-	 * Did the subport stats see a packet dropped in this traffic-class?
+	 * Did the subport stats see any packets red-dropped in this
+	 * traffic-class?
 	 */
-	rte_sched_subport_read_stats(port, in_subport, &subport_stats, &tc_ov);
-	TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc_dropped[in_tc], 1,
-			  "Wrong subport stats\n");
+	rte_sched_subport_read_stats(port, subport, &subport_stats, &tc_ov);
+	red_drops = subport_stats.n_pkts_red_dropped[tc];
+	TEST_ASSERT_EQUAL((qsize - red_drops), (uint32_t)queued,
+			  "Red-drop count doesn't agree queued count\n");
 #endif
 
+	/*
+	 * Dequeue all the packets off the queue.
+	 */
+	i = test_dequeue_pkts(port, out_mbufs, queued);
+	TEST_ASSERT_EQUAL(i, (uint32_t)queued, "Failed to dequeue all pkts\n");
+
+	/*
+	 * Check that all the dequeued packets have the right numbers in them.
+	 */
+	err = pkt_check(out_mbufs, queued, subport, pipe, tc, queue);
+	TEST_ASSERT_SUCCESS(err, "Packet checking failed\n");
+
 	rte_free(in_mbufs);
 	rte_free(out_mbufs);
 
@@ -350,8 +557,8 @@ static int fill_queue_to_drop(struct rte_mempool *mp,
 }
 
 static int
-subport_fill_queues(struct rte_mempool *mp, struct rte_sched_port *port,
-		    uint32_t subport)
+test_red_queues(struct rte_mempool *mp, struct rte_sched_port *port,
+		uint32_t subport)
 {
 	uint32_t pipe;
 	uint32_t tc;
@@ -359,27 +566,24 @@ subport_fill_queues(struct rte_mempool *mp, struct rte_sched_port *port,
 
 	for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport; pipe++) {
 		for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
-			err = fill_queue_to_drop(mp, port, subport, pipe, tc,
-						 subport_qsize[subport][tc]);
-			TEST_ASSERT_SUCCESS(err, "fill-queue-to-drop failed, "
-					  "err=%d\n", err);
+			err = test_red(mp, port, subport, pipe, tc,
+				       config_subport_qsize[subport][tc]);
+			TEST_ASSERT_SUCCESS(err, "test_red failed\n");
 		}
 	}
 	return 0;
 }
 
 /**
- * test main entrance for library sched using the v2 APIs that
- * allow queue-size and WRED configurations on a per-subport basis.
+ * test main entrance for library sched using the v2 APIs that allow
+ * queue-size configuration on a per-subport basis.
  */
 static int
-test_sched_v2(void)
+test_sched_v2_qsize(void)
 {
 	struct rte_mempool *mp = NULL;
 	struct rte_sched_port *port = NULL;
 	uint32_t subport;
-	uint32_t pipe;
-	uint32_t queue_array_size;
 	int err;
 
 	rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);
@@ -390,35 +594,45 @@ test_sched_v2(void)
 	port_param_v2.socket = 0;
 	port_param_v2.rate = (uint64_t) 10000 * 1000 * 1000 / 8;
 
-	queue_array_size = 0;
-	for (subport = 0; subport < NB_SUBPORTS; subport++)
-		queue_array_size +=
-			subport_total_qsize(&port_param_v2,
-					    &subport_qsize[subport][0]);
-
-	port = rte_sched_port_config_v2(&port_param_v2,
-					queue_array_size);
-	TEST_ASSERT_NOT_NULL(port, "Error config sched port\n");
+	err = test_sched_v2_setup(config_subport_qsize, NULL, &port);
+	TEST_ASSERT_SUCCESS(err, "test_sched_v2_setup failed\n");
 
 	for (subport = 0; subport < NB_SUBPORTS; subport++) {
-		err = rte_sched_subport_config_v2(port, subport,
-						  &subport_param_v2[subport],
-						  &subport_qsize[subport][0]);
-		TEST_ASSERT_SUCCESS(err,
-				    "Error config sched subport %u, err=%d\n",
-				    subport, err);
-		for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport;
-		     pipe++) {
-			err = rte_sched_pipe_config(port, subport, pipe, 0);
-			TEST_ASSERT_SUCCESS(err,
-					    "Error config sched subport %u "
-					    "pipe %u, err=%d\n",
-					    subport, pipe, err);
-		}
+		err = test_queue_size(mp, port, subport);
+		TEST_ASSERT_SUCCESS(err, "test_queue_size failed\n");
 	}
 
+	rte_sched_port_free(port);
+
+	return 0;
+}
+
+/**
+ * test main entrance for library sched using the v2 APIs that allow WRED
+ * configurations on a per-subport basis.
+ */
+static int
+test_sched_v2_red(void)
+{
+	struct rte_mempool *mp = NULL;
+	struct rte_sched_port *port = NULL;
+	uint32_t subport;
+	int err;
+
+	rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);
+
+	mp = create_mempool();
+	TEST_ASSERT_NOT_NULL(mp, "Error creating mempool\n");
+
+	port_param_v2.socket = 0;
+	port_param_v2.rate = (uint64_t) 10000 * 1000 * 1000 / 8;
+
+	err = test_sched_v2_setup(config_subport_qsize,
+				  config_subport_redparams, &port);
+	TEST_ASSERT_SUCCESS(err, "Test setup failed\n");
+
 	for (subport = 0; subport < NB_SUBPORTS; subport++) {
-		err = subport_fill_queues(mp, port, subport);
+		err = test_red_queues(mp, port, subport);
 		TEST_ASSERT_SUCCESS(err, "subport-fill-queue failed, err=%d\n",
 				    err);
 	}
@@ -428,4 +642,6 @@ test_sched_v2(void)
 	return 0;
 }
 
-REGISTER_TEST_COMMAND(sched_autotest_v2, test_sched_v2);
+
+REGISTER_TEST_COMMAND(sched_autotest_v2_qsize, test_sched_v2_qsize);
+REGISTER_TEST_COMMAND(sched_autotest_v2_red, test_sched_v2_red);
-- 
2.7.4
