dev.dpdk.org archive mirror
From: Jasvinder Singh <jasvinder.singh@intel.com>
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com,
	Abraham Tovar <abrahamx.tovar@intel.com>,
	Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
Subject: [dpdk-dev] [PATCH v2 25/28] examples/qos_sched: update qos sched sample app
Date: Tue, 25 Jun 2019 16:32:14 +0100
Message-ID: <20190625153217.24301-26-jasvinder.singh@intel.com>
In-Reply-To: <20190625153217.24301-1-jasvinder.singh@intel.com>

Update the qos_sched sample app to allow configuration flexibility for
pipe traffic classes and queues, and to allow subport-level
configuration of the pipe parameters.

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Signed-off-by: Abraham Tovar <abrahamx.tovar@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
---
 examples/qos_sched/app_thread.c   |  11 +-
 examples/qos_sched/cfg_file.c     | 283 +++++++++--------
 examples/qos_sched/init.c         | 109 ++++---
 examples/qos_sched/main.h         |   7 +-
 examples/qos_sched/profile.cfg    |  59 +++-
 examples/qos_sched/profile_ov.cfg |  47 ++-
 examples/qos_sched/stats.c        | 483 +++++++++++++++++-------------
 7 files changed, 593 insertions(+), 406 deletions(-)

diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index e14b275e3..25a8d42a0 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -20,13 +20,11 @@
  * QoS parameters are encoded as follows:
  *		Outer VLAN ID defines subport
  *		Inner VLAN ID defines pipe
- *		Destination IP 0.0.XXX.0 defines traffic class
  *		Destination IP host (0.0.0.XXX) defines queue
  * Values below define offset to each field from start of frame
  */
 #define SUBPORT_OFFSET	7
 #define PIPE_OFFSET		9
-#define TC_OFFSET		20
 #define QUEUE_OFFSET	20
 #define COLOR_OFFSET	19
 
@@ -39,11 +37,10 @@ get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
 	*subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
 			(port_params.n_subports_per_port - 1); /* Outer VLAN ID*/
 	*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
-			(port_params.n_pipes_per_subport - 1); /* Inner VLAN ID */
-	*traffic_class = (pdata[QUEUE_OFFSET] & 0x0F) &
-			(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1); /* Destination IP */
-	*queue = ((pdata[QUEUE_OFFSET] >> 8) & 0x0F) &
-			(RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1) ; /* Destination IP */
+			(subport_params[*subport].n_subport_pipes - 1); /* Inner VLAN ID */
+	*queue = active_queues[(pdata[QUEUE_OFFSET] >> 8) % n_active_queues];
+	*traffic_class = (*queue > (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) ?
+			(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) : *queue); /* Destination IP */
 	*color = pdata[COLOR_OFFSET] & 0x03; 	/* Destination IP */
 
 	return 0;
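
With the TC_OFFSET field gone, the traffic class is no longer read from its
own destination-IP byte. Instead, the destination-IP byte at QUEUE_OFFSET
picks one of the queues listed in active_queues[] (the queues given a
non-zero size in the config file, see cfg_file.c below), and the traffic
class is derived from that queue by clamping: in this version of the series
a pipe has 16 queues, queues 0..7 map one-to-one to the strict-priority
classes 0..7, and queues 8..15 all belong to the best-effort class 8. A
minimal standalone sketch of that mapping follows; the constants and the
active_queues[] contents are illustrative assumptions, not values taken
from the library.

/* Sketch of the new packet -> (traffic class, queue) mapping.
 * Illustrative only: 16 queues per pipe and 9 traffic classes are assumed,
 * and active_queues[] is filled as if every queue size were non-zero.
 */
#include <stdint.h>
#include <stdio.h>

#define N_TC		9	/* RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE */
#define N_QUEUES	16	/* RTE_SCHED_QUEUES_PER_PIPE */

static uint32_t active_queues[N_QUEUES] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
static uint32_t n_active_queues = N_QUEUES;

static void
classify(uint8_t dst_ip_byte, uint32_t *traffic_class, uint32_t *queue)
{
	/* spread packets only over the queues that were configured */
	*queue = active_queues[dst_ip_byte % n_active_queues];
	/* queues above the last strict-priority class collapse into
	 * the best-effort class */
	*traffic_class = (*queue > N_TC - 1) ? N_TC - 1 : *queue;
}

int
main(void)
{
	uint32_t tc, q;

	classify(13, &tc, &q);	/* e.g. destination IP 0.0.0.13 */
	printf("tc=%u queue=%u\n", tc, q);	/* prints tc=8 queue=13 */
	return 0;
}

The real get_pkt_sched() additionally masks the subport and pipe IDs against
the configured counts, as shown in the hunk above.
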
diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c
index 76ffffc4b..7f54bfe22 100644
--- a/examples/qos_sched/cfg_file.c
+++ b/examples/qos_sched/cfg_file.c
@@ -24,7 +24,6 @@ int
 cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port_params)
 {
 	const char *entry;
-	int j;
 
 	if (!cfg || !port_params)
 		return -1;
@@ -37,93 +36,6 @@ cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port_params
 	if (entry)
 		port_params->n_subports_per_port = (uint32_t)atoi(entry);
 
-	entry = rte_cfgfile_get_entry(cfg, "port", "number of pipes per subport");
-	if (entry)
-		port_params->n_pipes_per_subport = (uint32_t)atoi(entry);
-
-	entry = rte_cfgfile_get_entry(cfg, "port", "queue sizes");
-	if (entry) {
-		char *next;
-
-		for(j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
-			port_params->qsize[j] = (uint16_t)strtol(entry, &next, 10);
-			if (next == NULL)
-				break;
-			entry = next;
-		}
-	}
-
-#ifdef RTE_SCHED_RED
-	for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
-		char str[32];
-
-		/* Parse WRED min thresholds */
-		snprintf(str, sizeof(str), "tc %d wred min", j);
-		entry = rte_cfgfile_get_entry(cfg, "red", str);
-		if (entry) {
-			char *next;
-			int k;
-			/* for each packet colour (green, yellow, red) */
-			for (k = 0; k < RTE_COLORS; k++) {
-				port_params->red_params[j][k].min_th
-					= (uint16_t)strtol(entry, &next, 10);
-				if (next == NULL)
-					break;
-				entry = next;
-			}
-		}
-
-		/* Parse WRED max thresholds */
-		snprintf(str, sizeof(str), "tc %d wred max", j);
-		entry = rte_cfgfile_get_entry(cfg, "red", str);
-		if (entry) {
-			char *next;
-			int k;
-			/* for each packet colour (green, yellow, red) */
-			for (k = 0; k < RTE_COLORS; k++) {
-				port_params->red_params[j][k].max_th
-					= (uint16_t)strtol(entry, &next, 10);
-				if (next == NULL)
-					break;
-				entry = next;
-			}
-		}
-
-		/* Parse WRED inverse mark probabilities */
-		snprintf(str, sizeof(str), "tc %d wred inv prob", j);
-		entry = rte_cfgfile_get_entry(cfg, "red", str);
-		if (entry) {
-			char *next;
-			int k;
-			/* for each packet colour (green, yellow, red) */
-			for (k = 0; k < RTE_COLORS; k++) {
-				port_params->red_params[j][k].maxp_inv
-					= (uint8_t)strtol(entry, &next, 10);
-
-				if (next == NULL)
-					break;
-				entry = next;
-			}
-		}
-
-		/* Parse WRED EWMA filter weights */
-		snprintf(str, sizeof(str), "tc %d wred weight", j);
-		entry = rte_cfgfile_get_entry(cfg, "red", str);
-		if (entry) {
-			char *next;
-			int k;
-			/* for each packet colour (green, yellow, red) */
-			for (k = 0; k < RTE_COLORS; k++) {
-				port_params->red_params[j][k].wq_log2
-					= (uint8_t)strtol(entry, &next, 10);
-				if (next == NULL)
-					break;
-				entry = next;
-			}
-		}
-	}
-#endif /* RTE_SCHED_RED */
-
 	return 0;
 }
 
@@ -139,7 +51,7 @@ cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe_params
 		return -1;
 
 	profiles = rte_cfgfile_num_sections(cfg, "pipe profile", sizeof("pipe profile") - 1);
-	port_params.n_pipe_profiles = profiles;
+	subport_params[0].n_pipe_profiles = profiles;
 
 	for (j = 0; j < profiles; j++) {
 		char pipe_name[32];
@@ -173,46 +85,36 @@ cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe_params
 		if (entry)
 			pipe_params[j].tc_rate[3] = (uint32_t)atoi(entry);
 
+		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 4 rate");
+		if (entry)
+			pipe_params[j].tc_rate[4] = (uint32_t)atoi(entry);
+
+		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 5 rate");
+		if (entry)
+			pipe_params[j].tc_rate[5] = (uint32_t)atoi(entry);
+
+		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 6 rate");
+		if (entry)
+			pipe_params[j].tc_rate[6] = (uint32_t)atoi(entry);
+
+		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 7 rate");
+		if (entry)
+			pipe_params[j].tc_rate[7] = (uint32_t)atoi(entry);
+
+		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 8 rate");
+		if (entry)
+			pipe_params[j].tc_rate[8] = (uint32_t)atoi(entry);
+
 #ifdef RTE_SCHED_SUBPORT_TC_OV
-		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 oversubscription weight");
+		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 8 oversubscription weight");
 		if (entry)
 			pipe_params[j].tc_ov_weight = (uint8_t)atoi(entry);
 #endif
 
-		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 0 wrr weights");
-		if (entry) {
-			for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
-				pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*0 + i] =
-					(uint8_t)strtol(entry, &next, 10);
-				if (next == NULL)
-					break;
-				entry = next;
-			}
-		}
-		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 1 wrr weights");
-		if (entry) {
-			for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
-				pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*1 + i] =
-					(uint8_t)strtol(entry, &next, 10);
-				if (next == NULL)
-					break;
-				entry = next;
-			}
-		}
-		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 2 wrr weights");
-		if (entry) {
-			for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
-				pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*2 + i] =
-					(uint8_t)strtol(entry, &next, 10);
-				if (next == NULL)
-					break;
-				entry = next;
-			}
-		}
-		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 wrr weights");
+		entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 8 wrr weights");
 		if (entry) {
-			for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
-				pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*3 + i] =
+			for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
+				pipe_params[j].wrr_weights[i] =
 					(uint8_t)strtol(entry, &next, 10);
 				if (next == NULL)
 					break;
@@ -233,12 +135,112 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
 		return -1;
 
 	memset(app_pipe_to_profile, -1, sizeof(app_pipe_to_profile));
+	memset(active_queues, 0, sizeof(active_queues));
+	n_active_queues = 0;
+
+#ifdef RTE_SCHED_RED
+	char sec_name[CFG_NAME_LEN];
+	struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
+
+	snprintf(sec_name, sizeof(sec_name), "red");
+
+	if (rte_cfgfile_has_section(cfg, sec_name)) {
+
+		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+			char str[32];
+
+			/* Parse WRED min thresholds */
+			snprintf(str, sizeof(str), "tc %d wred min", i);
+			entry = rte_cfgfile_get_entry(cfg, sec_name, str);
+			if (entry) {
+				char *next;
+				/* for each packet colour (green, yellow, red) */
+				for (j = 0; j < RTE_COLORS; j++) {
+					red_params[i][j].min_th
+						= (uint16_t)strtol(entry, &next, 10);
+					if (next == NULL)
+						break;
+					entry = next;
+				}
+			}
+
+			/* Parse WRED max thresholds */
+			snprintf(str, sizeof(str), "tc %d wred max", i);
+			entry = rte_cfgfile_get_entry(cfg, "red", str);
+			if (entry) {
+				char *next;
+				/* for each packet colour (green, yellow, red) */
+				for (j = 0; j < RTE_COLORS; j++) {
+					red_params[i][j].max_th
+						= (uint16_t)strtol(entry, &next, 10);
+					if (next == NULL)
+						break;
+					entry = next;
+				}
+			}
+
+			/* Parse WRED inverse mark probabilities */
+			snprintf(str, sizeof(str), "tc %d wred inv prob", i);
+			entry = rte_cfgfile_get_entry(cfg, "red", str);
+			if (entry) {
+				char *next;
+				/* for each packet colour (green, yellow, red) */
+				for (j = 0; j < RTE_COLORS; j++) {
+					red_params[i][j].maxp_inv
+						= (uint8_t)strtol(entry, &next, 10);
+
+					if (next == NULL)
+						break;
+					entry = next;
+				}
+			}
+
+			/* Parse WRED EWMA filter weights */
+			snprintf(str, sizeof(str), "tc %d wred weight", i);
+			entry = rte_cfgfile_get_entry(cfg, "red", str);
+			if (entry) {
+				char *next;
+				/* for each packet colour (green, yellow, red) */
+				for (j = 0; j < RTE_COLORS; j++) {
+					red_params[i][j].wq_log2
+						= (uint8_t)strtol(entry, &next, 10);
+					if (next == NULL)
+						break;
+					entry = next;
+				}
+			}
+		}
+	}
+#endif /* RTE_SCHED_RED */
 
 	for (i = 0; i < MAX_SCHED_SUBPORTS; i++) {
 		char sec_name[CFG_NAME_LEN];
 		snprintf(sec_name, sizeof(sec_name), "subport %d", i);
 
 		if (rte_cfgfile_has_section(cfg, sec_name)) {
+			entry = rte_cfgfile_get_entry(cfg, sec_name,
+				"number of pipes per subport");
+			if (entry)
+				subport_params[i].n_subport_pipes = (uint32_t)atoi(entry);
+
+			entry = rte_cfgfile_get_entry(cfg, sec_name, "queue sizes");
+			if (entry) {
+				char *next;
+
+				for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
+				subport_params[i].qsize[j] =
+					(uint16_t)strtol(entry, &next, 10);
+				if (subport_params[i].qsize[j] != 0) {
+					active_queues[n_active_queues] = j;
+					n_active_queues++;
+				}
+
+				if (next == NULL)
+					break;
+				entry = next;
+				}
+			}
+
 			entry = rte_cfgfile_get_entry(cfg, sec_name, "tb rate");
 			if (entry)
 				subport_params[i].tb_rate = (uint32_t)atoi(entry);
@@ -267,6 +269,26 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
 			if (entry)
 				subport_params[i].tc_rate[3] = (uint32_t)atoi(entry);
 
+			entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 4 rate");
+			if (entry)
+				subport_params[i].tc_rate[4] = (uint32_t)atoi(entry);
+
+			entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 5 rate");
+			if (entry)
+				subport_params[i].tc_rate[5] = (uint32_t)atoi(entry);
+
+			entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 6 rate");
+			if (entry)
+				subport_params[i].tc_rate[6] = (uint32_t)atoi(entry);
+
+			entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 7 rate");
+			if (entry)
+				subport_params[i].tc_rate[7] = (uint32_t)atoi(entry);
+
+			entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 8 rate");
+			if (entry)
+				subport_params[i].tc_rate[8] = (uint32_t)atoi(entry);
+
 			int n_entries = rte_cfgfile_section_num_entries(cfg, sec_name);
 			struct rte_cfgfile_entry entries[n_entries];
 
@@ -306,6 +328,21 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
 					}
 				}
 			}
+
+#ifdef RTE_SCHED_RED
+			for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+				for (k = 0; k < RTE_COLORS; k++) {
+					subport_params[i].red_params[j][k].min_th =
+						red_params[j][k].min_th;
+					subport_params[i].red_params[j][k].max_th =
+						red_params[j][k].max_th;
+					subport_params[i].red_params[j][k].maxp_inv =
+						red_params[j][k].maxp_inv;
+					subport_params[i].red_params[j][k].wq_log2 =
+						red_params[j][k].wq_log2;
+				}
+			}
+#endif
 		}
 	}
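
The [port] keys "number of pipes per subport" and "queue sizes", as well as
the WRED thresholds, are now handled per subport: the pipe count and queue
sizes come from each [subport N] section, and the values parsed once from
[red] are copied into every subport's red_params. Queues whose configured
size is non-zero are recorded in active_queues[], which is what
app_thread.c uses to spread traffic. A small self-contained sketch of the
"queue sizes" parsing is shown below; the file name, section name and the
16-queue constant are illustrative assumptions mirroring the
cfg_load_subport() loop above.

/* Sketch: read a per-subport "queue sizes" line and build active_queues[],
 * mirroring the loop added to cfg_load_subport(). Illustrative only.
 */
#include <stdint.h>
#include <stdlib.h>
#include <rte_cfgfile.h>

#define N_QUEUES 16	/* RTE_SCHED_QUEUES_PER_PIPE in this series */

static uint16_t qsize[N_QUEUES];
static uint32_t active_queues[N_QUEUES];
static uint32_t n_active_queues;

static int
load_subport_queue_sizes(const char *profile, const char *sec_name)
{
	struct rte_cfgfile *cfg = rte_cfgfile_load(profile, 0);
	const char *entry;
	char *next;
	int j;

	if (cfg == NULL)
		return -1;

	entry = rte_cfgfile_get_entry(cfg, sec_name, "queue sizes");
	if (entry) {
		for (j = 0; j < N_QUEUES; j++) {
			qsize[j] = (uint16_t)strtol(entry, &next, 10);
			/* only queues with a non-zero size take part in the
			 * classification done in app_thread.c */
			if (qsize[j] != 0)
				active_queues[n_active_queues++] = j;
			if (next == NULL)
				break;
			entry = next;
		}
	}

	rte_cfgfile_close(cfg);
	return 0;
}

For example, load_subport_queue_sizes("profile.cfg", "subport 0") would pick
up the new 16-entry line added to profile.cfg below.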
 
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index f6e9af16b..55000b9ae 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -165,22 +165,12 @@ app_init_port(uint16_t portid, struct rte_mempool *mp)
 	return 0;
 }
 
-static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
-	{
-		.tb_rate = 1250000000,
-		.tb_size = 1000000,
-
-		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
-		.tc_period = 10,
-	},
-};
-
-static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT] = {
+static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
 	{ /* Profile #0 */
 		.tb_rate = 305175,
 		.tb_size = 1000000,
 
-		.tc_rate = {305175, 305175, 305175, 305175},
+		.tc_rate = {305175, 305175, 305175, 305175, 305175, 305175, 305175, 305175, 305175},
 		.tc_period = 40,
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 		.tc_ov_weight = 1,
@@ -190,6 +180,70 @@ static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PO
 	},
 };
 
+struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
+	{
+		.tb_rate = 1250000000,
+		.tb_size = 1000000,
+
+		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000},
+		.tc_period = 10,
+		.n_subport_pipes = 4096,
+		.qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64},
+		.pipe_profiles = pipe_profiles,
+		.n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params),
+		.n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
+
+#ifdef RTE_SCHED_RED
+		.red_params = {
+			/* Traffic Class 0 Colors Green / Yellow / Red */
+			[0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+			/* Traffic Class 1 - Colors Green / Yellow / Red */
+			[1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+			/* Traffic Class 2 - Colors Green / Yellow / Red */
+			[2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+			/* Traffic Class 3 - Colors Green / Yellow / Red */
+			[3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+			/* Traffic Class 4 - Colors Green / Yellow / Red */
+			[4][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[4][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[4][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+			/* Traffic Class 5 - Colors Green / Yellow / Red */
+			[5][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[5][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[5][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+			/* Traffic Class 6 - Colors Green / Yellow / Red */
+			[6][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[6][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[6][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+			/* Traffic Class 7 - Colors Green / Yellow / Red */
+			[7][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[7][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[7][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+			/* Traffic Class 8 - Colors Green / Yellow / Red */
+			[8][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[8][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+			[8][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+		},
+#endif /* RTE_SCHED_RED */
+	},
+};
+
 struct rte_sched_port_params port_params = {
 	.name = "port_scheduler_0",
 	.socket = 0, /* computed */
@@ -197,34 +251,6 @@ struct rte_sched_port_params port_params = {
 	.mtu = 6 + 6 + 4 + 4 + 2 + 1500,
 	.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
 	.n_subports_per_port = 1,
-	.n_pipes_per_subport = 4096,
-	.qsize = {64, 64, 64, 64},
-	.pipe_profiles = pipe_profiles,
-	.n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params),
-
-#ifdef RTE_SCHED_RED
-	.red_params = {
-		/* Traffic Class 0 Colors Green / Yellow / Red */
-		[0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-		[0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-		[0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-
-		/* Traffic Class 1 - Colors Green / Yellow / Red */
-		[1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-		[1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-		[1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-
-		/* Traffic Class 2 - Colors Green / Yellow / Red */
-		[2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-		[2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-		[2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-
-		/* Traffic Class 3 - Colors Green / Yellow / Red */
-		[3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-		[3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
-		[3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
-	}
-#endif /* RTE_SCHED_RED */
 };
 
 static struct rte_sched_port *
@@ -255,7 +281,8 @@ app_init_sched_port(uint32_t portid, uint32_t socketid)
 					subport, err);
 		}
 
-		for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe ++) {
+		uint32_t n_subport_pipes = subport_params[subport].n_subport_pipes;
+		for (pipe = 0; pipe < n_subport_pipes; pipe++) {
 			if (app_pipe_to_profile[subport][pipe] != -1) {
 				err = rte_sched_pipe_config(port, subport, pipe,
 						app_pipe_to_profile[subport][pipe]);
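
The pipe/queue shape of the port now lives entirely in
rte_sched_subport_params: queue sizes, pipe count, the pipe profile table
(bounded by n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES) and the WRED
thresholds have all moved from port_params into subport_params[]. One
consequence, visible in the last hunk, is that the pipe setup loop is
bounded per subport. A minimal sketch of that loop, assuming the
declarations from this app's main.h and a hypothetical default profile 0:

/* Sketch (illustrative): configure every pipe of one subport with pipe
 * profile 0, taking the pipe count from the subport instead of the port.
 */
#include <rte_sched.h>
#include "main.h"	/* subport_params[] as declared in this patch */

static int
configure_subport_pipes(struct rte_sched_port *port, uint32_t subport)
{
	uint32_t n_pipes = subport_params[subport].n_subport_pipes;
	uint32_t pipe;
	int err;

	for (pipe = 0; pipe < n_pipes; pipe++) {
		err = rte_sched_pipe_config(port, subport, pipe, 0);
		if (err)
			return err;
	}

	return 0;
}

The real app_init_sched_port() uses the app_pipe_to_profile[] mapping parsed
from the config file rather than a fixed profile.
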
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index 8a2741c58..219aa9a95 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -26,7 +26,7 @@ extern "C" {
 
 #define MAX_PKT_RX_BURST 64
 #define PKT_ENQUEUE 64
-#define PKT_DEQUEUE 32
+#define PKT_DEQUEUE 60
 #define MAX_PKT_TX_BURST 64
 
 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
@@ -50,6 +50,7 @@ extern "C" {
 #define MAX_DATA_STREAMS (APP_MAX_LCORE/2)
 #define MAX_SCHED_SUBPORTS		8
 #define MAX_SCHED_PIPES		4096
+#define MAX_SCHED_PIPE_PROFILES		256
 
 #ifndef APP_COLLECT_STAT
 #define APP_COLLECT_STAT		1
@@ -147,7 +148,11 @@ extern struct burst_conf burst_conf;
 extern struct ring_thresh rx_thresh;
 extern struct ring_thresh tx_thresh;
 
+uint32_t active_queues[RTE_SCHED_QUEUES_PER_PIPE];
+uint32_t n_active_queues;
+
 extern struct rte_sched_port_params port_params;
+extern struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS];
 
 int app_parse_args(int argc, char **argv);
 int app_init(void);
diff --git a/examples/qos_sched/profile.cfg b/examples/qos_sched/profile.cfg
index f5b704cc6..02fd8a00e 100644
--- a/examples/qos_sched/profile.cfg
+++ b/examples/qos_sched/profile.cfg
@@ -1,6 +1,6 @@
 ;   BSD LICENSE
 ;
-;   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+;   Copyright(c) 2010-2019 Intel Corporation. All rights reserved.
 ;   All rights reserved.
 ;
 ;   Redistribution and use in source and binary forms, with or without
@@ -33,12 +33,12 @@
 ; 10GbE output port:
 ;	* Single subport (subport 0):
 ;		- Subport rate set to 100% of port rate
-;		- Each of the 4 traffic classes has rate set to 100% of port rate
+;		- Each of the 9 traffic classes has rate set to 100% of port rate
 ;	* 4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration:
 ;		- Pipe rate set to 1/4K of port rate
-;		- Each of the 4 traffic classes has rate set to 100% of pipe rate
-;		- Within each traffic class, the byte-level WRR weights for the 4 queues
-;         are set to 1:1:1:1
+;		- Each of the 9 traffic classes has rate set to 100% of pipe rate
+;		- Within lowest priority traffic class (best-effort), the byte-level
+;		  WRR weights for the 8 queues are set to 1:1:1:1:1:1:1:1
 ;
 ; For more details, please refer to chapter "Quality of Service (QoS) Framework"
 ; of Data Plane Development Kit (DPDK) Programmer's Guide.
@@ -47,11 +47,12 @@
 [port]
 frame overhead = 24
 number of subports per port = 1
-number of pipes per subport = 4096
-queue sizes = 64 64 64 64
 
 ; Subport configuration
 [subport 0]
+number of pipes per subport = 4096
+queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64
+
 tb rate = 1250000000           ; Bytes per second
 tb size = 1000000              ; Bytes
 
@@ -59,6 +60,11 @@ tc 0 rate = 1250000000         ; Bytes per second
 tc 1 rate = 1250000000         ; Bytes per second
 tc 2 rate = 1250000000         ; Bytes per second
 tc 3 rate = 1250000000         ; Bytes per second
+tc 4 rate = 1250000000         ; Bytes per second
+tc 5 rate = 1250000000         ; Bytes per second
+tc 6 rate = 1250000000         ; Bytes per second
+tc 7 rate = 1250000000         ; Bytes per second
+tc 8 rate = 1250000000         ; Bytes per second
 tc period = 10                 ; Milliseconds
 
 pipe 0-4095 = 0                ; These pipes are configured with pipe profile 0
@@ -72,14 +78,16 @@ tc 0 rate = 305175             ; Bytes per second
 tc 1 rate = 305175             ; Bytes per second
 tc 2 rate = 305175             ; Bytes per second
 tc 3 rate = 305175             ; Bytes per second
-tc period = 40                 ; Milliseconds
+tc 4 rate = 305175             ; Bytes per second
+tc 5 rate = 305175             ; Bytes per second
+tc 6 rate = 305175             ; Bytes per second
+tc 7 rate = 305175             ; Bytes per second
+tc 8 rate = 305175             ; Bytes per second
+tc period = 160                ; Milliseconds
 
-tc 3 oversubscription weight = 1
+tc 8 oversubscription weight = 1
 
-tc 0 wrr weights = 1 1 1 1
-tc 1 wrr weights = 1 1 1 1
-tc 2 wrr weights = 1 1 1 1
-tc 3 wrr weights = 1 1 1 1
+tc 8 wrr weights = 1 1 1 1 1 1 1 1
 
 ; RED params per traffic class and color (Green / Yellow / Red)
 [red]
@@ -102,3 +110,28 @@ tc 3 wred min = 48 40 32
 tc 3 wred max = 64 64 64
 tc 3 wred inv prob = 10 10 10
 tc 3 wred weight = 9 9 9
+
+tc 4 wred min = 48 40 32
+tc 4 wred max = 64 64 64
+tc 4 wred inv prob = 10 10 10
+tc 4 wred weight = 9 9 9
+
+tc 5 wred min = 48 40 32
+tc 5 wred max = 64 64 64
+tc 5 wred inv prob = 10 10 10
+tc 5 wred weight = 9 9 9
+
+tc 6 wred min = 48 40 32
+tc 6 wred max = 64 64 64
+tc 6 wred inv prob = 10 10 10
+tc 6 wred weight = 9 9 9
+
+tc 7 wred min = 48 40 32
+tc 7 wred max = 64 64 64
+tc 7 wred inv prob = 10 10 10
+tc 7 wred weight = 9 9 9
+
+tc 8 wred min = 48 40 32
+tc 8 wred max = 64 64 64
+tc 8 wred inv prob = 10 10 10
+tc 8 wred weight = 9 9 9
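
The numbers in the updated profile still follow the comment block at the top
of the file: with a single subport running at the 10GbE port rate of
1250000000 bytes/s and 4096 pipes, each pipe's token bucket rate is
1250000000 / 4096, truncated to 305175 bytes/s, and each of the 9 traffic
classes is capped at 100% of that pipe rate. The 16-entry "queue sizes" line
covers one queue for each of the 8 strict-priority classes plus the 8 WRR
queues of the best-effort class ("tc 8"). A quick arithmetic check,
illustrative only:

/* Quick check of the pipe rate used in profile.cfg (illustrative only). */
#include <stdio.h>

int
main(void)
{
	const unsigned long port_rate = 1250000000UL;	/* bytes/s, 10GbE */
	const unsigned long n_pipes = 4096;

	/* 1250000000 / 4096 = 305175.78..., written as 305175 in the file */
	printf("pipe rate = %lu bytes/s\n", port_rate / n_pipes);
	return 0;
}
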
diff --git a/examples/qos_sched/profile_ov.cfg b/examples/qos_sched/profile_ov.cfg
index 33000df9e..450001d2b 100644
--- a/examples/qos_sched/profile_ov.cfg
+++ b/examples/qos_sched/profile_ov.cfg
@@ -1,6 +1,6 @@
 ;   BSD LICENSE
 ;
-;   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+;   Copyright(c) 2010-2019 Intel Corporation. All rights reserved.
 ;   All rights reserved.
 ;
 ;   Redistribution and use in source and binary forms, with or without
@@ -33,11 +33,12 @@
 [port]
 frame overhead = 24
 number of subports per port = 1
-number of pipes per subport = 32
-queue sizes = 64 64 64 64
 
 ; Subport configuration
 [subport 0]
+number of pipes per subport = 32
+queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64
+
 tb rate = 8400000           ; Bytes per second
 tb size = 100000            ; Bytes
 
@@ -45,6 +46,11 @@ tc 0 rate = 8400000         ; Bytes per second
 tc 1 rate = 8400000         ; Bytes per second
 tc 2 rate = 8400000         ; Bytes per second
 tc 3 rate = 8400000         ; Bytes per second
+tc 4 rate = 8400000         ; Bytes per second
+tc 5 rate = 8400000         ; Bytes per second
+tc 6 rate = 8400000         ; Bytes per second
+tc 7 rate = 8400000         ; Bytes per second
+tc 8 rate = 8400000         ; Bytes per second
 tc period = 10              ; Milliseconds
 
 pipe 0-31 = 0               ; These pipes are configured with pipe profile 0
@@ -58,14 +64,16 @@ tc 0 rate = 16800000           ; Bytes per second
 tc 1 rate = 16800000           ; Bytes per second
 tc 2 rate = 16800000           ; Bytes per second
 tc 3 rate = 16800000           ; Bytes per second
+tc 4 rate = 16800000           ; Bytes per second
+tc 5 rate = 16800000           ; Bytes per second
+tc 6 rate = 16800000           ; Bytes per second
+tc 7 rate = 16800000           ; Bytes per second
+tc 8 rate = 16800000           ; Bytes per second
 tc period = 28                 ; Milliseconds
 
 tc 3 oversubscription weight = 1
 
-tc 0 wrr weights = 1 1 1 1
-tc 1 wrr weights = 1 1 1 1
-tc 2 wrr weights = 1 1 1 1
-tc 3 wrr weights = 1 1 1 1
+tc 8 wrr weights = 1 1 1 1 1 1 1 1
 
 ; RED params per traffic class and color (Green / Yellow / Red)
 [red]
@@ -88,3 +96,28 @@ tc 3 wred min = 48 40 32
 tc 3 wred max = 64 64 64
 tc 3 wred inv prob = 10 10 10
 tc 3 wred weight = 9 9 9
+
+tc 4 wred min = 48 40 32
+tc 4 wred max = 64 64 64
+tc 4 wred inv prob = 10 10 10
+tc 4 wred weight = 9 9 9
+
+tc 5 wred min = 48 40 32
+tc 5 wred max = 64 64 64
+tc 5 wred inv prob = 10 10 10
+tc 5 wred weight = 9 9 9
+
+tc 6 wred min = 48 40 32
+tc 6 wred max = 64 64 64
+tc 6 wred inv prob = 10 10 10
+tc 6 wred weight = 9 9 9
+
+tc 7 wred min = 48 40 32
+tc 7 wred max = 64 64 64
+tc 7 wred inv prob = 10 10 10
+tc 7 wred weight = 9 9 9
+
+tc 8 wred min = 48 40 32
+tc 8 wred max = 64 64 64
+tc 8 wred inv prob = 10 10 10
+tc 8 wred weight = 9 9 9
diff --git a/examples/qos_sched/stats.c b/examples/qos_sched/stats.c
index 8193d964c..f69c5afb0 100644
--- a/examples/qos_sched/stats.c
+++ b/examples/qos_sched/stats.c
@@ -11,278 +11,333 @@ int
 qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc,
 		uint8_t q)
 {
-        struct rte_sched_queue_stats stats;
-        struct rte_sched_port *port;
-        uint16_t qlen;
-        uint32_t queue_id, count, i;
-        uint32_t average;
-
-        for (i = 0; i < nb_pfc; i++) {
-                if (qos_conf[i].tx_port == port_id)
-                        break;
-        }
-        if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport
-                        || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE || q >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
-                return -1;
-
-        port = qos_conf[i].sched_port;
-
-        queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
-        queue_id = queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + q);
-
-        average = 0;
-
-        for (count = 0; count < qavg_ntimes; count++) {
-                rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
-                average += qlen;
-                usleep(qavg_period);
-        }
-
-        average /= qavg_ntimes;
-
-        printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
-
-        return 0;
+	struct rte_sched_queue_stats stats;
+	struct rte_sched_port *port;
+	uint16_t qlen;
+	uint32_t count, i, queue_id = 0;
+	uint32_t average;
+
+	for (i = 0; i < nb_pfc; i++) {
+		if (qos_conf[i].tx_port == port_id)
+			break;
+	}
+
+	if (i == nb_pfc || subport_id >= port_params.n_subports_per_port ||
+		pipe_id >= subport_params[subport_id].n_subport_pipes  ||
+		tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE ||
+		q >= RTE_SCHED_BE_QUEUES_PER_PIPE ||
+		(tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1 && q > 0))
+			return -1;
+
+	port = qos_conf[i].sched_port;
+	for (i = 0; i < subport_id; i++)
+		queue_id += subport_params[i].n_subport_pipes *
+				RTE_SCHED_QUEUES_PER_PIPE;
+	if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
+		queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc;
+	else
+		queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc + q;
+
+	average = 0;
+	for (count = 0; count < qavg_ntimes; count++) {
+		rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
+		average += qlen;
+		usleep(qavg_period);
+	}
+
+	average /= qavg_ntimes;
+
+	printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+	return 0;
 }
 
 int
 qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
-	     uint8_t tc)
+		uint8_t tc)
 {
-        struct rte_sched_queue_stats stats;
-        struct rte_sched_port *port;
-        uint16_t qlen;
-        uint32_t queue_id, count, i;
-        uint32_t average, part_average;
+	struct rte_sched_queue_stats stats;
+	struct rte_sched_port *port;
+	uint16_t qlen;
+	uint32_t count, i, queue_id = 0;
+	uint32_t average, part_average;
+
+	for (i = 0; i < nb_pfc; i++) {
+		if (qos_conf[i].tx_port == port_id)
+			break;
+	}
+
+	if (i == nb_pfc || subport_id >= port_params.n_subports_per_port ||
+		pipe_id >= subport_params[subport_id].n_subport_pipes ||
+		tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+		return -1;
+
+	port = qos_conf[i].sched_port;
 
-        for (i = 0; i < nb_pfc; i++) {
-                if (qos_conf[i].tx_port == port_id)
-                        break;
-        }
-        if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport
-                        || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
-                return -1;
+	for (i = 0; i < subport_id; i++)
+		queue_id += subport_params[i].n_subport_pipes * RTE_SCHED_QUEUES_PER_PIPE;
 
-        port = qos_conf[i].sched_port;
+	queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc;
 
-        queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
+	average = 0;
 
-        average = 0;
+	for (count = 0; count < qavg_ntimes; count++) {
+		part_average = 0;
 
-        for (count = 0; count < qavg_ntimes; count++) {
-                part_average = 0;
-                for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
-                        rte_sched_queue_read_stats(port, queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + i), &stats, &qlen);
-                        part_average += qlen;
-                }
-                average += part_average / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
-                usleep(qavg_period);
-        }
+		if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) {
+			rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
+			part_average += qlen;
+		} else {
+			for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
+				rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen);
+				part_average += qlen;
+			}
+			average += part_average / RTE_SCHED_BE_QUEUES_PER_PIPE;
+		}
+		usleep(qavg_period);
+	}
 
-        average /= qavg_ntimes;
+	average /= qavg_ntimes;
 
-        printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+	printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
 
-        return 0;
+	return 0;
 }
 
 int
 qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
 {
-        struct rte_sched_queue_stats stats;
-        struct rte_sched_port *port;
-        uint16_t qlen;
-        uint32_t queue_id, count, i;
-        uint32_t average, part_average;
+	struct rte_sched_queue_stats stats;
+	struct rte_sched_port *port;
+	uint16_t qlen;
+	uint32_t count, i, queue_id = 0;
+	uint32_t average, part_average;
 
-        for (i = 0; i < nb_pfc; i++) {
-                if (qos_conf[i].tx_port == port_id)
-                        break;
-        }
-        if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport)
-                return -1;
+	for (i = 0; i < nb_pfc; i++) {
+		if (qos_conf[i].tx_port == port_id)
+			break;
+	}
 
-        port = qos_conf[i].sched_port;
+	if (i == nb_pfc ||
+		subport_id >= port_params.n_subports_per_port ||
+		pipe_id >= subport_params[subport_id].n_subport_pipes)
+		return -1;
 
-        queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
+	port = qos_conf[i].sched_port;
 
-        average = 0;
+	for (i = 0; i < subport_id; i++)
+		queue_id += subport_params[i].n_subport_pipes *
+				RTE_SCHED_QUEUES_PER_PIPE;
 
-        for (count = 0; count < qavg_ntimes; count++) {
-                part_average = 0;
-                for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
-                        rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen);
-                        part_average += qlen;
-                }
-                average += part_average / (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
-                usleep(qavg_period);
-        }
+	queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE;
 
-        average /= qavg_ntimes;
+	average = 0;
 
-        printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+	for (count = 0; count < qavg_ntimes; count++) {
+		part_average = 0;
+		for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+			rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen);
+			part_average += qlen;
+		}
+		average += part_average / RTE_SCHED_QUEUES_PER_PIPE;
+		usleep(qavg_period);
+	}
 
-        return 0;
+	average /= qavg_ntimes;
+
+	printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+	return 0;
 }
 
 int
 qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc)
 {
-        struct rte_sched_queue_stats stats;
-        struct rte_sched_port *port;
-        uint16_t qlen;
-        uint32_t queue_id, count, i, j;
-        uint32_t average, part_average;
-
-        for (i = 0; i < nb_pfc; i++) {
-                if (qos_conf[i].tx_port == port_id)
-                        break;
-        }
-        if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
-                return -1;
-
-        port = qos_conf[i].sched_port;
-
-        average = 0;
-
-        for (count = 0; count < qavg_ntimes; count++) {
-                part_average = 0;
-                for (i = 0; i < port_params.n_pipes_per_subport; i++) {
-                        queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + i);
-
-                        for (j = 0; j < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) {
-                                rte_sched_queue_read_stats(port, queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + j), &stats, &qlen);
-                                part_average += qlen;
-                        }
-                }
-
-                average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
-                usleep(qavg_period);
-        }
-
-        average /= qavg_ntimes;
-
-        printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
-
-        return 0;
+	struct rte_sched_queue_stats stats;
+	struct rte_sched_port *port;
+	uint16_t qlen;
+	uint32_t queue_id, count, i, j, subport_queue_id = 0;
+	uint32_t average, part_average;
+
+	for (i = 0; i < nb_pfc; i++) {
+		if (qos_conf[i].tx_port == port_id)
+			break;
+	}
+
+	if (i == nb_pfc ||
+		subport_id >= port_params.n_subports_per_port ||
+		tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+		return -1;
+
+	port = qos_conf[i].sched_port;
+
+	for (i = 0; i < subport_id; i++)
+		subport_queue_id += subport_params[i].n_subport_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+	average = 0;
+
+	for (count = 0; count < qavg_ntimes; count++) {
+		part_average = 0;
+		for (i = 0; i < subport_params[subport_id].n_subport_pipes; i++) {
+			if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) {
+				queue_id = subport_queue_id + i * RTE_SCHED_QUEUES_PER_PIPE + tc;
+				rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
+				part_average += qlen;
+			} else {
+				for (j = 0; j < RTE_SCHED_BE_QUEUES_PER_PIPE; j++) {
+					queue_id = subport_queue_id +
+							i * RTE_SCHED_QUEUES_PER_PIPE + tc + j;
+					rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
+					part_average += qlen;
+				}
+			}
+		}
+
+		if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
+			average += part_average / (subport_params[subport_id].n_subport_pipes);
+		else
+			average += part_average / (subport_params[subport_id].n_subport_pipes * RTE_SCHED_BE_QUEUES_PER_PIPE);
+
+		usleep(qavg_period);
+	}
+
+	average /= qavg_ntimes;
+
+	printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+	return 0;
 }
 
 int
 qavg_subport(uint16_t port_id, uint32_t subport_id)
 {
-        struct rte_sched_queue_stats stats;
-        struct rte_sched_port *port;
-        uint16_t qlen;
-        uint32_t queue_id, count, i, j;
-        uint32_t average, part_average;
+	struct rte_sched_queue_stats stats;
+	struct rte_sched_port *port;
+	uint16_t qlen;
+	uint32_t queue_id, count, i, j, subport_queue_id = 0;
+	uint32_t average, part_average;
+
+	for (i = 0; i < nb_pfc; i++) {
+		if (qos_conf[i].tx_port == port_id)
+			break;
+	}
+
+	if (i == nb_pfc ||
+		subport_id >= port_params.n_subports_per_port)
+		return -1;
 
-        for (i = 0; i < nb_pfc; i++) {
-                if (qos_conf[i].tx_port == port_id)
-                        break;
-        }
-        if (i == nb_pfc || subport_id >= port_params.n_subports_per_port)
-                return -1;
+	port = qos_conf[i].sched_port;
 
-        port = qos_conf[i].sched_port;
+	for (i = 0; i < subport_id; i++)
+		subport_queue_id += subport_params[i].n_subport_pipes * RTE_SCHED_QUEUES_PER_PIPE;
 
-        average = 0;
+	average = 0;
 
-        for (count = 0; count < qavg_ntimes; count++) {
-                part_average = 0;
-                for (i = 0; i < port_params.n_pipes_per_subport; i++) {
-                        queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + i);
+	for (count = 0; count < qavg_ntimes; count++) {
+		part_average = 0;
+		for (i = 0; i < subport_params[subport_id].n_subport_pipes; i++) {
+			queue_id = subport_queue_id + i * RTE_SCHED_QUEUES_PER_PIPE;
 
-                        for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) {
-                                rte_sched_queue_read_stats(port, queue_id + j, &stats, &qlen);
-                                part_average += qlen;
-                        }
-                }
+			for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
+				rte_sched_queue_read_stats(port, queue_id + j, &stats, &qlen);
+				part_average += qlen;
+			}
+		}
 
-                average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
-                usleep(qavg_period);
-        }
+		average += part_average / (subport_params[subport_id].n_subport_pipes * RTE_SCHED_QUEUES_PER_PIPE);
+		usleep(qavg_period);
+	}
 
-        average /= qavg_ntimes;
+	average /= qavg_ntimes;
 
-        printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+	printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
 
-        return 0;
+	return 0;
 }
 
 int
 subport_stat(uint16_t port_id, uint32_t subport_id)
 {
-        struct rte_sched_subport_stats stats;
-        struct rte_sched_port *port;
-        uint32_t tc_ov[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
-        uint8_t i;
-
-        for (i = 0; i < nb_pfc; i++) {
-                if (qos_conf[i].tx_port == port_id)
-                        break;
-        }
-        if (i == nb_pfc || subport_id >= port_params.n_subports_per_port)
-                return -1;
-
-        port = qos_conf[i].sched_port;
+	struct rte_sched_subport_stats stats;
+	struct rte_sched_port *port;
+	uint32_t tc_ov[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+	uint8_t i;
+
+	for (i = 0; i < nb_pfc; i++) {
+		if (qos_conf[i].tx_port == port_id)
+			break;
+	}
+
+	if (i == nb_pfc || subport_id >= port_params.n_subports_per_port)
+		return -1;
+
+	port = qos_conf[i].sched_port;
 	memset (tc_ov, 0, sizeof(tc_ov));
 
-        rte_sched_subport_read_stats(port, subport_id, &stats, tc_ov);
+	rte_sched_subport_read_stats(port, subport_id, &stats, tc_ov);
 
-        printf("\n");
-        printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
-        printf("| TC |   Pkts OK   |Pkts Dropped |  Bytes OK   |Bytes Dropped|  OV Status  |\n");
-        printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
+	printf("\n");
+	printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
+	printf("| TC |   Pkts OK   |Pkts Dropped |  Bytes OK   |Bytes Dropped|  OV Status  |\n");
+	printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
 
-        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-                printf("|  %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " |\n", i,
-                                stats.n_pkts_tc[i], stats.n_pkts_tc_dropped[i],
-                                stats.n_bytes_tc[i], stats.n_bytes_tc_dropped[i], tc_ov[i]);
-                printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
-        }
-        printf("\n");
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+		printf("|  %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " |\n", i,
+		stats.n_pkts_tc[i], stats.n_pkts_tc_dropped[i],
+		stats.n_bytes_tc[i], stats.n_bytes_tc_dropped[i], tc_ov[i]);
+		printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
+	}
+	printf("\n");
 
-        return 0;
+	return 0;
 }
 
 int
 pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
 {
-        struct rte_sched_queue_stats stats;
-        struct rte_sched_port *port;
-        uint16_t qlen;
-        uint8_t i, j;
-        uint32_t queue_id;
-
-        for (i = 0; i < nb_pfc; i++) {
-                if (qos_conf[i].tx_port == port_id)
-                        break;
-        }
-        if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport)
-                return -1;
-
-        port = qos_conf[i].sched_port;
-
-        queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
-
-        printf("\n");
-        printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
-        printf("| TC | Queue |   Pkts OK   |Pkts Dropped |  Bytes OK   |Bytes Dropped|    Length   |\n");
-        printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
-
-        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-                for (j = 0; j < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) {
-
-                        rte_sched_queue_read_stats(port, queue_id + (i * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + j), &stats, &qlen);
-
-                        printf("|  %d |   %d   | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11i |\n", i, j,
-                                        stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen);
-                        printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
-                }
-                if (i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
-                        printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
-        }
-        printf("\n");
-
-        return 0;
+	struct rte_sched_queue_stats stats;
+	struct rte_sched_port *port;
+	uint16_t qlen;
+	uint8_t i, j;
+	uint32_t queue_id = 0;
+
+	for (i = 0; i < nb_pfc; i++) {
+		if (qos_conf[i].tx_port == port_id)
+			break;
+	}
+
+	if (i == nb_pfc ||
+		subport_id >= port_params.n_subports_per_port ||
+		pipe_id >= subport_params[subport_id].n_subport_pipes)
+		return -1;
+
+	port = qos_conf[i].sched_port;
+	for (i = 0; i < subport_id; i++)
+		queue_id += subport_params[i].n_subport_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+	queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE;
+
+	printf("\n");
+	printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+	printf("| TC | Queue |   Pkts OK   |Pkts Dropped |  Bytes OK   |Bytes Dropped|    Length   |\n");
+	printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+		if (i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) {
+			rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen);
+			printf("|  %d |   %d   | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11i |\n", i, 0,
+				stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen);
+			printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+		} else {
+			for (j = 0; j < RTE_SCHED_BE_QUEUES_PER_PIPE; j++) {
+				rte_sched_queue_read_stats(port, queue_id + i + j, &stats, &qlen);
+				printf("|  %d |   %d   | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11i |\n", i, j,
+					stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen);
+				printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+			}
+		}
+	}
+	printf("\n");
+
+	return 0;
 }
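
Because each subport may now have its own pipe count, the stats code can no
longer compute a queue index as subport_id * n_pipes_per_subport *
queues-per-pipe. All of the functions above build the flat queue index the
same way: sum the queue counts of the preceding subports, add
pipe_id * RTE_SCHED_QUEUES_PER_PIPE, then add the per-pipe offset, which is
simply tc for a strict-priority class (one queue each) and tc + q for the
best-effort queues. A self-contained sketch, assuming the 16-queue /
9-class layout of this series:

/* Sketch of the flat queue index used throughout stats.c (illustrative). */
#include <stdint.h>

#define QUEUES_PER_PIPE	16	/* RTE_SCHED_QUEUES_PER_PIPE */
#define N_TC		9	/* RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE */

/* n_subport_pipes[] stands in for subport_params[i].n_subport_pipes */
static uint32_t
flat_queue_id(const uint32_t *n_subport_pipes, uint32_t subport_id,
	uint32_t pipe_id, uint32_t tc, uint32_t q)
{
	uint32_t i, queue_id = 0;

	/* queues of all preceding subports, whose pipe counts may differ */
	for (i = 0; i < subport_id; i++)
		queue_id += n_subport_pipes[i] * QUEUES_PER_PIPE;

	/* queues of the preceding pipes within this subport */
	queue_id += pipe_id * QUEUES_PER_PIPE;

	/* strict-priority classes own a single queue each (q is ignored);
	 * the best-effort class adds its queue offset q = 0..7 */
	if (tc < N_TC - 1)
		return queue_id + tc;

	return queue_id + tc + q;
}

For example, with one subport of 4096 pipes, best-effort queue 3 of pipe 5
maps to 5 * 16 + 8 + 3 = 91.
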
-- 
2.21.0


Thread overview: 163+ messages
2019-05-28 12:05 [dpdk-dev] [PATCH 00/27] sched: feature enhancements Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 01/27] sched: update macros for flexible config Lukasz Krakowiak
2019-06-25 15:31   ` [dpdk-dev] [PATCH v2 00/28] sched: feature enhancements Jasvinder Singh
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 01/28] sched: update macros for flexible config Jasvinder Singh
2019-07-01 19:04       ` Dumitrescu, Cristian
2019-07-02 13:26         ` Singh, Jasvinder
2019-07-11 10:26       ` [dpdk-dev] [PATCH v3 00/11] sched: feature enhancements Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 01/11] sched: remove wrr from strict priority tc queues Jasvinder Singh
2019-07-12  9:57           ` [dpdk-dev] [PATCH v4 00/11] sched: feature enhancements Jasvinder Singh
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 01/11] sched: remove wrr from strict priority tc queues Jasvinder Singh
2019-07-15 23:50               ` Dumitrescu, Cristian
2019-07-17 14:49                 ` Singh, Jasvinder
2019-07-17 14:42               ` [dpdk-dev] [PATCH v5 00/11] sched: feature enhancements Jasvinder Singh
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 01/11] sched: remove wrr from strict priority tc queues Jasvinder Singh
2019-07-19 14:18                   ` [dpdk-dev] [PATCH v6 00/11] sched: feature enhancements Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 01/11] sched: remove wrr from strict priority tc queues Jasvinder Singh
2019-07-22 11:01                       ` [dpdk-dev] [PATCH v7 00/11] sched: feature enhancements Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 01/11] sched: remove wrr from strict priority tc queues Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 02/11] sched: add config flexibility to tc queue sizes Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 03/11] sched: add max pipe profiles config in run time Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 04/11] sched: rename tc3 params to best-effort tc Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 05/11] sched: improve error log messages Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 06/11] sched: improve doxygen comments Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 07/11] net/softnic: add config flexibility to softnic tm Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 08/11] test_sched: modify tests for config flexibility Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 09/11] examples/ip_pipeline: add config flexibility to tm function Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 10/11] examples/qos_sched: add tc and queue config flexibility Jasvinder Singh
2019-07-22 11:01                         ` [dpdk-dev] [PATCH v7 11/11] sched: remove redundant macros Jasvinder Singh
2019-07-22 13:15                         ` [dpdk-dev] [PATCH v7 00/11] sched: feature enhancements Thomas Monjalon
2019-07-22 13:22                           ` Singh, Jasvinder
2019-07-22 13:33                             ` Thomas Monjalon
2019-07-22 13:53                               ` Ferruh Yigit
2019-07-22 13:56                                 ` Bruce Richardson
2019-07-22 14:08                                   ` Ferruh Yigit
2019-07-22 14:08                                   ` Thomas Monjalon
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 02/11] sched: add config flexibility to tc queue sizes Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 03/11] sched: add max pipe profiles config in run time Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 04/11] sched: rename tc3 params to best-effort tc Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 05/11] sched: improve error log messages Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 06/11] sched: improve doxygen comments Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 07/11] net/softnic: add config flexibility to softnic tm Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 08/11] test_sched: modify tests for config flexibility Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 09/11] examples/ip_pipeline: add config flexibility to tm function Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 10/11] examples/qos_sched: add tc and queue config flexibility Jasvinder Singh
2019-07-19 14:18                     ` [dpdk-dev] [PATCH v6 11/11] sched: remove redundant macros Jasvinder Singh
2019-07-22  8:19                     ` [dpdk-dev] [PATCH v6 00/11] sched: feature enhancements Thomas Monjalon
2019-07-22 11:05                       ` Singh, Jasvinder
2019-07-22  9:56                     ` Dumitrescu, Cristian
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 02/11] sched: add config flexibility to tc queue sizes Jasvinder Singh
2019-07-18 23:04                   ` Dumitrescu, Cristian
2019-07-19 15:25                     ` Singh, Jasvinder
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 03/11] sched: add max pipe profiles config in run time Jasvinder Singh
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 04/11] sched: rename tc3 params to best-effort tc Jasvinder Singh
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 05/11] sched: improve error log messages Jasvinder Singh
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 06/11] sched: improve doxygen comments Jasvinder Singh
2019-07-18 23:12                   ` Dumitrescu, Cristian
2019-07-19 15:25                     ` Singh, Jasvinder
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 07/11] net/softnic: add config flexibility to softnic tm Jasvinder Singh
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 08/11] test_sched: modify tests for config flexibility Jasvinder Singh
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 09/11] examples/ip_pipeline: add config flexibility to tm function Jasvinder Singh
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 10/11] examples/qos_sched: add tc and queue config flexibility Jasvinder Singh
2019-07-17 14:42                 ` [dpdk-dev] [PATCH v5 11/11] sched: remove redundant macros Jasvinder Singh
2019-07-18 22:57                 ` [dpdk-dev] [PATCH v5 00/11] sched: feature enhancements Dumitrescu, Cristian
2019-07-19 10:41                   ` Thomas Monjalon
2019-07-19 11:16                     ` Singh, Jasvinder
2019-07-19 11:40                       ` Thomas Monjalon
2019-07-19 11:42                         ` Singh, Jasvinder
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 02/11] sched: add config flexibility to tc queue sizes Jasvinder Singh
2019-07-16  0:37               ` Dumitrescu, Cristian
2019-07-17 14:57                 ` Singh, Jasvinder
2019-07-16  0:57               ` Dumitrescu, Cristian
2019-07-17 15:03                 ` Singh, Jasvinder
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 03/11] sched: add max pipe profiles config in run time Jasvinder Singh
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 04/11] sched: rename tc3 params to best-effort tc Jasvinder Singh
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 05/11] sched: improve error log messages Jasvinder Singh
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 06/11] sched: improve doxygen comments Jasvinder Singh
2019-07-16  0:44               ` Dumitrescu, Cristian
2019-07-17 14:58                 ` Singh, Jasvinder
2019-07-16  0:49               ` Dumitrescu, Cristian
2019-07-17 15:00                 ` Singh, Jasvinder
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 07/11] net/softnic: add config flexibility to softnic tm Jasvinder Singh
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 08/11] test_sched: modify tests for config flexibility Jasvinder Singh
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 09/11] examples/ip_pipeline: add config flexibility to tm function Jasvinder Singh
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 10/11] examples/qos_sched: add tc and queue config flexibility Jasvinder Singh
2019-07-12  9:57             ` [dpdk-dev] [PATCH v4 11/11] sched: remove redundant macros Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 02/11] sched: add config flexibility to tc queue sizes Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 03/11] sched: add max pipe profiles config in run time Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 04/11] sched: rename tc3 params to best-effort tc Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 05/11] sched: improve error log messages Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 06/11] sched: improve doxygen comments Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 07/11] net/softnic: add config flexibility to softnic tm Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 08/11] test_sched: modify tests for config flexibility Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 09/11] examples/ip_pipeline: add config flexibility to tm function Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 10/11] examples/qos_sched: add tc and queue config flexibility Jasvinder Singh
2019-07-11 10:26         ` [dpdk-dev] [PATCH v3 11/11] sched: remove redundant macros Jasvinder Singh
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 02/28] sched: update subport and pipe data structures Jasvinder Singh
2019-07-01 18:58       ` Dumitrescu, Cristian
2019-07-02 13:20         ` Singh, Jasvinder
2019-07-01 19:12       ` Dumitrescu, Cristian
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 03/28] sched: update internal " Jasvinder Singh
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 04/28] sched: update port config API Jasvinder Singh
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 05/28] sched: update port free API Jasvinder Singh
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 06/28] sched: update subport config API Jasvinder Singh
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 07/28] sched: update pipe profile add API Jasvinder Singh
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 08/28] sched: update pipe config API Jasvinder Singh
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 09/28] sched: update pkt read and write API Jasvinder Singh
2019-07-01 23:25       ` Dumitrescu, Cristian
2019-07-02 21:05         ` Singh, Jasvinder
2019-07-03 13:40           ` Dumitrescu, Cristian
2019-06-25 15:31     ` [dpdk-dev] [PATCH v2 10/28] sched: update subport and tc queue stats Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 11/28] sched: update port memory footprint API Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 12/28] sched: update packet enqueue API Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 13/28] sched: update grinder pipe and tc cache Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 14/28] sched: update grinder next pipe and tc functions Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 15/28] sched: update pipe and tc queues prefetch Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 16/28] sched: update grinder wrr compute function Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 17/28] sched: modify credits update function Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 18/28] sched: update mbuf prefetch function Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 19/28] sched: update grinder schedule function Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 20/28] sched: update grinder handle function Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 21/28] sched: update packet dequeue API Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 22/28] sched: update sched queue stats API Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 23/28] test/sched: update unit test Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 24/28] net/softnic: update softnic tm function Jasvinder Singh
2019-06-25 15:32     ` Jasvinder Singh [this message]
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 26/28] examples/ip_pipeline: update ip pipeline sample app Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 27/28] sched: code cleanup Jasvinder Singh
2019-06-25 15:32     ` [dpdk-dev] [PATCH v2 28/28] sched: add release note Jasvinder Singh
2019-06-26 21:31       ` Thomas Monjalon
2019-06-27 10:50         ` Singh, Jasvinder
2019-06-26 21:33     ` [dpdk-dev] [PATCH v2 00/28] sched: feature enhancements Thomas Monjalon
2019-06-27 10:52       ` Singh, Jasvinder
2019-06-27  0:04     ` Stephen Hemminger
2019-06-27 10:49       ` Singh, Jasvinder
2019-07-01 18:51     ` Dumitrescu, Cristian
2019-07-02  9:32       ` Singh, Jasvinder
2019-05-28 12:05 ` [dpdk-dev] [PATCH 02/27] sched: update subport and pipe data structures Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 03/27] sched: update internal " Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 04/27] sched: update port config api Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 05/27] sched: update port free api Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 06/27] sched: update subport config api Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 07/27] sched: update pipe profile add api Lukasz Krakowiak
2019-05-28 14:06   ` Stephen Hemminger
2019-05-28 12:05 ` [dpdk-dev] [PATCH 08/27] sched: update pipe config api Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 09/27] sched: update pkt read and write api Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 10/27] sched: update subport and tc queue stats Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 11/27] sched: update port memory footprint api Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 12/27] sched: update packet enqueue api Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 13/27] sched: update grinder pipe and tc cache Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 14/27] sched: update grinder next pipe and tc functions Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 15/27] sched: update pipe and tc queues prefetch Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 16/27] sched: update grinder wrr compute function Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 17/27] sched: modify credits update function Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 18/27] sched: update mbuf prefetch function Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 19/27] sched: update grinder schedule function Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 20/27] sched: update grinder handle function Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 21/27] sched: update packet dequeue api Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 22/27] sched: update sched queue stats api Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 23/27] test/sched: update unit test Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 24/27] net/softnic: update softnic tm function Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 25/27] examples/qos_sched: update qos sched sample app Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 26/27] examples/ip_pipeline: update ip pipeline " Lukasz Krakowiak
2019-05-28 12:05 ` [dpdk-dev] [PATCH 27/27] sched: code cleanup Lukasz Krakowiak

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190625153217.24301-26-jasvinder.singh@intel.com \
    --to=jasvinder.singh@intel.com \
    --cc=abrahamx.tovar@intel.com \
    --cc=cristian.dumitrescu@intel.com \
    --cc=dev@dpdk.org \
    --cc=lukaszx.krakowiak@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).
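For reference, a minimal sketch of the mirroring step mentioned above,
assuming the usual public-inbox git layout; the base URL and epoch
number below are placeholders, not this archive's real values, which
are listed on its mirroring page:

  # clone one epoch of the inbox as a bare mirror
  # (placeholder URL and epoch -- substitute the values from the
  #  mirroring page of this archive)
  git clone --mirror https://example.org/inbox/dev/0 dev/git/0.git

  # refresh the local copy later on
  git --git-dir=dev/git/0.git fetch --prune

Once cloned, the public-inbox tools (for example public-inbox-index(1))
can index the data for local search or serving; see their documentation
for the exact directory layout they expect. The mirroring page also
lists the NNTP newsgroup(s) referred to above.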