From: Jasvinder Singh <jasvinder.singh@intel.com>
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com,
	Abraham Tovar <abrahamx.tovar@intel.com>,
	Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
Subject: [dpdk-dev] [PATCH v3 07/11] net/softnic: add config flexibility to softnic tm
Date: Thu, 11 Jul 2019 11:26:55 +0100	[thread overview]
Message-ID: <20190711102659.59001-8-jasvinder.singh@intel.com> (raw)
In-Reply-To: <20190711102659.59001-1-jasvinder.singh@intel.com>

Update the softnic tm function for configuration flexibility of pipe
traffic classes and queue sizes.
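
For illustration only (not part of this patch), the sixteen per-queue
defaults set in pmd_parse_args() below are equivalent to the following
sketch, assuming RTE_SCHED_QUEUES_PER_PIPE == 16 after the companion
sched changes in this series:

	/* Sketch only: default every pipe queue size, instead of the
	 * sixteen explicit per-index assignments used in pmd_parse_args().
	 */
	uint32_t i;

	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++)
		p->tm.qsize[i] = SOFTNIC_TM_QUEUE_SIZE;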

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Signed-off-by: Abraham Tovar <abrahamx.tovar@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
---
 drivers/net/softnic/rte_eth_softnic.c         | 131 ++++++
 drivers/net/softnic/rte_eth_softnic_cli.c     | 433 ++++++++++++++++--
 .../net/softnic/rte_eth_softnic_internals.h   |   8 +-
 drivers/net/softnic/rte_eth_softnic_tm.c      |  64 ++-
 4 files changed, 582 insertions(+), 54 deletions(-)

diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 4bda2f2b0..50a48e90b 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -28,6 +28,19 @@
 #define PMD_PARAM_TM_QSIZE1                                "tm_qsize1"
 #define PMD_PARAM_TM_QSIZE2                                "tm_qsize2"
 #define PMD_PARAM_TM_QSIZE3                                "tm_qsize3"
+#define PMD_PARAM_TM_QSIZE4                                "tm_qsize4"
+#define PMD_PARAM_TM_QSIZE5                                "tm_qsize5"
+#define PMD_PARAM_TM_QSIZE6                                "tm_qsize6"
+#define PMD_PARAM_TM_QSIZE7                                "tm_qsize7"
+#define PMD_PARAM_TM_QSIZE8                                "tm_qsize8"
+#define PMD_PARAM_TM_QSIZE9                                "tm_qsize9"
+#define PMD_PARAM_TM_QSIZE10                               "tm_qsize10"
+#define PMD_PARAM_TM_QSIZE11                               "tm_qsize11"
+#define PMD_PARAM_TM_QSIZE12                               "tm_qsize12"
+#define PMD_PARAM_TM_QSIZE13                               "tm_qsize13"
+#define PMD_PARAM_TM_QSIZE14                               "tm_qsize14"
+#define PMD_PARAM_TM_QSIZE15                               "tm_qsize15"
+
 
 static const char * const pmd_valid_args[] = {
 	PMD_PARAM_FIRMWARE,
@@ -39,6 +52,18 @@ static const char * const pmd_valid_args[] = {
 	PMD_PARAM_TM_QSIZE1,
 	PMD_PARAM_TM_QSIZE2,
 	PMD_PARAM_TM_QSIZE3,
+	PMD_PARAM_TM_QSIZE4,
+	PMD_PARAM_TM_QSIZE5,
+	PMD_PARAM_TM_QSIZE6,
+	PMD_PARAM_TM_QSIZE7,
+	PMD_PARAM_TM_QSIZE8,
+	PMD_PARAM_TM_QSIZE9,
+	PMD_PARAM_TM_QSIZE10,
+	PMD_PARAM_TM_QSIZE11,
+	PMD_PARAM_TM_QSIZE12,
+	PMD_PARAM_TM_QSIZE13,
+	PMD_PARAM_TM_QSIZE14,
+	PMD_PARAM_TM_QSIZE15,
 	NULL
 };
 
@@ -434,6 +459,18 @@ pmd_parse_args(struct pmd_params *p, const char *params)
 	p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE;
 	p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE;
 	p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[4] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[5] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[6] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[7] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[8] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[9] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[10] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[11] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[12] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[13] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[14] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[15] = SOFTNIC_TM_QUEUE_SIZE;
 
 	/* Firmware script (optional) */
 	if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) {
@@ -504,6 +541,88 @@ pmd_parse_args(struct pmd_params *p, const char *params)
 			goto out_free;
 	}
 
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE4) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE4,
+			&get_uint32, &p->tm.qsize[4]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE5) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE5,
+			&get_uint32, &p->tm.qsize[5]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE6) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE6,
+			&get_uint32, &p->tm.qsize[6]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE7) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE7,
+			&get_uint32, &p->tm.qsize[7]);
+		if (ret < 0)
+			goto out_free;
+	}
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE8) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE8,
+			&get_uint32, &p->tm.qsize[8]);
+		if (ret < 0)
+			goto out_free;
+	}
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE9) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE9,
+			&get_uint32, &p->tm.qsize[9]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE10) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE10,
+			&get_uint32, &p->tm.qsize[10]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE11) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE11,
+			&get_uint32, &p->tm.qsize[11]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE12) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE12,
+			&get_uint32, &p->tm.qsize[12]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE13) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE13,
+			&get_uint32, &p->tm.qsize[13]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE14) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE14,
+			&get_uint32, &p->tm.qsize[14]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE15) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE15,
+			&get_uint32, &p->tm.qsize[15]);
+		if (ret < 0)
+			goto out_free;
+	}
+
 out_free:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -588,6 +707,18 @@ RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
 	PMD_PARAM_TM_QSIZE1 "=<uint32> "
 	PMD_PARAM_TM_QSIZE2 "=<uint32> "
 	PMD_PARAM_TM_QSIZE3 "=<uint32>"
+	PMD_PARAM_TM_QSIZE4 "=<uint32> "
+	PMD_PARAM_TM_QSIZE5 "=<uint32> "
+	PMD_PARAM_TM_QSIZE6 "=<uint32> "
+	PMD_PARAM_TM_QSIZE7 "=<uint32> "
+	PMD_PARAM_TM_QSIZE8 "=<uint32> "
+	PMD_PARAM_TM_QSIZE9 "=<uint32> "
+	PMD_PARAM_TM_QSIZE10 "=<uint32> "
+	PMD_PARAM_TM_QSIZE11 "=<uint32> "
+	PMD_PARAM_TM_QSIZE12 "=<uint32> "
+	PMD_PARAM_TM_QSIZE13 "=<uint32> "
+	PMD_PARAM_TM_QSIZE14 "=<uint32> "
+	PMD_PARAM_TM_QSIZE15 "=<uint32>"
 );
 
 
diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c
index 56fc92ba2..7db77a33a 100644
--- a/drivers/net/softnic/rte_eth_softnic_cli.c
+++ b/drivers/net/softnic/rte_eth_softnic_cli.c
@@ -566,8 +566,7 @@ queue_node_id(uint32_t n_spp __rte_unused,
 	uint32_t tc_id,
 	uint32_t queue_id)
 {
-	return queue_id +
-		tc_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE +
+	return queue_id + tc_id +
 		(pipe_id + subport_id * n_pps) * RTE_SCHED_QUEUES_PER_PIPE;
 }
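
For illustration only (not part of the patch), the revised
queue_node_id() expression maps queues as follows, assuming
RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE == 13 and
RTE_SCHED_QUEUES_PER_PIPE == 16:

	/* Worked example for subport_id = 0, pipe_id = 0:
	 *   tc_id 0..11 (strict priority), queue_id 0    -> 0..11
	 *   tc_id 12    (best-effort),     queue_id 0..3 -> 12..15
	 * Each further pipe advances the result by
	 * RTE_SCHED_QUEUES_PER_PIPE (16).
	 */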
 
@@ -617,10 +616,19 @@ tmgr_hierarchy_default(struct pmd_internals *softnic,
 		},
 	};
 
+	uint32_t *shared_shaper_id =
+		(uint32_t *) calloc(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+		sizeof(uint32_t));
+	if (shared_shaper_id == NULL)
+		return -1;
+
+	memcpy(shared_shaper_id, params->shared_shaper_id.tc,
+		sizeof(params->shared_shaper_id.tc));
+
 	struct rte_tm_node_params tc_node_params[] = {
 		[0] = {
 			.shaper_profile_id = params->shaper_profile_id.tc[0],
-			.shared_shaper_id = &params->shared_shaper_id.tc[0],
+			.shared_shaper_id = &shared_shaper_id[0],
 			.n_shared_shapers =
 				(&params->shared_shaper_id.tc_valid[0]) ? 1 : 0,
 			.nonleaf = {
@@ -630,7 +638,7 @@ tmgr_hierarchy_default(struct pmd_internals *softnic,
 
 		[1] = {
 			.shaper_profile_id = params->shaper_profile_id.tc[1],
-			.shared_shaper_id = &params->shared_shaper_id.tc[1],
+			.shared_shaper_id = &shared_shaper_id[1],
 			.n_shared_shapers =
 				(&params->shared_shaper_id.tc_valid[1]) ? 1 : 0,
 			.nonleaf = {
@@ -640,7 +648,7 @@ tmgr_hierarchy_default(struct pmd_internals *softnic,
 
 		[2] = {
 			.shaper_profile_id = params->shaper_profile_id.tc[2],
-			.shared_shaper_id = &params->shared_shaper_id.tc[2],
+			.shared_shaper_id = &shared_shaper_id[2],
 			.n_shared_shapers =
 				(&params->shared_shaper_id.tc_valid[2]) ? 1 : 0,
 			.nonleaf = {
@@ -650,13 +658,103 @@ tmgr_hierarchy_default(struct pmd_internals *softnic,
 
 		[3] = {
 			.shaper_profile_id = params->shaper_profile_id.tc[3],
-			.shared_shaper_id = &params->shared_shaper_id.tc[3],
+			.shared_shaper_id = &shared_shaper_id[3],
 			.n_shared_shapers =
 				(&params->shared_shaper_id.tc_valid[3]) ? 1 : 0,
 			.nonleaf = {
 				.n_sp_priorities = 1,
 			},
 		},
+
+		[4] = {
+			.shaper_profile_id = params->shaper_profile_id.tc[4],
+			.shared_shaper_id = &shared_shaper_id[4],
+			.n_shared_shapers =
+				(&params->shared_shaper_id.tc_valid[4]) ? 1 : 0,
+			.nonleaf = {
+				.n_sp_priorities = 1,
+			},
+		},
+
+		[5] = {
+			.shaper_profile_id = params->shaper_profile_id.tc[5],
+			.shared_shaper_id = &shared_shaper_id[5],
+			.n_shared_shapers =
+				(&params->shared_shaper_id.tc_valid[5]) ? 1 : 0,
+			.nonleaf = {
+				.n_sp_priorities = 1,
+			},
+		},
+
+		[6] = {
+			.shaper_profile_id = params->shaper_profile_id.tc[6],
+			.shared_shaper_id = &shared_shaper_id[6],
+			.n_shared_shapers =
+				(&params->shared_shaper_id.tc_valid[6]) ? 1 : 0,
+			.nonleaf = {
+				.n_sp_priorities = 1,
+			},
+		},
+
+		[7] = {
+			.shaper_profile_id = params->shaper_profile_id.tc[7],
+			.shared_shaper_id = &shared_shaper_id[7],
+			.n_shared_shapers =
+				(&params->shared_shaper_id.tc_valid[7]) ? 1 : 0,
+			.nonleaf = {
+				.n_sp_priorities = 1,
+			},
+		},
+
+		[8] = {
+			.shaper_profile_id = params->shaper_profile_id.tc[8],
+			.shared_shaper_id = &shared_shaper_id[8],
+			.n_shared_shapers =
+				(&params->shared_shaper_id.tc_valid[8]) ? 1 : 0,
+			.nonleaf = {
+				.n_sp_priorities = 1,
+			},
+		},
+
+		[9] = {
+			.shaper_profile_id = params->shaper_profile_id.tc[9],
+			.shared_shaper_id = &shared_shaper_id[9],
+			.n_shared_shapers =
+				(&params->shared_shaper_id.tc_valid[9]) ? 1 : 0,
+			.nonleaf = {
+				.n_sp_priorities = 1,
+			},
+		},
+
+		[10] = {
+			.shaper_profile_id = params->shaper_profile_id.tc[10],
+			.shared_shaper_id = &shared_shaper_id[10],
+			.n_shared_shapers =
+				(&params->shared_shaper_id.tc_valid[10]) ? 1 : 0,
+			.nonleaf = {
+				.n_sp_priorities = 1,
+			},
+		},
+
+		[11] = {
+			.shaper_profile_id = params->shaper_profile_id.tc[11],
+			.shared_shaper_id = &shared_shaper_id[11],
+			.n_shared_shapers =
+				(&params->shared_shaper_id.tc_valid[11]) ? 1 : 0,
+			.nonleaf = {
+				.n_sp_priorities = 1,
+			},
+		},
+
+		[12] = {
+			.shaper_profile_id = params->shaper_profile_id.tc[12],
+			.shared_shaper_id = &shared_shaper_id[12],
+			.n_shared_shapers =
+				(&params->shared_shaper_id.tc_valid[12]) ? 1 : 0,
+			.nonleaf = {
+				.n_sp_priorities = 1,
+			},
+		},
 	};
 
 	struct rte_tm_node_params queue_node_params = {
@@ -730,7 +828,23 @@ tmgr_hierarchy_default(struct pmd_internals *softnic,
 					return -1;
 
 				/* Hierarchy level 4: Queue nodes */
-				for (q = 0; q < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; q++) {
+				if (t == RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) {
+					/* Best-effort traffic class queues */
+					for (q = 0; q < RTE_SCHED_BE_QUEUES_PER_PIPE; q++) {
+						status = rte_tm_node_add(port_id,
+							queue_node_id(n_spp, n_pps, s, p, t, q),
+							tc_node_id(n_spp, n_pps, s, p, t),
+							0,
+							params->weight.queue[q],
+							RTE_TM_NODE_LEVEL_ID_ANY,
+							&queue_node_params,
+							&error);
+						if (status)
+							return -1;
+					}
+				} else {
+					/* Strict-priority traffic class queues */
+					q = 0;
 					status = rte_tm_node_add(port_id,
 						queue_node_id(n_spp, n_pps, s, p, t, q),
 						tc_node_id(n_spp, n_pps, s, p, t),
@@ -741,7 +855,7 @@ tmgr_hierarchy_default(struct pmd_internals *softnic,
 						&error);
 					if (status)
 						return -1;
-				} /* Queue */
+				}
 			} /* TC */
 		} /* Pipe */
 	} /* Subport */
@@ -762,13 +876,31 @@ tmgr_hierarchy_default(struct pmd_internals *softnic,
  *   tc1 <profile_id>
  *   tc2 <profile_id>
  *   tc3 <profile_id>
+ *   tc4 <profile_id>
+ *   tc5 <profile_id>
+ *   tc6 <profile_id>
+ *   tc7 <profile_id>
+ *   tc8 <profile_id>
+ *   tc9 <profile_id>
+ *   tc10 <profile_id>
+ *   tc11 <profile_id>
+ *   tc12 <profile_id>
  *  shared shaper
  *   tc0 <id | none>
  *   tc1 <id | none>
  *   tc2 <id | none>
  *   tc3 <id | none>
+ *   tc4 <id | none>
+ *   tc5 <id | none>
+ *   tc6 <id | none>
+ *   tc7 <id | none>
+ *   tc8 <id | none>
+ *   tc9 <id | none>
+ *   tc10 <id | none>
+ *   tc11 <id | none>
+ *   tc12 <id | none>
  *  weight
- *   queue  <q0> ... <q15>
+ *   queue  <q12> ... <q15>
  */
 static void
 cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
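
For illustration only (not part of the patch), the token layout that
cmd_tmgr_hierarchy_default() below enforces after this change
(n_tokens == 74) is, for the portions touched here (tokens 0..21 are
unchanged and omitted):

	tokens[22..39]  tc4 <profile_id> ... tc12 <profile_id>
	tokens[40..41]  shared shaper
	tokens[42..67]  tc0 <id | none> ... tc12 <id | none>
	tokens[68..69]  weight queue
	tokens[70..73]  best-effort queue weights <q12> ... <q15>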
@@ -778,11 +910,11 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
 	size_t out_size)
 {
 	struct tmgr_hierarchy_default_params p;
-	int i, status;
+	int i, j, status;
 
 	memset(&p, 0, sizeof(p));
 
-	if (n_tokens != 50) {
+	if (n_tokens != 74) {
 		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
 		return;
 	}
@@ -894,27 +1026,117 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
 		return;
 	}
 
+	if (strcmp(tokens[22], "tc4") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[4], tokens[23]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "tc4 profile id");
+		return;
+	}
+
+	if (strcmp(tokens[24], "tc5") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc5");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[5], tokens[25]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "tc5 profile id");
+		return;
+	}
+
+	if (strcmp(tokens[26], "tc6") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[6], tokens[27]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "tc6 profile id");
+		return;
+	}
+
+	if (strcmp(tokens[28], "tc7") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[7], tokens[29]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "tc7 profile id");
+		return;
+	}
+
+	if (strcmp(tokens[30], "tc8") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[8], tokens[31]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "tc8 profile id");
+		return;
+	}
+
+	if (strcmp(tokens[32], "tc9") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[9], tokens[33]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "tc9 profile id");
+		return;
+	}
+
+	if (strcmp(tokens[34], "tc10") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[10], tokens[35]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "tc10 profile id");
+		return;
+	}
+
+	if (strcmp(tokens[36], "tc11") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc11");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[11], tokens[37]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "tc11 profile id");
+		return;
+	}
+
+	if (strcmp(tokens[38], "tc12") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[12], tokens[39]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "tc12 profile id");
+		return;
+	}
+
 	/* Shared shaper */
 
-	if (strcmp(tokens[22], "shared") != 0) {
+	if (strcmp(tokens[40], "shared") != 0) {
 		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared");
 		return;
 	}
 
-	if (strcmp(tokens[23], "shaper") != 0) {
+	if (strcmp(tokens[41], "shaper") != 0) {
 		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
 		return;
 	}
 
-	if (strcmp(tokens[24], "tc0") != 0) {
+	if (strcmp(tokens[42], "tc0") != 0) {
 		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0");
 		return;
 	}
 
-	if (strcmp(tokens[25], "none") == 0)
+	if (strcmp(tokens[43], "none") == 0)
 		p.shared_shaper_id.tc_valid[0] = 0;
 	else {
-		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], tokens[25]) != 0) {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], tokens[43]) != 0) {
 			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc0");
 			return;
 		}
@@ -922,15 +1144,15 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
 		p.shared_shaper_id.tc_valid[0] = 1;
 	}
 
-	if (strcmp(tokens[26], "tc1") != 0) {
+	if (strcmp(tokens[44], "tc1") != 0) {
 		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1");
 		return;
 	}
 
-	if (strcmp(tokens[27], "none") == 0)
+	if (strcmp(tokens[45], "none") == 0)
 		p.shared_shaper_id.tc_valid[1] = 0;
 	else {
-		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], tokens[27]) != 0) {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], tokens[45]) != 0) {
 			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc1");
 			return;
 		}
@@ -938,15 +1160,15 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
 		p.shared_shaper_id.tc_valid[1] = 1;
 	}
 
-	if (strcmp(tokens[28], "tc2") != 0) {
+	if (strcmp(tokens[46], "tc2") != 0) {
 		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2");
 		return;
 	}
 
-	if (strcmp(tokens[29], "none") == 0)
+	if (strcmp(tokens[47], "none") == 0)
 		p.shared_shaper_id.tc_valid[2] = 0;
 	else {
-		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], tokens[29]) != 0) {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], tokens[47]) != 0) {
 			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc2");
 			return;
 		}
@@ -954,15 +1176,15 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
 		p.shared_shaper_id.tc_valid[2] = 1;
 	}
 
-	if (strcmp(tokens[30], "tc3") != 0) {
+	if (strcmp(tokens[48], "tc3") != 0) {
 		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3");
 		return;
 	}
 
-	if (strcmp(tokens[31], "none") == 0)
+	if (strcmp(tokens[49], "none") == 0)
 		p.shared_shaper_id.tc_valid[3] = 0;
 	else {
-		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], tokens[31]) != 0) {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], tokens[49]) != 0) {
 			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc3");
 			return;
 		}
@@ -970,22 +1192,171 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
 		p.shared_shaper_id.tc_valid[3] = 1;
 	}
 
+	if (strcmp(tokens[50], "tc4") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4");
+		return;
+	}
+
+	if (strcmp(tokens[51], "none") == 0)
+		p.shared_shaper_id.tc_valid[4] = 0;
+	else {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[4], tokens[51]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc4");
+			return;
+		}
+
+		p.shared_shaper_id.tc_valid[4] = 1;
+	}
+
+	if (strcmp(tokens[52], "tc5") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc5");
+		return;
+	}
+
+	if (strcmp(tokens[53], "none") == 0)
+		p.shared_shaper_id.tc_valid[5] = 0;
+	else {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[5], tokens[53]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc5");
+			return;
+		}
+
+		p.shared_shaper_id.tc_valid[5] = 1;
+	}
+
+	if (strcmp(tokens[54], "tc6") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6");
+		return;
+	}
+
+	if (strcmp(tokens[55], "none") == 0)
+		p.shared_shaper_id.tc_valid[6] = 0;
+	else {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[6], tokens[55]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc6");
+			return;
+		}
+
+		p.shared_shaper_id.tc_valid[6] = 1;
+	}
+
+	if (strcmp(tokens[56], "tc7") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7");
+		return;
+	}
+
+	if (strcmp(tokens[57], "none") == 0)
+		p.shared_shaper_id.tc_valid[7] = 0;
+	else {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[7], tokens[57]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc7");
+			return;
+		}
+
+		p.shared_shaper_id.tc_valid[7] = 1;
+	}
+
+	if (strcmp(tokens[58], "tc8") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8");
+		return;
+	}
+
+	if (strcmp(tokens[59], "none") == 0)
+		p.shared_shaper_id.tc_valid[8] = 0;
+	else {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[8], tokens[59]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc8");
+			return;
+		}
+
+		p.shared_shaper_id.tc_valid[8] = 1;
+	}
+
+	if (strcmp(tokens[60], "tc9") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9");
+		return;
+	}
+
+	if (strcmp(tokens[61], "none") == 0)
+		p.shared_shaper_id.tc_valid[9] = 0;
+	else {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[9], tokens[61]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc9");
+			return;
+		}
+
+		p.shared_shaper_id.tc_valid[9] = 1;
+	}
+
+	if (strcmp(tokens[62], "tc10") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10");
+		return;
+	}
+
+	if (strcmp(tokens[63], "none") == 0)
+		p.shared_shaper_id.tc_valid[10] = 0;
+	else {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[10], tokens[63]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc10");
+			return;
+		}
+
+		p.shared_shaper_id.tc_valid[10] = 1;
+	}
+
+	if (strcmp(tokens[64], "tc11") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc11");
+		return;
+	}
+
+	if (strcmp(tokens[65], "none") == 0)
+		p.shared_shaper_id.tc_valid[11] = 0;
+	else {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[11], tokens[65]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc11");
+			return;
+		}
+
+		p.shared_shaper_id.tc_valid[11] = 1;
+	}
+
+	if (strcmp(tokens[66], "tc12") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12");
+		return;
+	}
+
+	if (strcmp(tokens[67], "none") == 0)
+		p.shared_shaper_id.tc_valid[12] = 0;
+	else {
+		if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[12], tokens[67]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc12");
+			return;
+		}
+
+		p.shared_shaper_id.tc_valid[12] = 1;
+	}
+
 	/* Weight */
 
-	if (strcmp(tokens[32], "weight") != 0) {
+	if (strcmp(tokens[68], "weight") != 0) {
 		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight");
 		return;
 	}
 
-	if (strcmp(tokens[33], "queue") != 0) {
+	if (strcmp(tokens[69], "queue") != 0) {
 		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "queue");
 		return;
 	}
 
-	for (i = 0; i < 16; i++) {
-		if (softnic_parser_read_uint32(&p.weight.queue[i], tokens[34 + i]) != 0) {
-			snprintf(out, out_size, MSG_ARG_INVALID, "weight queue");
-			return;
+	for (i = 0, j = 0; i < 16; i++) {
+		if (i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) {
+			p.weight.queue[i] = 1;
+		} else {
+			if (softnic_parser_read_uint32(&p.weight.queue[i], tokens[70 + j]) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "weight queue");
+				return;
+			}
+			j++;
 		}
 	}
 
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index 415434d0d..5525dff98 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -43,7 +43,7 @@ struct pmd_params {
 	/** Traffic Management (TM) */
 	struct {
 		uint32_t n_queues; /**< Number of queues */
-		uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+		uint16_t qsize[RTE_SCHED_QUEUES_PER_PIPE];
 	} tm;
 };
 
@@ -161,13 +161,15 @@ TAILQ_HEAD(softnic_link_list, softnic_link);
 #define TM_MAX_PIPES_PER_SUBPORT			4096
 #endif
 
+#ifndef TM_MAX_PIPE_PROFILE
+#define TM_MAX_PIPE_PROFILE				256
+#endif
 struct tm_params {
 	struct rte_sched_port_params port_params;
 
 	struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];
 
-	struct rte_sched_pipe_params
-		pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+	struct rte_sched_pipe_params pipe_profiles[TM_MAX_PIPE_PROFILE];
 	uint32_t n_pipe_profiles;
 	uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
 };
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 58744a9eb..c7a74836b 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -367,7 +367,8 @@ tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
 {
 	struct pmd_internals *p = dev->data->dev_private;
 	uint32_t n_queues_max = p->params.tm.n_queues;
-	uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+	uint32_t n_tc_max =
+		(n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) / RTE_SCHED_QUEUES_PER_PIPE;
 	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
 	uint32_t n_subports_max = n_pipes_max;
 	uint32_t n_root_max = 1;
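
For illustration only (not part of the patch), the revised n_tc_max
formula works out as follows, assuming
RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE == 13 and
RTE_SCHED_QUEUES_PER_PIPE == 16:

	/* Worked example: n_queues_max = 65536 gives
	 *   n_tc_max    = 65536 * 13 / 16 = 53248
	 *   n_pipes_max = 53248 / 13      = 4096
	 * i.e. still one pipe per 16 queues; only the number of TCs per
	 * pipe changes from 4 to 13.
	 */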
@@ -625,10 +626,10 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_shared_n_max = 1,
 
 			.sched_n_children_max =
-				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+				RTE_SCHED_BE_QUEUES_PER_PIPE,
 			.sched_sp_n_priorities_max = 1,
 			.sched_wfq_n_children_per_group_max =
-				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+				RTE_SCHED_BE_QUEUES_PER_PIPE,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = UINT32_MAX,
 
@@ -793,10 +794,10 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 
 		{.nonleaf = {
 			.sched_n_children_max =
-				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+				RTE_SCHED_BE_QUEUES_PER_PIPE,
 			.sched_sp_n_priorities_max = 1,
 			.sched_wfq_n_children_per_group_max =
-				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+				RTE_SCHED_BE_QUEUES_PER_PIPE,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = UINT32_MAX,
 		} },
@@ -2043,15 +2044,13 @@ pipe_profile_build(struct rte_eth_dev *dev,
 
 		/* Queue */
 		TAILQ_FOREACH(nq, nl, node) {
-			uint32_t pipe_queue_id;
 
 			if (nq->level != TM_NODE_LEVEL_QUEUE ||
 				nq->parent_node_id != nt->node_id)
 				continue;
 
-			pipe_queue_id = nt->priority *
-				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
-			pp->wrr_weights[pipe_queue_id] = nq->weight;
+			if (nt->priority == RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
+				pp->wrr_weights[queue_id] = nq->weight;
 
 			queue_id++;
 		}
@@ -2065,7 +2064,7 @@ pipe_profile_free_exists(struct rte_eth_dev *dev,
 	struct pmd_internals *p = dev->data->dev_private;
 	struct tm_params *t = &p->soft.tm.params;
 
-	if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
+	if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
 		*pipe_profile_id = t->n_pipe_profiles;
 		return 1;
 	}
@@ -2217,6 +2216,7 @@ wred_profiles_set(struct rte_eth_dev *dev)
 {
 	struct pmd_internals *p = dev->data->dev_private;
 	struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
+
 	uint32_t tc_id;
 	enum rte_color color;
 
@@ -2332,7 +2332,7 @@ hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
 				rte_strerror(EINVAL));
 	}
 
-	/* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
+	/* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
 	TAILQ_FOREACH(np, nl, node) {
 		uint32_t mask = 0, mask_expected =
 			RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
@@ -2364,12 +2364,14 @@ hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
 				rte_strerror(EINVAL));
 	}
 
-	/* Each TC has exactly 4 packet queues. */
+	/* Each strict priority TC has exactly 1 packet queue, while the
+	 * lowest priority TC (best-effort) has 4 queues.
+	 */
 	TAILQ_FOREACH(nt, nl, node) {
 		if (nt->level != TM_NODE_LEVEL_TC)
 			continue;
 
-		if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+		if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
 			return -rte_tm_error_set(error,
 				EINVAL,
 				RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -2531,9 +2533,22 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev)
 			p->params.tm.qsize[1],
 			p->params.tm.qsize[2],
 			p->params.tm.qsize[3],
+			p->params.tm.qsize[4],
+			p->params.tm.qsize[5],
+			p->params.tm.qsize[6],
+			p->params.tm.qsize[7],
+			p->params.tm.qsize[8],
+			p->params.tm.qsize[9],
+			p->params.tm.qsize[10],
+			p->params.tm.qsize[11],
+			p->params.tm.qsize[12],
+			p->params.tm.qsize[13],
+			p->params.tm.qsize[14],
+			p->params.tm.qsize[15],
 		},
 		.pipe_profiles = t->pipe_profiles,
 		.n_pipe_profiles = t->n_pipe_profiles,
+		.n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
 	};
 
 	wred_profiles_set(dev);
@@ -2566,8 +2581,17 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev)
 					tc_rate[1],
 					tc_rate[2],
 					tc_rate[3],
-			},
-			.tc_period = SUBPORT_TC_PERIOD,
+					tc_rate[4],
+					tc_rate[5],
+					tc_rate[6],
+					tc_rate[7],
+					tc_rate[8],
+					tc_rate[9],
+					tc_rate[10],
+					tc_rate[11],
+					tc_rate[12],
+				},
+				.tc_period = SUBPORT_TC_PERIOD,
 		};
 
 		subport_id++;
@@ -2666,7 +2690,7 @@ update_queue_weight(struct rte_eth_dev *dev,
 	uint32_t subport_id = tm_node_subport_id(dev, ns);
 
 	uint32_t pipe_queue_id =
-		tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
+		tc_id * RTE_SCHED_QUEUES_PER_PIPE + queue_id;
 
 	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
 	struct rte_sched_pipe_params profile1;
@@ -3023,7 +3047,7 @@ tm_port_queue_id(struct rte_eth_dev *dev,
 	uint32_t port_tc_id =
 		port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
 	uint32_t port_queue_id =
-		port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
+		port_tc_id * RTE_SCHED_QUEUES_PER_PIPE + tc_queue_id;
 
 	return port_queue_id;
 }
@@ -3149,8 +3173,8 @@ read_pipe_stats(struct rte_eth_dev *dev,
 		uint32_t qid = tm_port_queue_id(dev,
 			subport_id,
 			pipe_id,
-			i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
-			i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+			i / RTE_SCHED_QUEUES_PER_PIPE,
+			i % RTE_SCHED_QUEUES_PER_PIPE);
 
 		int status = rte_sched_queue_read_stats(SCHED(p),
 			qid,
@@ -3202,7 +3226,7 @@ read_tc_stats(struct rte_eth_dev *dev,
 	uint32_t i;
 
 	/* Stats read */
-	for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
 		struct rte_sched_queue_stats s;
 		uint16_t qlen;
 
-- 
2.21.0

