All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] ip_pipeline: add flow id parameter to flow classification
@ 2015-10-05 11:13 Jasvinder Singh
  2015-10-05 13:10 ` Dumitrescu, Cristian
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Jasvinder Singh @ 2015-10-05 11:13 UTC (permalink / raw)
  To: dev

This patch adds flow id field to the flow
classification table entries and adds table action
handlers to read flow id from table entry and
write it into the packet meta-data. The flow_id
(32-bit) parameter is also added to CLI commands
flow add, flow delete, etc.

*v2
fixed bug: flow table entry size power of 2

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
---
 .../pipeline/pipeline_flow_classification.c        | 206 ++++++++++++++++++---
 .../pipeline/pipeline_flow_classification.h        |   4 +-
 .../pipeline/pipeline_flow_classification_be.c     | 115 +++++++++++-
 .../pipeline/pipeline_flow_classification_be.h     |   2 +
 4 files changed, 296 insertions(+), 31 deletions(-)

diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
index 4b82180..04b6915 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
@@ -152,6 +152,7 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
 struct app_pipeline_fc_flow {
 	struct pipeline_fc_key key;
 	uint32_t port_id;
+	uint32_t flow_id;
 	uint32_t signature;
 	void *entry_ptr;
 
@@ -280,7 +281,8 @@ int
 app_pipeline_fc_add(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
-	uint32_t port_id)
+	uint32_t port_id,
+	uint32_t flow_id)
 {
 	struct app_pipeline_fc *p;
 	struct app_pipeline_fc_flow *flow;
@@ -325,6 +327,7 @@ app_pipeline_fc_add(struct app_params *app,
 	req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD;
 	app_pipeline_fc_key_convert(key, req->key, &signature);
 	req->port_id = port_id;
+	req->flow_id = flow_id;
 
 	/* Send request and wait for response */
 	rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
@@ -348,6 +351,7 @@ app_pipeline_fc_add(struct app_params *app,
 	memset(&flow->key, 0, sizeof(flow->key));
 	memcpy(&flow->key, key, sizeof(flow->key));
 	flow->port_id = port_id;
+	flow->flow_id = flow_id;
 	flow->signature = signature;
 	flow->entry_ptr = rsp->entry_ptr;
 
@@ -370,6 +374,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
 	uint32_t *port_id,
+	uint32_t *flow_id,
 	uint32_t n_keys)
 {
 	struct app_pipeline_fc *p;
@@ -389,6 +394,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 	if ((app == NULL) ||
 		(key == NULL) ||
 		(port_id == NULL) ||
+		(flow_id == NULL) ||
 		(n_keys == 0))
 		return -1;
 
@@ -496,6 +502,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 			flow_req[i].key,
 			&signature[i]);
 		flow_req[i].port_id = port_id[i];
+		flow_req[i].flow_id = flow_id[i];
 	}
 
 	req->type = PIPELINE_MSG_REQ_CUSTOM;
@@ -535,6 +542,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 	for (i = 0; i < rsp->n_keys; i++) {
 		memcpy(&flow[i]->key, &key[i], sizeof(flow[i]->key));
 		flow[i]->port_id = port_id[i];
+		flow[i]->flow_id = flow_id[i];
 		flow[i]->signature = signature[i];
 		flow[i]->entry_ptr = flow_rsp[i].entry_ptr;
 
@@ -731,13 +739,15 @@ print_fc_qinq_flow(struct app_pipeline_fc_flow *flow)
 {
 	printf("(SVLAN = %" PRIu32 ", "
 		"CVLAN = %" PRIu32 ") => "
-		"Port = %" PRIu32 " "
+		"Port = %" PRIu32 ", "
+		"Flow ID = %" PRIu32 ", "
 		"(signature = 0x%08" PRIx32 ", "
 		"entry_ptr = %p)\n",
 
 		flow->key.key.qinq.svlan,
 		flow->key.key.qinq.cvlan,
 		flow->port_id,
+		flow->flow_id,
 		flow->signature,
 		flow->entry_ptr);
 }
@@ -750,7 +760,8 @@ print_fc_ipv4_5tuple_flow(struct app_pipeline_fc_flow *flow)
 		   "SP = %" PRIu32 ", "
 		   "DP = %" PRIu32 ", "
 		   "Proto = %" PRIu32 ") => "
-		   "Port = %" PRIu32 " "
+		   "Port = %" PRIu32 ", "
+		   "Flow ID = %" PRIu32 " "
 		   "(signature = 0x%08" PRIx32 ", "
 		   "entry_ptr = %p)\n",
 
@@ -770,6 +781,7 @@ print_fc_ipv4_5tuple_flow(struct app_pipeline_fc_flow *flow)
 		   flow->key.key.ipv4_5tuple.proto,
 
 		   flow->port_id,
+		   flow->flow_id,
 		   flow->signature,
 		   flow->entry_ptr);
 }
@@ -787,7 +799,8 @@ print_fc_ipv6_5tuple_flow(struct app_pipeline_fc_flow *flow) {
 		"SP = %" PRIu32 ", "
 		"DP = %" PRIu32 " "
 		"Proto = %" PRIu32 " "
-		"=> Port = %" PRIu32 " "
+		"=> Port = %" PRIu32 ", "
+		"Flow ID = %" PRIu32 " "
 		"(signature = 0x%08" PRIx32 ", "
 		"entry_ptr = %p)\n",
 
@@ -831,6 +844,7 @@ print_fc_ipv6_5tuple_flow(struct app_pipeline_fc_flow *flow) {
 		flow->key.key.ipv6_5tuple.proto,
 
 		flow->port_id,
+		flow->flow_id,
 		flow->signature,
 		flow->entry_ptr);
 }
@@ -895,7 +909,10 @@ struct cmd_fc_add_qinq_result {
 	cmdline_fixed_string_t qinq_string;
 	uint16_t svlan;
 	uint16_t cvlan;
+	cmdline_fixed_string_t port_string;
 	uint32_t port;
+	cmdline_fixed_string_t flowid_string;
+	uint32_t flow_id;
 };
 
 static void
@@ -917,7 +934,8 @@ cmd_fc_add_qinq_parsed(
 	status = app_pipeline_fc_add(app,
 		params->pipeline_id,
 		&key,
-		params->port);
+		params->port,
+		params->flow_id);
 	if (status != 0)
 		printf("Command failed\n");
 }
@@ -947,9 +965,20 @@ cmdline_parse_token_num_t cmd_fc_add_qinq_svlan =
 cmdline_parse_token_num_t cmd_fc_add_qinq_cvlan =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, cvlan, UINT16);
 
+cmdline_parse_token_string_t cmd_fc_add_qinq_port_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, port_string,
+		"port");
+
 cmdline_parse_token_num_t cmd_fc_add_qinq_port =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, port, UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_qinq_flowid_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, flowid_string,
+		"flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_flow_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, flow_id, UINT32);
+
 cmdline_parse_inst_t cmd_fc_add_qinq = {
 	.f = cmd_fc_add_qinq_parsed,
 	.data = NULL,
@@ -962,7 +991,10 @@ cmdline_parse_inst_t cmd_fc_add_qinq = {
 		(void *) &cmd_fc_add_qinq_qinq_string,
 		(void *) &cmd_fc_add_qinq_svlan,
 		(void *) &cmd_fc_add_qinq_cvlan,
+		(void *) &cmd_fc_add_qinq_port_string,
 		(void *) &cmd_fc_add_qinq_port,
+		(void *) &cmd_fc_add_qinq_flowid_string,
+		(void *) &cmd_fc_add_qinq_flow_id,
 		NULL,
 	},
 };
@@ -996,8 +1028,21 @@ cmd_fc_add_qinq_all_parsed(
 	struct app_params *app = data;
 	struct pipeline_fc_key *key;
 	uint32_t *port_id;
-	uint32_t flow_id;
+	uint32_t *flow_id;
+	uint32_t id;
 
+	/* Check input arguments */
+	if (params->n_flows == 0) {
+		printf("Invalid number of flows\n");
+		return;
+	}
+
+	if (params->n_ports == 0) {
+		printf("Invalid number of output ports\n");
+		return;
+	}
+
+	/* Memory allocation */
 	key = rte_zmalloc(NULL,
 		N_FLOWS_BULK * sizeof(*key),
 		RTE_CACHE_LINE_SIZE);
@@ -1015,23 +1060,36 @@ cmd_fc_add_qinq_all_parsed(
 		return;
 	}
 
-	for (flow_id = 0; flow_id < params->n_flows; flow_id++) {
-		uint32_t pos = flow_id & (N_FLOWS_BULK - 1);
+	flow_id = rte_malloc(NULL,
+		N_FLOWS_BULK * sizeof(*flow_id),
+		RTE_CACHE_LINE_SIZE);
+	if (flow_id == NULL) {
+		rte_free(port_id);
+		rte_free(key);
+		printf("Memory allocation failed\n");
+		return;
+	}
+
+	/* Flow add */
+	for (id = 0; id < params->n_flows; id++) {
+		uint32_t pos = id & (N_FLOWS_BULK - 1);
 
 		key[pos].type = FLOW_KEY_QINQ;
-		key[pos].key.qinq.svlan = flow_id >> 12;
-		key[pos].key.qinq.cvlan = flow_id & 0xFFF;
+		key[pos].key.qinq.svlan = id >> 12;
+		key[pos].key.qinq.cvlan = id & 0xFFF;
 
-		port_id[pos] = flow_id % params->n_ports;
+		port_id[pos] = id % params->n_ports;
+		flow_id[pos] = id;
 
 		if ((pos == N_FLOWS_BULK - 1) ||
-			(flow_id == params->n_flows - 1)) {
+			(id == params->n_flows - 1)) {
 			int status;
 
 			status = app_pipeline_fc_add_bulk(app,
 				params->pipeline_id,
 				key,
 				port_id,
+				flow_id,
 				pos + 1);
 
 			if (status != 0) {
@@ -1042,6 +1100,8 @@ cmd_fc_add_qinq_all_parsed(
 		}
 	}
 
+	/* Memory free */
+	rte_free(flow_id);
 	rte_free(port_id);
 	rte_free(key);
 }
@@ -1110,7 +1170,10 @@ struct cmd_fc_add_ipv4_5tuple_result {
 	uint16_t port_src;
 	uint16_t port_dst;
 	uint32_t proto;
+	cmdline_fixed_string_t port_string;
 	uint32_t port;
+	cmdline_fixed_string_t flowid_string;
+	uint32_t flow_id;
 };
 
 static void
@@ -1137,7 +1200,8 @@ cmd_fc_add_ipv4_5tuple_parsed(
 	status = app_pipeline_fc_add(app,
 		params->pipeline_id,
 		&key,
-		params->port);
+		params->port,
+		params->flow_id);
 	if (status != 0)
 		printf("Command failed\n");
 }
@@ -1180,10 +1244,22 @@ cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_proto =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, proto,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_port_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_string,
+		"port");
+
 cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_flowid_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
+		flowid_string, "flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_flow_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, flow_id,
+		UINT32);
+
 cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple = {
 	.f = cmd_fc_add_ipv4_5tuple_parsed,
 	.data = NULL,
@@ -1199,7 +1275,10 @@ cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple = {
 		(void *) &cmd_fc_add_ipv4_5tuple_port_src,
 		(void *) &cmd_fc_add_ipv4_5tuple_port_dst,
 		(void *) &cmd_fc_add_ipv4_5tuple_proto,
+		(void *) &cmd_fc_add_ipv4_5tuple_port_string,
 		(void *) &cmd_fc_add_ipv4_5tuple_port,
+		(void *) &cmd_fc_add_ipv4_5tuple_flowid_string,
+		(void *) &cmd_fc_add_ipv4_5tuple_flow_id,
 		NULL,
 	},
 };
@@ -1229,8 +1308,21 @@ cmd_fc_add_ipv4_5tuple_all_parsed(
 	struct app_params *app = data;
 	struct pipeline_fc_key *key;
 	uint32_t *port_id;
-	uint32_t flow_id;
+	uint32_t *flow_id;
+	uint32_t id;
+
+	/* Check input parameters */
+	if (params->n_flows == 0) {
+		printf("Invalid number of flows\n");
+		return;
+	}
 
+	if (params->n_ports == 0) {
+		printf("Invalid number of ports\n");
+		return;
+	}
+
+	/* Memory allocation */
 	key = rte_zmalloc(NULL,
 		N_FLOWS_BULK * sizeof(*key),
 		RTE_CACHE_LINE_SIZE);
@@ -1248,26 +1340,39 @@ cmd_fc_add_ipv4_5tuple_all_parsed(
 		return;
 	}
 
-	for (flow_id = 0; flow_id < params->n_flows; flow_id++) {
-		uint32_t pos = flow_id & (N_FLOWS_BULK - 1);
+	flow_id = rte_malloc(NULL,
+		N_FLOWS_BULK * sizeof(*flow_id),
+		RTE_CACHE_LINE_SIZE);
+	if (flow_id == NULL) {
+		rte_free(port_id);
+		rte_free(key);
+		printf("Memory allocation failed\n");
+		return;
+	}
+
+	/* Flow add */
+	for (id = 0; id < params->n_flows; id++) {
+		uint32_t pos = id & (N_FLOWS_BULK - 1);
 
 		key[pos].type = FLOW_KEY_IPV4_5TUPLE;
 		key[pos].key.ipv4_5tuple.ip_src = 0;
-		key[pos].key.ipv4_5tuple.ip_dst = flow_id;
+		key[pos].key.ipv4_5tuple.ip_dst = id;
 		key[pos].key.ipv4_5tuple.port_src = 0;
 		key[pos].key.ipv4_5tuple.port_dst = 0;
 		key[pos].key.ipv4_5tuple.proto = 6;
 
-		port_id[pos] = flow_id % params->n_ports;
+		port_id[pos] = id % params->n_ports;
+		flow_id[pos] = id;
 
 		if ((pos == N_FLOWS_BULK - 1) ||
-			(flow_id == params->n_flows - 1)) {
+			(id == params->n_flows - 1)) {
 			int status;
 
 			status = app_pipeline_fc_add_bulk(app,
 				params->pipeline_id,
 				key,
 				port_id,
+				flow_id,
 				pos + 1);
 
 			if (status != 0) {
@@ -1278,6 +1383,8 @@ cmd_fc_add_ipv4_5tuple_all_parsed(
 		}
 	}
 
+	/* Memory free */
+	rte_free(flow_id);
 	rte_free(port_id);
 	rte_free(key);
 }
@@ -1346,7 +1453,10 @@ struct cmd_fc_add_ipv6_5tuple_result {
 	uint16_t port_src;
 	uint16_t port_dst;
 	uint32_t proto;
+	cmdline_fixed_string_t port_string;
 	uint32_t port;
+	cmdline_fixed_string_t flowid_string;
+	uint32_t flow_id;
 };
 
 static void
@@ -1375,7 +1485,8 @@ cmd_fc_add_ipv6_5tuple_parsed(
 	status = app_pipeline_fc_add(app,
 		params->pipeline_id,
 		&key,
-		params->port);
+		params->port,
+		params->flow_id);
 	if (status != 0)
 		printf("Command failed\n");
 }
@@ -1418,10 +1529,22 @@ cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_proto =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, proto,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_port_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+		port_string, "port");
+
 cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_flowid_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+		flowid_string, "flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_flow_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, flow_id,
+		UINT32);
+
 cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple = {
 	.f = cmd_fc_add_ipv6_5tuple_parsed,
 	.data = NULL,
@@ -1437,7 +1560,10 @@ cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple = {
 		(void *) &cmd_fc_add_ipv6_5tuple_port_src,
 		(void *) &cmd_fc_add_ipv6_5tuple_port_dst,
 		(void *) &cmd_fc_add_ipv6_5tuple_proto,
+		(void *) &cmd_fc_add_ipv6_5tuple_port_string,
 		(void *) &cmd_fc_add_ipv6_5tuple_port,
+		(void *) &cmd_fc_add_ipv6_5tuple_flowid_string,
+		(void *) &cmd_fc_add_ipv6_5tuple_flow_id,
 		NULL,
 	},
 };
@@ -1467,8 +1593,21 @@ cmd_fc_add_ipv6_5tuple_all_parsed(
 	struct app_params *app = data;
 	struct pipeline_fc_key *key;
 	uint32_t *port_id;
-	uint32_t flow_id;
+	uint32_t *flow_id;
+	uint32_t id;
+
+	/* Check input parameters */
+	if (params->n_flows == 0) {
+		printf("Invalid number of flows\n");
+		return;
+	}
+
+	if (params->n_ports == 0) {
+		printf("Invalid number of ports\n");
+		return;
+	}
 
+	/* Memory allocation */
 	key = rte_zmalloc(NULL,
 		N_FLOWS_BULK * sizeof(*key),
 		RTE_CACHE_LINE_SIZE);
@@ -1486,25 +1625,38 @@ cmd_fc_add_ipv6_5tuple_all_parsed(
 		return;
 	}
 
-	for (flow_id = 0; flow_id < params->n_flows; flow_id++) {
-		uint32_t pos = flow_id & (N_FLOWS_BULK - 1);
+	flow_id = rte_malloc(NULL,
+		N_FLOWS_BULK * sizeof(*flow_id),
+		RTE_CACHE_LINE_SIZE);
+	if (flow_id == NULL) {
+		rte_free(port_id);
+		rte_free(key);
+		printf("Memory allocation failed\n");
+		return;
+	}
+
+	/* Flow add */
+	for (id = 0; id < params->n_flows; id++) {
+		uint32_t pos = id & (N_FLOWS_BULK - 1);
 		uint32_t *x;
 
 		key[pos].type = FLOW_KEY_IPV6_5TUPLE;
 		x = (uint32_t *) key[pos].key.ipv6_5tuple.ip_dst;
-		*x = rte_bswap32(flow_id);
+		*x = rte_bswap32(id);
 		key[pos].key.ipv6_5tuple.proto = 6;
 
-		port_id[pos] = flow_id % params->n_ports;
+		port_id[pos] = id % params->n_ports;
+		flow_id[pos] = id;
 
 		if ((pos == N_FLOWS_BULK - 1) ||
-			(flow_id == params->n_flows - 1)) {
+			(id == params->n_flows - 1)) {
 			int status;
 
 			status = app_pipeline_fc_add_bulk(app,
 				params->pipeline_id,
 				key,
 				port_id,
+				flow_id,
 				pos + 1);
 
 			if (status != 0) {
@@ -1515,6 +1667,8 @@ cmd_fc_add_ipv6_5tuple_all_parsed(
 		}
 	}
 
+	/* Memory free */
+	rte_free(flow_id);
 	rte_free(port_id);
 	rte_free(key);
 }
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
index 7529314..9c77500 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
@@ -77,13 +77,15 @@ int
 app_pipeline_fc_add(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
-	uint32_t port_id);
+	uint32_t port_id,
+	uint32_t flow_id);
 
 int
 app_pipeline_fc_add_bulk(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
 	uint32_t *port_id,
+	uint32_t *flow_id,
 	uint32_t n_keys);
 
 int
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
index 06a648d..0b87a10 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
@@ -39,6 +39,7 @@
 #include <rte_byteorder.h>
 
 #include "pipeline_flow_classification_be.h"
+#include "pipeline_actions_common.h"
 #include "hash_func.h"
 
 struct pipeline_flow_classification {
@@ -46,9 +47,13 @@ struct pipeline_flow_classification {
 	pipeline_msg_req_handler custom_handlers[PIPELINE_FC_MSG_REQS];
 
 	uint32_t n_flows;
-	uint32_t key_offset;
 	uint32_t key_size;
+	uint32_t flow_id;
+
+	uint32_t key_offset;
 	uint32_t hash_offset;
+	uint32_t flow_id_offset;
+
 } __rte_cache_aligned;
 
 static void *
@@ -104,6 +109,9 @@ static pipeline_msg_req_handler custom_handlers[] = {
  */
 struct flow_table_entry {
 	struct rte_pipeline_table_entry head;
+
+	uint32_t flow_id;
+	uint32_t pad;
 };
 
 rte_table_hash_op_hash hash_func[] = {
@@ -117,6 +125,86 @@ rte_table_hash_op_hash hash_func[] = {
 	hash_default_key64
 };
 
+/*
+ * Flow table AH - Write flow_id to packet meta-data
+ */
+static inline void
+pkt_work_flow_id(
+	struct rte_mbuf *pkt,
+	struct rte_pipeline_table_entry *table_entry,
+	void *arg)
+{
+	struct pipeline_flow_classification *p_fc = arg;
+	uint32_t *flow_id_ptr =
+		RTE_MBUF_METADATA_UINT32_PTR(pkt, p_fc->flow_id_offset);
+	struct flow_table_entry *entry =
+		(struct flow_table_entry *) table_entry;
+
+	/* Read */
+	uint32_t flow_id = entry->flow_id;
+
+	/* Compute */
+
+	/* Write */
+	*flow_id_ptr = flow_id;
+}
+
+static inline void
+pkt4_work_flow_id(
+	struct rte_mbuf **pkts,
+	struct rte_pipeline_table_entry **table_entries,
+	void *arg)
+{
+	struct pipeline_flow_classification *p_fc = arg;
+
+	uint32_t *flow_id_ptr0 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p_fc->flow_id_offset);
+	uint32_t *flow_id_ptr1 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p_fc->flow_id_offset);
+	uint32_t *flow_id_ptr2 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p_fc->flow_id_offset);
+	uint32_t *flow_id_ptr3 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p_fc->flow_id_offset);
+
+	struct flow_table_entry *entry0 =
+		(struct flow_table_entry *) table_entries[0];
+	struct flow_table_entry *entry1 =
+		(struct flow_table_entry *) table_entries[1];
+	struct flow_table_entry *entry2 =
+		(struct flow_table_entry *) table_entries[2];
+	struct flow_table_entry *entry3 =
+		(struct flow_table_entry *) table_entries[3];
+
+	/* Read */
+	uint32_t flow_id0 = entry0->flow_id;
+	uint32_t flow_id1 = entry1->flow_id;
+	uint32_t flow_id2 = entry2->flow_id;
+	uint32_t flow_id3 = entry3->flow_id;
+
+	/* Compute */
+
+	/* Write */
+	*flow_id_ptr0 = flow_id0;
+	*flow_id_ptr1 = flow_id1;
+	*flow_id_ptr2 = flow_id2;
+	*flow_id_ptr3 = flow_id3;
+}
+
+PIPELINE_TABLE_AH_HIT(fc_table_ah_hit,
+		pkt_work_flow_id, pkt4_work_flow_id);
+
+static rte_pipeline_table_action_handler_hit
+get_fc_table_ah_hit(struct pipeline_flow_classification *p)
+{
+	if (p->flow_id)
+		return fc_table_ah_hit;
+
+	return NULL;
+}
+
+/*
+ * Argument parsing
+ */
 static int
 pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 	struct pipeline_params *params)
@@ -125,9 +213,12 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 	uint32_t key_offset_present = 0;
 	uint32_t key_size_present = 0;
 	uint32_t hash_offset_present = 0;
-
+	uint32_t flow_id_offset_present = 0;
 	uint32_t i;
 
+	/* default values */
+	p->flow_id = 0;
+
 	for (i = 0; i < params->n_args; i++) {
 		char *arg_name = params->args_name[i];
 		char *arg_value = params->args_value[i];
@@ -182,6 +273,18 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 			continue;
 		}
 
+		/* flow_id_offset */
+		if (strcmp(arg_name, "flowid_offset") == 0) {
+			if (flow_id_offset_present)
+				return -1;
+			flow_id_offset_present = 1;
+
+			p->flow_id = 1;
+			p->flow_id_offset = atoi(arg_value);
+
+			continue;
+		}
+
 		/* Unknown argument */
 		return -1;
 	}
@@ -325,9 +428,9 @@ static void *pipeline_fc_init(struct pipeline_params *params,
 		struct rte_pipeline_table_params table_params = {
 			.ops = NULL, /* set below */
 			.arg_create = NULL, /* set below */
-			.f_action_hit = NULL,
+			.f_action_hit = get_fc_table_ah_hit(p_fc),
 			.f_action_miss = NULL,
-			.arg_ah = NULL,
+			.arg_ah = p_fc,
 			.action_data_size = sizeof(struct flow_table_entry) -
 				sizeof(struct rte_pipeline_table_entry),
 		};
@@ -485,6 +588,7 @@ pipeline_fc_msg_req_add_handler(struct pipeline *p, void *msg)
 			.action = RTE_PIPELINE_ACTION_PORT,
 			{.port_id = p->port_out_id[req->port_id]},
 		},
+		.flow_id = req->flow_id,
 	};
 
 	rsp->status = rte_pipeline_table_entry_add(p->p,
@@ -513,6 +617,7 @@ pipeline_fc_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
 				.action = RTE_PIPELINE_ACTION_PORT,
 				{.port_id = p->port_out_id[flow_req->port_id]},
 			},
+			.flow_id = flow_req->flow_id,
 		};
 
 		int status = rte_pipeline_table_entry_add(p->p,
@@ -558,6 +663,8 @@ pipeline_fc_msg_req_add_default_handler(struct pipeline *p, void *msg)
 			.action = RTE_PIPELINE_ACTION_PORT,
 			{.port_id = p->port_out_id[req->port_id]},
 		},
+
+		.flow_id = 0,
 	};
 
 	rsp->status = rte_pipeline_table_default_entry_add(p->p,
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
index 46403d5..d8129b2 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
@@ -59,6 +59,7 @@ struct pipeline_fc_add_msg_req {
 	uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
 
 	uint32_t port_id;
+	uint32_t flow_id;
 };
 
 struct pipeline_fc_add_msg_rsp {
@@ -73,6 +74,7 @@ struct pipeline_fc_add_msg_rsp {
 struct pipeline_fc_add_bulk_flow_req {
 	uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
 	uint32_t port_id;
+	uint32_t flow_id;
 };
 
 struct pipeline_fc_add_bulk_flow_rsp {
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v2] ip_pipeline: add flow id parameter to flow classification
  2015-10-05 11:13 [PATCH v2] ip_pipeline: add flow id parameter to flow classification Jasvinder Singh
@ 2015-10-05 13:10 ` Dumitrescu, Cristian
  2015-10-12 15:41 ` [PATCH v3] " Jasvinder Singh
  2015-11-30 14:08 ` [PATCH v4] " Jasvinder Singh
  2 siblings, 0 replies; 7+ messages in thread
From: Dumitrescu, Cristian @ 2015-10-05 13:10 UTC (permalink / raw)
  To: Singh, Jasvinder, dev



> -----Original Message-----
> From: Singh, Jasvinder
> Sent: Monday, October 5, 2015 12:14 PM
> To: dev@dpdk.org
> Cc: Dumitrescu, Cristian
> Subject: [PATCH v2] ip_pipeline: add flow id parameter to flow classification
> 
> This patch adds flow id field to the flow
> classification table entries and adds table action
> handlers to read flow id from table entry and
> write it into the packet meta-data. The flow_id
> (32-bit) parameter is also added to CLI commands
> flow add, flow delete, etc.
> 
> *v2
> fixed bug: flow table entry size power of 2
> 
> Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
> ---

Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v3] ip_pipeline: add flow id parameter to flow classification
  2015-10-05 11:13 [PATCH v2] ip_pipeline: add flow id parameter to flow classification Jasvinder Singh
  2015-10-05 13:10 ` Dumitrescu, Cristian
@ 2015-10-12 15:41 ` Jasvinder Singh
  2015-10-12 15:46   ` Dumitrescu, Cristian
  2015-11-30 14:08 ` [PATCH v4] " Jasvinder Singh
  2 siblings, 1 reply; 7+ messages in thread
From: Jasvinder Singh @ 2015-10-12 15:41 UTC (permalink / raw)
  To: dev

This patch adds flow id field to the flow
classification table entries and adds table action
handlers to read flow id from table entry and
write it into the packet meta-data. The flow_id
(32-bit) parameter is also added to CLI commands
flow add, flow delete, etc.

*v2
fixed bug: flow table entry size power of 2

*v3
fixed bug: changed LRU hash table operation to
extendible bucket hash table operation

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
---
 .../pipeline/pipeline_flow_classification.c        | 206 ++++++++++++++++++---
 .../pipeline/pipeline_flow_classification.h        |   4 +-
 .../pipeline/pipeline_flow_classification_be.c     | 117 +++++++++++-
 .../pipeline/pipeline_flow_classification_be.h     |   2 +
 4 files changed, 297 insertions(+), 32 deletions(-)

diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
index 4b82180..04b6915 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
@@ -152,6 +152,7 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
 struct app_pipeline_fc_flow {
 	struct pipeline_fc_key key;
 	uint32_t port_id;
+	uint32_t flow_id;
 	uint32_t signature;
 	void *entry_ptr;
 
@@ -280,7 +281,8 @@ int
 app_pipeline_fc_add(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
-	uint32_t port_id)
+	uint32_t port_id,
+	uint32_t flow_id)
 {
 	struct app_pipeline_fc *p;
 	struct app_pipeline_fc_flow *flow;
@@ -325,6 +327,7 @@ app_pipeline_fc_add(struct app_params *app,
 	req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD;
 	app_pipeline_fc_key_convert(key, req->key, &signature);
 	req->port_id = port_id;
+	req->flow_id = flow_id;
 
 	/* Send request and wait for response */
 	rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
@@ -348,6 +351,7 @@ app_pipeline_fc_add(struct app_params *app,
 	memset(&flow->key, 0, sizeof(flow->key));
 	memcpy(&flow->key, key, sizeof(flow->key));
 	flow->port_id = port_id;
+	flow->flow_id = flow_id;
 	flow->signature = signature;
 	flow->entry_ptr = rsp->entry_ptr;
 
@@ -370,6 +374,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
 	uint32_t *port_id,
+	uint32_t *flow_id,
 	uint32_t n_keys)
 {
 	struct app_pipeline_fc *p;
@@ -389,6 +394,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 	if ((app == NULL) ||
 		(key == NULL) ||
 		(port_id == NULL) ||
+		(flow_id == NULL) ||
 		(n_keys == 0))
 		return -1;
 
@@ -496,6 +502,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 			flow_req[i].key,
 			&signature[i]);
 		flow_req[i].port_id = port_id[i];
+		flow_req[i].flow_id = flow_id[i];
 	}
 
 	req->type = PIPELINE_MSG_REQ_CUSTOM;
@@ -535,6 +542,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 	for (i = 0; i < rsp->n_keys; i++) {
 		memcpy(&flow[i]->key, &key[i], sizeof(flow[i]->key));
 		flow[i]->port_id = port_id[i];
+		flow[i]->flow_id = flow_id[i];
 		flow[i]->signature = signature[i];
 		flow[i]->entry_ptr = flow_rsp[i].entry_ptr;
 
@@ -731,13 +739,15 @@ print_fc_qinq_flow(struct app_pipeline_fc_flow *flow)
 {
 	printf("(SVLAN = %" PRIu32 ", "
 		"CVLAN = %" PRIu32 ") => "
-		"Port = %" PRIu32 " "
+		"Port = %" PRIu32 ", "
+		"Flow ID = %" PRIu32 ", "
 		"(signature = 0x%08" PRIx32 ", "
 		"entry_ptr = %p)\n",
 
 		flow->key.key.qinq.svlan,
 		flow->key.key.qinq.cvlan,
 		flow->port_id,
+		flow->flow_id,
 		flow->signature,
 		flow->entry_ptr);
 }
@@ -750,7 +760,8 @@ print_fc_ipv4_5tuple_flow(struct app_pipeline_fc_flow *flow)
 		   "SP = %" PRIu32 ", "
 		   "DP = %" PRIu32 ", "
 		   "Proto = %" PRIu32 ") => "
-		   "Port = %" PRIu32 " "
+		   "Port = %" PRIu32 ", "
+		   "Flow ID = %" PRIu32 " "
 		   "(signature = 0x%08" PRIx32 ", "
 		   "entry_ptr = %p)\n",
 
@@ -770,6 +781,7 @@ print_fc_ipv4_5tuple_flow(struct app_pipeline_fc_flow *flow)
 		   flow->key.key.ipv4_5tuple.proto,
 
 		   flow->port_id,
+		   flow->flow_id,
 		   flow->signature,
 		   flow->entry_ptr);
 }
@@ -787,7 +799,8 @@ print_fc_ipv6_5tuple_flow(struct app_pipeline_fc_flow *flow) {
 		"SP = %" PRIu32 ", "
 		"DP = %" PRIu32 " "
 		"Proto = %" PRIu32 " "
-		"=> Port = %" PRIu32 " "
+		"=> Port = %" PRIu32 ", "
+		"Flow ID = %" PRIu32 " "
 		"(signature = 0x%08" PRIx32 ", "
 		"entry_ptr = %p)\n",
 
@@ -831,6 +844,7 @@ print_fc_ipv6_5tuple_flow(struct app_pipeline_fc_flow *flow) {
 		flow->key.key.ipv6_5tuple.proto,
 
 		flow->port_id,
+		flow->flow_id,
 		flow->signature,
 		flow->entry_ptr);
 }
@@ -895,7 +909,10 @@ struct cmd_fc_add_qinq_result {
 	cmdline_fixed_string_t qinq_string;
 	uint16_t svlan;
 	uint16_t cvlan;
+	cmdline_fixed_string_t port_string;
 	uint32_t port;
+	cmdline_fixed_string_t flowid_string;
+	uint32_t flow_id;
 };
 
 static void
@@ -917,7 +934,8 @@ cmd_fc_add_qinq_parsed(
 	status = app_pipeline_fc_add(app,
 		params->pipeline_id,
 		&key,
-		params->port);
+		params->port,
+		params->flow_id);
 	if (status != 0)
 		printf("Command failed\n");
 }
@@ -947,9 +965,20 @@ cmdline_parse_token_num_t cmd_fc_add_qinq_svlan =
 cmdline_parse_token_num_t cmd_fc_add_qinq_cvlan =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, cvlan, UINT16);
 
+cmdline_parse_token_string_t cmd_fc_add_qinq_port_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, port_string,
+		"port");
+
 cmdline_parse_token_num_t cmd_fc_add_qinq_port =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, port, UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_qinq_flowid_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, flowid_string,
+		"flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_flow_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, flow_id, UINT32);
+
 cmdline_parse_inst_t cmd_fc_add_qinq = {
 	.f = cmd_fc_add_qinq_parsed,
 	.data = NULL,
@@ -962,7 +991,10 @@ cmdline_parse_inst_t cmd_fc_add_qinq = {
 		(void *) &cmd_fc_add_qinq_qinq_string,
 		(void *) &cmd_fc_add_qinq_svlan,
 		(void *) &cmd_fc_add_qinq_cvlan,
+		(void *) &cmd_fc_add_qinq_port_string,
 		(void *) &cmd_fc_add_qinq_port,
+		(void *) &cmd_fc_add_qinq_flowid_string,
+		(void *) &cmd_fc_add_qinq_flow_id,
 		NULL,
 	},
 };
@@ -996,8 +1028,21 @@ cmd_fc_add_qinq_all_parsed(
 	struct app_params *app = data;
 	struct pipeline_fc_key *key;
 	uint32_t *port_id;
-	uint32_t flow_id;
+	uint32_t *flow_id;
+	uint32_t id;
 
+	/* Check input arguments */
+	if (params->n_flows == 0) {
+		printf("Invalid number of flows\n");
+		return;
+	}
+
+	if (params->n_ports == 0) {
+		printf("Invalid number of output ports\n");
+		return;
+	}
+
+	/* Memory allocation */
 	key = rte_zmalloc(NULL,
 		N_FLOWS_BULK * sizeof(*key),
 		RTE_CACHE_LINE_SIZE);
@@ -1015,23 +1060,36 @@ cmd_fc_add_qinq_all_parsed(
 		return;
 	}
 
-	for (flow_id = 0; flow_id < params->n_flows; flow_id++) {
-		uint32_t pos = flow_id & (N_FLOWS_BULK - 1);
+	flow_id = rte_malloc(NULL,
+		N_FLOWS_BULK * sizeof(*flow_id),
+		RTE_CACHE_LINE_SIZE);
+	if (flow_id == NULL) {
+		rte_free(port_id);
+		rte_free(key);
+		printf("Memory allocation failed\n");
+		return;
+	}
+
+	/* Flow add */
+	for (id = 0; id < params->n_flows; id++) {
+		uint32_t pos = id & (N_FLOWS_BULK - 1);
 
 		key[pos].type = FLOW_KEY_QINQ;
-		key[pos].key.qinq.svlan = flow_id >> 12;
-		key[pos].key.qinq.cvlan = flow_id & 0xFFF;
+		key[pos].key.qinq.svlan = id >> 12;
+		key[pos].key.qinq.cvlan = id & 0xFFF;
 
-		port_id[pos] = flow_id % params->n_ports;
+		port_id[pos] = id % params->n_ports;
+		flow_id[pos] = id;
 
 		if ((pos == N_FLOWS_BULK - 1) ||
-			(flow_id == params->n_flows - 1)) {
+			(id == params->n_flows - 1)) {
 			int status;
 
 			status = app_pipeline_fc_add_bulk(app,
 				params->pipeline_id,
 				key,
 				port_id,
+				flow_id,
 				pos + 1);
 
 			if (status != 0) {
@@ -1042,6 +1100,8 @@ cmd_fc_add_qinq_all_parsed(
 		}
 	}
 
+	/* Memory free */
+	rte_free(flow_id);
 	rte_free(port_id);
 	rte_free(key);
 }
@@ -1110,7 +1170,10 @@ struct cmd_fc_add_ipv4_5tuple_result {
 	uint16_t port_src;
 	uint16_t port_dst;
 	uint32_t proto;
+	cmdline_fixed_string_t port_string;
 	uint32_t port;
+	cmdline_fixed_string_t flowid_string;
+	uint32_t flow_id;
 };
 
 static void
@@ -1137,7 +1200,8 @@ cmd_fc_add_ipv4_5tuple_parsed(
 	status = app_pipeline_fc_add(app,
 		params->pipeline_id,
 		&key,
-		params->port);
+		params->port,
+		params->flow_id);
 	if (status != 0)
 		printf("Command failed\n");
 }
@@ -1180,10 +1244,22 @@ cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_proto =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, proto,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_port_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_string,
+		"port");
+
 cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_flowid_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
+		flowid_string, "flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_flow_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, flow_id,
+		UINT32);
+
 cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple = {
 	.f = cmd_fc_add_ipv4_5tuple_parsed,
 	.data = NULL,
@@ -1199,7 +1275,10 @@ cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple = {
 		(void *) &cmd_fc_add_ipv4_5tuple_port_src,
 		(void *) &cmd_fc_add_ipv4_5tuple_port_dst,
 		(void *) &cmd_fc_add_ipv4_5tuple_proto,
+		(void *) &cmd_fc_add_ipv4_5tuple_port_string,
 		(void *) &cmd_fc_add_ipv4_5tuple_port,
+		(void *) &cmd_fc_add_ipv4_5tuple_flowid_string,
+		(void *) &cmd_fc_add_ipv4_5tuple_flow_id,
 		NULL,
 	},
 };
@@ -1229,8 +1308,21 @@ cmd_fc_add_ipv4_5tuple_all_parsed(
 	struct app_params *app = data;
 	struct pipeline_fc_key *key;
 	uint32_t *port_id;
-	uint32_t flow_id;
+	uint32_t *flow_id;
+	uint32_t id;
+
+	/* Check input parameters */
+	if (params->n_flows == 0) {
+		printf("Invalid number of flows\n");
+		return;
+	}
 
+	if (params->n_ports == 0) {
+		printf("Invalid number of ports\n");
+		return;
+	}
+
+	/* Memory allocation */
 	key = rte_zmalloc(NULL,
 		N_FLOWS_BULK * sizeof(*key),
 		RTE_CACHE_LINE_SIZE);
@@ -1248,26 +1340,39 @@ cmd_fc_add_ipv4_5tuple_all_parsed(
 		return;
 	}
 
-	for (flow_id = 0; flow_id < params->n_flows; flow_id++) {
-		uint32_t pos = flow_id & (N_FLOWS_BULK - 1);
+	flow_id = rte_malloc(NULL,
+		N_FLOWS_BULK * sizeof(*flow_id),
+		RTE_CACHE_LINE_SIZE);
+	if (flow_id == NULL) {
+		rte_free(port_id);
+		rte_free(key);
+		printf("Memory allocation failed\n");
+		return;
+	}
+
+	/* Flow add */
+	for (id = 0; id < params->n_flows; id++) {
+		uint32_t pos = id & (N_FLOWS_BULK - 1);
 
 		key[pos].type = FLOW_KEY_IPV4_5TUPLE;
 		key[pos].key.ipv4_5tuple.ip_src = 0;
-		key[pos].key.ipv4_5tuple.ip_dst = flow_id;
+		key[pos].key.ipv4_5tuple.ip_dst = id;
 		key[pos].key.ipv4_5tuple.port_src = 0;
 		key[pos].key.ipv4_5tuple.port_dst = 0;
 		key[pos].key.ipv4_5tuple.proto = 6;
 
-		port_id[pos] = flow_id % params->n_ports;
+		port_id[pos] = id % params->n_ports;
+		flow_id[pos] = id;
 
 		if ((pos == N_FLOWS_BULK - 1) ||
-			(flow_id == params->n_flows - 1)) {
+			(id == params->n_flows - 1)) {
 			int status;
 
 			status = app_pipeline_fc_add_bulk(app,
 				params->pipeline_id,
 				key,
 				port_id,
+				flow_id,
 				pos + 1);
 
 			if (status != 0) {
@@ -1278,6 +1383,8 @@ cmd_fc_add_ipv4_5tuple_all_parsed(
 		}
 	}
 
+	/* Memory free */
+	rte_free(flow_id);
 	rte_free(port_id);
 	rte_free(key);
 }
@@ -1346,7 +1453,10 @@ struct cmd_fc_add_ipv6_5tuple_result {
 	uint16_t port_src;
 	uint16_t port_dst;
 	uint32_t proto;
+	cmdline_fixed_string_t port_string;
 	uint32_t port;
+	cmdline_fixed_string_t flowid_string;
+	uint32_t flow_id;
 };
 
 static void
@@ -1375,7 +1485,8 @@ cmd_fc_add_ipv6_5tuple_parsed(
 	status = app_pipeline_fc_add(app,
 		params->pipeline_id,
 		&key,
-		params->port);
+		params->port,
+		params->flow_id);
 	if (status != 0)
 		printf("Command failed\n");
 }
@@ -1418,10 +1529,22 @@ cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_proto =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, proto,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_port_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+		port_string, "port");
+
 cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_flowid_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+		flowid_string, "flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_flow_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, flow_id,
+		UINT32);
+
 cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple = {
 	.f = cmd_fc_add_ipv6_5tuple_parsed,
 	.data = NULL,
@@ -1437,7 +1560,10 @@ cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple = {
 		(void *) &cmd_fc_add_ipv6_5tuple_port_src,
 		(void *) &cmd_fc_add_ipv6_5tuple_port_dst,
 		(void *) &cmd_fc_add_ipv6_5tuple_proto,
+		(void *) &cmd_fc_add_ipv6_5tuple_port_string,
 		(void *) &cmd_fc_add_ipv6_5tuple_port,
+		(void *) &cmd_fc_add_ipv6_5tuple_flowid_string,
+		(void *) &cmd_fc_add_ipv6_5tuple_flow_id,
 		NULL,
 	},
 };
@@ -1467,8 +1593,21 @@ cmd_fc_add_ipv6_5tuple_all_parsed(
 	struct app_params *app = data;
 	struct pipeline_fc_key *key;
 	uint32_t *port_id;
-	uint32_t flow_id;
+	uint32_t *flow_id;
+	uint32_t id;
+
+	/* Check input parameters */
+	if (params->n_flows == 0) {
+		printf("Invalid number of flows\n");
+		return;
+	}
+
+	if (params->n_ports == 0) {
+		printf("Invalid number of ports\n");
+		return;
+	}
 
+	/* Memory allocation */
 	key = rte_zmalloc(NULL,
 		N_FLOWS_BULK * sizeof(*key),
 		RTE_CACHE_LINE_SIZE);
@@ -1486,25 +1625,38 @@ cmd_fc_add_ipv6_5tuple_all_parsed(
 		return;
 	}
 
-	for (flow_id = 0; flow_id < params->n_flows; flow_id++) {
-		uint32_t pos = flow_id & (N_FLOWS_BULK - 1);
+	flow_id = rte_malloc(NULL,
+		N_FLOWS_BULK * sizeof(*flow_id),
+		RTE_CACHE_LINE_SIZE);
+	if (flow_id == NULL) {
+		rte_free(port_id);
+		rte_free(key);
+		printf("Memory allocation failed\n");
+		return;
+	}
+
+	/* Flow add */
+	for (id = 0; id < params->n_flows; id++) {
+		uint32_t pos = id & (N_FLOWS_BULK - 1);
 		uint32_t *x;
 
 		key[pos].type = FLOW_KEY_IPV6_5TUPLE;
 		x = (uint32_t *) key[pos].key.ipv6_5tuple.ip_dst;
-		*x = rte_bswap32(flow_id);
+		*x = rte_bswap32(id);
 		key[pos].key.ipv6_5tuple.proto = 6;
 
-		port_id[pos] = flow_id % params->n_ports;
+		port_id[pos] = id % params->n_ports;
+		flow_id[pos] = id;
 
 		if ((pos == N_FLOWS_BULK - 1) ||
-			(flow_id == params->n_flows - 1)) {
+			(id == params->n_flows - 1)) {
 			int status;
 
 			status = app_pipeline_fc_add_bulk(app,
 				params->pipeline_id,
 				key,
 				port_id,
+				flow_id,
 				pos + 1);
 
 			if (status != 0) {
@@ -1515,6 +1667,8 @@ cmd_fc_add_ipv6_5tuple_all_parsed(
 		}
 	}
 
+	/* Memory free */
+	rte_free(flow_id);
 	rte_free(port_id);
 	rte_free(key);
 }
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
index 7529314..9c77500 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
@@ -77,13 +77,15 @@ int
 app_pipeline_fc_add(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
-	uint32_t port_id);
+	uint32_t port_id,
+	uint32_t flow_id);
 
 int
 app_pipeline_fc_add_bulk(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
 	uint32_t *port_id,
+	uint32_t *flow_id,
 	uint32_t n_keys);
 
 int
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
index 06a648d..a6cd6f3 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
@@ -39,6 +39,7 @@
 #include <rte_byteorder.h>
 
 #include "pipeline_flow_classification_be.h"
+#include "pipeline_actions_common.h"
 #include "hash_func.h"
 
 struct pipeline_flow_classification {
@@ -46,9 +47,13 @@ struct pipeline_flow_classification {
 	pipeline_msg_req_handler custom_handlers[PIPELINE_FC_MSG_REQS];
 
 	uint32_t n_flows;
-	uint32_t key_offset;
 	uint32_t key_size;
+	uint32_t flow_id;
+
+	uint32_t key_offset;
 	uint32_t hash_offset;
+	uint32_t flow_id_offset;
+
 } __rte_cache_aligned;
 
 static void *
@@ -104,6 +109,9 @@ static pipeline_msg_req_handler custom_handlers[] = {
  */
 struct flow_table_entry {
 	struct rte_pipeline_table_entry head;
+
+	uint32_t flow_id;
+	uint32_t pad;
 };
 
 rte_table_hash_op_hash hash_func[] = {
@@ -117,6 +125,86 @@ rte_table_hash_op_hash hash_func[] = {
 	hash_default_key64
 };
 
+/*
+ * Flow table AH - Write flow_id to packet meta-data
+ */
+static inline void
+pkt_work_flow_id(
+	struct rte_mbuf *pkt,
+	struct rte_pipeline_table_entry *table_entry,
+	void *arg)
+{
+	struct pipeline_flow_classification *p_fc = arg;
+	uint32_t *flow_id_ptr =
+		RTE_MBUF_METADATA_UINT32_PTR(pkt, p_fc->flow_id_offset);
+	struct flow_table_entry *entry =
+		(struct flow_table_entry *) table_entry;
+
+	/* Read */
+	uint32_t flow_id = entry->flow_id;
+
+	/* Compute */
+
+	/* Write */
+	*flow_id_ptr = flow_id;
+}
+
+static inline void
+pkt4_work_flow_id(
+	struct rte_mbuf **pkts,
+	struct rte_pipeline_table_entry **table_entries,
+	void *arg)
+{
+	struct pipeline_flow_classification *p_fc = arg;
+
+	uint32_t *flow_id_ptr0 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p_fc->flow_id_offset);
+	uint32_t *flow_id_ptr1 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p_fc->flow_id_offset);
+	uint32_t *flow_id_ptr2 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p_fc->flow_id_offset);
+	uint32_t *flow_id_ptr3 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p_fc->flow_id_offset);
+
+	struct flow_table_entry *entry0 =
+		(struct flow_table_entry *) table_entries[0];
+	struct flow_table_entry *entry1 =
+		(struct flow_table_entry *) table_entries[1];
+	struct flow_table_entry *entry2 =
+		(struct flow_table_entry *) table_entries[2];
+	struct flow_table_entry *entry3 =
+		(struct flow_table_entry *) table_entries[3];
+
+	/* Read */
+	uint32_t flow_id0 = entry0->flow_id;
+	uint32_t flow_id1 = entry1->flow_id;
+	uint32_t flow_id2 = entry2->flow_id;
+	uint32_t flow_id3 = entry3->flow_id;
+
+	/* Compute */
+
+	/* Write */
+	*flow_id_ptr0 = flow_id0;
+	*flow_id_ptr1 = flow_id1;
+	*flow_id_ptr2 = flow_id2;
+	*flow_id_ptr3 = flow_id3;
+}
+
+PIPELINE_TABLE_AH_HIT(fc_table_ah_hit,
+		pkt_work_flow_id, pkt4_work_flow_id);
+
+static rte_pipeline_table_action_handler_hit
+get_fc_table_ah_hit(struct pipeline_flow_classification *p)
+{
+	if (p->flow_id)
+		return fc_table_ah_hit;
+
+	return NULL;
+}
+
+/*
+ * Argument parsing
+ */
 static int
 pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 	struct pipeline_params *params)
@@ -125,9 +213,12 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 	uint32_t key_offset_present = 0;
 	uint32_t key_size_present = 0;
 	uint32_t hash_offset_present = 0;
-
+	uint32_t flow_id_offset_present = 0;
 	uint32_t i;
 
+	/* default values */
+	p->flow_id = 0;
+
 	for (i = 0; i < params->n_args; i++) {
 		char *arg_name = params->args_name[i];
 		char *arg_value = params->args_value[i];
@@ -182,6 +273,18 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 			continue;
 		}
 
+		/* flow_id_offset */
+		if (strcmp(arg_name, "flowid_offset") == 0) {
+			if (flow_id_offset_present)
+				return -1;
+			flow_id_offset_present = 1;
+
+			p->flow_id = 1;
+			p->flow_id_offset = atoi(arg_value);
+
+			continue;
+		}
+
 		/* Unknown argument */
 		return -1;
 	}
@@ -325,9 +428,9 @@ static void *pipeline_fc_init(struct pipeline_params *params,
 		struct rte_pipeline_table_params table_params = {
 			.ops = NULL, /* set below */
 			.arg_create = NULL, /* set below */
-			.f_action_hit = NULL,
+			.f_action_hit = get_fc_table_ah_hit(p_fc),
 			.f_action_miss = NULL,
-			.arg_ah = NULL,
+			.arg_ah = p_fc,
 			.action_data_size = sizeof(struct flow_table_entry) -
 				sizeof(struct rte_pipeline_table_entry),
 		};
@@ -336,7 +439,7 @@ static void *pipeline_fc_init(struct pipeline_params *params,
 
 		switch (p_fc->key_size) {
 		case 8:
-			table_params.ops = &rte_table_hash_key8_lru_ops;
+			table_params.ops = &rte_table_hash_key8_ext_ops;
 			table_params.arg_create = &table_hash_key8_params;
 			break;
 
@@ -485,6 +588,7 @@ pipeline_fc_msg_req_add_handler(struct pipeline *p, void *msg)
 			.action = RTE_PIPELINE_ACTION_PORT,
 			{.port_id = p->port_out_id[req->port_id]},
 		},
+		.flow_id = req->flow_id,
 	};
 
 	rsp->status = rte_pipeline_table_entry_add(p->p,
@@ -513,6 +617,7 @@ pipeline_fc_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
 				.action = RTE_PIPELINE_ACTION_PORT,
 				{.port_id = p->port_out_id[flow_req->port_id]},
 			},
+			.flow_id = flow_req->flow_id,
 		};
 
 		int status = rte_pipeline_table_entry_add(p->p,
@@ -558,6 +663,8 @@ pipeline_fc_msg_req_add_default_handler(struct pipeline *p, void *msg)
 			.action = RTE_PIPELINE_ACTION_PORT,
 			{.port_id = p->port_out_id[req->port_id]},
 		},
+
+		.flow_id = 0,
 	};
 
 	rsp->status = rte_pipeline_table_default_entry_add(p->p,
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
index 46403d5..d8129b2 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
@@ -59,6 +59,7 @@ struct pipeline_fc_add_msg_req {
 	uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
 
 	uint32_t port_id;
+	uint32_t flow_id;
 };
 
 struct pipeline_fc_add_msg_rsp {
@@ -73,6 +74,7 @@ struct pipeline_fc_add_msg_rsp {
 struct pipeline_fc_add_bulk_flow_req {
 	uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
 	uint32_t port_id;
+	uint32_t flow_id;
 };
 
 struct pipeline_fc_add_bulk_flow_rsp {
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] ip_pipeline: add flow id parameter to flow classification
  2015-10-12 15:41 ` [PATCH v3] " Jasvinder Singh
@ 2015-10-12 15:46   ` Dumitrescu, Cristian
  2015-10-12 15:51     ` Singh, Jasvinder
  0 siblings, 1 reply; 7+ messages in thread
From: Dumitrescu, Cristian @ 2015-10-12 15:46 UTC (permalink / raw)
  To: Singh, Jasvinder, dev



> -----Original Message-----
> From: Singh, Jasvinder
> Sent: Monday, October 12, 2015 4:42 PM
> To: dev@dpdk.org
> Cc: Dumitrescu, Cristian
> Subject: [PATCH v3] ip_pipeline: add flow id parameter to flow classification
> 
> *v3
> fixed bug: changed LRU hash table operation to
> extendible bucket hash table operation
> 
> Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
> ---


Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>

Jasvinder, next time you send a new patch version with just a small modification (like this one), you can also include my Ack line from the previous version (just under the signoff line), as Thomas kindly suggested.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] ip_pipeline: add flow id parameter to flow classification
  2015-10-12 15:46   ` Dumitrescu, Cristian
@ 2015-10-12 15:51     ` Singh, Jasvinder
  0 siblings, 0 replies; 7+ messages in thread
From: Singh, Jasvinder @ 2015-10-12 15:51 UTC (permalink / raw)
  To: Dumitrescu, Cristian, dev



> -----Original Message-----
> From: Dumitrescu, Cristian
> Sent: Monday, October 12, 2015 4:47 PM
> To: Singh, Jasvinder; dev@dpdk.org
> Subject: RE: [PATCH v3] ip_pipeline: add flow id parameter to flow
> classification
> 
> 
> 
> > -----Original Message-----
> > From: Singh, Jasvinder
> > Sent: Monday, October 12, 2015 4:42 PM
> > To: dev@dpdk.org
> > Cc: Dumitrescu, Cristian
> > Subject: [PATCH v3] ip_pipeline: add flow id parameter to flow
> > classification
> >
> > *v3
> > fixed bug: changed LRU hash table operation to extendible bucket hash
> > table operation
> >
> > Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
> > ---
> 
> 
> Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
> 
> Jasvinder, next time we send new patch version with just a small
> modification (like this one), you can also include my Ack line from the
> previous version (just under the signoff line), as Thomas kindly suggested.

I will remember next time. Thanks.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v4] ip_pipeline: add flow id parameter to flow classification
  2015-10-05 11:13 [PATCH v2] ip_pipeline: add flow id parameter to flow classification Jasvinder Singh
  2015-10-05 13:10 ` Dumitrescu, Cristian
  2015-10-12 15:41 ` [PATCH v3] " Jasvinder Singh
@ 2015-11-30 14:08 ` Jasvinder Singh
  2015-12-07  0:50   ` Thomas Monjalon
  2 siblings, 1 reply; 7+ messages in thread
From: Jasvinder Singh @ 2015-11-30 14:08 UTC (permalink / raw)
  To: dev

This patch adds flow id field to the flow
classification table entries and adds table action
handlers to read flow id from table entry and
write it into the packet meta-data. The flow_id
(32-bit) parameter is also added to CLI commands
flow add, flow delete, etc.

*v2
fixed bug: flow table entry size power of 2

*v3
fixed bug: changed LRU hash table operation to
extendible bucket hash table operation

*v4
Coverity issue: 120147
Fixes: 7122d30131ad ("examples/ip_pipeline: rework flow classification
pipeline")

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
 .../pipeline/pipeline_flow_classification.c        | 206 ++++++++++++++++++---
 .../pipeline/pipeline_flow_classification.h        |   4 +-
 .../pipeline/pipeline_flow_classification_be.c     | 148 +++++++++++++--
 .../pipeline/pipeline_flow_classification_be.h     |   2 +
 4 files changed, 316 insertions(+), 44 deletions(-)

diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
index 4b82180..04b6915 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
@@ -152,6 +152,7 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
 struct app_pipeline_fc_flow {
 	struct pipeline_fc_key key;
 	uint32_t port_id;
+	uint32_t flow_id;
 	uint32_t signature;
 	void *entry_ptr;
 
@@ -280,7 +281,8 @@ int
 app_pipeline_fc_add(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
-	uint32_t port_id)
+	uint32_t port_id,
+	uint32_t flow_id)
 {
 	struct app_pipeline_fc *p;
 	struct app_pipeline_fc_flow *flow;
@@ -325,6 +327,7 @@ app_pipeline_fc_add(struct app_params *app,
 	req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD;
 	app_pipeline_fc_key_convert(key, req->key, &signature);
 	req->port_id = port_id;
+	req->flow_id = flow_id;
 
 	/* Send request and wait for response */
 	rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
@@ -348,6 +351,7 @@ app_pipeline_fc_add(struct app_params *app,
 	memset(&flow->key, 0, sizeof(flow->key));
 	memcpy(&flow->key, key, sizeof(flow->key));
 	flow->port_id = port_id;
+	flow->flow_id = flow_id;
 	flow->signature = signature;
 	flow->entry_ptr = rsp->entry_ptr;
 
@@ -370,6 +374,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
 	uint32_t *port_id,
+	uint32_t *flow_id,
 	uint32_t n_keys)
 {
 	struct app_pipeline_fc *p;
@@ -389,6 +394,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 	if ((app == NULL) ||
 		(key == NULL) ||
 		(port_id == NULL) ||
+		(flow_id == NULL) ||
 		(n_keys == 0))
 		return -1;
 
@@ -496,6 +502,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 			flow_req[i].key,
 			&signature[i]);
 		flow_req[i].port_id = port_id[i];
+		flow_req[i].flow_id = flow_id[i];
 	}
 
 	req->type = PIPELINE_MSG_REQ_CUSTOM;
@@ -535,6 +542,7 @@ app_pipeline_fc_add_bulk(struct app_params *app,
 	for (i = 0; i < rsp->n_keys; i++) {
 		memcpy(&flow[i]->key, &key[i], sizeof(flow[i]->key));
 		flow[i]->port_id = port_id[i];
+		flow[i]->flow_id = flow_id[i];
 		flow[i]->signature = signature[i];
 		flow[i]->entry_ptr = flow_rsp[i].entry_ptr;
 
@@ -731,13 +739,15 @@ print_fc_qinq_flow(struct app_pipeline_fc_flow *flow)
 {
 	printf("(SVLAN = %" PRIu32 ", "
 		"CVLAN = %" PRIu32 ") => "
-		"Port = %" PRIu32 " "
+		"Port = %" PRIu32 ", "
+		"Flow ID = %" PRIu32 ", "
 		"(signature = 0x%08" PRIx32 ", "
 		"entry_ptr = %p)\n",
 
 		flow->key.key.qinq.svlan,
 		flow->key.key.qinq.cvlan,
 		flow->port_id,
+		flow->flow_id,
 		flow->signature,
 		flow->entry_ptr);
 }
@@ -750,7 +760,8 @@ print_fc_ipv4_5tuple_flow(struct app_pipeline_fc_flow *flow)
 		   "SP = %" PRIu32 ", "
 		   "DP = %" PRIu32 ", "
 		   "Proto = %" PRIu32 ") => "
-		   "Port = %" PRIu32 " "
+		   "Port = %" PRIu32 ", "
+		   "Flow ID = %" PRIu32 " "
 		   "(signature = 0x%08" PRIx32 ", "
 		   "entry_ptr = %p)\n",
 
@@ -770,6 +781,7 @@ print_fc_ipv4_5tuple_flow(struct app_pipeline_fc_flow *flow)
 		   flow->key.key.ipv4_5tuple.proto,
 
 		   flow->port_id,
+		   flow->flow_id,
 		   flow->signature,
 		   flow->entry_ptr);
 }
@@ -787,7 +799,8 @@ print_fc_ipv6_5tuple_flow(struct app_pipeline_fc_flow *flow) {
 		"SP = %" PRIu32 ", "
 		"DP = %" PRIu32 " "
 		"Proto = %" PRIu32 " "
-		"=> Port = %" PRIu32 " "
+		"=> Port = %" PRIu32 ", "
+		"Flow ID = %" PRIu32 " "
 		"(signature = 0x%08" PRIx32 ", "
 		"entry_ptr = %p)\n",
 
@@ -831,6 +844,7 @@ print_fc_ipv6_5tuple_flow(struct app_pipeline_fc_flow *flow) {
 		flow->key.key.ipv6_5tuple.proto,
 
 		flow->port_id,
+		flow->flow_id,
 		flow->signature,
 		flow->entry_ptr);
 }
@@ -895,7 +909,10 @@ struct cmd_fc_add_qinq_result {
 	cmdline_fixed_string_t qinq_string;
 	uint16_t svlan;
 	uint16_t cvlan;
+	cmdline_fixed_string_t port_string;
 	uint32_t port;
+	cmdline_fixed_string_t flowid_string;
+	uint32_t flow_id;
 };
 
 static void
@@ -917,7 +934,8 @@ cmd_fc_add_qinq_parsed(
 	status = app_pipeline_fc_add(app,
 		params->pipeline_id,
 		&key,
-		params->port);
+		params->port,
+		params->flow_id);
 	if (status != 0)
 		printf("Command failed\n");
 }
@@ -947,9 +965,20 @@ cmdline_parse_token_num_t cmd_fc_add_qinq_svlan =
 cmdline_parse_token_num_t cmd_fc_add_qinq_cvlan =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, cvlan, UINT16);
 
+cmdline_parse_token_string_t cmd_fc_add_qinq_port_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, port_string,
+		"port");
+
 cmdline_parse_token_num_t cmd_fc_add_qinq_port =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, port, UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_qinq_flowid_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, flowid_string,
+		"flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_flow_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, flow_id, UINT32);
+
 cmdline_parse_inst_t cmd_fc_add_qinq = {
 	.f = cmd_fc_add_qinq_parsed,
 	.data = NULL,
@@ -962,7 +991,10 @@ cmdline_parse_inst_t cmd_fc_add_qinq = {
 		(void *) &cmd_fc_add_qinq_qinq_string,
 		(void *) &cmd_fc_add_qinq_svlan,
 		(void *) &cmd_fc_add_qinq_cvlan,
+		(void *) &cmd_fc_add_qinq_port_string,
 		(void *) &cmd_fc_add_qinq_port,
+		(void *) &cmd_fc_add_qinq_flowid_string,
+		(void *) &cmd_fc_add_qinq_flow_id,
 		NULL,
 	},
 };
@@ -996,8 +1028,21 @@ cmd_fc_add_qinq_all_parsed(
 	struct app_params *app = data;
 	struct pipeline_fc_key *key;
 	uint32_t *port_id;
-	uint32_t flow_id;
+	uint32_t *flow_id;
+	uint32_t id;
 
+	/* Check input arguments */
+	if (params->n_flows == 0) {
+		printf("Invalid number of flows\n");
+		return;
+	}
+
+	if (params->n_ports == 0) {
+		printf("Invalid number of output ports\n");
+		return;
+	}
+
+	/* Memory allocation */
 	key = rte_zmalloc(NULL,
 		N_FLOWS_BULK * sizeof(*key),
 		RTE_CACHE_LINE_SIZE);
@@ -1015,23 +1060,36 @@ cmd_fc_add_qinq_all_parsed(
 		return;
 	}
 
-	for (flow_id = 0; flow_id < params->n_flows; flow_id++) {
-		uint32_t pos = flow_id & (N_FLOWS_BULK - 1);
+	flow_id = rte_malloc(NULL,
+		N_FLOWS_BULK * sizeof(*flow_id),
+		RTE_CACHE_LINE_SIZE);
+	if (flow_id == NULL) {
+		rte_free(port_id);
+		rte_free(key);
+		printf("Memory allocation failed\n");
+		return;
+	}
+
+	/* Flow add */
+	for (id = 0; id < params->n_flows; id++) {
+		uint32_t pos = id & (N_FLOWS_BULK - 1);
 
 		key[pos].type = FLOW_KEY_QINQ;
-		key[pos].key.qinq.svlan = flow_id >> 12;
-		key[pos].key.qinq.cvlan = flow_id & 0xFFF;
+		key[pos].key.qinq.svlan = id >> 12;
+		key[pos].key.qinq.cvlan = id & 0xFFF;
 
-		port_id[pos] = flow_id % params->n_ports;
+		port_id[pos] = id % params->n_ports;
+		flow_id[pos] = id;
 
 		if ((pos == N_FLOWS_BULK - 1) ||
-			(flow_id == params->n_flows - 1)) {
+			(id == params->n_flows - 1)) {
 			int status;
 
 			status = app_pipeline_fc_add_bulk(app,
 				params->pipeline_id,
 				key,
 				port_id,
+				flow_id,
 				pos + 1);
 
 			if (status != 0) {
@@ -1042,6 +1100,8 @@ cmd_fc_add_qinq_all_parsed(
 		}
 	}
 
+	/* Memory free */
+	rte_free(flow_id);
 	rte_free(port_id);
 	rte_free(key);
 }
@@ -1110,7 +1170,10 @@ struct cmd_fc_add_ipv4_5tuple_result {
 	uint16_t port_src;
 	uint16_t port_dst;
 	uint32_t proto;
+	cmdline_fixed_string_t port_string;
 	uint32_t port;
+	cmdline_fixed_string_t flowid_string;
+	uint32_t flow_id;
 };
 
 static void
@@ -1137,7 +1200,8 @@ cmd_fc_add_ipv4_5tuple_parsed(
 	status = app_pipeline_fc_add(app,
 		params->pipeline_id,
 		&key,
-		params->port);
+		params->port,
+		params->flow_id);
 	if (status != 0)
 		printf("Command failed\n");
 }
@@ -1180,10 +1244,22 @@ cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_proto =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, proto,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_port_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_string,
+		"port");
+
 cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_flowid_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
+		flowid_string, "flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_flow_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, flow_id,
+		UINT32);
+
 cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple = {
 	.f = cmd_fc_add_ipv4_5tuple_parsed,
 	.data = NULL,
@@ -1199,7 +1275,10 @@ cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple = {
 		(void *) &cmd_fc_add_ipv4_5tuple_port_src,
 		(void *) &cmd_fc_add_ipv4_5tuple_port_dst,
 		(void *) &cmd_fc_add_ipv4_5tuple_proto,
+		(void *) &cmd_fc_add_ipv4_5tuple_port_string,
 		(void *) &cmd_fc_add_ipv4_5tuple_port,
+		(void *) &cmd_fc_add_ipv4_5tuple_flowid_string,
+		(void *) &cmd_fc_add_ipv4_5tuple_flow_id,
 		NULL,
 	},
 };
@@ -1229,8 +1308,21 @@ cmd_fc_add_ipv4_5tuple_all_parsed(
 	struct app_params *app = data;
 	struct pipeline_fc_key *key;
 	uint32_t *port_id;
-	uint32_t flow_id;
+	uint32_t *flow_id;
+	uint32_t id;
+
+	/* Check input parameters */
+	if (params->n_flows == 0) {
+		printf("Invalid number of flows\n");
+		return;
+	}
 
+	if (params->n_ports == 0) {
+		printf("Invalid number of ports\n");
+		return;
+	}
+
+	/* Memory allocation */
 	key = rte_zmalloc(NULL,
 		N_FLOWS_BULK * sizeof(*key),
 		RTE_CACHE_LINE_SIZE);
@@ -1248,26 +1340,39 @@ cmd_fc_add_ipv4_5tuple_all_parsed(
 		return;
 	}
 
-	for (flow_id = 0; flow_id < params->n_flows; flow_id++) {
-		uint32_t pos = flow_id & (N_FLOWS_BULK - 1);
+	flow_id = rte_malloc(NULL,
+		N_FLOWS_BULK * sizeof(*flow_id),
+		RTE_CACHE_LINE_SIZE);
+	if (flow_id == NULL) {
+		rte_free(port_id);
+		rte_free(key);
+		printf("Memory allocation failed\n");
+		return;
+	}
+
+	/* Flow add */
+	for (id = 0; id < params->n_flows; id++) {
+		uint32_t pos = id & (N_FLOWS_BULK - 1);
 
 		key[pos].type = FLOW_KEY_IPV4_5TUPLE;
 		key[pos].key.ipv4_5tuple.ip_src = 0;
-		key[pos].key.ipv4_5tuple.ip_dst = flow_id;
+		key[pos].key.ipv4_5tuple.ip_dst = id;
 		key[pos].key.ipv4_5tuple.port_src = 0;
 		key[pos].key.ipv4_5tuple.port_dst = 0;
 		key[pos].key.ipv4_5tuple.proto = 6;
 
-		port_id[pos] = flow_id % params->n_ports;
+		port_id[pos] = id % params->n_ports;
+		flow_id[pos] = id;
 
 		if ((pos == N_FLOWS_BULK - 1) ||
-			(flow_id == params->n_flows - 1)) {
+			(id == params->n_flows - 1)) {
 			int status;
 
 			status = app_pipeline_fc_add_bulk(app,
 				params->pipeline_id,
 				key,
 				port_id,
+				flow_id,
 				pos + 1);
 
 			if (status != 0) {
@@ -1278,6 +1383,8 @@ cmd_fc_add_ipv4_5tuple_all_parsed(
 		}
 	}
 
+	/* Memory free */
+	rte_free(flow_id);
 	rte_free(port_id);
 	rte_free(key);
 }
@@ -1346,7 +1453,10 @@ struct cmd_fc_add_ipv6_5tuple_result {
 	uint16_t port_src;
 	uint16_t port_dst;
 	uint32_t proto;
+	cmdline_fixed_string_t port_string;
 	uint32_t port;
+	cmdline_fixed_string_t flowid_string;
+	uint32_t flow_id;
 };
 
 static void
@@ -1375,7 +1485,8 @@ cmd_fc_add_ipv6_5tuple_parsed(
 	status = app_pipeline_fc_add(app,
 		params->pipeline_id,
 		&key,
-		params->port);
+		params->port,
+		params->flow_id);
 	if (status != 0)
 		printf("Command failed\n");
 }
@@ -1418,10 +1529,22 @@ cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_proto =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, proto,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_port_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+		port_string, "port");
+
 cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port =
 	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port,
 		UINT32);
 
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_flowid_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+		flowid_string, "flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_flow_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, flow_id,
+		UINT32);
+
 cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple = {
 	.f = cmd_fc_add_ipv6_5tuple_parsed,
 	.data = NULL,
@@ -1437,7 +1560,10 @@ cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple = {
 		(void *) &cmd_fc_add_ipv6_5tuple_port_src,
 		(void *) &cmd_fc_add_ipv6_5tuple_port_dst,
 		(void *) &cmd_fc_add_ipv6_5tuple_proto,
+		(void *) &cmd_fc_add_ipv6_5tuple_port_string,
 		(void *) &cmd_fc_add_ipv6_5tuple_port,
+		(void *) &cmd_fc_add_ipv6_5tuple_flowid_string,
+		(void *) &cmd_fc_add_ipv6_5tuple_flow_id,
 		NULL,
 	},
 };
@@ -1467,8 +1593,21 @@ cmd_fc_add_ipv6_5tuple_all_parsed(
 	struct app_params *app = data;
 	struct pipeline_fc_key *key;
 	uint32_t *port_id;
-	uint32_t flow_id;
+	uint32_t *flow_id;
+	uint32_t id;
+
+	/* Check input parameters */
+	if (params->n_flows == 0) {
+		printf("Invalid number of flows\n");
+		return;
+	}
+
+	if (params->n_ports == 0) {
+		printf("Invalid number of ports\n");
+		return;
+	}
 
+	/* Memory allocation */
 	key = rte_zmalloc(NULL,
 		N_FLOWS_BULK * sizeof(*key),
 		RTE_CACHE_LINE_SIZE);
@@ -1486,25 +1625,38 @@ cmd_fc_add_ipv6_5tuple_all_parsed(
 		return;
 	}
 
-	for (flow_id = 0; flow_id < params->n_flows; flow_id++) {
-		uint32_t pos = flow_id & (N_FLOWS_BULK - 1);
+	flow_id = rte_malloc(NULL,
+		N_FLOWS_BULK * sizeof(*flow_id),
+		RTE_CACHE_LINE_SIZE);
+	if (flow_id == NULL) {
+		rte_free(port_id);
+		rte_free(key);
+		printf("Memory allocation failed\n");
+		return;
+	}
+
+	/* Flow add */
+	for (id = 0; id < params->n_flows; id++) {
+		uint32_t pos = id & (N_FLOWS_BULK - 1);
 		uint32_t *x;
 
 		key[pos].type = FLOW_KEY_IPV6_5TUPLE;
 		x = (uint32_t *) key[pos].key.ipv6_5tuple.ip_dst;
-		*x = rte_bswap32(flow_id);
+		*x = rte_bswap32(id);
 		key[pos].key.ipv6_5tuple.proto = 6;
 
-		port_id[pos] = flow_id % params->n_ports;
+		port_id[pos] = id % params->n_ports;
+		flow_id[pos] = id;
 
 		if ((pos == N_FLOWS_BULK - 1) ||
-			(flow_id == params->n_flows - 1)) {
+			(id == params->n_flows - 1)) {
 			int status;
 
 			status = app_pipeline_fc_add_bulk(app,
 				params->pipeline_id,
 				key,
 				port_id,
+				flow_id,
 				pos + 1);
 
 			if (status != 0) {
@@ -1515,6 +1667,8 @@ cmd_fc_add_ipv6_5tuple_all_parsed(
 		}
 	}
 
+	/* Memory free */
+	rte_free(flow_id);
 	rte_free(port_id);
 	rte_free(key);
 }
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
index 7529314..9c77500 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
@@ -77,13 +77,15 @@ int
 app_pipeline_fc_add(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
-	uint32_t port_id);
+	uint32_t port_id,
+	uint32_t flow_id);
 
 int
 app_pipeline_fc_add_bulk(struct app_params *app,
 	uint32_t pipeline_id,
 	struct pipeline_fc_key *key,
 	uint32_t *port_id,
+	uint32_t *flow_id,
 	uint32_t n_keys);
 
 int
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
index e22f96f..11a78fc 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
@@ -40,6 +40,7 @@
 #include <pipeline.h>
 
 #include "pipeline_flow_classification_be.h"
+#include "pipeline_actions_common.h"
 #include "hash_func.h"
 
 struct pipeline_flow_classification {
@@ -47,10 +48,14 @@ struct pipeline_flow_classification {
 	pipeline_msg_req_handler custom_handlers[PIPELINE_FC_MSG_REQS];
 
 	uint32_t n_flows;
-	uint32_t key_offset;
 	uint32_t key_size;
+	uint32_t flow_id;
+
+	uint32_t key_offset;
 	uint32_t hash_offset;
 	uint8_t *key_mask;
+	uint32_t flow_id_offset;
+
 } __rte_cache_aligned;
 
 static void *
@@ -106,6 +111,9 @@ static pipeline_msg_req_handler custom_handlers[] = {
  */
 struct flow_table_entry {
 	struct rte_pipeline_table_entry head;
+
+	uint32_t flow_id;
+	uint32_t pad;
 };
 
 rte_table_hash_op_hash hash_func[] = {
@@ -119,6 +127,86 @@ rte_table_hash_op_hash hash_func[] = {
 	hash_default_key64
 };
 
+/*
+ * Flow table AH - Write flow_id to packet meta-data
+ */
+static inline void
+pkt_work_flow_id(
+	struct rte_mbuf *pkt,
+	struct rte_pipeline_table_entry *table_entry,
+	void *arg)
+{
+	struct pipeline_flow_classification *p_fc = arg;
+	uint32_t *flow_id_ptr =
+		RTE_MBUF_METADATA_UINT32_PTR(pkt, p_fc->flow_id_offset);
+	struct flow_table_entry *entry =
+		(struct flow_table_entry *) table_entry;
+
+	/* Read */
+	uint32_t flow_id = entry->flow_id;
+
+	/* Compute */
+
+	/* Write */
+	*flow_id_ptr = flow_id;
+}
+
+static inline void
+pkt4_work_flow_id(
+	struct rte_mbuf **pkts,
+	struct rte_pipeline_table_entry **table_entries,
+	void *arg)
+{
+	struct pipeline_flow_classification *p_fc = arg;
+
+	uint32_t *flow_id_ptr0 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p_fc->flow_id_offset);
+	uint32_t *flow_id_ptr1 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p_fc->flow_id_offset);
+	uint32_t *flow_id_ptr2 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p_fc->flow_id_offset);
+	uint32_t *flow_id_ptr3 =
+		RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p_fc->flow_id_offset);
+
+	struct flow_table_entry *entry0 =
+		(struct flow_table_entry *) table_entries[0];
+	struct flow_table_entry *entry1 =
+		(struct flow_table_entry *) table_entries[1];
+	struct flow_table_entry *entry2 =
+		(struct flow_table_entry *) table_entries[2];
+	struct flow_table_entry *entry3 =
+		(struct flow_table_entry *) table_entries[3];
+
+	/* Read */
+	uint32_t flow_id0 = entry0->flow_id;
+	uint32_t flow_id1 = entry1->flow_id;
+	uint32_t flow_id2 = entry2->flow_id;
+	uint32_t flow_id3 = entry3->flow_id;
+
+	/* Compute */
+
+	/* Write */
+	*flow_id_ptr0 = flow_id0;
+	*flow_id_ptr1 = flow_id1;
+	*flow_id_ptr2 = flow_id2;
+	*flow_id_ptr3 = flow_id3;
+}
+
+PIPELINE_TABLE_AH_HIT(fc_table_ah_hit,
+		pkt_work_flow_id, pkt4_work_flow_id);
+
+static rte_pipeline_table_action_handler_hit
+get_fc_table_ah_hit(struct pipeline_flow_classification *p)
+{
+	if (p->flow_id)
+		return fc_table_ah_hit;
+
+	return NULL;
+}
+
+/*
+ * Argument parsing
+ */
 static int
 pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 	struct pipeline_params *params)
@@ -128,12 +216,16 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 	uint32_t key_size_present = 0;
 	uint32_t hash_offset_present = 0;
 	uint32_t key_mask_present = 0;
+	uint32_t flow_id_offset_present = 0;
 
 	uint32_t i;
 	char *key_mask_str = NULL;
 
 	p->hash_offset = 0;
 
+	/* default values */
+	p->flow_id = 0;
+
 	for (i = 0; i < params->n_args; i++) {
 		char *arg_name = params->args_name[i];
 		char *arg_value = params->args_value[i];
@@ -141,12 +233,12 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 		/* n_flows */
 		if (strcmp(arg_name, "n_flows") == 0) {
 			if (n_flows_present)
-				return -1;
+				goto error_parse;
 			n_flows_present = 1;
 
 			p->n_flows = atoi(arg_value);
 			if (p->n_flows == 0)
-				return -1;
+				goto error_parse;
 
 			continue;
 		}
@@ -154,7 +246,7 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 		/* key_offset */
 		if (strcmp(arg_name, "key_offset") == 0) {
 			if (key_offset_present)
-				return -1;
+				goto error_parse;
 			key_offset_present = 1;
 
 			p->key_offset = atoi(arg_value);
@@ -165,14 +257,14 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 		/* key_size */
 		if (strcmp(arg_name, "key_size") == 0) {
 			if (key_size_present)
-				return -1;
+				goto error_parse;
 			key_size_present = 1;
 
 			p->key_size = atoi(arg_value);
 			if ((p->key_size == 0) ||
 				(p->key_size > PIPELINE_FC_FLOW_KEY_MAX_SIZE) ||
 				(p->key_size % 8))
-				return -1;
+				goto error_parse;
 
 			continue;
 		}
@@ -180,11 +272,11 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 		/* key_mask */
 		if (strcmp(arg_name, "key_mask") == 0) {
 			if (key_mask_present)
-				return -1;
+				goto error_parse;
 
 			key_mask_str = strdup(arg_value);
 			if (key_mask_str == NULL)
-				return -1;
+				goto error_parse;
 
 			key_mask_present = 1;
 
@@ -194,7 +286,7 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 		/* hash_offset */
 		if (strcmp(arg_name, "hash_offset") == 0) {
 			if (hash_offset_present)
-				return -1;
+				goto error_parse;
 			hash_offset_present = 1;
 
 			p->hash_offset = atoi(arg_value);
@@ -202,31 +294,50 @@ pipeline_fc_parse_args(struct pipeline_flow_classification *p,
 			continue;
 		}
 
+		/* flow_id_offset */
+		if (strcmp(arg_name, "flowid_offset") == 0) {
+			if (flow_id_offset_present)
+				goto error_parse;
+			flow_id_offset_present = 1;
+
+			p->flow_id = 1;
+			p->flow_id_offset = atoi(arg_value);
+
+			continue;
+		}
+
 		/* Unknown argument */
-		return -1;
+		goto error_parse;
 	}
 
 	/* Check that mandatory arguments are present */
 	if ((n_flows_present == 0) ||
 		(key_offset_present == 0) ||
 		(key_size_present == 0))
-		return -1;
+		goto error_parse;
 
 	if (key_mask_present) {
 		p->key_mask = rte_malloc(NULL, p->key_size, 0);
 		if (p->key_mask == NULL)
-			return -1;
+			goto error_parse;
 
 		if (parse_hex_string(key_mask_str, p->key_mask, &p->key_size)
 			!= 0) {
-			free(p->key_mask);
-			return -1;
+			goto error_parse;
 		}
 
 		free(key_mask_str);
 	}
 
 	return 0;
+
+error_parse:
+	if (key_mask_str != NULL)
+		free(key_mask_str);
+	if (p->key_mask != NULL)
+		free(p->key_mask);
+
+	return -1;
 }
 
 static void *pipeline_fc_init(struct pipeline_params *params,
@@ -360,9 +471,9 @@ static void *pipeline_fc_init(struct pipeline_params *params,
 		struct rte_pipeline_table_params table_params = {
 			.ops = NULL, /* set below */
 			.arg_create = NULL, /* set below */
-			.f_action_hit = NULL,
+			.f_action_hit = get_fc_table_ah_hit(p_fc),
 			.f_action_miss = NULL,
-			.arg_ah = NULL,
+			.arg_ah = p_fc,
 			.action_data_size = sizeof(struct flow_table_entry) -
 				sizeof(struct rte_pipeline_table_entry),
 		};
@@ -380,7 +491,6 @@ static void *pipeline_fc_init(struct pipeline_params *params,
 			}
 			table_params.arg_create = &table_hash_key8_params;
 			break;
-			break;
 
 		case 16:
 			if (p_fc->hash_offset != 0) {
@@ -533,6 +643,7 @@ pipeline_fc_msg_req_add_handler(struct pipeline *p, void *msg)
 			.action = RTE_PIPELINE_ACTION_PORT,
 			{.port_id = p->port_out_id[req->port_id]},
 		},
+		.flow_id = req->flow_id,
 	};
 
 	rsp->status = rte_pipeline_table_entry_add(p->p,
@@ -561,6 +672,7 @@ pipeline_fc_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
 				.action = RTE_PIPELINE_ACTION_PORT,
 				{.port_id = p->port_out_id[flow_req->port_id]},
 			},
+			.flow_id = flow_req->flow_id,
 		};
 
 		int status = rte_pipeline_table_entry_add(p->p,
@@ -606,6 +718,8 @@ pipeline_fc_msg_req_add_default_handler(struct pipeline *p, void *msg)
 			.action = RTE_PIPELINE_ACTION_PORT,
 			{.port_id = p->port_out_id[req->port_id]},
 		},
+
+		.flow_id = 0,
 	};
 
 	rsp->status = rte_pipeline_table_default_entry_add(p->p,
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
index 46403d5..d8129b2 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
@@ -59,6 +59,7 @@ struct pipeline_fc_add_msg_req {
 	uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
 
 	uint32_t port_id;
+	uint32_t flow_id;
 };
 
 struct pipeline_fc_add_msg_rsp {
@@ -73,6 +74,7 @@ struct pipeline_fc_add_msg_rsp {
 struct pipeline_fc_add_bulk_flow_req {
 	uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
 	uint32_t port_id;
+	uint32_t flow_id;
 };
 
 struct pipeline_fc_add_bulk_flow_rsp {
-- 
2.5.0

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v4] ip_pipeline: add flow id parameter to flow classification
  2015-11-30 14:08 ` [PATCH v4] " Jasvinder Singh
@ 2015-12-07  0:50   ` Thomas Monjalon
  0 siblings, 0 replies; 7+ messages in thread
From: Thomas Monjalon @ 2015-12-07  0:50 UTC (permalink / raw)
  To: Jasvinder Singh; +Cc: dev

2015-11-30 14:08, Jasvinder Singh:
> This patch adds flow id field to the flow
> classification table entries and adds table action
> handlers to read flow id from table entry and
> write it into the packet meta-data. The flow_id
> (32-bit) parameter is also added to CLI commands
> flow add, flow delete, etc.
> 
> *v2
> fixed bug: flow table entry size power of 2
> 
> *v3
> fixed bug: changed LRU hash table operation to
> extendible bucket hash table operation
> 
> *v4
> Coverity issue: 120147
> Fixes: 7122d30131ad ("examples/ip_pipeline: rework flow classification
> pipeline")
> 
> Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
> Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>

Applied, thanks

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2015-12-07  0:51 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-10-05 11:13 [PATCH v2] ip_pipeline: add flow id parameter to flow classification Jasvinder Singh
2015-10-05 13:10 ` Dumitrescu, Cristian
2015-10-12 15:41 ` [PATCH v3] " Jasvinder Singh
2015-10-12 15:46   ` Dumitrescu, Cristian
2015-10-12 15:51     ` Singh, Jasvinder
2015-11-30 14:08 ` [PATCH v4] " Jasvinder Singh
2015-12-07  0:50   ` Thomas Monjalon

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.