From: Xiaoyu Min <jackmin@mellanox.com>
To: Wenzhuo Lu <wenzhuo.lu@intel.com>,
	Jingjing Wu <jingjing.wu@intel.com>,
	Bernard Iremonger <bernard.iremonger@intel.com>,
	Adrien Mazarguil <adrien.mazarguil@6wind.com>,
	John McNamara <john.mcnamara@intel.com>,
	Marko Kovacevic <marko.kovacevic@intel.com>
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 1/2] app/testpmd: support raw encap/decap actions
Date: Mon, 24 Jun 2019 23:37:35 +0800
Message-ID: <20190624153736.127968-2-jackmin@mellanox.com>
In-Reply-To: <20190624153736.127968-1-jackmin@mellanox.com>

This patch intends to support action_raw_encap/decap [1] in a generic
and convenient way.

Two new commands, set raw_encap and set raw_decap, are introduced,
following the other encap/decap commands such as set vxlan.

Each command fills a corresponding global buffer, which the PMD then
uses as the input buffer for raw encap/decap.

The commands use the rte_flow pattern syntax so that the user can
build the raw buffer in a convenient way.

A common way to use it (an API-level sketch follows these examples):

- encap matched egress packets with a VxLAN tunnel:
testpmd> set raw_encap eth src is 10:11:22:33:44:55 / vlan tci is 1
	 inner_type is 0x0800 / ipv4 / udp dst is 4789 / vxlan vni
	 is 2 / end_set
testpmd> flow create 0 egress pattern eth / ipv4 / end actions
	 raw_encap / end

- decap the L2 header and encap a GRE tunnel on matched egress packets:
testpmd> set raw_decap eth / end_set
testpmd> set raw_encap eth dst is 10:22:33:44:55:66 / ipv4 / gre
	 protocol is 0x0800 / end_set
testpmd> flow create 0 egress pattern eth / ipv4 / end actions
	 raw_decap / raw_encap / end

- decap the VxLAN tunnel and encap an L2 header on matched ingress packets:
testpmd> set raw_encap eth src is 10:11:22:33:44:55 type is 0x0800 /
	 end_set
testpmd> set raw_decap eth / ipv4 / udp / vxlan / end_set
testpmd> flow create 0 ingress pattern eth / ipv4 / udp dst is 250 /
         vxlan vni is 0x1234 / ipv4 / end actions raw_decap /
         raw_encap / queue index 1 / mark id 0x1234 / end
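
For reference, the buffers built by these commands are handed to the
PMD through the standard raw encap/decap action configurations. The C
sketch below shows a rough application-level equivalent of the first
example above; the header bytes, sizes, port id and attributes are
illustrative assumptions, not part of this patch:

    /* Rough equivalent of "set raw_encap ... / end_set" followed by
     * "flow create 0 egress ... actions raw_encap / end".
     * Requires <rte_flow.h>; encap_buf contents are placeholders.
     */
    uint8_t encap_buf[128];  /* user-built ETH/VLAN/IPv4/UDP/VXLAN headers */
    size_t encap_len = 54;   /* 14 + 4 + 20 + 8 + 8 bytes, assumed */

    struct rte_flow_action_raw_encap encap_conf = {
        .data = encap_buf,   /* raw bytes prepended on egress */
        .preserve = NULL,    /* no per-byte preserve mask */
        .size = encap_len,
    };
    struct rte_flow_attr attr = { .egress = 1 };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap_conf },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error error;
    struct rte_flow *flow = rte_flow_create(0, &attr, pattern, actions,
                                            &error);

In testpmd, the two set commands fill such a buffer from the given
rte_flow item specs, and the raw_encap/raw_decap flow actions simply
point the PMD at that buffer.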

[1] http://mails.dpdk.org/archives/dev/2018-October/116092.html

Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
---
 app/test-pmd/cmdline.c                      |  13 +
 app/test-pmd/cmdline_flow.c                 | 536 +++++++++++++++++++-
 app/test-pmd/testpmd.h                      |   2 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  48 ++
 4 files changed, 598 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index d1e0d4402c..3cbfdcd2c1 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -760,6 +760,12 @@ static void cmd_help_long_parsed(void *parsed_result,
 			" eth-src (eth-src) eth-dst (eth-dst)\n"
 			"       Configure the NVGRE encapsulation for flows.\n\n"
 
+			"raw_encap {flow items}\n"
+			"	Configure the encapsulation with raw data.\n\n"
+
+			"raw_decap {flow items}\n"
+			"	Configure the decapsulation with raw data.\n\n"
+
 			, list_pkt_forwarding_modes()
 		);
 	}
@@ -1137,6 +1143,12 @@ static void cmd_help_long_parsed(void *parsed_result,
 			"flow isolate {port_id} {boolean}\n"
 			"    Restrict ingress traffic to the defined"
 			" flow rules\n\n"
+
+			"set raw_encap {flow items}\n"
+			"	Config encap with raw data.\n\n"
+
+			"set raw_decap {flow items}\n"
+			"	Config decap with raw data.\n\n"
 		);
 	}
 
@@ -18979,6 +18991,7 @@ cmdline_parse_ctx_t main_ctx[] = {
 #endif
 	(cmdline_parse_inst_t *)&cmd_config_tx_metadata_specific,
 	(cmdline_parse_inst_t *)&cmd_show_tx_metadata,
+	(cmdline_parse_inst_t *)&cmd_set_raw,
 	NULL,
 };
 
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 201bd9de56..e7da0fad2a 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -28,6 +28,8 @@ enum index {
 	/* Special tokens. */
 	ZERO = 0,
 	END,
+	START_SET,
+	END_SET,
 
 	/* Common tokens. */
 	INTEGER,
@@ -45,8 +47,13 @@ enum index {
 	PRIORITY_LEVEL,
 
 	/* Top-level command. */
-	FLOW,
+	SET,
+	/* Sub-level commands. */
+	SET_RAW_ENCAP,
+	SET_RAW_DECAP,
 
+	/* Top-level command. */
+	FLOW,
 	/* Sub-level commands. */
 	VALIDATE,
 	CREATE,
@@ -272,6 +279,8 @@ enum index {
 	ACTION_SET_MAC_SRC_MAC_SRC,
 	ACTION_SET_MAC_DST,
 	ACTION_SET_MAC_DST_MAC_DST,
+	ACTION_RAW_ENCAP,
+	ACTION_RAW_DECAP,
 };
 
 /** Maximum size for pattern in struct rte_flow_item_raw. */
@@ -294,6 +303,25 @@ struct action_rss_data {
 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
 
+#define ACTION_RAW_ENCAP_MAX_DATA 128
+
+/** Storage for struct rte_flow_action_raw_encap. */
+struct raw_encap_data {
+	uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
+	uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
+	size_t size;
+};
+
+struct raw_encap_data raw_encap_data = {.size = 0};
+
+/** Storage for struct rte_flow_action_raw_decap. */
+struct raw_decap_data {
+	uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
+	size_t size;
+};
+
+struct raw_decap_data raw_decap_data = {.size = 0};
+
 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
 struct action_vxlan_encap_data {
 	struct rte_flow_action_vxlan_encap conf;
@@ -610,6 +638,7 @@ static const enum index next_item[] = {
 	ITEM_ICMP6_ND_OPT_SLA_ETH,
 	ITEM_ICMP6_ND_OPT_TLA_ETH,
 	ITEM_META,
+	END_SET,
 	ZERO,
 };
 
@@ -885,6 +914,8 @@ static const enum index next_action[] = {
 	ACTION_SET_TTL,
 	ACTION_SET_MAC_SRC,
 	ACTION_SET_MAC_DST,
+	ACTION_RAW_ENCAP,
+	ACTION_RAW_DECAP,
 	ZERO,
 };
 
@@ -1047,6 +1078,12 @@ static const enum index action_set_mac_dst[] = {
 	ZERO,
 };
 
+static int parse_set_raw_encap_decap(struct context *, const struct token *,
+				     const char *, unsigned int,
+				     void *, unsigned int);
+static int parse_set_init(struct context *, const struct token *,
+			  const char *, unsigned int,
+			  void *, unsigned int);
 static int parse_init(struct context *, const struct token *,
 		      const char *, unsigned int,
 		      void *, unsigned int);
@@ -1093,6 +1130,12 @@ static int parse_vc_action_mplsoudp_encap(struct context *,
 static int parse_vc_action_mplsoudp_decap(struct context *,
 					  const struct token *, const char *,
 					  unsigned int, void *, unsigned int);
+static int parse_vc_action_raw_encap(struct context *,
+				     const struct token *, const char *,
+				     unsigned int, void *, unsigned int);
+static int parse_vc_action_raw_decap(struct context *,
+				     const struct token *, const char *,
+				     unsigned int, void *, unsigned int);
 static int parse_destroy(struct context *, const struct token *,
 			 const char *, unsigned int,
 			 void *, unsigned int);
@@ -1166,6 +1209,16 @@ static const struct token token_list[] = {
 		.type = "RETURN",
 		.help = "command may end here",
 	},
+	[START_SET] = {
+		.name = "START_SET",
+		.help = "null entry, abused as the entry point for set",
+		.next = NEXT(NEXT_ENTRY(SET)),
+	},
+	[END_SET] = {
+		.name = "end_set",
+		.type = "RETURN",
+		.help = "set command may end here",
+	},
 	/* Common tokens. */
 	[INTEGER] = {
 		.name = "{int}",
@@ -2854,6 +2907,46 @@ static const struct token token_list[] = {
 			     (struct rte_flow_action_set_mac, mac_addr)),
 		.call = parse_vc_conf,
 	},
+	[ACTION_RAW_ENCAP] = {
+		.name = "raw_encap",
+		.help = "encapsulation data, defined by set raw_encap",
+		.priv = PRIV_ACTION(RAW_ENCAP,
+			sizeof(struct rte_flow_action_raw_encap)),
+		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+		.call = parse_vc_action_raw_encap,
+	},
+	[ACTION_RAW_DECAP] = {
+		.name = "raw_decap",
+		.help = "decapsulation data, defined by set raw_encap",
+		.priv = PRIV_ACTION(RAW_DECAP,
+			sizeof(struct rte_flow_action_raw_decap)),
+		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+		.call = parse_vc_action_raw_decap,
+	},
+	/* Top level command. */
+	[SET] = {
+		.name = "set",
+		.help = "set raw encap/decap data",
+		.type = "set raw_encap|raw_decap <pattern>",
+		.next = NEXT(NEXT_ENTRY
+			     (SET_RAW_ENCAP,
+			      SET_RAW_DECAP)),
+		.call = parse_set_init,
+	},
+	/* Sub-level commands. */
+	[SET_RAW_ENCAP] = {
+		.name = "raw_encap",
+		.help = "set raw encap data",
+		.next = NEXT(next_item),
+		.call = parse_set_raw_encap_decap,
+	},
+	[SET_RAW_DECAP] = {
+		.name = "raw_decap",
+		.help = "set raw decap data",
+		.next = NEXT(next_item),
+		.call = parse_set_raw_encap_decap,
+	}
+
 };
 
 /** Remove and return last entry from argument stack. */
@@ -4140,6 +4233,75 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
 	return ret;
 }
 
+static int
+parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
+			  const char *str, unsigned int len, void *buf,
+			  unsigned int size)
+{
+	struct buffer *out = buf;
+	struct rte_flow_action *action;
+	struct rte_flow_action_raw_encap *raw_encap_conf = NULL;
+	uint8_t *data = NULL;
+	int ret;
+
+	ret = parse_vc(ctx, token, str, len, buf, size);
+	if (ret < 0)
+		return ret;
+	/* Nothing else to do if there is no buffer. */
+	if (!out)
+		return ret;
+	if (!out->args.vc.actions_n)
+		return -1;
+	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+	/* Point to selected object. */
+	ctx->object = out->args.vc.data;
+	ctx->objmask = NULL;
+	/* Copy the headers to the buffer. */
+	raw_encap_conf = ctx->object;
+	/* data stored from tail of data buffer */
+	data = (uint8_t *)&(raw_encap_data.data) +
+		ACTION_RAW_ENCAP_MAX_DATA - raw_encap_data.size;
+	raw_encap_conf->data = data;
+	raw_encap_conf->preserve = NULL;
+	raw_encap_conf->size = raw_encap_data.size;
+	action->conf = raw_encap_conf;
+	return ret;
+}
+
+static int
+parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
+			  const char *str, unsigned int len, void *buf,
+			  unsigned int size)
+{
+	struct buffer *out = buf;
+	struct rte_flow_action *action;
+	struct rte_flow_action_raw_decap *raw_decap_conf = NULL;
+	uint8_t *data = NULL;
+	int ret;
+
+	ret = parse_vc(ctx, token, str, len, buf, size);
+	if (ret < 0)
+		return ret;
+	/* Nothing else to do if there is no buffer. */
+	if (!out)
+		return ret;
+	if (!out->args.vc.actions_n)
+		return -1;
+	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+	/* Point to selected object. */
+	ctx->object = out->args.vc.data;
+	ctx->objmask = NULL;
+	/* Copy the headers to the buffer. */
+	raw_decap_conf = ctx->object;
+	/* data stored from tail of data buffer */
+	data = (uint8_t *)&(raw_decap_data.data) +
+		ACTION_RAW_ENCAP_MAX_DATA - raw_decap_data.size;
+	raw_decap_conf->data = data;
+	raw_decap_conf->size = raw_decap_data.size;
+	action->conf = raw_decap_conf;
+	return ret;
+}
+
 /** Parse tokens for destroy command. */
 static int
 parse_destroy(struct context *ctx, const struct token *token,
@@ -4796,6 +4958,73 @@ parse_port(struct context *ctx, const struct token *token,
 	return ret;
 }
 
+/** Parse set command, initialize output buffer for subsequent tokens. */
+static int
+parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
+			  const char *str, unsigned int len,
+			  void *buf, unsigned int size)
+{
+	struct buffer *out = buf;
+
+	/* Token name must match. */
+	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+		return -1;
+	/* Nothing else to do if there is no buffer. */
+	if (!out)
+		return len;
+	/* Make sure buffer is large enough. */
+	if (size < sizeof(*out))
+		return -1;
+	ctx->objdata = 0;
+	ctx->objmask = NULL;
+	if (!out->command)
+		return -1;
+	out->command = ctx->curr;
+	return len;
+}
+
+/**
+ * Parse set raw_encap/raw_decap command,
+ * initialize output buffer for subsequent tokens.
+ */
+static int
+parse_set_init(struct context *ctx, const struct token *token,
+	       const char *str, unsigned int len,
+	       void *buf, unsigned int size)
+{
+	struct buffer *out = buf;
+
+	/* Token name must match. */
+	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+		return -1;
+	/* Nothing else to do if there is no buffer. */
+	if (!out)
+		return len;
+	/* Make sure buffer is large enough. */
+	if (size < sizeof(*out))
+		return -1;
+	/* Initialize buffer. */
+	memset(out, 0x00, sizeof(*out));
+	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
+	ctx->objdata = 0;
+	ctx->object = out;
+	ctx->objmask = NULL;
+	if (!out->command) {
+		if (ctx->curr != SET)
+			return -1;
+		if (sizeof(*out) > size)
+			return -1;
+		out->command = ctx->curr;
+		out->args.vc.data = (uint8_t *)out + size;
+		/* All we need is pattern */
+		out->args.vc.pattern =
+			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
+					       sizeof(double));
+		ctx->object = out->args.vc.pattern;
+	}
+	return len;
+}
+
 /** No completion. */
 static int
 comp_none(struct context *ctx, const struct token *token,
@@ -4929,6 +5158,7 @@ static struct context cmd_flow_context;
 
 /** Global parser instance (cmdline API). */
 cmdline_parse_inst_t cmd_flow;
+cmdline_parse_inst_t cmd_set_raw;
 
 /** Initialize context. */
 static void
@@ -5208,3 +5438,307 @@ cmdline_parse_inst_t cmd_flow = {
 		NULL,
 	}, /**< Tokens are returned by cmd_flow_tok(). */
 };
+
+/** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */
+
+static void
+update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
+{
+	struct rte_flow_item_ipv4 *ipv4;
+	struct rte_flow_item_eth *eth;
+	struct rte_flow_item_ipv6 *ipv6;
+	struct rte_flow_item_vxlan *vxlan;
+	struct rte_flow_item_vxlan_gpe *gpe;
+	struct rte_flow_item_nvgre *nvgre;
+	uint32_t ipv6_vtc_flow;
+
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		eth = (struct rte_flow_item_eth *)buf;
+		if (next_proto)
+			eth->type = rte_cpu_to_be_16(next_proto);
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		ipv4 = (struct rte_flow_item_ipv4 *)buf;
+		ipv4->hdr.version_ihl = 0x45;
+		ipv4->hdr.next_proto_id = (uint8_t)next_proto;
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		ipv6 = (struct rte_flow_item_ipv6 *)buf;
+		ipv6->hdr.proto = (uint8_t)next_proto;
+		ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
+		ipv6_vtc_flow &= 0x0FFFFFFF; /*< reset version bits. */
+		ipv6_vtc_flow |= 0x60000000; /*< set ipv6 version. */
+		ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
+		break;
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		vxlan = (struct rte_flow_item_vxlan *)buf;
+		vxlan->flags = 0x08;
+		break;
+	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+		gpe = (struct rte_flow_item_vxlan_gpe *)buf;
+		gpe->flags = 0x0C;
+		break;
+	case RTE_FLOW_ITEM_TYPE_NVGRE:
+		nvgre = (struct rte_flow_item_nvgre *)buf;
+		nvgre->protocol = rte_cpu_to_be_16(0x6558);
+		nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
+		break;
+	default:
+		break;
+	}
+}
+
+/** Helper of get item's default mask. */
+static const void *
+flow_item_default_mask(const struct rte_flow_item *item)
+{
+	const void *mask = NULL;
+
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_ANY:
+		mask = &rte_flow_item_any_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_VF:
+		mask = &rte_flow_item_vf_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_PORT_ID:
+		mask = &rte_flow_item_port_id_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_RAW:
+		mask = &rte_flow_item_raw_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		mask = &rte_flow_item_eth_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_VLAN:
+		mask = &rte_flow_item_vlan_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		mask = &rte_flow_item_ipv4_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		mask = &rte_flow_item_ipv6_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_ICMP:
+		mask = &rte_flow_item_icmp_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		mask = &rte_flow_item_udp_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		mask = &rte_flow_item_tcp_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_SCTP:
+		mask = &rte_flow_item_sctp_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		mask = &rte_flow_item_vxlan_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+		mask = &rte_flow_item_vxlan_gpe_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_E_TAG:
+		mask = &rte_flow_item_e_tag_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_NVGRE:
+		mask = &rte_flow_item_nvgre_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_MPLS:
+		mask = &rte_flow_item_mpls_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_GRE:
+		mask = &rte_flow_item_gre_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_META:
+		mask = &rte_flow_item_meta_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_FUZZY:
+		mask = &rte_flow_item_fuzzy_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_GTP:
+		mask = &rte_flow_item_gtp_mask;
+		break;
+	case RTE_FLOW_ITEM_TYPE_ESP:
+		mask = &rte_flow_item_esp_mask;
+		break;
+	default:
+		break;
+	}
+	return mask;
+}
+
+
+
+/** Dispatch parsed buffer to function calls. */
+static void
+cmd_set_raw_parsed(const struct buffer *in)
+{
+	uint32_t n = in->args.vc.pattern_n;
+	int i = 0;
+	struct rte_flow_item *item = NULL;
+	size_t size = 0;
+	uint8_t *data = NULL;
+	uint8_t *data_tail = NULL;
+	size_t *total_size = NULL;
+	uint16_t upper_layer = 0;
+	uint16_t proto = 0;
+
+	RTE_ASSERT(in->command == SET_RAW_ENCAP ||
+		   in->command == SET_RAW_DECAP);
+	if (in->command == SET_RAW_ENCAP) {
+		total_size = &raw_encap_data.size;
+		data = (uint8_t *)&raw_encap_data.data;
+	} else {
+		total_size = &raw_decap_data.size;
+		data = (uint8_t *)&raw_decap_data.data;
+	}
+	*total_size = 0;
+	memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
+	/* process hdr from upper layer to low layer (L3/L4 -> L2). */
+	data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
+	for (i = n - 1 ; i >= 0; --i) {
+		item = in->args.vc.pattern + i;
+		if (item->spec == NULL)
+			item->spec = flow_item_default_mask(item);
+		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			size = sizeof(struct rte_flow_item_eth);
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			size = sizeof(struct rte_flow_item_vlan);
+			proto = RTE_ETHER_TYPE_VLAN;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			size = sizeof(struct rte_flow_item_ipv4);
+			proto = RTE_ETHER_TYPE_IPV4;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			size = sizeof(struct rte_flow_item_ipv6);
+			proto = RTE_ETHER_TYPE_IPV6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			size = sizeof(struct rte_flow_item_udp);
+			proto = 0x11;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			size = sizeof(struct rte_flow_item_tcp);
+			proto = 0x06;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			size = sizeof(struct rte_flow_item_vxlan);
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+			size = sizeof(struct rte_flow_item_vxlan_gpe);
+			break;
+		case RTE_FLOW_ITEM_TYPE_GRE:
+			size = sizeof(struct rte_flow_item_gre);
+			proto = 0x2F;
+			break;
+		case RTE_FLOW_ITEM_TYPE_MPLS:
+			size = sizeof(struct rte_flow_item_mpls);
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			size = sizeof(struct rte_flow_item_nvgre);
+			proto = 0x2F;
+			break;
+		default:
+			printf("Error - Not supported item\n");
+			*total_size = 0;
+			memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
+			return;
+		}
+		*total_size += size;
+		rte_memcpy(data_tail - (*total_size), item->spec, size);
+		/* update some fields which cannot be set by cmdline */
+		update_fields((data_tail - (*total_size)), item,
+			      upper_layer);
+		upper_layer = proto;
+	}
+	if (verbose_level & 0x1)
+		printf("total data size is %zu\n", (*total_size));
+	RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
+}
+
+/** Populate help strings for current token (cmdline API). */
+static int
+cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
+		     unsigned int size)
+{
+	struct context *ctx = &cmd_flow_context;
+	const struct token *token = &token_list[ctx->prev];
+
+	(void)hdr;
+	if (!size)
+		return -1;
+	/* Set token type and update global help with details. */
+	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
+	if (token->help)
+		cmd_set_raw.help_str = token->help;
+	else
+		cmd_set_raw.help_str = token->name;
+	return 0;
+}
+
+/** Token definition template (cmdline API). */
+static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
+	.ops = &(struct cmdline_token_ops){
+		.parse = cmd_flow_parse,
+		.complete_get_nb = cmd_flow_complete_get_nb,
+		.complete_get_elt = cmd_flow_complete_get_elt,
+		.get_help = cmd_set_raw_get_help,
+	},
+	.offset = 0,
+};
+
+/** Populate the next dynamic token. */
+static void
+cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
+	     cmdline_parse_token_hdr_t **hdr_inst)
+{
+	struct context *ctx = &cmd_flow_context;
+
+	/* Always reinitialize context before requesting the first token. */
+	if (!(hdr_inst - cmd_set_raw.tokens)) {
+		cmd_flow_context_init(ctx);
+		ctx->curr = START_SET;
+	}
+	/* Return NULL when no more tokens are expected. */
+	if (!ctx->next_num && (ctx->curr != START_SET)) {
+		*hdr = NULL;
+		return;
+	}
+	/* Determine if command should end here. */
+	if (ctx->eol && ctx->last && ctx->next_num) {
+		const enum index *list = ctx->next[ctx->next_num - 1];
+		int i;
+
+		for (i = 0; list[i]; ++i) {
+			if (list[i] != END)
+				continue;
+			*hdr = NULL;
+			return;
+		}
+	}
+	*hdr = &cmd_set_raw_token_hdr;
+}
+
+/** Token generator and output processing callback (cmdline API). */
+static void
+cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
+{
+	if (cl == NULL)
+		cmd_set_raw_tok(arg0, arg2);
+	else
+		cmd_set_raw_parsed(arg0);
+}
+
+/** Global parser instance (cmdline API). */
+cmdline_parse_inst_t cmd_set_raw = {
+	.f = cmd_set_raw_cb,
+	.data = NULL, /**< Unused. */
+	.help_str = NULL, /**< Updated by cmd_set_raw_get_help(). */
+	.tokens = {
+		NULL,
+	}, /**< Tokens are returned by cmd_set_raw_tok(). */
+};
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index e3a6f7c717..ba04c3ba53 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -11,6 +11,7 @@
 #include <rte_bus_pci.h>
 #include <rte_gro.h>
 #include <rte_gso.h>
+#include <cmdline.h>
 
 #define RTE_PORT_ALL            (~(portid_t)0x0)
 
@@ -263,6 +264,7 @@ extern struct fwd_engine ieee1588_fwd_engine;
 #endif
 
 extern struct fwd_engine * fwd_engines[]; /**< NULL terminated array. */
+extern cmdline_parse_inst_t cmd_set_raw;
 
 extern uint16_t mempool_flags;
 
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index cb83a3ce8a..a0c10367ab 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -1815,6 +1815,30 @@ flow rule using the action mplsoudp_decap will use the last configuration set.
 To have a different decapsulation header, one of those commands must be called
 before the flow rule creation.
 
+Config Raw Encapsulation
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure the raw data to be used when encapsulating a packet by
+rte_flow_action_raw_encap::
+
+ set raw_encap {item} [/ {item} [...]] / end_set
+
+This command will set an internal buffer inside testpmd, any following flow rule
+using the action raw_encap will use the last configuration set.
+To have a different encapsulation header, this command must be called before the
+flow rule creation.
+
+Config Raw Decapsulation
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure the raw data to be used when decapsulating a packet by
+rte_flow_action_raw_decap::
+
+ set raw_decap {item} [/ {item} [...]] / end_set
+
+This command will set an internal buffer inside testpmd, any following flow rule
+using the action raw_decap will use the last configuration set.
+
 Port Functions
 --------------
 
@@ -4620,6 +4644,30 @@ IPv6 MPLSoUDP with VLAN outer header::
  testpmd> flow create 0 ingress pattern eth / vlan / ipv6 / udp / mpls / end
         actions mplsoudp_decap / l2_encap / end
 
+Sample Raw encapsulation rule
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Raw encapsulation configuration can be set by the following commands
+
+Encapsulating VxLAN::
+
+ testpmd> set raw_encap eth src is 10:11:22:33:44:55 / vlan tci is 1
+        inner_type is 0x0800 / ipv4 / udp dst is 4789 / vxlan vni
+        is 2 / end_set
+ testpmd> flow create 0 egress pattern eth / ipv4 / end actions
+        raw_encap / end
+
+Sample Raw decapsulation rule
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Raw decapsulation configuration can be set by the following commands
+
+Decapsulating VxLAN::
+
+ testpmd> set raw_decap eth / ipv4 / udp / vxlan / end_set
+ testpmd> flow create 0 ingress pattern eth / ipv4 / udp / vxlan / eth / ipv4 /
+        end actions raw_decap / queue index 0 / end
+
 BPF Functions
 --------------
 
-- 
2.21.0


