From: Andrew Rybchenko <arybchenko@solarflare.com>
To: <dev@dpdk.org>
Subject: [PATCH 11/14] net/sfc: support VXLAN and NVGRE packet types classification
Date: Sun, 24 Dec 2017 10:46:41 +0000	[thread overview]
Message-ID: <1514112404-13398-12-git-send-email-arybchenko@solarflare.com> (raw)
In-Reply-To: <1514112404-13398-1-git-send-email-arybchenko@solarflare.com>

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Reviewed-by: Andy Moreton <amoreton@solarflare.com>
---
 doc/guides/nics/sfc_efx.rst   |  11 +++++
 drivers/net/sfc/sfc_dp_rx.h   |   4 +-
 drivers/net/sfc/sfc_ef10_rx.c | 102 ++++++++++++++++++++++++++++++++++++------
 drivers/net/sfc/sfc_ethdev.c  |   8 +++-
 drivers/net/sfc/sfc_rx.c      |   6 ++-
 5 files changed, 114 insertions(+), 17 deletions(-)

diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index bde3cc8..994e111 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -124,6 +124,17 @@ with full-feature firmware variant running.
 **sfboot** should be used to configure NIC to run full-feature firmware variant.
 See Solarflare Server Adapter User's Guide for details.
 
+SFN8xxx family adapters provide either inner or outer packet classes.
+If adapter firmware advertises support for tunnels then the PMD
+configures the hardware to report inner classes, and outer classes are
+not reported in received packets.
+However, for VXLAN and GENEVE tunnels the PMD does report UDP as the
+outer layer 4 packet type.
+
+SFN8xxx family adapters report GENEVE packets as VXLAN.
+If UDP ports are configured for only one tunnel type then it is safe to
+treat VXLAN packet type indication as the corresponding UDP tunnel type.
+
 
 Flow API support
 ----------------
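
From the application side, the classification described in the documentation
above shows up in the mbuf packet_type field. Below is a minimal sketch,
assuming the standard RTE_PTYPE_* masks from rte_mbuf_ptype.h; the helper
name sfc_example_parse_ptype is made up for illustration and is not part of
this patch:

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

/* Hypothetical helper: interpret the packet type reported by the PMD. */
static void
sfc_example_parse_ptype(const struct rte_mbuf *m)
{
	uint32_t pt = m->packet_type;

	switch (pt & RTE_PTYPE_TUNNEL_MASK) {
	case RTE_PTYPE_TUNNEL_VXLAN:
		/*
		 * On SFN8xxx this may be VXLAN or GENEVE; the outer L4 type
		 * is reported as UDP and inner classes use RTE_PTYPE_INNER_*.
		 */
		if ((pt & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_TCP) {
			/* Inner TCP carried over a UDP tunnel */
		}
		break;
	case RTE_PTYPE_TUNNEL_NVGRE:
		/* Inner classes are reported, outer classes are not */
		break;
	default:
		/* Not tunnelled: outer L2/L3/L4 classes are reported */
		break;
	}
}

Since a VXLAN indication may actually be a GENEVE packet on SFN8xxx, it can
only be resolved unambiguously if UDP ports are configured for a single
tunnel type, as the documentation text above notes.
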
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index 3f6a604..33e06ac 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -150,7 +150,8 @@ typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
 typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
 
 /** Get packet types recognized/classified */
-typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(void);
+typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(
+				uint32_t tunnel_encaps);
 
 /** Get number of pending Rx descriptors */
 typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
@@ -166,6 +167,7 @@ struct sfc_dp_rx {
 	unsigned int				features;
 #define SFC_DP_RX_FEAT_SCATTER			0x1
 #define SFC_DP_RX_FEAT_MULTI_PROCESS		0x2
+#define SFC_DP_RX_FEAT_TUNNELS			0x4
 	sfc_dp_rx_qcreate_t			*qcreate;
 	sfc_dp_rx_qdestroy_t			*qdestroy;
 	sfc_dp_rx_qstart_t			*qstart;
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 4c76f74..41c2885 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -251,6 +251,7 @@ static void
 sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 			   struct rte_mbuf *m)
 {
+	uint32_t tun_ptype = 0;
 	uint32_t l2_ptype = 0;
 	uint32_t l3_ptype = 0;
 	uint32_t l4_ptype = 0;
@@ -259,15 +260,40 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 	if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
 		goto done;
 
+	switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
+	default:
+		/* Unexpected encapsulation tag class */
+		SFC_ASSERT(false);
+		/* FALLTHROUGH */
+	case ESE_EZ_ENCAP_HDR_NONE:
+		break;
+	case ESE_EZ_ENCAP_HDR_VXLAN:
+		/*
+		 * It is definitely UDP, but we have no information
+		 * about IPv4 vs IPv6 and VLAN tagging.
+		 */
+		tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
+		break;
+	case ESE_EZ_ENCAP_HDR_GRE:
+		/*
+		 * We have no information about IPv4 vs IPv6 and VLAN tagging.
+		 */
+		tun_ptype = RTE_PTYPE_TUNNEL_NVGRE;
+		break;
+	}
+
 	switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
 	case ESE_DZ_ETH_TAG_CLASS_NONE:
-		l2_ptype = RTE_PTYPE_L2_ETHER;
+		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
+			RTE_PTYPE_INNER_L2_ETHER;
 		break;
 	case ESE_DZ_ETH_TAG_CLASS_VLAN1:
-		l2_ptype = RTE_PTYPE_L2_ETHER_VLAN;
+		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN :
+			RTE_PTYPE_INNER_L2_ETHER_VLAN;
 		break;
 	case ESE_DZ_ETH_TAG_CLASS_VLAN2:
-		l2_ptype = RTE_PTYPE_L2_ETHER_QINQ;
+		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ :
+			RTE_PTYPE_INNER_L2_ETHER_QINQ;
 		break;
 	default:
 		/* Unexpected Eth tag class */
@@ -276,25 +302,31 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 
 	switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
 	case ESE_DZ_L3_CLASS_IP4_FRAG:
-		l4_ptype = RTE_PTYPE_L4_FRAG;
+		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+			RTE_PTYPE_INNER_L4_FRAG;
 		/* FALLTHROUGH */
 	case ESE_DZ_L3_CLASS_IP4:
-		l3_ptype = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+		l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
+			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
 		ol_flags |= PKT_RX_RSS_HASH |
 			((EFX_TEST_QWORD_BIT(rx_ev,
 					     ESF_DZ_RX_IPCKSUM_ERR_LBN)) ?
 			 PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
 		break;
 	case ESE_DZ_L3_CLASS_IP6_FRAG:
-		l4_ptype = RTE_PTYPE_L4_FRAG;
+		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+			RTE_PTYPE_INNER_L4_FRAG;
 		/* FALLTHROUGH */
 	case ESE_DZ_L3_CLASS_IP6:
-		l3_ptype = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+		l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
+			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
 		ol_flags |= PKT_RX_RSS_HASH;
 		break;
 	case ESE_DZ_L3_CLASS_ARP:
 		/* Override Layer 2 packet type */
-		l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
+		/* There is no ARP classification for inner packets */
+		if (tun_ptype == 0)
+			l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
 		break;
 	default:
 		/* Unexpected Layer 3 class */
@@ -303,14 +335,16 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 
 	switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) {
 	case ESE_DZ_L4_CLASS_TCP:
-		l4_ptype = RTE_PTYPE_L4_TCP;
+		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
+			RTE_PTYPE_INNER_L4_TCP;
 		ol_flags |=
 			(EFX_TEST_QWORD_BIT(rx_ev,
 					    ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
 			PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
 		break;
 	case ESE_DZ_L4_CLASS_UDP:
-		l4_ptype = RTE_PTYPE_L4_UDP;
+		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
+			RTE_PTYPE_INNER_L4_UDP;
 		ol_flags |=
 			(EFX_TEST_QWORD_BIT(rx_ev,
 					    ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
@@ -329,7 +363,7 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 
 done:
 	m->ol_flags = ol_flags;
-	m->packet_type = l2_ptype | l3_ptype | l4_ptype;
+	m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
 }
 
 static uint16_t
@@ -515,7 +549,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 }
 
 static const uint32_t *
-sfc_ef10_supported_ptypes_get(void)
+sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
 {
 	static const uint32_t ef10_native_ptypes[] = {
 		RTE_PTYPE_L2_ETHER,
@@ -529,8 +563,47 @@ sfc_ef10_supported_ptypes_get(void)
 		RTE_PTYPE_L4_UDP,
 		RTE_PTYPE_UNKNOWN
 	};
+	static const uint32_t ef10_overlay_ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L2_ETHER_ARP,
+		RTE_PTYPE_L2_ETHER_VLAN,
+		RTE_PTYPE_L2_ETHER_QINQ,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_TUNNEL_VXLAN,
+		RTE_PTYPE_TUNNEL_NVGRE,
+		RTE_PTYPE_INNER_L2_ETHER,
+		RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		RTE_PTYPE_INNER_L2_ETHER_QINQ,
+		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_INNER_L4_FRAG,
+		RTE_PTYPE_INNER_L4_TCP,
+		RTE_PTYPE_INNER_L4_UDP,
+		RTE_PTYPE_UNKNOWN
+	};
 
-	return ef10_native_ptypes;
+	/*
+	 * The function returns static set of supported packet types,
+	 * so we can't build it dynamically based on supported tunnel
+	 * encapsulations and should limit to known sets.
+	 */
+	switch (tunnel_encaps) {
+	case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
+	      1u << EFX_TUNNEL_PROTOCOL_GENEVE |
+	      1u << EFX_TUNNEL_PROTOCOL_NVGRE):
+		return ef10_overlay_ptypes;
+	default:
+		RTE_LOG(ERR, PMD,
+			"Unexpected set of supported tunnel encapsulations: %#x\n",
+			tunnel_encaps);
+		/* FALLTHROUGH */
+	case 0:
+		return ef10_native_ptypes;
+	}
 }
 
 static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
@@ -707,7 +780,8 @@ struct sfc_dp_rx sfc_ef10_rx = {
 		.type		= SFC_DP_RX,
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
-	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS,
+	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
+				  SFC_DP_RX_FEAT_TUNNELS,
 	.qcreate		= sfc_ef10_rx_qcreate,
 	.qdestroy		= sfc_ef10_rx_qdestroy,
 	.qstart			= sfc_ef10_rx_qstart,
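
As the comment in sfc_ef10_supported_ptypes_get() explains, the supported
packet type list is chosen from static arrays based on the tunnel
encapsulations the firmware advertises. An application sees the resulting
list through the standard ethdev query; a minimal sketch, assuming
rte_eth_dev_get_supported_ptypes() behaves as documented (the helper name
port_reports_tunnel_ptypes is made up for illustration):

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf_ptype.h>

/* Hypothetical check: does the port advertise tunnel packet types? */
static int
port_reports_tunnel_ptypes(uint16_t port_id)
{
	uint32_t ptypes[32];
	int n, i;

	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_TUNNEL_MASK,
					     ptypes, RTE_DIM(ptypes));
	for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++) {
		if (ptypes[i] == RTE_PTYPE_TUNNEL_VXLAN ||
		    ptypes[i] == RTE_PTYPE_TUNNEL_NVGRE)
			return 1;
	}
	return 0;
}
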
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 837fd55..0fea997 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -131,6 +131,10 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM;
 
+	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
+	    (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_IPV4_CKSUM |
 		DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -183,8 +187,10 @@ static const uint32_t *
 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported;
 
-	return sa->dp_rx->supported_ptypes_get();
+	return sa->dp_rx->supported_ptypes_get(tunnel_encaps);
 }
 
 static int
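
The DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM capability advertised above becomes
visible to applications via rte_eth_dev_info_get(); a minimal sketch of
checking it (the helper name is made up for illustration):

#include <rte_ethdev.h>

/* Hypothetical check for the outer IPv4 checksum Rx capability. */
static int
port_has_outer_ipv4_cksum(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0;
}
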
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 22bf372..70a72b3 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -193,7 +193,7 @@ sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
 }
 
 static const uint32_t *
-sfc_efx_supported_ptypes_get(void)
+sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
 {
 	static const uint32_t ptypes[] = {
 		RTE_PTYPE_L2_ETHER,
@@ -947,6 +947,10 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
+	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
+	    (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
+
 	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
 			  rxq_info->entries, socket_id, &evq);
 	if (rc != 0)
-- 
2.7.4


Thread overview: 16+ messages
2017-12-24 10:46 [PATCH 00/14] net/sfc: support NVGRE, VXLAN and GENEVE tunnels Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 01/14] net/sfc: fix label name to be consistent Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 02/14] net/sfc: do not hold management event queue lock while MCDI Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 03/14] net/sfc: handle MC reboot event Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 04/14] net/sfc: retry port start to handle MC reboot in the middle Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 05/14] net/sfc/base: control RxQ scatter using flag instead of type Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 06/14] net/sfc/base: add function to create packed stream RxQ Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 07/14] net/sfc/base: allow to request inner classes for Rx packets Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 08/14] net/sfc/base: add API to control UDP tunnel ports Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 09/14] net/sfc: support UDP tunnel ports configuration Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 10/14] net/sfc: fix incorrect bitwise ORing of L3/L4 packet types Andrew Rybchenko
2017-12-24 10:46 ` Andrew Rybchenko [this message]
2017-12-24 10:46 ` [PATCH 12/14] net/sfc: correct Rx checksum offloads for tunnel packets Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 13/14] net/sfc: support inner checksum offload on transmit Andrew Rybchenko
2017-12-24 10:46 ` [PATCH 14/14] doc: add net/sfc tunnels support to release features Andrew Rybchenko
2018-01-09 17:31 ` [PATCH 00/14] net/sfc: support NVGRE, VXLAN and GENEVE tunnels Ferruh Yigit
