From: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
To: <ferruh.yigit@intel.com>
Cc: <dev@dpdk.org>, <xuanziyang2@huawei.com>,
	<shahar.belkar@huawei.com>, <luoxianjun@huawei.com>,
	<tanya.brokhman@huawei.com>, <zhouguoyang@huawei.com>,
	<wulike1@huawei.com>, Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
Subject: [dpdk-dev] [PATCH v2 13/17] net/hinic: support inner L3 checksum offload
Date: Wed, 25 Sep 2019 22:30:41 +0800
Message-ID: <924687da68bf91dc2dffbcfb39258917f7f0966c.1569421287.git.cloud.wangxiaoyun@huawei.com>
In-Reply-To: <cover.1569421286.git.cloud.wangxiaoyun@huawei.com>

This patch adds support for inner L3 checksum offload for tunnelled
packets and reworks Rx checksum offload so that the hardware checksum
result is reported in the mbuf offload flags only when
DEV_RX_OFFLOAD_CHECKSUM is enabled.

Signed-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
---
 drivers/net/hinic/hinic_pmd_ethdev.h |   1 +
 drivers/net/hinic/hinic_pmd_rx.c     |  10 +-
 drivers/net/hinic/hinic_pmd_tx.c     | 190 ++++++++++++++++++++++-------------
 3 files changed, 127 insertions(+), 74 deletions(-)
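
Reviewer note (illustrative, not part of the patch): the Tx hunks below
parse checksum offload requests from the mbuf. Here is a minimal sketch
of how an application would mark a VXLAN-encapsulated packet for the
inner L3/L4 checksum offload added here, assuming the DPDK 19.08-era
mbuf API this patch targets; the header lengths are typical example
values, not values taken from this patch:

#include <rte_mbuf.h>

/* Request the offloads that hinic_tx_offload_pkt_prepare() parses.
 * Outer IP checksum is requested, so the PKT_TX_OUTER_IP_CKSUM
 * branch of the VXLAN handling below is taken.
 */
static void example_vxlan_tx_offload(struct rte_mbuf *m)
{
	m->outer_l2_len = 14;		/* outer Ethernet */
	m->outer_l3_len = 20;		/* outer IPv4 */
	/* For tunnel offloads, l2_len spans outer UDP (8) + VXLAN (8) +
	 * inner Ethernet (14), matching the
	 * "m->l2_len - VXLANLEN - sizeof(*udp_hdr)" arithmetic below.
	 */
	m->l2_len = 8 + 8 + 14;
	m->l3_len = 20;			/* inner IPv4 */
	m->l4_len = 20;			/* inner TCP */

	m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM |	/* inner L3 */
		       PKT_TX_TCP_CKSUM;		/* inner L4 */
}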

diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h
index dd96667..3e3f3b3 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.h
+++ b/drivers/net/hinic/hinic_pmd_ethdev.h
@@ -178,6 +178,7 @@ struct hinic_nic_dev {
 	 * vf: the same with associate pf
 	 */
 	u32 default_cos;
+	u32 rx_csum_en;
 
 	struct hinic_filter_info    filter;
 	struct hinic_ntuple_filter_list filter_ntuple_list;
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 08e02ae..37b4f5c 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -658,7 +658,6 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 	struct rte_eth_rss_conf rss_conf =
 		dev->data->dev_conf.rx_adv_conf.rss_conf;
-	u32 csum_en = 0;
 	int err;
 
 	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
@@ -678,9 +677,10 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 
 	/* Enable both L3/L4 rx checksum offload */
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
-		csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
+		nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
-	err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en);
+	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
+					HINIC_RX_CSUM_OFFLOAD_EN);
 	if (err)
 		goto rx_csum_ofl_err;
 
@@ -781,6 +781,10 @@ static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq)
 {
 	uint32_t checksum_err;
 	uint64_t flags;
+	struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+
+	if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
+		return PKT_RX_IP_CKSUM_UNKNOWN;
 
 	/* most case checksum is ok */
 	checksum_err = HINIC_GET_RX_CSUM_ERR(status);
diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index 0ef7add..26f481f 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -20,6 +20,9 @@
 #include "hinic_pmd_tx.h"
 
 /* packet header and tx offload info */
+#define ETHER_LEN_NO_VLAN		14
+#define ETHER_LEN_WITH_VLAN		18
+#define HEADER_LEN_OFFSET		2
 #define VXLANLEN			8
 #define MAX_PLD_OFFSET			221
 #define MAX_SINGLE_SGE_SIZE		65536
@@ -34,6 +37,9 @@
 #define HINIC_TSO_PKT_MAX_SGE			127	/* tso max sge 127 */
 #define HINIC_TSO_SEG_NUM_INVALID(num)		((num) > HINIC_TSO_PKT_MAX_SGE)
 
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET	1
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET	0
+
 /* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
 #define HINIC_BUF_DESC_SIZE(nr_descs)	(SIZE_8BYTES(((u32)nr_descs) << 4))
 
@@ -476,16 +482,16 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf,
 hinic_set_l4_csum_info(struct hinic_sq_task *task,
 		u32 *queue_info, struct hinic_tx_offload_info *poff_info)
 {
-	u32 tcp_udp_cs, sctp;
+	u32 tcp_udp_cs, sctp = 0;
 	u16 l2hdr_len;
 
-	sctp = 0;
 	if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE))
 		sctp = 1;
 
 	tcp_udp_cs = poff_info->inner_l4_tcp_udp;
 
-	if (poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
+	if (poff_info->tunnel_type == TUNNEL_UDP_CSUM ||
+	    poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
 		l2hdr_len =  poff_info->outer_l2_len;
 
 		task->pkt_info2 |=
@@ -665,50 +671,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi);
 }
 
-static inline int
-hinic_validate_tx_offload(const struct rte_mbuf *m)
-{
-	uint64_t ol_flags = m->ol_flags;
-	uint64_t inner_l3_offset = m->l2_len;
-
-	/* just support vxlan offload */
-	if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
-	    !(ol_flags & PKT_TX_TUNNEL_VXLAN))
-		return -ENOTSUP;
-
-	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-
-	/* Headers are fragmented */
-	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
-		return -ENOTSUP;
-
-	/* IP checksum can be counted only for IPv4 packet */
-	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
-		return -EINVAL;
-
-	/* IP type not set when required */
-	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
-		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
-			return -EINVAL;
-	}
-
-	/* Check requirements for TSO packet */
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		if (m->tso_segsz == 0 ||
-			((ol_flags & PKT_TX_IPV4) &&
-			!(ol_flags & PKT_TX_IP_CKSUM)))
-			return -EINVAL;
-	}
-
-	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
-	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
-		!(ol_flags & PKT_TX_OUTER_IPV4))
-		return -EINVAL;
-
-	return 0;
-}
-
 static inline uint16_t
 hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
 {
@@ -760,6 +722,65 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	return __rte_raw_cksum_reduce(sum);
 }
 
+static inline void
+hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info,
+		     int outer_cs_flag)
+{
+	uint64_t ol_flags = m->ol_flags;
+
+	if (outer_cs_flag == HINIC_TX_OUTER_CHECKSUM_FLAG_SET) {
+		if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+			off_info->payload_offset = m->outer_l2_len +
+				m->outer_l3_len + m->l2_len + m->l3_len;
+		} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+				(ol_flags & PKT_TX_TCP_SEG)) {
+			off_info->payload_offset = m->outer_l2_len +
+					m->outer_l3_len + m->l2_len +
+					m->l3_len + m->l4_len;
+		}
+	} else {
+		if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+			off_info->payload_offset = m->l2_len + m->l3_len;
+		} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+			(ol_flags & PKT_TX_TCP_SEG)) {
+			off_info->payload_offset = m->l2_len + m->l3_len +
+						   m->l4_len;
+		}
+	}
+}
+
+static inline void
+hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+		      struct hinic_tx_offload_info *off_info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_vlan_hdr *vlan_hdr;
+	struct rte_ipv4_hdr *ip4h;
+	u16 pkt_type;
+	u8 *hdr;
+
+	hdr = rte_pktmbuf_mtod(mbuf, u8 *);
+	eth_hdr = (struct rte_ether_hdr *)hdr;
+	pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+	if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+		off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
+		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+		pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+	} else {
+		off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
+	}
+
+	if (pkt_type == RTE_ETHER_TYPE_IPV4) {
+		ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
+		off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
+					HEADER_LEN_OFFSET;
+	} else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+		/* IPv6 extension headers are not supported */
+		off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
+	}
+}
+
 static inline int
 hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
 				struct hinic_tx_offload_info *off_info)
@@ -771,42 +792,66 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	struct rte_ether_hdr *eth_hdr;
 	struct rte_vlan_hdr *vlan_hdr;
 	u16 eth_type = 0;
-	uint64_t inner_l3_offset = m->l2_len;
+	uint64_t inner_l3_offset;
 	uint64_t ol_flags = m->ol_flags;
 
-	/* Does packet set any of available offloads */
+	/* Check whether the packet sets any supported offload flags */
 	if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
 		return 0;
 
-	if (unlikely(hinic_validate_tx_offload(m)))
+	/* Only VXLAN tunnel offload is supported */
+	if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
+	    !(ol_flags & PKT_TX_TUNNEL_VXLAN))
+		return -ENOTSUP;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	if (rte_validate_tx_offload(m) != 0)
 		return -EINVAL;
+#endif
 
-	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
-			(ol_flags & PKT_TX_OUTER_IPV6) ||
-			(ol_flags & PKT_TX_TUNNEL_VXLAN)) {
-		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-		off_info->outer_l2_len = m->outer_l2_len;
-		off_info->outer_l3_len = m->outer_l3_len;
-		/* just support vxlan tunneling pkt */
-		off_info->inner_l2_len = m->l2_len - VXLANLEN -
-						sizeof(struct rte_udp_hdr);
-		off_info->inner_l3_len = m->l3_len;
-		off_info->inner_l4_len = m->l4_len;
-		off_info->tunnel_length = m->l2_len;
-		off_info->payload_offset = m->outer_l2_len +
-				m->outer_l3_len + m->l2_len + m->l3_len;
-		off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+	if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+		if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+		    (ol_flags & PKT_TX_OUTER_IPV6)) {
+			inner_l3_offset = m->l2_len + m->outer_l2_len +
+				m->outer_l3_len;
+			off_info->outer_l2_len = m->outer_l2_len;
+			off_info->outer_l3_len = m->outer_l3_len;
+			/* only VXLAN tunnelled packets are supported */
+			off_info->inner_l2_len = m->l2_len - VXLANLEN -
+				sizeof(*udp_hdr);
+			off_info->inner_l3_len = m->l3_len;
+			off_info->inner_l4_len = m->l4_len;
+			off_info->tunnel_length = m->l2_len;
+			off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+			hinic_get_pld_offset(m, off_info,
+					     HINIC_TX_OUTER_CHECKSUM_FLAG_SET);
+		} else {
+			inner_l3_offset = m->l2_len;
+			hinic_analyze_tx_info(m, off_info);
+			/* only VXLAN tunnelled packets are supported */
+			off_info->inner_l2_len = m->l2_len - VXLANLEN -
+				sizeof(*udp_hdr) - off_info->outer_l2_len -
+				off_info->outer_l3_len;
+			off_info->inner_l3_len = m->l3_len;
+			off_info->inner_l4_len = m->l4_len;
+			off_info->tunnel_length = m->l2_len -
+				off_info->outer_l2_len - off_info->outer_l3_len;
+			off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+			hinic_get_pld_offset(m, off_info,
+				HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+		}
 	} else {
+		inner_l3_offset = m->l2_len;
 		off_info->inner_l2_len = m->l2_len;
 		off_info->inner_l3_len = m->l3_len;
 		off_info->inner_l4_len = m->l4_len;
 		off_info->tunnel_type = NOT_TUNNEL;
-		off_info->payload_offset = m->l2_len + m->l3_len;
-	}
 
-	if (((ol_flags & PKT_TX_L4_MASK) != PKT_TX_SCTP_CKSUM) &&
-	    ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM))
-		off_info->payload_offset += m->l4_len;
+		hinic_get_pld_offset(m, off_info,
+				     HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+	}
 
 	/* invalid udp or tcp header */
 	if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
@@ -855,6 +900,10 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 			udp_hdr->dgram_cksum =
 				hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
 		}
+	} else if (ol_flags & PKT_TX_OUTER_IPV4) {
+		off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+		off_info->inner_l4_tcp_udp = 1;
+		off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
 	}
 
 	if (ol_flags & PKT_TX_IPV4)
@@ -892,7 +941,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 
 		off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
 		off_info->inner_l4_tcp_udp = 1;
-		off_info->inner_l4_len = sizeof(struct rte_udp_hdr);
 	} else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
 			(ol_flags & PKT_TX_TCP_SEG)) {
 		if (ol_flags & PKT_TX_IPV4) {
-- 
1.8.3.1
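
Reviewer note (illustrative, not part of the patch): with the
rx_csum_en gate added above, hinic_rx_csum() now returns
PKT_RX_IP_CKSUM_UNKNOWN unless Rx checksum offload was requested. A
minimal usage sketch, assuming the standard rte_ethdev configuration
flow (device and queue setup elided):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Enable L3/L4 Rx checksum offload before rte_eth_dev_configure();
 * this is what makes hinic_rx_configure() set nic_dev->rx_csum_en.
 */
static void example_enable_rx_csum(struct rte_eth_conf *conf)
{
	conf->rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
}

/* After rte_eth_rx_burst(), check the per-packet result that
 * hinic_rx_csum() stored in ol_flags.
 */
static int example_ip_csum_bad(const struct rte_mbuf *m)
{
	return (m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD;
}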

