From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v2 14/37] net/txgbe: configure FDIR filter
Date: Wed, 11 Nov 2020 14:49:13 +0800	[thread overview]
Message-ID: <20201111064936.768604-15-jiawenwu@trustnetic.com> (raw)
In-Reply-To: <20201111064936.768604-1-jiawenwu@trustnetic.com>

Configure the flow director filter when it is enabled in the device
configuration.
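
An illustrative usage sketch (not part of this patch): an application
enables flow director by filling dev_conf.fdir_conf before configuring
the port, after which txgbe_dev_start() calls txgbe_fdir_configure().
Here port_id, nb_rxq and nb_txq are placeholders:

	struct rte_eth_conf port_conf = { 0 };
	int ret;

	/* perfect match mode, 64K filter buffer, report match status */
	port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
	port_conf.fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;
	port_conf.fdir_conf.status = RTE_FDIR_REPORT_STATUS;
	port_conf.fdir_conf.drop_queue = 127;

	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);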

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/base/txgbe_type.h |   6 +
 drivers/net/txgbe/meson.build       |   1 +
 drivers/net/txgbe/txgbe_ethdev.c    |   6 +
 drivers/net/txgbe/txgbe_ethdev.h    |   6 +
 drivers/net/txgbe/txgbe_fdir.c      | 407 ++++++++++++++++++++++++++++
 5 files changed, 426 insertions(+)
 create mode 100644 drivers/net/txgbe/txgbe_fdir.c

diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index b9d31ab83..633692cd7 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -21,6 +21,8 @@
 #define TXGBE_MAX_QP		(128)
 #define TXGBE_MAX_UTA		128
 
+#define TXGBE_FDIR_INIT_DONE_POLL		10
+
 #define TXGBE_ALIGN		128 /* as intel did */
 
 #include "txgbe_status.h"
@@ -65,6 +67,10 @@ enum {
 #define TXGBE_PHYSICAL_LAYER_10BASE_T		0x08000
 #define TXGBE_PHYSICAL_LAYER_2500BASE_KX	0x10000
 
+/* Software ATR hash keys */
+#define TXGBE_ATR_BUCKET_HASH_KEY		0x3DAD14E2
+#define TXGBE_ATR_SIGNATURE_HASH_KEY		0x174D3614
+
 #define TXGBE_ATR_HASH_MASK			0x7fff
 
 /* Flow Director ATR input struct. */
diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index 45379175d..bb1683631 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -6,6 +6,7 @@ objs = [base_objs]
 
 sources = files(
 	'txgbe_ethdev.c',
+	'txgbe_fdir.c',
 	'txgbe_flow.c',
 	'txgbe_ptypes.c',
 	'txgbe_pf.c',
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 589306f0a..75a170764 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1633,6 +1633,12 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 	txgbe_configure_port(dev);
 	txgbe_configure_dcb(dev);
 
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+		err = txgbe_fdir_configure(dev);
+		if (err)
+			goto error;
+	}
+
 	/* Restore vf rate limit */
 	if (vfinfo != NULL) {
 		for (vf = 0; vf < pci_dev->max_vfs; vf++)
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index f33ca1d32..cd24efeea 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -13,6 +13,7 @@
 #include <rte_time.h>
 #include <rte_hash.h>
 #include <rte_hash_crc.h>
+#include <rte_ethdev.h>
 
 /* need update link, bit flag */
 #define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -430,6 +431,11 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
 			       uint8_t queue, uint8_t msix_vector);
 
+/*
+ * Flow director function prototypes
+ */
+int txgbe_fdir_configure(struct rte_eth_dev *dev);
+int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
 void txgbe_configure_pb(struct rte_eth_dev *dev);
 void txgbe_configure_port(struct rte_eth_dev *dev);
 void txgbe_configure_dcb(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
new file mode 100644
index 000000000..df6125d4a
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+
+#define TXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
+#define TXGBE_MAX_FLX_SOURCE_OFF        62
+
+#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
+	uint8_t ipv6_addr[16]; \
+	uint8_t i; \
+	rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
+	(ipv6m) = 0; \
+	for (i = 0; i < sizeof(ipv6_addr); i++) { \
+		if (ipv6_addr[i] == UINT8_MAX) \
+			(ipv6m) |= 1 << i; \
+		else if (ipv6_addr[i] != 0) { \
+			PMD_DRV_LOG(ERR, "invalid IPv6 address mask."); \
+			return -EINVAL; \
+		} \
+	} \
+} while (0)
+
+#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
+	uint8_t ipv6_addr[16]; \
+	uint8_t i; \
+	for (i = 0; i < sizeof(ipv6_addr); i++) { \
+		if ((ipv6m) & (1 << i)) \
+			ipv6_addr[i] = UINT8_MAX; \
+		else \
+			ipv6_addr[i] = 0; \
+	} \
+	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+} while (0)
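+
+/*
+ * Illustrative example of the two helpers above (not compiled in):
+ * for a mask whose first two bytes are 0xff and the rest are zero,
+ * IPV6_ADDR_TO_MASK() yields the 16-bit bitmap 0x0003 (one bit per
+ * fully-masked byte), and IPV6_MASK_TO_ADDR(0x0003, addr) restores
+ * the byte-wise mask.
+ */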
+
+/**
+ *  Initialize Flow Director control registers
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register
+ **/
+static int
+txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
+{
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Prime the keys for hashing */
+	wr32(hw, TXGBE_FDIRBKTHKEY, TXGBE_ATR_BUCKET_HASH_KEY);
+	wr32(hw, TXGBE_FDIRSIGHKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);
+
+	/*
+	 * Continue setup of fdirctrl register bits:
+	 *  Set the maximum length per hash bucket to 0xA filters
+	 *  Send interrupt when 64 filters are left
+	 */
+	fdirctrl |= TXGBE_FDIRCTL_MAXLEN(0xA) |
+		    TXGBE_FDIRCTL_FULLTHR(4);
+
+	/*
+	 * Poll init-done after we write the register.  Estimated times:
+	 *      10G: PBALLOC = 11b, timing is 60us
+	 *       1G: PBALLOC = 11b, timing is 600us
+	 *     100M: PBALLOC = 11b, timing is 6ms
+	 *
+	 *     Multiply these timings by 4 if under full Rx load
+	 *
+	 * So we'll poll for TXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
+	 * this might not finish in our poll time, but we can live with that
+	 * for now.
+	 */
+	wr32(hw, TXGBE_FDIRCTL, fdirctrl);
+	txgbe_flush(hw);
+	for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (rd32(hw, TXGBE_FDIRCTL) & TXGBE_FDIRCTL_INITDONE)
+			break;
+		msec_delay(1);
+	}
+
+	if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
+		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/*
+ * Set appropriate bits in fdirctrl for: variable reporting levels, moving
+ * flexbytes matching field, and drop queue (only for perfect matching mode).
+ */
+static inline int
+configure_fdir_flags(const struct rte_fdir_conf *conf,
+		     uint32_t *fdirctrl, uint32_t *flex)
+{
+	*fdirctrl = 0;
+	*flex = 0;
+
+	switch (conf->pballoc) {
+	case RTE_FDIR_PBALLOC_64K:
+		/* 8k - 1 signature filters */
+		*fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
+		break;
+	case RTE_FDIR_PBALLOC_128K:
+		/* 16k - 1 signature filters */
+		*fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
+		break;
+	case RTE_FDIR_PBALLOC_256K:
+		/* 32k - 1 signature filters */
+		*fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
+		break;
+	default:
+		/* bad value */
+		PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
+		return -EINVAL;
+	}
+
+	/* status flags: write hash & swindex in the rx descriptor */
+	switch (conf->status) {
+	case RTE_FDIR_NO_REPORT_STATUS:
+		/* do nothing, default mode */
+		break;
+	case RTE_FDIR_REPORT_STATUS:
+		/* report status when the packet matches a fdir rule */
+		*fdirctrl |= TXGBE_FDIRCTL_REPORT_MATCH;
+		break;
+	case RTE_FDIR_REPORT_STATUS_ALWAYS:
+		/* always report status */
+		*fdirctrl |= TXGBE_FDIRCTL_REPORT_ALWAYS;
+		break;
+	default:
+		/* bad value */
+		PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
+		return -EINVAL;
+	}
+
+	*flex |= TXGBE_FDIRFLEXCFG_BASE_MAC;
+	*flex |= TXGBE_FDIRFLEXCFG_OFST(TXGBE_DEFAULT_FLEXBYTES_OFFSET / 2);
+
+	switch (conf->mode) {
+	case RTE_FDIR_MODE_SIGNATURE:
+		break;
+	case RTE_FDIR_MODE_PERFECT:
+		*fdirctrl |= TXGBE_FDIRCTL_PERFECT;
+		*fdirctrl |= TXGBE_FDIRCTL_DROPQP(conf->drop_queue);
+		break;
+	default:
+		/* bad value */
+		PMD_INIT_LOG(ERR, "Invalid fdir_conf->mode value");
+		return -EINVAL;
+	}
+
+	return 0;
+}
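+
+/*
+ * Illustrative example (not compiled in): with pballoc =
+ * RTE_FDIR_PBALLOC_64K, status = RTE_FDIR_REPORT_STATUS,
+ * mode = RTE_FDIR_MODE_PERFECT and drop_queue = 127,
+ * configure_fdir_flags() sets *fdirctrl = TXGBE_FDIRCTL_BUF_64K |
+ * TXGBE_FDIRCTL_REPORT_MATCH | TXGBE_FDIRCTL_PERFECT |
+ * TXGBE_FDIRCTL_DROPQP(127), and *flex = TXGBE_FDIRFLEXCFG_BASE_MAC |
+ * TXGBE_FDIRFLEXCFG_OFST(6), i.e. the default flexbytes offset of
+ * TXGBE_DEFAULT_FLEXBYTES_OFFSET / 2 = 6 in 16-bit words.
+ */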
+
+static inline uint32_t
+reverse_fdir_bmks(uint16_t hi_dword, uint16_t lo_dword)
+{
+	uint32_t mask = hi_dword << 16;
+
+	mask |= lo_dword;
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
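+
+/*
+ * Example (illustrative): reverse_fdir_bmks(0x0000, 0xFFFF) builds the
+ * 32-bit value 0x0000FFFF and bit-reverses it to 0xFFFF0000 (bit i
+ * moves to bit 31 - i).
+ */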
+
+int
+txgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+	/*
+	 * Mask VM pool and DIPv6 since they are currently not supported.
+	 * The FLEX byte mask is set later in flex_conf.
+	 */
+	uint32_t fdirm = TXGBE_FDIRMSK_POOL;
+	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
+	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (mode != RTE_FDIR_MODE_SIGNATURE &&
+	    mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "Unsupported fdir mode - %d!", mode);
+		return -ENOTSUP;
+	}
+
+	/*
+	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field. Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+	 * cannot be masked out in this implementation.
+	 */
+	if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0) {
+		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+		fdirm |= TXGBE_FDIRMSK_L4P;
+	}
+
+	/* TBD: don't support encapsulation yet */
+	wr32(hw, TXGBE_FDIRMSK, fdirm);
+
+	/* store the TCP/UDP port masks, bit reversed from port layout */
+	fdirtcpm = reverse_fdir_bmks(rte_be_to_cpu_16(info->mask.dst_port_mask),
+			rte_be_to_cpu_16(info->mask.src_port_mask));
+
+	/* write the same value so that UDP, TCP and SCTP use the same
+	 * mask (little-endian)
+	 */
+	wr32(hw, TXGBE_FDIRTCPMSK, ~fdirtcpm);
+	wr32(hw, TXGBE_FDIRUDPMSK, ~fdirtcpm);
+	wr32(hw, TXGBE_FDIRSCTPMSK, ~fdirtcpm);
+
+	/* Store source and destination IPv4 masks (big-endian) */
+	wr32(hw, TXGBE_FDIRSIP4MSK, ~info->mask.src_ipv4_mask);
+	wr32(hw, TXGBE_FDIRDIP4MSK, ~info->mask.dst_ipv4_mask);
+
+	if (mode == RTE_FDIR_MODE_SIGNATURE) {
+		/*
+		 * Store source and destination IPv6 masks (bit reversed)
+		 */
+		fdiripv6m = TXGBE_FDIRIP6MSK_DST(info->mask.dst_ipv6_mask) |
+			    TXGBE_FDIRIP6MSK_SRC(info->mask.src_ipv6_mask);
+
+		wr32(hw, TXGBE_FDIRIP6MSK, ~fdiripv6m);
+	}
+
+	return 0;
+}
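+
+/*
+ * Illustrative example (not compiled in): with dst_port_mask = 0xffff
+ * and src_port_mask = 0 after byte-order conversion, fdirtcpm =
+ * reverse_fdir_bmks(0xffff, 0x0000) = 0x0000ffff, and its complement
+ * 0xffff0000 is written to the TCP, UDP and SCTP port mask registers.
+ */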
+
+static int
+txgbe_fdir_store_input_mask(struct rte_eth_dev *dev)
+{
+	struct rte_eth_fdir_masks *input_mask =
+				&dev->data->dev_conf.fdir_conf.mask;
+	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+	struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+	uint16_t dst_ipv6m = 0;
+	uint16_t src_ipv6m = 0;
+
+	if (mode != RTE_FDIR_MODE_SIGNATURE &&
+	    mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "Unsupported fdir mode - %d!", mode);
+		return -ENOTSUP;
+	}
+
+	memset(&info->mask, 0, sizeof(struct txgbe_hw_fdir_mask));
+	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+	info->mask.src_port_mask = input_mask->src_port_mask;
+	info->mask.dst_port_mask = input_mask->dst_port_mask;
+	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+	info->mask.src_ipv6_mask = src_ipv6m;
+	info->mask.dst_ipv6_mask = dst_ipv6m;
+
+	return 0;
+}
+
+/*
+ * txgbe_set_fdir_flex_conf - check that the flex payload and mask
+ * configuration arguments are valid, and program the flex registers
+ */
+static int
+txgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, uint32_t flex)
+{
+	const struct rte_eth_fdir_flex_conf *conf =
+				&dev->data->dev_conf.fdir_conf.flex_conf;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+	const struct rte_eth_flex_payload_cfg *flex_cfg;
+	const struct rte_eth_fdir_flex_mask *flex_mask;
+	uint16_t flexbytes = 0;
+	uint16_t i;
+
+	if (conf == NULL) {
+		PMD_DRV_LOG(ERR, "NULL pointer.");
+		return -EINVAL;
+	}
+
+	flex |= TXGBE_FDIRFLEXCFG_DIA;
+
+	for (i = 0; i < conf->nb_payloads; i++) {
+		flex_cfg = &conf->flex_set[i];
+		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
+			PMD_DRV_LOG(ERR, "unsupported payload type.");
+			return -EINVAL;
+		}
+		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
+		    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
+		     flex_cfg->src_offset[0] <= TXGBE_MAX_FLX_SOURCE_OFF) {
+			flex &= ~TXGBE_FDIRFLEXCFG_OFST_MASK;
+			flex |=
+			    TXGBE_FDIRFLEXCFG_OFST(flex_cfg->src_offset[0] / 2);
+		} else {
+			PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < conf->nb_flexmasks; i++) {
+		flex_mask = &conf->flex_mask[i];
+		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
+			PMD_DRV_LOG(ERR, "flexmask should be set globally.");
+			return -EINVAL;
+		}
+		flexbytes = (uint16_t)(((flex_mask->mask[1] << 8) & 0xFF00) |
+					((flex_mask->mask[0]) & 0xFF));
+		if (flexbytes == UINT16_MAX) {
+			flex &= ~TXGBE_FDIRFLEXCFG_DIA;
+		} else if (flexbytes != 0) {
+			/* a flexbytes mask must be all ones or all zeroes */
+			PMD_DRV_LOG(ERR, "invalid flexbytes mask arguments.");
+			return -EINVAL;
+		}
+	}
+
+	info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
+	info->flex_bytes_offset = (uint8_t)(TXGBD_FDIRFLEXCFG_OFST(flex) * 2);
+
+	for (i = 0; i < 64; i++) {
+		uint32_t flexreg;
+		flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
+		flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
+		flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
+		wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
+	}
+	return 0;
+}
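+
+/*
+ * Illustrative example (not compiled in): a flex configuration that
+ * passes the checks above uses an even start offset and two adjacent
+ * bytes, e.g. flex_set[0].type = RTE_ETH_RAW_PAYLOAD,
+ * src_offset[0] = 12, src_offset[1] = 13, plus a global flex_mask
+ * entry (flow_type = RTE_ETH_FLOW_UNKNOWN) whose two mask bytes are
+ * both 0xff to enable matching or both 0x00 to disable it.
+ */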
+
+int
+txgbe_fdir_configure(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	int err;
+	uint32_t fdirctrl, flex, pbsize;
+	int i;
+	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* only signature and perfect modes are supported */
+	if (mode != RTE_FDIR_MODE_SIGNATURE &&
+	    mode != RTE_FDIR_MODE_PERFECT)
+		return -ENOSYS;
+
+	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf,
+				   &fdirctrl, &flex);
+	if (err)
+		return err;
+
+	/*
+	 * Before enabling Flow Director, the Rx Packet Buffer size
+	 * must be reduced.  The new value is the current size minus
+	 * the flow director memory usage (e.g. TXGBE_FDIRCTL_BUF_64K
+	 * reserves 64 KB of Rx packet buffer 0 for the filter table).
+	 */
+	pbsize = rd32(hw, TXGBE_PBRXSIZE(0));
+	pbsize -= TXGBD_FDIRCTL_BUF_BYTE(fdirctrl);
+	wr32(hw, TXGBE_PBRXSIZE(0), pbsize);
+
+	/*
+	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
+	 * initialized to zero for non DCB mode otherwise actual total RX PB
+	 * would be bigger than programmed and filter space would run into
+	 * the PB 0 region.
+	 */
+	for (i = 1; i < 8; i++)
+		wr32(hw, TXGBE_PBRXSIZE(i), 0);
+
+	err = txgbe_fdir_store_input_mask(dev);
+	if (err < 0) {
+		PMD_INIT_LOG(ERR, "Error on storing FD input mask");
+		return err;
+	}
+
+	err = txgbe_fdir_set_input_mask(dev);
+	if (err < 0) {
+		PMD_INIT_LOG(ERR, "Error on setting FD input mask");
+		return err;
+	}
+
+	err = txgbe_set_fdir_flex_conf(dev, flex);
+	if (err < 0) {
+		PMD_INIT_LOG(ERR, "Error on setting FD flexible arguments.");
+		return err;
+	}
+
+	err = txgbe_fdir_enable(hw, fdirctrl);
+	if (err < 0) {
+		PMD_INIT_LOG(ERR, "Error on enabling FD.");
+		return err;
+	}
+	return 0;
+}
+
-- 
2.18.4