* [PATCH 0/2] move private APIs to specific files
@ 2017-04-11  8:31 Wenzhuo Lu
  2017-04-11  8:31 ` [PATCH 1/2] net/i40e: move private APIs to a specific file Wenzhuo Lu
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Wenzhuo Lu @ 2017-04-11  8:31 UTC
  To: dev; +Cc: Wenzhuo Lu

There are some private APIs in the ixgbe and i40e drivers. Create
specific files for them and move the related code into the new files.
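
For context: the "private APIs" here are the rte_pmd_* functions an
application calls directly against a known PMD, bypassing the generic
ethdev layer. A minimal usage sketch, assuming port 0 is bound to the
i40e PMD (the helper function and port/VF numbers are illustrative;
error handling trimmed):

    #include <errno.h>
    #include <stdio.h>
    #include <rte_pmd_i40e.h>

    /* Sketch: push the current link status to VF 0 of port 0. */
    static void ping_first_vf(void)
    {
    	int ret = rte_pmd_i40e_ping_vfs(0, 0);

    	if (ret == -ENOTSUP)
    		printf("port 0 is not driven by the i40e PMD\n");
    }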

Wenzhuo Lu (2):
  net/i40e: move private APIs to a specific file
  net/ixgbe: move private APIs to a specific file

 drivers/net/i40e/Makefile         |    1 +
 drivers/net/i40e/i40e_ethdev.c    | 1676 +-----------------------------------
 drivers/net/i40e/i40e_ethdev.h    |   11 +
 drivers/net/i40e/rte_pmd_i40e.c   | 1707 +++++++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/Makefile        |    1 +
 drivers/net/ixgbe/ixgbe_ethdev.c  | 1300 +++++-----------------------
 drivers/net/ixgbe/ixgbe_ethdev.h  |    5 +
 drivers/net/ixgbe/rte_pmd_ixgbe.c |  910 ++++++++++++++++++++
 8 files changed, 2867 insertions(+), 2744 deletions(-)
 create mode 100644 drivers/net/i40e/rte_pmd_i40e.c
 create mode 100644 drivers/net/ixgbe/rte_pmd_ixgbe.c

-- 
1.9.3


* [PATCH 1/2] net/i40e: move private APIs to a specific file
  2017-04-11  8:31 [PATCH 0/2] move private APIs to specific files Wenzhuo Lu
@ 2017-04-11  8:31 ` Wenzhuo Lu
  2017-04-11  8:31 ` [PATCH 2/2] net/ixgbe: " Wenzhuo Lu
  2017-04-11 12:10 ` [PATCH 0/2] move private APIs to specific files Ferruh Yigit
  2 siblings, 0 replies; 4+ messages in thread
From: Wenzhuo Lu @ 2017-04-11  8:31 UTC
  To: dev; +Cc: Wenzhuo Lu

Create a new file rte_pmd_i40e.c for all the private
APIs. Move all the related code to the new file.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/i40e/Makefile       |    1 +
 drivers/net/i40e/i40e_ethdev.c  | 1676 +-------------------------------------
 drivers/net/i40e/i40e_ethdev.h  |   11 +
 drivers/net/i40e/rte_pmd_i40e.c | 1707 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 1726 insertions(+), 1669 deletions(-)
 create mode 100644 drivers/net/i40e/rte_pmd_i40e.c
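
One structural note on the hunks below: besides moving code, the patch
drops the static qualifier from a few helpers (i40e_add_macvlan_filters()
and friends) and declares them in i40e_ethdev.h, and the former inline
is_device_supported(dev, &rte_i40e_pmd) checks become a single exported
helper, apparently so the new file need not reference the driver record
kept in i40e_ethdev.c:

    /* In i40e_ethdev.c, exported via i40e_ethdev.h. */
    bool
    is_i40e_supported(struct rte_eth_dev *dev)
    {
    	return is_device_supported(dev, &rte_i40e_pmd);
    }

    /* Typical call site in rte_pmd_i40e.c: */
    if (!is_i40e_supported(dev))
    	return -ENOTSUP;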

diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 1f5d133..56f210d 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -108,6 +108,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
 
 # vector PMD driver needs SSE4.1 support
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 8d25f8c..f11c8c1 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -64,7 +64,6 @@
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
 #include "i40e_regs.h"
-#include "rte_pmd_i40e.h"
 
 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
@@ -244,15 +243,6 @@
 /* Bit mask of Extended Tag enable/disable */
 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
 
-/* The max bandwidth of i40e is 40Gbps. */
-#define I40E_QOS_BW_MAX 40000
-/* The bandwidth should be the multiple of 50Mbps. */
-#define I40E_QOS_BW_GRANULARITY 50
-/* The min bandwidth weight is 1. */
-#define I40E_QOS_BW_WEIGHT_MIN 1
-/* The max bandwidth weight is 127. */
-#define I40E_QOS_BW_WEIGHT_MAX 127
-
 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
 static int i40e_dev_configure(struct rte_eth_dev *dev);
@@ -342,10 +332,6 @@ static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
 						struct i40e_vsi *vsi);
 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
-static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
-					     struct i40e_macvlan_filter *mv_f,
-					     int num,
-					     struct ether_addr *addr);
 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
 					     struct i40e_macvlan_filter *mv_f,
 					     int num,
@@ -5854,7 +5840,7 @@ struct i40e_vsi *
 	rte_intr_enable(dev->intr_handle);
 }
 
-static int
+int
 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
 			 struct i40e_macvlan_filter *filter,
 			 int total)
@@ -5928,7 +5914,7 @@ struct i40e_vsi *
 	return ret;
 }
 
-static int
+int
 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
 			    struct i40e_macvlan_filter *filter,
 			    int total)
@@ -6048,7 +6034,7 @@ struct i40e_vsi *
 		vsi->vfta[vid_idx] &= ~vid_bit;
 }
 
-static void
+void
 i40e_set_vlan_filter(struct i40e_vsi *vsi,
 		     uint16_t vlan_id, bool on)
 {
@@ -6084,7 +6070,7 @@ struct i40e_vsi *
  * Find all vlan options for specific mac addr,
  * return with actual vlan found.
  */
-static inline int
+int
 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
 			   struct i40e_macvlan_filter *mv_f,
 			   int num, struct ether_addr *addr)
@@ -10690,1658 +10676,10 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
 	return true;
 }
 
-int
-rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
-
-	return 0;
-}
-
-int
-rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	struct i40e_vsi_context ctxt;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	/* Check if it has been already on or off */
-	if (vsi->info.valid_sections &
-		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
-		if (on) {
-			if ((vsi->info.sec_flags &
-			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
-			    I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
-				return 0; /* already on */
-		} else {
-			if ((vsi->info.sec_flags &
-			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
-				return 0; /* already off */
-		}
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
-	if (on)
-		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
-	else
-		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
-
-	hw = I40E_VSI_TO_HW(vsi);
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-	}
-
-	return ret;
-}
-
-static int
-i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
-{
-	uint32_t j, k;
-	uint16_t vlan_id;
-	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
-	int ret;
-
-	for (j = 0; j < I40E_VFTA_SIZE; j++) {
-		if (!vsi->vfta[j])
-			continue;
-
-		for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
-			if (!(vsi->vfta[j] & (1 << k)))
-				continue;
-
-			vlan_id = j * I40E_UINT32_BIT_SIZE + k;
-			if (!vlan_id)
-				continue;
-
-			vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
-			if (add)
-				ret = i40e_aq_add_vlan(hw, vsi->seid,
-						       &vlan_data, 1, NULL);
-			else
-				ret = i40e_aq_remove_vlan(hw, vsi->seid,
-							  &vlan_data, 1, NULL);
-			if (ret != I40E_SUCCESS) {
-				PMD_DRV_LOG(ERR,
-					    "Failed to add/rm vlan filter");
-				return ret;
-			}
-		}
-	}
-
-	return I40E_SUCCESS;
-}
-
-int
-rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	struct i40e_vsi_context ctxt;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	/* Check if it has been already on or off */
-	if (vsi->vlan_anti_spoof_on == on)
-		return 0; /* already on or off */
-
-	vsi->vlan_anti_spoof_on = on;
-	if (!vsi->vlan_filter_on) {
-		ret = i40e_add_rm_all_vlan_filter(vsi, on);
-		if (ret) {
-			PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
-			return -ENOTSUP;
-		}
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
-	if (on)
-		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
-	else
-		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
-
-	hw = I40E_VSI_TO_HW(vsi);
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-	}
-
-	return ret;
-}
-
-static int
-i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
-{
-	struct i40e_mac_filter *f;
-	struct i40e_macvlan_filter *mv_f;
-	int i, vlan_num;
-	enum rte_mac_filter_type filter_type;
-	int ret = I40E_SUCCESS;
-	void *temp;
-
-	/* remove all the MACs */
-	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
-		vlan_num = vsi->vlan_num;
-		filter_type = f->mac_info.filter_type;
-		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
-		    filter_type == RTE_MACVLAN_HASH_MATCH) {
-			if (vlan_num == 0) {
-				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
-				return I40E_ERR_PARAM;
-			}
-		} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
-			   filter_type == RTE_MAC_HASH_MATCH)
-			vlan_num = 1;
-
-		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
-		if (!mv_f) {
-			PMD_DRV_LOG(ERR, "failed to allocate memory");
-			return I40E_ERR_NO_MEMORY;
-		}
-
-		for (i = 0; i < vlan_num; i++) {
-			mv_f[i].filter_type = filter_type;
-			(void)rte_memcpy(&mv_f[i].macaddr,
-					 &f->mac_info.mac_addr,
-					 ETH_ADDR_LEN);
-		}
-		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
-		    filter_type == RTE_MACVLAN_HASH_MATCH) {
-			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
-							 &f->mac_info.mac_addr);
-			if (ret != I40E_SUCCESS) {
-				rte_free(mv_f);
-				return ret;
-			}
-		}
-
-		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
-		if (ret != I40E_SUCCESS) {
-			rte_free(mv_f);
-			return ret;
-		}
-
-		rte_free(mv_f);
-		ret = I40E_SUCCESS;
-	}
-
-	return ret;
-}
-
-static int
-i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
-{
-	struct i40e_mac_filter *f;
-	struct i40e_macvlan_filter *mv_f;
-	int i, vlan_num = 0;
-	int ret = I40E_SUCCESS;
-	void *temp;
-
-	/* restore all the MACs */
-	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
-		if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
-		    (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
-			/**
-			 * If vlan_num is 0, that's the first time to add mac,
-			 * set mask for vlan_id 0.
-			 */
-			if (vsi->vlan_num == 0) {
-				i40e_set_vlan_filter(vsi, 0, 1);
-				vsi->vlan_num = 1;
-			}
-			vlan_num = vsi->vlan_num;
-		} else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
-			   (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
-			vlan_num = 1;
-
-		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
-		if (!mv_f) {
-			PMD_DRV_LOG(ERR, "failed to allocate memory");
-			return I40E_ERR_NO_MEMORY;
-		}
-
-		for (i = 0; i < vlan_num; i++) {
-			mv_f[i].filter_type = f->mac_info.filter_type;
-			(void)rte_memcpy(&mv_f[i].macaddr,
-					 &f->mac_info.mac_addr,
-					 ETH_ADDR_LEN);
-		}
-
-		if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
-		    f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
-			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
-							 &f->mac_info.mac_addr);
-			if (ret != I40E_SUCCESS) {
-				rte_free(mv_f);
-				return ret;
-			}
-		}
-
-		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
-		if (ret != I40E_SUCCESS) {
-			rte_free(mv_f);
-			return ret;
-		}
-
-		rte_free(mv_f);
-		ret = I40E_SUCCESS;
-	}
-
-	return ret;
-}
-
-static int
-i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
+bool
+is_i40e_supported(struct rte_eth_dev *dev)
 {
-	struct i40e_vsi_context ctxt;
-	struct i40e_hw *hw;
-	int ret;
-
-	if (!vsi)
-		return -EINVAL;
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	/* Use the FW API if FW >= v5.0 */
-	if (hw->aq.fw_maj_ver < 5) {
-		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
-		return -ENOTSUP;
-	}
-
-	/* Check if it has been already on or off */
-	if (vsi->info.valid_sections &
-		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
-		if (on) {
-			if ((vsi->info.switch_id &
-			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
-			    I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
-				return 0; /* already on */
-		} else {
-			if ((vsi->info.switch_id &
-			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
-				return 0; /* already off */
-		}
-	}
-
-	/* remove all the MAC and VLAN first */
-	ret = i40e_vsi_rm_mac_filter(vsi);
-	if (ret) {
-		PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
-		return ret;
-	}
-	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
-		ret = i40e_add_rm_all_vlan_filter(vsi, 0);
-		if (ret) {
-			PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
-			return ret;
-		}
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-	if (on)
-		vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
-	else
-		vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
-
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-		return ret;
-	}
-
-	/* add all the MAC and VLAN back */
-	ret = i40e_vsi_restore_mac_filter(vsi);
-	if (ret)
-		return ret;
-	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
-		ret = i40e_add_rm_all_vlan_filter(vsi, 1);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_pf_vf *vf;
-	struct i40e_vsi *vsi;
-	uint16_t vf_id;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	/* setup PF TX loopback */
-	vsi = pf->main_vsi;
-	ret = i40e_vsi_set_tx_loopback(vsi, on);
-	if (ret)
-		return -ENOTSUP;
-
-	/* setup TX loopback for all the VFs */
-	if (!pf->vfs) {
-		/* if no VF, do nothing. */
-		return 0;
-	}
-
-	for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
-		vf = &pf->vfs[vf_id];
-		vsi = vf->vsi;
-
-		ret = i40e_vsi_set_tx_loopback(vsi, on);
-		if (ret)
-			return -ENOTSUP;
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
-						  on, NULL, true);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
-						    on, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
-			     struct ether_addr *mac_addr)
-{
-	struct i40e_mac_filter *f;
-	struct rte_eth_dev *dev;
-	struct i40e_pf_vf *vf;
-	struct i40e_vsi *vsi;
-	struct i40e_pf *pf;
-	void *temp;
-
-	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
-		return -EINVAL;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs)
-		return -EINVAL;
-
-	vf = &pf->vfs[vf_id];
-	vsi = vf->vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	ether_addr_copy(mac_addr, &vf->mac_addr);
-
-	/* Remove all existing mac */
-	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
-		i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
-
-	return 0;
-}
-
-/* Set vlan strip on/off for specific VF from host */
-int
-rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid argument.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-
-	if (!vsi)
-		return -EINVAL;
-
-	ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
-	}
-
-	return ret;
-}
-
-int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
-				    uint16_t vlan_id)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_hw *hw;
-	struct i40e_vsi *vsi;
-	struct i40e_vsi_context ctxt;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	if (vlan_id > ETHER_MAX_VLAN_ID) {
-		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
-		return -EINVAL;
-	}
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	hw = I40E_PF_TO_HW(pf);
-
-	/**
-	 * return -ENODEV if SRIOV not enabled, VF number not configured
-	 * or no queue assigned.
-	 */
-	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
-	    pf->vf_nb_qps == 0)
-		return -ENODEV;
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
-	vsi->info.pvid = vlan_id;
-	if (vlan_id > 0)
-		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
-	else
-		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
-
-	hw = I40E_VSI_TO_HW(vsi);
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-	}
-
-	return ret;
-}
-
-int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
-				  uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	struct i40e_mac_filter_info filter;
-	struct ether_addr broadcast = {
-		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	if (on > 1) {
-		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
-		return -EINVAL;
-	}
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	hw = I40E_PF_TO_HW(pf);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	/**
-	 * return -ENODEV if SRIOV not enabled, VF number not configured
-	 * or no queue assigned.
-	 */
-	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
-	    pf->vf_nb_qps == 0) {
-		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
-		return -ENODEV;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	if (on) {
-		(void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
-		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
-		ret = i40e_vsi_add_mac(vsi, &filter);
-	} else {
-		ret = i40e_vsi_delete_mac(vsi, &broadcast);
-	}
-
-	if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
-	} else {
-		ret = 0;
-	}
-
-	return ret;
-}
-
-int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_hw *hw;
-	struct i40e_vsi *vsi;
-	struct i40e_vsi_context ctxt;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	if (on > 1) {
-		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
-		return -EINVAL;
-	}
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	hw = I40E_PF_TO_HW(pf);
-
-	/**
-	 * return -ENODEV if SRIOV not enabled, VF number not configured
-	 * or no queue assigned.
-	 */
-	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
-	    pf->vf_nb_qps == 0) {
-		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
-		return -ENODEV;
-	}
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
-	if (on) {
-		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
-		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
-	} else {
-		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
-		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
-	}
-
-	memset(&ctxt, 0, sizeof(ctxt));
-	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-	ctxt.seid = vsi->seid;
-
-	hw = I40E_VSI_TO_HW(vsi);
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to update VSI params");
-	}
-
-	return ret;
-}
-
-static int
-i40e_vlan_filter_count(struct i40e_vsi *vsi)
-{
-	uint32_t j, k;
-	uint16_t vlan_id;
-	int count = 0;
-
-	for (j = 0; j < I40E_VFTA_SIZE; j++) {
-		if (!vsi->vfta[j])
-			continue;
-
-		for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
-			if (!(vsi->vfta[j] & (1 << k)))
-				continue;
-
-			vlan_id = j * I40E_UINT32_BIT_SIZE + k;
-			if (!vlan_id)
-				continue;
-
-			count++;
-		}
-	}
-
-	return count;
-}
-
-int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
-				    uint64_t vf_mask, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_hw *hw;
-	struct i40e_vsi *vsi;
-	uint16_t vf_idx;
-	int ret = I40E_SUCCESS;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
-		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
-		return -EINVAL;
-	}
-
-	if (vf_mask == 0) {
-		PMD_DRV_LOG(ERR, "No VF.");
-		return -EINVAL;
-	}
-
-	if (on > 1) {
-		PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
-		return -EINVAL;
-	}
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	hw = I40E_PF_TO_HW(pf);
-
-	/**
-	 * return -ENODEV if SRIOV not enabled, VF number not configured
-	 * or no queue assigned.
-	 */
-	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
-	    pf->vf_nb_qps == 0) {
-		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
-		return -ENODEV;
-	}
-
-	for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
-		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
-			vsi = pf->vfs[vf_idx].vsi;
-			if (on) {
-				if (!vsi->vlan_filter_on) {
-					vsi->vlan_filter_on = true;
-					i40e_aq_set_vsi_vlan_promisc(hw,
-								     vsi->seid,
-								     false,
-								     NULL);
-					if (!vsi->vlan_anti_spoof_on)
-						i40e_add_rm_all_vlan_filter(
-							vsi, true);
-				}
-				ret = i40e_vsi_add_vlan(vsi, vlan_id);
-			} else {
-				ret = i40e_vsi_delete_vlan(vsi, vlan_id);
-
-				if (!i40e_vlan_filter_count(vsi)) {
-					vsi->vlan_filter_on = false;
-					i40e_aq_set_vsi_vlan_promisc(hw,
-								     vsi->seid,
-								     true,
-								     NULL);
-				}
-			}
-		}
-	}
-
-	if (ret != I40E_SUCCESS) {
-		ret = -ENOTSUP;
-		PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
-	}
-
-	return ret;
-}
-
-int
-rte_pmd_i40e_get_vf_stats(uint8_t port,
-			  uint16_t vf_id,
-			  struct rte_eth_stats *stats)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	i40e_update_vsi_stats(vsi);
-
-	stats->ipackets = vsi->eth_stats.rx_unicast +
-			vsi->eth_stats.rx_multicast +
-			vsi->eth_stats.rx_broadcast;
-	stats->opackets = vsi->eth_stats.tx_unicast +
-			vsi->eth_stats.tx_multicast +
-			vsi->eth_stats.tx_broadcast;
-	stats->ibytes   = vsi->eth_stats.rx_bytes;
-	stats->obytes   = vsi->eth_stats.tx_bytes;
-	stats->ierrors  = vsi->eth_stats.rx_discards;
-	stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
-
-	return 0;
-}
-
-int
-rte_pmd_i40e_reset_vf_stats(uint8_t port,
-			    uint16_t vf_id)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	vsi->offset_loaded = false;
-	i40e_update_vsi_stats(vsi);
-
-	return 0;
-}
-
-int
-rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	int ret = 0;
-	int i;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	if (bw > I40E_QOS_BW_MAX) {
-		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
-			    I40E_QOS_BW_MAX);
-		return -EINVAL;
-	}
-
-	if (bw % I40E_QOS_BW_GRANULARITY) {
-		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
-			    I40E_QOS_BW_GRANULARITY);
-		return -EINVAL;
-	}
-
-	bw /= I40E_QOS_BW_GRANULARITY;
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	/* No change. */
-	if (bw == vsi->bw_info.bw_limit) {
-		PMD_DRV_LOG(INFO,
-			    "No change for VF max bandwidth. Nothing to do.");
-		return 0;
-	}
-
-	/**
-	 * VF bandwidth limitation and TC bandwidth limitation cannot be
-	 * enabled in parallel, quit if TC bandwidth limitation is enabled.
-	 *
-	 * If bw is 0, means disable bandwidth limitation. Then no need to
-	 * check TC bandwidth limitation.
-	 */
-	if (bw) {
-		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-			if ((vsi->enabled_tc & BIT_ULL(i)) &&
-			    vsi->bw_info.bw_ets_credits[i])
-				break;
-		}
-		if (i != I40E_MAX_TRAFFIC_CLASS) {
-			PMD_DRV_LOG(ERR,
-				    "TC max bandwidth has been set on this VF,"
-				    " please disable it first.");
-			return -EINVAL;
-		}
-	}
-
-	ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
-	if (ret) {
-		PMD_DRV_LOG(ERR,
-			    "Failed to set VF %d bandwidth, err(%d).",
-			    vf_id, ret);
-		return -EINVAL;
-	}
-
-	/* Store the configuration. */
-	vsi->bw_info.bw_limit = (uint16_t)bw;
-	vsi->bw_info.bw_max = 0;
-
-	return 0;
-}
-
-int
-rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
-				uint8_t tc_num, uint8_t *bw_weight)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
-	int ret = 0;
-	int i, j;
-	uint16_t sum;
-	bool b_change = false;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
-		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
-			    I40E_MAX_TRAFFIC_CLASS);
-		return -EINVAL;
-	}
-
-	sum = 0;
-	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (vsi->enabled_tc & BIT_ULL(i))
-			sum++;
-	}
-	if (sum != tc_num) {
-		PMD_DRV_LOG(ERR,
-			    "Weight should be set for all %d enabled TCs.",
-			    sum);
-		return -EINVAL;
-	}
-
-	sum = 0;
-	for (i = 0; i < tc_num; i++) {
-		if (!bw_weight[i]) {
-			PMD_DRV_LOG(ERR,
-				    "The weight should be 1 at least.");
-			return -EINVAL;
-		}
-		sum += bw_weight[i];
-	}
-	if (sum != 100) {
-		PMD_DRV_LOG(ERR,
-			    "The summary of the TC weight should be 100.");
-		return -EINVAL;
-	}
-
-	/**
-	 * Create the configuration for all the TCs.
-	 */
-	memset(&tc_bw, 0, sizeof(tc_bw));
-	tc_bw.tc_valid_bits = vsi->enabled_tc;
-	j = 0;
-	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (vsi->enabled_tc & BIT_ULL(i)) {
-			if (bw_weight[j] !=
-				vsi->bw_info.bw_ets_share_credits[i])
-				b_change = true;
-
-			tc_bw.tc_bw_credits[i] = bw_weight[j];
-			j++;
-		}
-	}
-
-	/* No change. */
-	if (!b_change) {
-		PMD_DRV_LOG(INFO,
-			    "No change for TC allocated bandwidth."
-			    " Nothing to do.");
-		return 0;
-	}
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
-	if (ret) {
-		PMD_DRV_LOG(ERR,
-			    "Failed to set VF %d TC bandwidth weight, err(%d).",
-			    vf_id, ret);
-		return -EINVAL;
-	}
-
-	/* Store the configuration. */
-	j = 0;
-	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (vsi->enabled_tc & BIT_ULL(i)) {
-			vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
-			j++;
-		}
-	}
-
-	return 0;
-}
-
-int
-rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
-			      uint8_t tc_no, uint32_t bw)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_hw *hw;
-	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
-	int ret = 0;
-	int i;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	if (vf_id >= pf->vf_num || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "Invalid VF ID.");
-		return -EINVAL;
-	}
-
-	vsi = pf->vfs[vf_id].vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	if (bw > I40E_QOS_BW_MAX) {
-		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
-			    I40E_QOS_BW_MAX);
-		return -EINVAL;
-	}
-
-	if (bw % I40E_QOS_BW_GRANULARITY) {
-		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
-			    I40E_QOS_BW_GRANULARITY);
-		return -EINVAL;
-	}
-
-	bw /= I40E_QOS_BW_GRANULARITY;
-
-	if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
-		PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
-			    I40E_MAX_TRAFFIC_CLASS);
-		return -EINVAL;
-	}
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
-		PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
-			    vf_id, tc_no);
-		return -EINVAL;
-	}
-
-	/* No change. */
-	if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
-		PMD_DRV_LOG(INFO,
-			    "No change for TC max bandwidth. Nothing to do.");
-		return 0;
-	}
-
-	/**
-	 * VF bandwidth limitation and TC bandwidth limitation cannot be
-	 * enabled in parallel, disable VF bandwidth limitation if it's
-	 * enabled.
-	 * If bw is 0, means disable bandwidth limitation. Then no need to
-	 * care about VF bandwidth limitation configuration.
-	 */
-	if (bw && vsi->bw_info.bw_limit) {
-		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
-		if (ret) {
-			PMD_DRV_LOG(ERR,
-				    "Failed to disable VF(%d)"
-				    " bandwidth limitation, err(%d).",
-				    vf_id, ret);
-			return -EINVAL;
-		}
-
-		PMD_DRV_LOG(INFO,
-			    "VF max bandwidth is disabled according"
-			    " to TC max bandwidth setting.");
-	}
-
-	/**
-	 * Get all the TCs' info to create a whole picture.
-	 * Because the incremental change isn't permitted.
-	 */
-	memset(&tc_bw, 0, sizeof(tc_bw));
-	tc_bw.tc_valid_bits = vsi->enabled_tc;
-	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (vsi->enabled_tc & BIT_ULL(i)) {
-			tc_bw.tc_bw_credits[i] =
-				rte_cpu_to_le_16(
-					vsi->bw_info.bw_ets_credits[i]);
-		}
-	}
-	tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
-
-	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
-	if (ret) {
-		PMD_DRV_LOG(ERR,
-			    "Failed to set VF %d TC %d max bandwidth, err(%d).",
-			    vf_id, tc_no, ret);
-		return -EINVAL;
-	}
-
-	/* Store the configuration. */
-	vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
-
-	return 0;
-}
-
-int
-rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_pf *pf;
-	struct i40e_vsi *vsi;
-	struct i40e_veb *veb;
-	struct i40e_hw *hw;
-	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
-	int i;
-	int ret;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-
-	vsi = pf->main_vsi;
-	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Invalid VSI.");
-		return -EINVAL;
-	}
-
-	veb = vsi->veb;
-	if (!veb) {
-		PMD_DRV_LOG(ERR, "Invalid VEB.");
-		return -EINVAL;
-	}
-
-	if ((tc_map & veb->enabled_tc) != tc_map) {
-		PMD_DRV_LOG(ERR,
-			    "TC bitmap isn't the subset of enabled TCs 0x%x.",
-			    veb->enabled_tc);
-		return -EINVAL;
-	}
-
-	if (tc_map == veb->strict_prio_tc) {
-		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
-		return 0;
-	}
-
-	hw = I40E_VSI_TO_HW(vsi);
-
-	/* Disable DCBx if it's the first time to set strict priority. */
-	if (!veb->strict_prio_tc) {
-		ret = i40e_aq_stop_lldp(hw, true, NULL);
-		if (ret)
-			PMD_DRV_LOG(INFO,
-				    "Failed to disable DCBx as it's already"
-				    " disabled.");
-		else
-			PMD_DRV_LOG(INFO,
-				    "DCBx is disabled according to strict"
-				    " priority setting.");
-	}
-
-	memset(&ets_data, 0, sizeof(ets_data));
-	ets_data.tc_valid_bits = veb->enabled_tc;
-	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
-	ets_data.tc_strict_priority_flags = tc_map;
-	/* Get all TCs' bandwidth. */
-	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (veb->enabled_tc & BIT_ULL(i)) {
-			/* For rubust, if bandwidth is 0, use 1 instead. */
-			if (veb->bw_info.bw_ets_share_credits[i])
-				ets_data.tc_bw_share_credits[i] =
-					veb->bw_info.bw_ets_share_credits[i];
-			else
-				ets_data.tc_bw_share_credits[i] =
-					I40E_QOS_BW_WEIGHT_MIN;
-		}
-	}
-
-	if (!veb->strict_prio_tc)
-		ret = i40e_aq_config_switch_comp_ets(
-			hw, veb->uplink_seid,
-			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
-			NULL);
-	else if (tc_map)
-		ret = i40e_aq_config_switch_comp_ets(
-			hw, veb->uplink_seid,
-			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
-			NULL);
-	else
-		ret = i40e_aq_config_switch_comp_ets(
-			hw, veb->uplink_seid,
-			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
-			NULL);
-
-	if (ret) {
-		PMD_DRV_LOG(ERR,
-			    "Failed to set TCs' strict priority mode."
-			    " err (%d)", ret);
-		return -EINVAL;
-	}
-
-	veb->strict_prio_tc = tc_map;
-
-	/* Enable DCBx again, if all the TCs' strict priority disabled. */
-	if (!tc_map) {
-		ret = i40e_aq_start_lldp(hw, NULL);
-		if (ret) {
-			PMD_DRV_LOG(ERR,
-				    "Failed to enable DCBx, err(%d).", ret);
-			return -EINVAL;
-		}
-
-		PMD_DRV_LOG(INFO,
-			    "DCBx is enabled again according to strict"
-			    " priority setting.");
-	}
-
-	return ret;
-}
-
-#define I40E_PROFILE_INFO_SIZE 48
-#define I40E_MAX_PROFILE_NUM 16
-
-static void
-i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
-			       uint32_t track_id, uint8_t *profile_info_sec,
-			       bool add)
-{
-	struct i40e_profile_section_header *sec = NULL;
-	struct i40e_profile_info *pinfo;
-
-	sec = (struct i40e_profile_section_header *)profile_info_sec;
-	sec->tbl_size = 1;
-	sec->data_end = sizeof(struct i40e_profile_section_header) +
-		sizeof(struct i40e_profile_info);
-	sec->section.type = SECTION_TYPE_INFO;
-	sec->section.offset = sizeof(struct i40e_profile_section_header);
-	sec->section.size = sizeof(struct i40e_profile_info);
-	pinfo = (struct i40e_profile_info *)(profile_info_sec +
-					     sec->section.offset);
-	pinfo->track_id = track_id;
-	memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
-	memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
-	if (add)
-		pinfo->op = I40E_DDP_ADD_TRACKID;
-	else
-		pinfo->op = I40E_DDP_REMOVE_TRACKID;
-}
-
-static enum i40e_status_code
-i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	struct i40e_profile_section_header *sec;
-	uint32_t track_id;
-	uint32_t offset = 0;
-	uint32_t info = 0;
-
-	sec = (struct i40e_profile_section_header *)profile_info_sec;
-	track_id = ((struct i40e_profile_info *)(profile_info_sec +
-					 sec->section.offset))->track_id;
-
-	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
-				   track_id, &offset, &info, NULL);
-	if (status)
-		PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
-			    "offset %d, info %d",
-			    offset, info);
-
-	return status;
-}
-
-#define I40E_PROFILE_INFO_SIZE 48
-#define I40E_MAX_PROFILE_NUM 16
-
-/* Check if the profile info exists */
-static int
-i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
-{
-	struct rte_eth_dev *dev = &rte_eth_devices[port];
-	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint8_t *buff;
-	struct rte_pmd_i40e_profile_list *p_list;
-	struct rte_pmd_i40e_profile_info *pinfo, *p;
-	uint32_t i;
-	int ret;
-
-	buff = rte_zmalloc("pinfo_list",
-			   (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
-			   0);
-	if (!buff) {
-		PMD_DRV_LOG(ERR, "failed to allocate memory");
-		return -1;
-	}
-
-	ret = i40e_aq_get_ddp_list(hw, (void *)buff,
-		      (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
-		      0, NULL);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "Failed to get profile info list.");
-		rte_free(buff);
-		return -1;
-	}
-	p_list = (struct rte_pmd_i40e_profile_list *)buff;
-	pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
-			     sizeof(struct i40e_profile_section_header));
-	for (i = 0; i < p_list->p_count; i++) {
-		p = &p_list->p_info[i];
-		if ((pinfo->track_id == p->track_id) &&
-		    !memcmp(&pinfo->version, &p->version,
-			    sizeof(struct i40e_ddp_version)) &&
-		    !memcmp(&pinfo->name, &p->name,
-			    I40E_DDP_NAME_SIZE)) {
-			PMD_DRV_LOG(INFO, "Profile exists.");
-			rte_free(buff);
-			return 1;
-		}
-	}
-
-	rte_free(buff);
-	return 0;
-}
-
-int
-rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
-				 uint32_t size,
-				 enum rte_pmd_i40e_package_op op)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_hw *hw;
-	struct i40e_package_header *pkg_hdr;
-	struct i40e_generic_seg_header *profile_seg_hdr;
-	struct i40e_generic_seg_header *metadata_seg_hdr;
-	uint32_t track_id;
-	uint8_t *profile_info_sec;
-	int is_exist;
-	enum i40e_status_code status = I40E_SUCCESS;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (size < (sizeof(struct i40e_package_header) +
-		    sizeof(struct i40e_metadata_segment) +
-		    sizeof(uint32_t) * 2)) {
-		PMD_DRV_LOG(ERR, "Buff is invalid.");
-		return -EINVAL;
-	}
-
-	pkg_hdr = (struct i40e_package_header *)buff;
-
-	if (!pkg_hdr) {
-		PMD_DRV_LOG(ERR, "Failed to fill the package structure");
-		return -EINVAL;
-	}
-
-	if (pkg_hdr->segment_count < 2) {
-		PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
-		return -EINVAL;
-	}
-
-	/* Find metadata segment */
-	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
-							pkg_hdr);
-	if (!metadata_seg_hdr) {
-		PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
-		return -EINVAL;
-	}
-	track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
-
-	/* Find profile segment */
-	profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
-						       pkg_hdr);
-	if (!profile_seg_hdr) {
-		PMD_DRV_LOG(ERR, "Failed to find profile segment header");
-		return -EINVAL;
-	}
-
-	profile_info_sec = rte_zmalloc("i40e_profile_info",
-			       sizeof(struct i40e_profile_section_header) +
-			       sizeof(struct i40e_profile_info),
-			       0);
-	if (!profile_info_sec) {
-		PMD_DRV_LOG(ERR, "Failed to allocate memory");
-		return -EINVAL;
-	}
-
-	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
-		/* Check if the profile exists */
-		i40e_generate_profile_info_sec(
-		     ((struct i40e_profile_segment *)profile_seg_hdr)->name,
-		     &((struct i40e_profile_segment *)profile_seg_hdr)->version,
-		     track_id, profile_info_sec, 1);
-		is_exist = i40e_check_profile_info(port, profile_info_sec);
-		if (is_exist > 0) {
-			PMD_DRV_LOG(ERR, "Profile already exists.");
-			rte_free(profile_info_sec);
-			return 1;
-		} else if (is_exist < 0) {
-			PMD_DRV_LOG(ERR, "Failed to check profile.");
-			rte_free(profile_info_sec);
-			return -EINVAL;
-		}
-
-		/* Write profile to HW */
-		status = i40e_write_profile(hw,
-				 (struct i40e_profile_segment *)profile_seg_hdr,
-				 track_id);
-		if (status) {
-			PMD_DRV_LOG(ERR, "Failed to write profile.");
-			rte_free(profile_info_sec);
-			return status;
-		}
-
-		/* Add profile info to info list */
-		status = i40e_add_rm_profile_info(hw, profile_info_sec);
-		if (status)
-			PMD_DRV_LOG(ERR, "Failed to add profile info.");
-	} else
-		PMD_DRV_LOG(ERR, "Operation not supported.");
-
-	rte_free(profile_info_sec);
-	return status;
-}
-
-int
-rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
-{
-	struct rte_eth_dev *dev;
-	struct i40e_hw *hw;
-	enum i40e_status_code status = I40E_SUCCESS;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_i40e_pmd))
-		return -ENOTSUP;
-
-	if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
-		return -EINVAL;
-
-	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	status = i40e_aq_get_ddp_list(hw, (void *)buff,
-				      size, 0, NULL);
-
-	return status;
+	return is_device_supported(dev, &rte_i40e_pmd);
 }
 
 /* Create a QinQ cloud filter
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 69c6684..c745e9b 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -869,6 +869,17 @@ int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 				  struct i40e_tunnel_filter_conf *tunnel_filter,
 				  uint8_t add);
 int i40e_fdir_flush(struct rte_eth_dev *dev);
+int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+			       struct i40e_macvlan_filter *mv_f,
+			       int num, struct ether_addr *addr);
+int i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
+				struct i40e_macvlan_filter *filter,
+				int total);
+void i40e_set_vlan_filter(struct i40e_vsi *vsi, uint16_t vlan_id, bool on);
+int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
+			     struct i40e_macvlan_filter *filter,
+			     int total);
+bool is_i40e_supported(struct rte_eth_dev *dev);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
new file mode 100644
index 0000000..cee067b
--- /dev/null
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -0,0 +1,1707 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+#include "i40e_pf.h"
+#include "rte_pmd_i40e.h"
+
+/* The max bandwidth of i40e is 40Gbps. */
+#define I40E_QOS_BW_MAX 40000
+/* The bandwidth should be the multiple of 50Mbps. */
+#define I40E_QOS_BW_GRANULARITY 50
+/* The min bandwidth weight is 1. */
+#define I40E_QOS_BW_WEIGHT_MIN 1
+/* The max bandwidth weight is 127. */
+#define I40E_QOS_BW_WEIGHT_MAX 127
+
+int
+rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid argument.");
+		return -EINVAL;
+	}
+
+	i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
+
+	return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	struct i40e_vsi_context ctxt;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid argument.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	/* Check if it has been already on or off */
+	if (vsi->info.valid_sections &
+		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
+		if (on) {
+			if ((vsi->info.sec_flags &
+			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
+			    I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
+				return 0; /* already on */
+		} else {
+			if ((vsi->info.sec_flags &
+			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
+				return 0; /* already off */
+		}
+	}
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+	if (on)
+		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+	else
+		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ctxt.seid = vsi->seid;
+
+	hw = I40E_VSI_TO_HW(vsi);
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to update VSI params");
+	}
+
+	return ret;
+}
+
+static int
+i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
+{
+	uint32_t j, k;
+	uint16_t vlan_id;
+	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+	int ret;
+
+	for (j = 0; j < I40E_VFTA_SIZE; j++) {
+		if (!vsi->vfta[j])
+			continue;
+
+		for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+			if (!(vsi->vfta[j] & (1 << k)))
+				continue;
+
+			vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+			if (!vlan_id)
+				continue;
+
+			vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+			if (add)
+				ret = i40e_aq_add_vlan(hw, vsi->seid,
+						       &vlan_data, 1, NULL);
+			else
+				ret = i40e_aq_remove_vlan(hw, vsi->seid,
+							  &vlan_data, 1, NULL);
+			if (ret != I40E_SUCCESS) {
+				PMD_DRV_LOG(ERR,
+					    "Failed to add/rm vlan filter");
+				return ret;
+			}
+		}
+	}
+
+	return I40E_SUCCESS;
+}
+
+int
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	struct i40e_vsi_context ctxt;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid argument.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	/* Check if it has been already on or off */
+	if (vsi->vlan_anti_spoof_on == on)
+		return 0; /* already on or off */
+
+	vsi->vlan_anti_spoof_on = on;
+	if (!vsi->vlan_filter_on) {
+		ret = i40e_add_rm_all_vlan_filter(vsi, on);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
+			return -ENOTSUP;
+		}
+	}
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+	if (on)
+		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+	else
+		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ctxt.seid = vsi->seid;
+
+	hw = I40E_VSI_TO_HW(vsi);
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to update VSI params");
+	}
+
+	return ret;
+}
+
+static int
+i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f;
+	struct i40e_macvlan_filter *mv_f;
+	int i, vlan_num;
+	enum rte_mac_filter_type filter_type;
+	int ret = I40E_SUCCESS;
+	void *temp;
+
+	/* remove all the MACs */
+	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+		vlan_num = vsi->vlan_num;
+		filter_type = f->mac_info.filter_type;
+		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+		    filter_type == RTE_MACVLAN_HASH_MATCH) {
+			if (vlan_num == 0) {
+				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
+				return I40E_ERR_PARAM;
+			}
+		} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+			   filter_type == RTE_MAC_HASH_MATCH)
+			vlan_num = 1;
+
+		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+		if (!mv_f) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return I40E_ERR_NO_MEMORY;
+		}
+
+		for (i = 0; i < vlan_num; i++) {
+			mv_f[i].filter_type = filter_type;
+			(void)rte_memcpy(&mv_f[i].macaddr,
+					 &f->mac_info.mac_addr,
+					 ETH_ADDR_LEN);
+		}
+		if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+		    filter_type == RTE_MACVLAN_HASH_MATCH) {
+			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+							 &f->mac_info.mac_addr);
+			if (ret != I40E_SUCCESS) {
+				rte_free(mv_f);
+				return ret;
+			}
+		}
+
+		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
+		if (ret != I40E_SUCCESS) {
+			rte_free(mv_f);
+			return ret;
+		}
+
+		rte_free(mv_f);
+		ret = I40E_SUCCESS;
+	}
+
+	return ret;
+}
+
+static int
+i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f;
+	struct i40e_macvlan_filter *mv_f;
+	int i, vlan_num = 0;
+	int ret = I40E_SUCCESS;
+	void *temp;
+
+	/* restore all the MACs */
+	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+		if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+		    (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
+			/**
+			 * If vlan_num is 0, that's the first time to add mac,
+			 * set mask for vlan_id 0.
+			 */
+			if (vsi->vlan_num == 0) {
+				i40e_set_vlan_filter(vsi, 0, 1);
+				vsi->vlan_num = 1;
+			}
+			vlan_num = vsi->vlan_num;
+		} else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
+			   (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
+			vlan_num = 1;
+
+		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+		if (!mv_f) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return I40E_ERR_NO_MEMORY;
+		}
+
+		for (i = 0; i < vlan_num; i++) {
+			mv_f[i].filter_type = f->mac_info.filter_type;
+			(void)rte_memcpy(&mv_f[i].macaddr,
+					 &f->mac_info.mac_addr,
+					 ETH_ADDR_LEN);
+		}
+
+		if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+		    f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
+			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+							 &f->mac_info.mac_addr);
+			if (ret != I40E_SUCCESS) {
+				rte_free(mv_f);
+				return ret;
+			}
+		}
+
+		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
+		if (ret != I40E_SUCCESS) {
+			rte_free(mv_f);
+			return ret;
+		}
+
+		rte_free(mv_f);
+		ret = I40E_SUCCESS;
+	}
+
+	return ret;
+}
+
+static int
+i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
+{
+	struct i40e_vsi_context ctxt;
+	struct i40e_hw *hw;
+	int ret;
+
+	if (!vsi)
+		return -EINVAL;
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	/* Use the FW API if FW >= v5.0 */
+	if (hw->aq.fw_maj_ver < 5) {
+		PMD_INIT_LOG(ERR, "FW < v5.0, cannot change loopback setting");
+		return -ENOTSUP;
+	}
+
+	/* Check if it has been already on or off */
+	if (vsi->info.valid_sections &
+		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
+		if (on) {
+			if ((vsi->info.switch_id &
+			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
+			    I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
+				return 0; /* already on */
+		} else {
+			if ((vsi->info.switch_id &
+			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
+				return 0; /* already off */
+		}
+	}
+
+	/* remove all the MAC and VLAN first */
+	ret = i40e_vsi_rm_mac_filter(vsi);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
+		return ret;
+	}
+	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+		ret = i40e_add_rm_all_vlan_filter(vsi, 0);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
+			return ret;
+		}
+	}
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+	if (on)
+		vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+	else
+		vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ctxt.seid = vsi->seid;
+
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR, "Failed to update VSI params");
+		return ret;
+	}
+
+	/* add all the MAC and VLAN back */
+	ret = i40e_vsi_restore_mac_filter(vsi);
+	if (ret)
+		return ret;
+	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+		ret = i40e_add_rm_all_vlan_filter(vsi, 1);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
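+/* Enable/disable TX loopback on the PF main VSI and on every VF VSI. */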
+int
+rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_pf_vf *vf;
+	struct i40e_vsi *vsi;
+	uint16_t vf_id;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	/* setup PF TX loopback */
+	vsi = pf->main_vsi;
+	ret = i40e_vsi_set_tx_loopback(vsi, on);
+	if (ret)
+		return -ENOTSUP;
+
+	/* setup TX loopback for all the VFs */
+	if (!pf->vfs) {
+		/* if no VF, do nothing. */
+		return 0;
+	}
+
+	for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
+		vf = &pf->vfs[vf_id];
+		vsi = vf->vsi;
+
+		ret = i40e_vsi_set_tx_loopback(vsi, on);
+		if (ret)
+			return -ENOTSUP;
+	}
+
+	return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid argument.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+						  on, NULL, true);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
+	}
+
+	return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid argument.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+						    on, NULL);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
+	}
+
+	return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+			     struct ether_addr *mac_addr)
+{
+	struct i40e_mac_filter *f;
+	struct rte_eth_dev *dev;
+	struct i40e_pf_vf *vf;
+	struct i40e_vsi *vsi;
+	struct i40e_pf *pf;
+	void *temp;
+
+	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+		return -EINVAL;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs)
+		return -EINVAL;
+
+	vf = &pf->vfs[vf_id];
+	vsi = vf->vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	ether_addr_copy(mac_addr, &vf->mac_addr);
+
+	/* Remove all existing MAC filters */
+	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
+		i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+
+	return 0;
+}
+
+/* Set VLAN stripping on/off for a specific VF from the host */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid argument.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+
+	if (!vsi)
+		return -EINVAL;
+
+	ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
+	}
+
+	return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+				    uint16_t vlan_id)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	struct i40e_vsi *vsi;
+	struct i40e_vsi_context ctxt;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (vlan_id > ETHER_MAX_VLAN_ID) {
+		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_PF_TO_HW(pf);
+
+	/**
+	 * return -ENODEV if SRIOV not enabled, VF number not configured
+	 * or no queue assigned.
+	 */
+	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+	    pf->vf_nb_qps == 0)
+		return -ENODEV;
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	vsi->info.pvid = vlan_id;
+	if (vlan_id > 0)
+		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+	else
+		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ctxt.seid = vsi->seid;
+
+	hw = I40E_VSI_TO_HW(vsi);
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to update VSI params");
+	}
+
+	return ret;
+}
+
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+				  uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	struct i40e_mac_filter_info filter;
+	struct ether_addr broadcast = {
+		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (on > 1) {
+		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_PF_TO_HW(pf);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	/**
+	 * return -ENODEV if SRIOV not enabled, VF number not configured
+	 * or no queue assigned.
+	 */
+	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+	    pf->vf_nb_qps == 0) {
+		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+		return -ENODEV;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	if (on) {
+		(void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+		ret = i40e_vsi_add_mac(vsi, &filter);
+	} else {
+		ret = i40e_vsi_delete_mac(vsi, &broadcast);
+	}
+
+	if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	struct i40e_vsi *vsi;
+	struct i40e_vsi_context ctxt;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (on > 1) {
+		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_PF_TO_HW(pf);
+
+	/**
+	 * return -ENODEV if SRIOV not enabled, VF number not configured
+	 * or no queue assigned.
+	 */
+	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+	    pf->vf_nb_qps == 0) {
+		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+		return -ENODEV;
+	}
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	if (on) {
+		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+	} else {
+		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+	}
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ctxt.seid = vsi->seid;
+
+	hw = I40E_VSI_TO_HW(vsi);
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to update VSI params");
+	}
+
+	return ret;
+}
+
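+/* Count the VLAN IDs (excluding VLAN 0) set in the VSI's VLAN filter table. */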
+static int
+i40e_vlan_filter_count(struct i40e_vsi *vsi)
+{
+	uint32_t j, k;
+	uint16_t vlan_id;
+	int count = 0;
+
+	for (j = 0; j < I40E_VFTA_SIZE; j++) {
+		if (!vsi->vfta[j])
+			continue;
+
+		for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+			if (!(vsi->vfta[j] & (1 << k)))
+				continue;
+
+			vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+			if (!vlan_id)
+				continue;
+
+			count++;
+		}
+	}
+
+	return count;
+}
+
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+				    uint64_t vf_mask, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	struct i40e_vsi *vsi;
+	uint16_t vf_idx;
+	int ret = I40E_SUCCESS;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
+		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+		return -EINVAL;
+	}
+
+	if (vf_mask == 0) {
+		PMD_DRV_LOG(ERR, "No VF.");
+		return -EINVAL;
+	}
+
+	if (on > 1) {
+		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+		return -EINVAL;
+	}
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_PF_TO_HW(pf);
+
+	/**
+	 * return -ENODEV if SRIOV not enabled, VF number not configured
+	 * or no queue assigned.
+	 */
+	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+	    pf->vf_nb_qps == 0) {
+		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+		return -ENODEV;
+	}
+
+	for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
+		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+			vsi = pf->vfs[vf_idx].vsi;
+			if (on) {
+				if (!vsi->vlan_filter_on) {
+					vsi->vlan_filter_on = true;
+					i40e_aq_set_vsi_vlan_promisc(hw,
+								     vsi->seid,
+								     false,
+								     NULL);
+					if (!vsi->vlan_anti_spoof_on)
+						i40e_add_rm_all_vlan_filter(
+							vsi, true);
+				}
+				ret = i40e_vsi_add_vlan(vsi, vlan_id);
+			} else {
+				ret = i40e_vsi_delete_vlan(vsi, vlan_id);
+
+				if (!i40e_vlan_filter_count(vsi)) {
+					vsi->vlan_filter_on = false;
+					i40e_aq_set_vsi_vlan_promisc(hw,
+								     vsi->seid,
+								     true,
+								     NULL);
+				}
+			}
+		}
+	}
+
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
+	}
+
+	return ret;
+}
+
+int
+rte_pmd_i40e_get_vf_stats(uint8_t port,
+			  uint16_t vf_id,
+			  struct rte_eth_stats *stats)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	i40e_update_vsi_stats(vsi);
+
+	stats->ipackets = vsi->eth_stats.rx_unicast +
+			vsi->eth_stats.rx_multicast +
+			vsi->eth_stats.rx_broadcast;
+	stats->opackets = vsi->eth_stats.tx_unicast +
+			vsi->eth_stats.tx_multicast +
+			vsi->eth_stats.tx_broadcast;
+	stats->ibytes   = vsi->eth_stats.rx_bytes;
+	stats->obytes   = vsi->eth_stats.tx_bytes;
+	stats->ierrors  = vsi->eth_stats.rx_discards;
+	stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
+
+	return 0;
+}
+
+int
+rte_pmd_i40e_reset_vf_stats(uint8_t port,
+			    uint16_t vf_id)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	vsi->offset_loaded = false;
+	i40e_update_vsi_stats(vsi);
+
+	return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	int ret = 0;
+	int i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	if (bw > I40E_QOS_BW_MAX) {
+		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+			    I40E_QOS_BW_MAX);
+		return -EINVAL;
+	}
+
+	if (bw % I40E_QOS_BW_GRANULARITY) {
+		PMD_DRV_LOG(ERR, "Bandwidth should be a multiple of %dMbps.",
+			    I40E_QOS_BW_GRANULARITY);
+		return -EINVAL;
+	}
+
+	bw /= I40E_QOS_BW_GRANULARITY;
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	/* No change. */
+	if (bw == vsi->bw_info.bw_limit) {
+		PMD_DRV_LOG(INFO,
+			    "No change for VF max bandwidth. Nothing to do.");
+		return 0;
+	}
+
+	/**
+	 * VF bandwidth limitation and TC bandwidth limitation cannot be
+	 * enabled in parallel; quit if TC bandwidth limitation is enabled.
+	 *
+	 * If bw is 0, bandwidth limitation is disabled, so there is no
+	 * need to check the TC bandwidth limitation.
+	 */
+	if (bw) {
+		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+			if ((vsi->enabled_tc & BIT_ULL(i)) &&
+			    vsi->bw_info.bw_ets_credits[i])
+				break;
+		}
+		if (i != I40E_MAX_TRAFFIC_CLASS) {
+			PMD_DRV_LOG(ERR,
+				    "TC max bandwidth has been set on this VF,"
+				    " please disable it first.");
+			return -EINVAL;
+		}
+	}
+
+	ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to set VF %d bandwidth, err(%d).",
+			    vf_id, ret);
+		return -EINVAL;
+	}
+
+	/* Store the configuration. */
+	vsi->bw_info.bw_limit = (uint16_t)bw;
+	vsi->bw_info.bw_max = 0;
+
+	return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
+				uint8_t tc_num, uint8_t *bw_weight)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
+	int ret = 0;
+	int i, j;
+	uint16_t sum;
+	bool b_change = false;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
+		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+			    I40E_MAX_TRAFFIC_CLASS);
+		return -EINVAL;
+	}
+
+	sum = 0;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->enabled_tc & BIT_ULL(i))
+			sum++;
+	}
+	if (sum != tc_num) {
+		PMD_DRV_LOG(ERR,
+			    "Weight should be set for all %d enabled TCs.",
+			    sum);
+		return -EINVAL;
+	}
+
+	sum = 0;
+	for (i = 0; i < tc_num; i++) {
+		if (!bw_weight[i]) {
+			PMD_DRV_LOG(ERR,
+				    "The weight should be at least 1.");
+			return -EINVAL;
+		}
+		sum += bw_weight[i];
+	}
+	if (sum != 100) {
+		PMD_DRV_LOG(ERR,
+			    "The sum of the TC weights should be 100.");
+		return -EINVAL;
+	}
+
+	/**
+	 * Create the configuration for all the TCs.
+	 */
+	memset(&tc_bw, 0, sizeof(tc_bw));
+	tc_bw.tc_valid_bits = vsi->enabled_tc;
+	j = 0;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->enabled_tc & BIT_ULL(i)) {
+			if (bw_weight[j] !=
+				vsi->bw_info.bw_ets_share_credits[i])
+				b_change = true;
+
+			tc_bw.tc_bw_credits[i] = bw_weight[j];
+			j++;
+		}
+	}
+
+	/* No change. */
+	if (!b_change) {
+		PMD_DRV_LOG(INFO,
+			    "No change for TC allocated bandwidth."
+			    " Nothing to do.");
+		return 0;
+	}
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to set VF %d TC bandwidth weight, err(%d).",
+			    vf_id, ret);
+		return -EINVAL;
+	}
+
+	/* Store the configuration. */
+	j = 0;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->enabled_tc & BIT_ULL(i)) {
+			vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
+			j++;
+		}
+	}
+
+	return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
+			      uint8_t tc_no, uint32_t bw)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+	int ret = 0;
+	int i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	if (bw > I40E_QOS_BW_MAX) {
+		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+			    I40E_QOS_BW_MAX);
+		return -EINVAL;
+	}
+
+	if (bw % I40E_QOS_BW_GRANULARITY) {
+		PMD_DRV_LOG(ERR, "Bandwidth should be a multiple of %dMbps.",
+			    I40E_QOS_BW_GRANULARITY);
+		return -EINVAL;
+	}
+
+	bw /= I40E_QOS_BW_GRANULARITY;
+
+	if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
+		PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
+			    I40E_MAX_TRAFFIC_CLASS);
+		return -EINVAL;
+	}
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
+		PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
+			    vf_id, tc_no);
+		return -EINVAL;
+	}
+
+	/* No change. */
+	if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
+		PMD_DRV_LOG(INFO,
+			    "No change for TC max bandwidth. Nothing to do.");
+		return 0;
+	}
+
+	/**
+	 * VF bandwidth limitation and TC bandwidth limitation cannot be
+	 * enabled in parallel; disable the VF bandwidth limitation if
+	 * it's enabled.
+	 * If bw is 0, bandwidth limitation is disabled, so there is no
+	 * need to touch the VF bandwidth limitation configuration.
+	 */
+	if (bw && vsi->bw_info.bw_limit) {
+		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to disable VF(%d)"
+				    " bandwidth limitation, err(%d).",
+				    vf_id, ret);
+			return -EINVAL;
+		}
+
+		PMD_DRV_LOG(INFO,
+			    "VF max bandwidth is disabled according"
+			    " to TC max bandwidth setting.");
+	}
+
+	/**
+	 * Get all the TCs' info to create a whole picture,
+	 * because incremental changes aren't permitted.
+	 */
+	memset(&tc_bw, 0, sizeof(tc_bw));
+	tc_bw.tc_valid_bits = vsi->enabled_tc;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->enabled_tc & BIT_ULL(i)) {
+			tc_bw.tc_bw_credits[i] =
+				rte_cpu_to_le_16(
+					vsi->bw_info.bw_ets_credits[i]);
+		}
+	}
+	tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
+
+	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to set VF %d TC %d max bandwidth, err(%d).",
+			    vf_id, tc_no, ret);
+		return -EINVAL;
+	}
+
+	/* Store the configuration. */
+	vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
+
+	return 0;
+}
+
+int
+rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_veb *veb;
+	struct i40e_hw *hw;
+	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+	int i;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	vsi = pf->main_vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	veb = vsi->veb;
+	if (!veb) {
+		PMD_DRV_LOG(ERR, "Invalid VEB.");
+		return -EINVAL;
+	}
+
+	if ((tc_map & veb->enabled_tc) != tc_map) {
+		PMD_DRV_LOG(ERR,
+			    "TC bitmap isn't a subset of enabled TCs 0x%x.",
+			    veb->enabled_tc);
+		return -EINVAL;
+	}
+
+	if (tc_map == veb->strict_prio_tc) {
+		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
+		return 0;
+	}
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	/* Disable DCBx the first time strict priority is set. */
+	if (!veb->strict_prio_tc) {
+		ret = i40e_aq_stop_lldp(hw, true, NULL);
+		if (ret)
+			PMD_DRV_LOG(INFO,
+				    "Failed to disable DCBx as it's already"
+				    " disabled.");
+		else
+			PMD_DRV_LOG(INFO,
+				    "DCBx is disabled according to strict"
+				    " priority setting.");
+	}
+
+	memset(&ets_data, 0, sizeof(ets_data));
+	ets_data.tc_valid_bits = veb->enabled_tc;
+	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
+	ets_data.tc_strict_priority_flags = tc_map;
+	/* Get all TCs' bandwidth. */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (veb->enabled_tc & BIT_ULL(i)) {
+			/* For robustness, if bandwidth is 0, use 1 instead. */
+			if (veb->bw_info.bw_ets_share_credits[i])
+				ets_data.tc_bw_share_credits[i] =
+					veb->bw_info.bw_ets_share_credits[i];
+			else
+				ets_data.tc_bw_share_credits[i] =
+					I40E_QOS_BW_WEIGHT_MIN;
+		}
+	}
+
+	if (!veb->strict_prio_tc)
+		ret = i40e_aq_config_switch_comp_ets(
+			hw, veb->uplink_seid,
+			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
+			NULL);
+	else if (tc_map)
+		ret = i40e_aq_config_switch_comp_ets(
+			hw, veb->uplink_seid,
+			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
+			NULL);
+	else
+		ret = i40e_aq_config_switch_comp_ets(
+			hw, veb->uplink_seid,
+			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
+			NULL);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to set TCs' strict priority mode."
+			    " err (%d)", ret);
+		return -EINVAL;
+	}
+
+	veb->strict_prio_tc = tc_map;
+
+	/* Enable DCBx again, if all the TCs' strict priority disabled. */
+	if (!tc_map) {
+		ret = i40e_aq_start_lldp(hw, NULL);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to enable DCBx, err(%d).", ret);
+			return -EINVAL;
+		}
+
+		PMD_DRV_LOG(INFO,
+			    "DCBx is enabled again according to strict"
+			    " priority setting.");
+	}
+
+	return ret;
+}
+
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+
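+/* Build a profile-info section used to add or remove a DDP track ID. */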
+static void
+i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
+			       uint32_t track_id, uint8_t *profile_info_sec,
+			       bool add)
+{
+	struct i40e_profile_section_header *sec = NULL;
+	struct i40e_profile_info *pinfo;
+
+	sec = (struct i40e_profile_section_header *)profile_info_sec;
+	sec->tbl_size = 1;
+	sec->data_end = sizeof(struct i40e_profile_section_header) +
+		sizeof(struct i40e_profile_info);
+	sec->section.type = SECTION_TYPE_INFO;
+	sec->section.offset = sizeof(struct i40e_profile_section_header);
+	sec->section.size = sizeof(struct i40e_profile_info);
+	pinfo = (struct i40e_profile_info *)(profile_info_sec +
+					     sec->section.offset);
+	pinfo->track_id = track_id;
+	memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
+	memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
+	if (add)
+		pinfo->op = I40E_DDP_ADD_TRACKID;
+	else
+		pinfo->op = I40E_DDP_REMOVE_TRACKID;
+}
+
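+/* Write a profile-info section to HW to add or remove the profile info. */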
+static enum i40e_status_code
+i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+	struct i40e_profile_section_header *sec;
+	uint32_t track_id;
+	uint32_t offset = 0;
+	uint32_t info = 0;
+
+	sec = (struct i40e_profile_section_header *)profile_info_sec;
+	track_id = ((struct i40e_profile_info *)(profile_info_sec +
+					 sec->section.offset))->track_id;
+
+	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+				   track_id, &offset, &info, NULL);
+	if (status)
+		PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
+			    "offset %d, info %d",
+			    offset, info);
+
+	return status;
+}
+
+/* Check if the profile info exists */
+static int
+i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[port];
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint8_t *buff;
+	struct rte_pmd_i40e_profile_list *p_list;
+	struct rte_pmd_i40e_profile_info *pinfo, *p;
+	uint32_t i;
+	int ret;
+
+	buff = rte_zmalloc("pinfo_list",
+			   (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+			   0);
+	if (!buff) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		return -1;
+	}
+
+	ret = i40e_aq_get_ddp_list(
+		hw, (void *)buff,
+		(I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+		0, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get profile info list.");
+		rte_free(buff);
+		return -1;
+	}
+	p_list = (struct rte_pmd_i40e_profile_list *)buff;
+	pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
+			     sizeof(struct i40e_profile_section_header));
+	for (i = 0; i < p_list->p_count; i++) {
+		p = &p_list->p_info[i];
+		if ((pinfo->track_id == p->track_id) &&
+		    !memcmp(&pinfo->version, &p->version,
+			    sizeof(struct i40e_ddp_version)) &&
+		    !memcmp(&pinfo->name, &p->name,
+			    I40E_DDP_NAME_SIZE)) {
+			PMD_DRV_LOG(INFO, "Profile exists.");
+			rte_free(buff);
+			return 1;
+		}
+	}
+
+	rte_free(buff);
+	return 0;
+}
+
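+/*
+ * Validate a DDP package buffer, write its profile segment to HW and
+ * record the profile in the info list. Only the write-and-add operation
+ * is supported here.
+ */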
+int
+rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+				 uint32_t size,
+				 enum rte_pmd_i40e_package_op op)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_hw *hw;
+	struct i40e_package_header *pkg_hdr;
+	struct i40e_generic_seg_header *profile_seg_hdr;
+	struct i40e_generic_seg_header *metadata_seg_hdr;
+	uint32_t track_id;
+	uint8_t *profile_info_sec;
+	int is_exist;
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (size < (sizeof(struct i40e_package_header) +
+		    sizeof(struct i40e_metadata_segment) +
+		    sizeof(uint32_t) * 2)) {
+		PMD_DRV_LOG(ERR, "Buff is invalid.");
+		return -EINVAL;
+	}
+
+	pkg_hdr = (struct i40e_package_header *)buff;
+
+	if (!pkg_hdr) {
+		PMD_DRV_LOG(ERR, "Failed to fill the package structure");
+		return -EINVAL;
+	}
+
+	if (pkg_hdr->segment_count < 2) {
+		PMD_DRV_LOG(ERR, "Segment count should be at least 2.");
+		return -EINVAL;
+	}
+
+	/* Find metadata segment */
+	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+							pkg_hdr);
+	if (!metadata_seg_hdr) {
+		PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+		return -EINVAL;
+	}
+	track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+	/* Find profile segment */
+	profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
+						       pkg_hdr);
+	if (!profile_seg_hdr) {
+		PMD_DRV_LOG(ERR, "Failed to find profile segment header");
+		return -EINVAL;
+	}
+
+	profile_info_sec = rte_zmalloc(
+		"i40e_profile_info",
+		sizeof(struct i40e_profile_section_header) +
+		sizeof(struct i40e_profile_info),
+		0);
+	if (!profile_info_sec) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -EINVAL;
+	}
+
+	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+		/* Check if the profile exists */
+		i40e_generate_profile_info_sec(
+		     ((struct i40e_profile_segment *)profile_seg_hdr)->name,
+		     &((struct i40e_profile_segment *)profile_seg_hdr)->version,
+		     track_id, profile_info_sec, 1);
+		is_exist = i40e_check_profile_info(port, profile_info_sec);
+		if (is_exist > 0) {
+			PMD_DRV_LOG(ERR, "Profile already exists.");
+			rte_free(profile_info_sec);
+			return 1;
+		} else if (is_exist < 0) {
+			PMD_DRV_LOG(ERR, "Failed to check profile.");
+			rte_free(profile_info_sec);
+			return -EINVAL;
+		}
+
+		/* Write profile to HW */
+		status = i40e_write_profile(
+				hw,
+				(struct i40e_profile_segment *)profile_seg_hdr,
+				track_id);
+		if (status) {
+			PMD_DRV_LOG(ERR, "Failed to write profile.");
+			rte_free(profile_info_sec);
+			return status;
+		}
+
+		/* Add profile info to info list */
+		status = i40e_add_rm_profile_info(hw, profile_info_sec);
+		if (status)
+			PMD_DRV_LOG(ERR, "Failed to add profile info.");
+	} else {
+		PMD_DRV_LOG(ERR, "Operation not supported.");
+	}
+
+	rte_free(profile_info_sec);
+	return status;
+}
+
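+/* Read the list of DDP profiles currently loaded in HW. */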
+int
+rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_hw *hw;
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
+		return -EINVAL;
+
+	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	status = i40e_aq_get_ddp_list(hw, (void *)buff,
+				      size, 0, NULL);
+
+	return status;
+}
-- 
1.9.3

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 2/2] net/ixgbe: move private APIs to a specific file
  2017-04-11  8:31 [PATCH 0/2] move private APIs to a specific files Wenzhuo Lu
  2017-04-11  8:31 ` [PATCH 1/2] net/i40e: move private APIs to a specific file Wenzhuo Lu
@ 2017-04-11  8:31 ` Wenzhuo Lu
  2017-04-11 12:10 ` [PATCH 0/2] move private APIs to a specific files Ferruh Yigit
  2 siblings, 0 replies; 4+ messages in thread
From: Wenzhuo Lu @ 2017-04-11  8:31 UTC (permalink / raw)
  To: dev; +Cc: Wenzhuo Lu

Create a new file rte_pmd_ixgbe.c for all the private
APIs. Move all the related code to the new file.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/Makefile        |    1 +
 drivers/net/ixgbe/ixgbe_ethdev.c  | 1300 +++++++------------------------------
 drivers/net/ixgbe/ixgbe_ethdev.h  |    5 +
 drivers/net/ixgbe/rte_pmd_ixgbe.c |  910 ++++++++++++++++++++++++++
 4 files changed, 1141 insertions(+), 1075 deletions(-)
 create mode 100644 drivers/net/ixgbe/rte_pmd_ixgbe.c

diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index f62f3d5..0a6b7f2 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -120,6 +120,7 @@ ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
 
 # install this header file
 SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 1462324..35c3870 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -73,8 +73,6 @@
 #include "base/ixgbe_phy.h"
 #include "ixgbe_regs.h"
 
-#include "rte_pmd_ixgbe.h"
-
 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
  * least 8 bytes less than receive packet buffer size. This value is in units
@@ -2387,6 +2385,80 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 	}
 }
 
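+/* Apply a TX rate limit (RTTBCNRC) to the queues of a VF selected by q_msk. */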
+int
+ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+			uint16_t tx_rate, uint64_t q_msk)
+{
+	struct ixgbe_hw *hw;
+	struct ixgbe_vf_info *vfinfo;
+	struct rte_eth_link link;
+	uint8_t  nb_q_per_pool;
+	uint32_t queue_stride;
+	uint32_t queue_idx, idx = 0, vf_idx;
+	uint32_t queue_end;
+	uint16_t total_rate = 0;
+	struct rte_pci_device *pci_dev;
+
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+	rte_eth_link_get_nowait(dev->data->port_id, &link);
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	if (tx_rate > link.link_speed)
+		return -EINVAL;
+
+	if (q_msk == 0)
+		return 0;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+	queue_idx = vf * queue_stride;
+	queue_end = queue_idx + nb_q_per_pool - 1;
+	if (queue_end >= hw->mac.max_tx_queues)
+		return -EINVAL;
+
+	if (vfinfo) {
+		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
+			if (vf_idx == vf)
+				continue;
+			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+				idx++)
+				total_rate += vfinfo[vf_idx].tx_rate[idx];
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	/* Store tx_rate for this vf. */
+	for (idx = 0; idx < nb_q_per_pool; idx++) {
+		if (((uint64_t)0x1 << idx) & q_msk) {
+			if (vfinfo[vf].tx_rate[idx] != tx_rate)
+				vfinfo[vf].tx_rate[idx] = tx_rate;
+			total_rate += tx_rate;
+		}
+	}
+
+	if (total_rate > dev->data->dev_link.link_speed) {
+		/* Reset the stored TX rate of the VF if it would exceed
+		 * the link speed.
+		 */
+		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+		return -EINVAL;
+	}
+
+	/* Set RTTBCNRC of each queue/pool for the given VF */
+	for (; queue_idx <= queue_end; queue_idx++) {
+		if (0x1 & q_msk)
+			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+		q_msk = q_msk >> 1;
+	}
+
+	return 0;
+}
+
 /*
  * Configure device link speed and setup link.
  * It returns 0 on success.
@@ -2502,8 +2574,8 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 		for (vf = 0; vf < pci_dev->max_vfs; vf++)
 			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
 				if (vfinfo[vf].tx_rate[idx] != 0)
-					rte_pmd_ixgbe_set_vf_rate_limit(
-						dev->data->port_id, vf,
+					ixgbe_set_vf_rate_limit(
+						dev, vf,
 						vfinfo[vf].tx_rate[idx],
 						1 << idx);
 	}
@@ -4392,39 +4464,10 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return true;
 }
 
-int
-rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
-		struct ether_addr *mac_addr)
+bool
+is_ixgbe_supported(struct rte_eth_dev *dev)
 {
-	struct ixgbe_hw *hw;
-	struct ixgbe_vf_info *vfinfo;
-	int rar_entry;
-	uint8_t *new_mac = (uint8_t *)(mac_addr);
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (vf >= pci_dev->max_vfs)
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
-	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
-		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
-				ETHER_ADDR_LEN);
-		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
-				IXGBE_RAH_AV);
-	}
-	return -EINVAL;
+	return is_device_supported(dev, &rte_ixgbe_pmd);
 }
 
 static int
@@ -4749,7 +4792,7 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 	}
 }
 
-static int
+int
 ixgbe_vt_check(struct ixgbe_hw *hw)
 {
 	uint32_t reg_val;
@@ -4895,703 +4938,180 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 	return new_val;
 }
 
-int
-rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
+#define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
+#define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
+#define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
+#define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
+#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
+	((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
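+/*
+ * Program one mirror rule (pool, uplink, downlink or VLAN mirroring) in
+ * MRCTL and the related pool/VLAN mask registers.
+ */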
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      uint8_t rule_id, uint8_t on)
 {
-	struct ixgbe_hw *hw;
-	struct ixgbe_vf_info *vfinfo;
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
-	uint32_t ctrl;
+	uint32_t mr_ctl, vlvf;
+	uint32_t mp_lsb = 0;
+	uint32_t mv_msb = 0;
+	uint32_t mv_lsb = 0;
+	uint32_t mp_msb = 0;
+	uint8_t i = 0;
+	int reg_index = 0;
+	uint64_t vlan_mask = 0;
 
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+	const uint8_t pool_mask_offset = 32;
+	const uint8_t vlan_mask_offset = 32;
+	const uint8_t dst_pool_offset = 8;
+	const uint8_t rule_mr_offset  = 4;
+	const uint8_t mirror_rule_mask = 0x0F;
 
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct ixgbe_mirror_info *mr_info =
+			(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint8_t mirror_type = 0;
 
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
+	if (ixgbe_vt_check(hw) < 0)
 		return -ENOTSUP;
 
-	if (vf >= pci_dev->max_vfs)
+	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
 		return -EINVAL;
 
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-
-	ctrl = IXGBE_PF_CONTROL_MSG;
-	if (vfinfo[vf].clear_to_send)
-		ctrl |= IXGBE_VT_MSGTYPE_CTS;
+	if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
+		PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
+			    mirror_conf->rule_type);
+		return -EINVAL;
+	}
 
-	ixgbe_write_mbx(hw, &ctrl, 1, vf);
+	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+		mirror_type |= IXGBE_MRCTL_VLME;
+		/* Check if the VLAN ID is valid and find the corresponding
+		 * VLAN ID index in VLVF
+		 */
+		for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
+			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+				/* search for the pool VLAN filter index
+				 * related to this VLAN ID
+				 */
+				reg_index = ixgbe_find_vlvf_slot(
+						hw,
+						mirror_conf->vlan.vlan_id[i],
+						false);
+				if (reg_index < 0)
+					return -EINVAL;
+				vlvf = IXGBE_READ_REG(hw,
+						      IXGBE_VLVF(reg_index));
+				if ((vlvf & IXGBE_VLVF_VIEN) &&
+				    ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
+				      mirror_conf->vlan.vlan_id[i]))
+					vlan_mask |= (1ULL << reg_index);
+				else
+					return -EINVAL;
+			}
+		}
 
-	return 0;
-}
+		if (on) {
+			mv_lsb = vlan_mask & 0xFFFFFFFF;
+			mv_msb = vlan_mask >> vlan_mask_offset;
 
-int
-rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
-	struct ixgbe_hw *hw;
-	struct ixgbe_mac_info *mac;
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
+			mr_info->mr_conf[rule_id].vlan.vlan_mask =
+						mirror_conf->vlan.vlan_mask;
+			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
+					mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+						mirror_conf->vlan.vlan_id[i];
+			}
+		} else {
+			mv_lsb = 0;
+			mv_msb = 0;
+			mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+				mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+		}
+	}
 
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+	/**
+	 * If pool mirroring is enabled, write the related pool mask
+	 * register; if it is disabled, clear the PFMRVM register.
+	 */
+	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+		mirror_type |= IXGBE_MRCTL_VPME;
+		if (on) {
+			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+			mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+			mr_info->mr_conf[rule_id].pool_mask =
+					mirror_conf->pool_mask;
 
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
+		} else {
+			mp_lsb = 0;
+			mp_msb = 0;
+			mr_info->mr_conf[rule_id].pool_mask = 0;
+		}
+	}
+	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+		mirror_type |= IXGBE_MRCTL_UPME;
+	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+		mirror_type |= IXGBE_MRCTL_DPME;
 
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
+	/* read mirror control register and recalculate it */
+	mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
 
-	if (vf >= pci_dev->max_vfs)
-		return -EINVAL;
+	if (on) {
+		mr_ctl |= mirror_type;
+		mr_ctl &= mirror_rule_mask;
+		mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+	} else {
+		mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
+	}
 
-	if (on > 1)
-		return -EINVAL;
+	mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
+	mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
 
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	mac = &hw->mac;
+	/* write mirror control register */
+	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
-	mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+	/* write pool mirror control register */
+	if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+				mp_msb);
+	}
+	/* write VLAN mirror control register */
+	if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+				mv_msb);
+	}
 
 	return 0;
 }
 
-int
-rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
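+/* Clear one mirror rule and its pool/VLAN mask registers. */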
+static int
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 {
-	struct ixgbe_hw *hw;
-	struct ixgbe_mac_info *mac;
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+	int mr_ctl = 0;
+	uint32_t lsb_val = 0;
+	uint32_t msb_val = 0;
+	const uint8_t rule_mr_offset = 4;
 
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_mirror_info *mr_info =
+		(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
 
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
+	if (ixgbe_vt_check(hw) < 0)
 		return -ENOTSUP;
 
-	if (vf >= pci_dev->max_vfs)
-		return -EINVAL;
+	memset(&mr_info->mr_conf[rule_id], 0,
+	       sizeof(struct rte_eth_mirror_conf));
 
-	if (on > 1)
-		return -EINVAL;
+	/* clear PFVMCTL register */
+	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	mac = &hw->mac;
-	mac->ops.set_mac_anti_spoofing(hw, on, vf);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
-{
-	struct ixgbe_hw *hw;
-	uint32_t ctrl;
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (vf >= pci_dev->max_vfs)
-		return -EINVAL;
-
-	if (vlan_id > ETHER_MAX_VLAN_ID)
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
-	if (vlan_id) {
-		ctrl = vlan_id;
-		ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
-	} else {
-		ctrl = 0;
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
-{
-	struct ixgbe_hw *hw;
-	uint32_t ctrl;
-	struct rte_eth_dev *dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (on > 1)
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
-	/* enable or disable VMDQ loopback */
-	if (on)
-		ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
-	else
-		ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
-
-	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
-{
-	struct ixgbe_hw *hw;
-	uint32_t reg_value;
-	int i;
-	int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
-	struct rte_eth_dev *dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (on > 1)
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	for (i = 0; i <= num_queues; i++) {
-		reg_value = IXGBE_QDE_WRITE |
-				(i << IXGBE_QDE_IDX_SHIFT) |
-				(on & IXGBE_QDE_ENABLE);
-		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
-	}
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
-{
-	struct ixgbe_hw *hw;
-	uint32_t reg_value;
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	/* only support VF's 0 to 63 */
-	if ((vf >= pci_dev->max_vfs) || (vf > 63))
-		return -EINVAL;
-
-	if (on > 1)
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
-	if (on)
-		reg_value |= IXGBE_SRRCTL_DROP_EN;
-	else
-		reg_value &= ~IXGBE_SRRCTL_DROP_EN;
-
-	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
-	struct ixgbe_hw *hw;
-	uint16_t queues_per_pool;
-	uint32_t q;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (vf >= pci_dev->max_vfs)
-		return -EINVAL;
-
-	if (on > 1)
-		return -EINVAL;
-
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
-
-	/* The PF has 128 queue pairs and in SRIOV configuration
-	 * those queues will be assigned to VF's, so RXDCTL
-	 * registers will be dealing with queues which will be
-	 * assigned to VF's.
-	 * Let's say we have SRIOV configured with 31 VF's then the
-	 * first 124 queues 0-123 will be allocated to VF's and only
-	 * the last 4 queues 123-127 will be assigned to the PF.
-	 */
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_16_POOLS;
-	else
-		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-				  ETH_64_POOLS;
-
-	for (q = 0; q < queues_per_pool; q++)
-		(*dev->dev_ops->vlan_strip_queue_set)(dev,
-				q + vf * queues_per_pool, on);
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on)
-{
-	int val = 0;
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
-	struct ixgbe_hw *hw;
-	uint32_t vmolr;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (vf >= pci_dev->max_vfs)
-		return -EINVAL;
-
-	if (on > 1)
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-			     " on 82599 hardware and newer");
-		return -ENOTSUP;
-	}
-	if (ixgbe_vt_check(hw) < 0)
-		return -ENOTSUP;
-
-	val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
-
-	if (on)
-		vmolr |= val;
-	else
-		vmolr &= ~val;
-
-	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
-	uint32_t reg, addr;
-	uint32_t val;
-	const uint8_t bit1 = 0x1;
-	struct ixgbe_hw *hw;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (vf >= pci_dev->max_vfs)
-		return -EINVAL;
-
-	if (on > 1)
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (ixgbe_vt_check(hw) < 0)
-		return -ENOTSUP;
-
-	/* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
-	if (vf >= 32) {
-		addr = IXGBE_VFRE(1);
-		val = bit1 << (vf - 32);
-	} else {
-		addr = IXGBE_VFRE(0);
-		val = bit1 << vf;
-	}
-
-	reg = IXGBE_READ_REG(hw, addr);
-
-	if (on)
-		reg |= val;
-	else
-		reg &= ~val;
-
-	IXGBE_WRITE_REG(hw, addr, reg);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-	struct rte_pci_device *pci_dev;
-	uint32_t reg, addr;
-	uint32_t val;
-	const uint8_t bit1 = 0x1;
-
-	struct ixgbe_hw *hw;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (vf >= pci_dev->max_vfs)
-		return -EINVAL;
-
-	if (on > 1)
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (ixgbe_vt_check(hw) < 0)
-		return -ENOTSUP;
-
-	/* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
-	if (vf >= 32) {
-		addr = IXGBE_VFTE(1);
-		val = bit1 << (vf - 32);
-	} else {
-		addr = IXGBE_VFTE(0);
-		val = bit1 << vf;
-	}
-
-	reg = IXGBE_READ_REG(hw, addr);
-
-	if (on)
-		reg |= val;
-	else
-		reg &= ~val;
-
-	IXGBE_WRITE_REG(hw, addr, reg);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
-			uint64_t vf_mask, uint8_t vlan_on)
-{
-	struct rte_eth_dev *dev;
-	int ret = 0;
-	uint16_t vf_idx;
-	struct ixgbe_hw *hw;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (ixgbe_vt_check(hw) < 0)
-		return -ENOTSUP;
-
-	for (vf_idx = 0; vf_idx < 64; vf_idx++) {
-		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
-			ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
-						   vlan_on, false);
-			if (ret < 0)
-				return ret;
-		}
-	}
-
-	return ret;
-}
-
-int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
-	uint16_t tx_rate, uint64_t q_msk)
-{
-	struct rte_eth_dev *dev;
-	struct ixgbe_hw *hw;
-	struct ixgbe_vf_info *vfinfo;
-	struct rte_eth_link link;
-	uint8_t  nb_q_per_pool;
-	uint32_t queue_stride;
-	uint32_t queue_idx, idx = 0, vf_idx;
-	uint32_t queue_end;
-	uint16_t total_rate = 0;
-	struct rte_pci_device *pci_dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-	pci_dev = IXGBE_DEV_TO_PCI(dev);
-	rte_eth_link_get_nowait(port, &link);
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (vf >= pci_dev->max_vfs)
-		return -EINVAL;
-
-	if (tx_rate > link.link_speed)
-		return -EINVAL;
-
-	if (q_msk == 0)
-		return 0;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
-	queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
-	queue_idx = vf * queue_stride;
-	queue_end = queue_idx + nb_q_per_pool - 1;
-	if (queue_end >= hw->mac.max_tx_queues)
-		return -EINVAL;
-
-	if (vfinfo) {
-		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
-			if (vf_idx == vf)
-				continue;
-			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
-				idx++)
-				total_rate += vfinfo[vf_idx].tx_rate[idx];
-		}
-	} else {
-		return -EINVAL;
-	}
-
-	/* Store tx_rate for this vf. */
-	for (idx = 0; idx < nb_q_per_pool; idx++) {
-		if (((uint64_t)0x1 << idx) & q_msk) {
-			if (vfinfo[vf].tx_rate[idx] != tx_rate)
-				vfinfo[vf].tx_rate[idx] = tx_rate;
-			total_rate += tx_rate;
-		}
-	}
-
-	if (total_rate > dev->data->dev_link.link_speed) {
-		/* Reset stored TX rate of the VF if it causes exceed
-		 * link speed.
-		 */
-		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
-		return -EINVAL;
-	}
-
-	/* Set RTTBCNRC of each queue/pool for vf X  */
-	for (; queue_idx <= queue_end; queue_idx++) {
-		if (0x1 & q_msk)
-			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
-		q_msk = q_msk >> 1;
-	}
-
-	return 0;
-}
-
-#define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
-#define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
-#define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
-#define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
-#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
-	((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
-	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
-
-static int
-ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-			struct rte_eth_mirror_conf *mirror_conf,
-			uint8_t rule_id, uint8_t on)
-{
-	uint32_t mr_ctl, vlvf;
-	uint32_t mp_lsb = 0;
-	uint32_t mv_msb = 0;
-	uint32_t mv_lsb = 0;
-	uint32_t mp_msb = 0;
-	uint8_t i = 0;
-	int reg_index = 0;
-	uint64_t vlan_mask = 0;
-
-	const uint8_t pool_mask_offset = 32;
-	const uint8_t vlan_mask_offset = 32;
-	const uint8_t dst_pool_offset = 8;
-	const uint8_t rule_mr_offset  = 4;
-	const uint8_t mirror_rule_mask = 0x0F;
-
-	struct ixgbe_mirror_info *mr_info =
-			(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint8_t mirror_type = 0;
-
-	if (ixgbe_vt_check(hw) < 0)
-		return -ENOTSUP;
-
-	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
-		return -EINVAL;
-
-	if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
-		PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
-			mirror_conf->rule_type);
-		return -EINVAL;
-	}
-
-	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
-		mirror_type |= IXGBE_MRCTL_VLME;
-		/* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
-		for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
-			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
-				/* search vlan id related pool vlan filter index */
-				reg_index = ixgbe_find_vlvf_slot(hw,
-						 mirror_conf->vlan.vlan_id[i],
-						 false);
-				if (reg_index < 0)
-					return -EINVAL;
-				vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
-				if ((vlvf & IXGBE_VLVF_VIEN) &&
-				    ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
-				      mirror_conf->vlan.vlan_id[i]))
-					vlan_mask |= (1ULL << reg_index);
-				else
-					return -EINVAL;
-			}
-		}
-
-		if (on) {
-			mv_lsb = vlan_mask & 0xFFFFFFFF;
-			mv_msb = vlan_mask >> vlan_mask_offset;
-
-			mr_info->mr_conf[rule_id].vlan.vlan_mask =
-						mirror_conf->vlan.vlan_mask;
-			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
-				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
-					mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
-						mirror_conf->vlan.vlan_id[i];
-			}
-		} else {
-			mv_lsb = 0;
-			mv_msb = 0;
-			mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
-			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
-				mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
-		}
-	}
-
-	/*
-	 * If pool mirroring is enabled, write the related pool mask
-	 * register; if it is disabled, clear the PFMRVM register.
-	 */
-	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
-		mirror_type |= IXGBE_MRCTL_VPME;
-		if (on) {
-			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
-			mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
-			mr_info->mr_conf[rule_id].pool_mask =
-					mirror_conf->pool_mask;
-
-		} else {
-			mp_lsb = 0;
-			mp_msb = 0;
-			mr_info->mr_conf[rule_id].pool_mask = 0;
-		}
-	}
-	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
-		mirror_type |= IXGBE_MRCTL_UPME;
-	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
-		mirror_type |= IXGBE_MRCTL_DPME;
-
-	/* read  mirror control register and recalculate it */
-	mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
-
-	if (on) {
-		mr_ctl |= mirror_type;
-		mr_ctl &= mirror_rule_mask;
-		mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
-	} else
-		mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
-
-	mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
-	mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
-
-	/* write mirror control register */
-	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-
-	/* write pool mirror control register */
-	if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
-		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
-		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
-				mp_msb);
-	}
-	/* write VLAN mirror control register */
-	if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
-		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
-		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
-				mv_msb);
-	}
-
-	return 0;
-}
-
-static int
-ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
-{
-	int mr_ctl = 0;
-	uint32_t lsb_val = 0;
-	uint32_t msb_val = 0;
-	const uint8_t rule_mr_offset = 4;
-
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_mirror_info *mr_info =
-		(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-
-	if (ixgbe_vt_check(hw) < 0)
-		return -ENOTSUP;
-
-	memset(&mr_info->mr_conf[rule_id], 0,
-		sizeof(struct rte_eth_mirror_conf));
-
-	/* clear PFVMCTL register */
-	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-
-	/* clear pool mask register */
-	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
-	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+	/* clear pool mask register */
+	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
 
 	/* clear vlan mask register */
 	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
@@ -8238,303 +7758,6 @@ int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
 	return IXGBE_SUCCESS;
 }
 
-int
-rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
-{
-	struct ixgbe_hw *hw;
-	struct rte_eth_dev *dev;
-	uint32_t ctrl;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/* Stop the data paths */
-	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
-		return -ENOTSUP;
-	/*
-	 * Workaround:
-	 * As no ixgbe_disable_sec_rx_path equivalent is
-	 * implemented for tx in the base code, and we are
-	 * not allowed to modify the base code in DPDK, so
-	 * just call the hand-written one directly for now.
-	 * The hardware support has been checked by
-	 * ixgbe_disable_sec_rx_path().
-	 */
-	ixgbe_disable_sec_tx_path_generic(hw);
-
-	/* Enable Ethernet CRC (required by MACsec offload) */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
-	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
-
-	/* Enable the TX and RX crypto engines */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
-	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
-	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
-
-	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
-	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
-
-	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
-	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
-	ctrl |= 0x3;
-	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
-
-	/* Enable SA lookup */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
-	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
-	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
-		     IXGBE_LSECTXCTRL_AUTH;
-	ctrl |= IXGBE_LSECTXCTRL_AISCI;
-	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
-	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
-	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
-
-	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
-	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
-	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
-	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
-	if (rp)
-		ctrl |= IXGBE_LSECRXCTRL_RP;
-	else
-		ctrl &= ~IXGBE_LSECRXCTRL_RP;
-	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
-
-	/* Start the data paths */
-	ixgbe_enable_sec_rx_path(hw);
-	/*
-	 * Workaround:
-	 * As no ixgbe_enable_sec_rx_path equivalent is
-	 * implemented for tx in the base code, and we are
-	 * not allowed to modify the base code in DPDK, so
-	 * just call the hand-written one directly for now.
-	 */
-	ixgbe_enable_sec_tx_path_generic(hw);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_disable(uint8_t port)
-{
-	struct ixgbe_hw *hw;
-	struct rte_eth_dev *dev;
-	uint32_t ctrl;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/* Stop the data paths */
-	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
-		return -ENOTSUP;
-	/*
-	 * Workaround:
-	 * As no ixgbe_disable_sec_rx_path equivalent is
-	 * implemented for tx in the base code, and we are
-	 * not allowed to modify the base code in DPDK, so
-	 * just call the hand-written one directly for now.
-	 * The hardware support has been checked by
-	 * ixgbe_disable_sec_rx_path().
-	 */
-	ixgbe_disable_sec_tx_path_generic(hw);
-
-	/* Disable the TX and RX crypto engines */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
-	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
-	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
-
-	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
-	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
-
-	/* Disable SA lookup */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
-	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
-	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
-
-	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
-	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
-	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
-	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
-
-	/* Start the data paths */
-	ixgbe_enable_sec_rx_path(hw);
-	/*
-	 * Workaround:
-	 * As no ixgbe_enable_sec_rx_path equivalent is
-	 * implemented for tx in the base code, and we are
-	 * not allowed to modify the base code in DPDK, so
-	 * just call the hand-written one directly for now.
-	 */
-	ixgbe_enable_sec_tx_path_generic(hw);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
-{
-	struct ixgbe_hw *hw;
-	struct rte_eth_dev *dev;
-	uint32_t ctrl;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
-	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
-
-	ctrl = mac[4] | (mac[5] << 8);
-	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
-{
-	struct ixgbe_hw *hw;
-	struct rte_eth_dev *dev;
-	uint32_t ctrl;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
-	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
-
-	pi = rte_cpu_to_be_16(pi);
-	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
-	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
-				 uint32_t pn, uint8_t *key)
-{
-	struct ixgbe_hw *hw;
-	struct rte_eth_dev *dev;
-	uint32_t ctrl, i;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (idx != 0 && idx != 1)
-		return -EINVAL;
-
-	if (an >= 4)
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/* Set the PN and key */
-	pn = rte_cpu_to_be_32(pn);
-	if (idx == 0) {
-		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
-
-		for (i = 0; i < 4; i++) {
-			ctrl = (key[i * 4 + 0] <<  0) |
-			       (key[i * 4 + 1] <<  8) |
-			       (key[i * 4 + 2] << 16) |
-			       (key[i * 4 + 3] << 24);
-			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
-		}
-	} else {
-		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
-
-		for (i = 0; i < 4; i++) {
-			ctrl = (key[i * 4 + 0] <<  0) |
-			       (key[i * 4 + 1] <<  8) |
-			       (key[i * 4 + 2] << 16) |
-			       (key[i * 4 + 3] << 24);
-			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
-		}
-	}
-
-	/* Set AN and select the SA */
-	ctrl = (an << idx * 2) | (idx << 4);
-	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
-
-	return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
-				 uint32_t pn, uint8_t *key)
-{
-	struct ixgbe_hw *hw;
-	struct rte_eth_dev *dev;
-	uint32_t ctrl, i;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (idx != 0 && idx != 1)
-		return -EINVAL;
-
-	if (an >= 4)
-		return -EINVAL;
-
-	/* Set the PN */
-	pn = rte_cpu_to_be_32(pn);
-	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
-
-	/* Set the key */
-	for (i = 0; i < 4; i++) {
-		ctrl = (key[i * 4 + 0] <<  0) |
-		       (key[i * 4 + 1] <<  8) |
-		       (key[i * 4 + 2] << 16) |
-		       (key[i * 4 + 3] << 24);
-		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
-	}
-
-	/* Set the AN and validate the SA */
-	ctrl = an | (1 << 2);
-	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
-
-	return 0;
-}
-
 /* restore n-tuple filter */
 static inline void
 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
@@ -8701,79 +7924,6 @@ int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
 	return 0;
 }
 
-int
-rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
-			      uint8_t tc_num,
-			      uint8_t *bw_weight)
-{
-	struct rte_eth_dev *dev;
-	struct ixgbe_dcb_config *dcb_config;
-	struct ixgbe_dcb_tc_config *tc;
-	struct rte_eth_conf *eth_conf;
-	struct ixgbe_bw_conf *bw_conf;
-	uint8_t i;
-	uint8_t nb_tcs;
-	uint16_t sum;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-	dev = &rte_eth_devices[port];
-
-	if (!is_device_supported(dev, &rte_ixgbe_pmd))
-		return -ENOTSUP;
-
-	if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
-		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
-			    IXGBE_DCB_MAX_TRAFFIC_CLASS);
-		return -EINVAL;
-	}
-
-	dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
-	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
-	eth_conf = &dev->data->dev_conf;
-
-	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
-		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
-		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-		    ETH_32_POOLS)
-			nb_tcs = ETH_4_TCS;
-		else
-			nb_tcs = ETH_8_TCS;
-	} else {
-		nb_tcs = 1;
-	}
-
-	if (nb_tcs != tc_num) {
-		PMD_DRV_LOG(ERR,
-			    "Weight should be set for all %d enabled TCs.",
-			    nb_tcs);
-		return -EINVAL;
-	}
-
-	sum = 0;
-	for (i = 0; i < nb_tcs; i++)
-		sum += bw_weight[i];
-	if (sum != 100) {
-		PMD_DRV_LOG(ERR,
-			    "The sum of the TC weights should be 100.");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < nb_tcs; i++) {
-		tc = &dcb_config->tc_config[i];
-		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
-	}
-	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		tc = &dcb_config->tc_config[i];
-		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
-	}
-
-	bw_conf->tc_num = nb_tcs;
-
-	return 0;
-}
-
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index a32ba4d..5176b02 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -666,6 +666,11 @@ int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 
 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
 
+int ixgbe_vt_check(struct ixgbe_hw *hw);
+int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+			    uint16_t tx_rate, uint64_t q_msk);
+bool is_ixgbe_supported(struct rte_eth_dev *dev);
+
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
 			      uint16_t ethertype)
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
new file mode 100644
index 0000000..e8fc9a6
--- /dev/null
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -0,0 +1,910 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "rte_pmd_ixgbe.h"
+
+int
+rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+			      struct ether_addr *mac_addr)
+{
+	struct ixgbe_hw *hw;
+	struct ixgbe_vf_info *vfinfo;
+	int rar_entry;
+	uint8_t *new_mac = (uint8_t *)(mac_addr);
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+			   ETHER_ADDR_LEN);
+		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
+					   IXGBE_RAH_AV);
+	}
+	return -EINVAL;
+}
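
For context after the move, here is a minimal caller sketch; the example_set_vf_mac() wrapper, port 0, VF 1 and the address are illustrative, and error handling is elided:

#include <rte_ether.h>
#include <rte_pmd_ixgbe.h>

static void
example_set_vf_mac(void)
{
	/* Locally administered unicast address for VF 1 on port 0. */
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	/* Returns 0 on success, -ENOTSUP if port 0 is not an ixgbe
	 * device, -EINVAL for a bad VF index or non-unicast address.
	 */
	rte_pmd_ixgbe_set_vf_mac_addr(0, 1, &addr);
}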
+
+int
+rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
+{
+	struct ixgbe_hw *hw;
+	struct ixgbe_vf_info *vfinfo;
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+	ctrl = IXGBE_PF_CONTROL_MSG;
+	if (vfinfo[vf].clear_to_send)
+		ctrl |= IXGBE_VT_MSGTYPE_CTS;
+
+	ixgbe_write_mbx(hw, &ctrl, 1, vf);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+	struct ixgbe_hw *hw;
+	struct ixgbe_mac_info *mac;
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	if (on > 1)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	mac = &hw->mac;
+
+	mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+	struct ixgbe_hw *hw;
+	struct ixgbe_mac_info *mac;
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	if (on > 1)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	mac = &hw->mac;
+	mac->ops.set_mac_anti_spoofing(hw, on, vf);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+{
+	struct ixgbe_hw *hw;
+	uint32_t ctrl;
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	if (vlan_id > ETHER_MAX_VLAN_ID)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+	if (vlan_id) {
+		ctrl = vlan_id;
+		ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
+	} else {
+		ctrl = 0;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
+
+	return 0;
+}
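
A short hypothetical sketch of driving VLAN insertion for a VF; port, VF and VLAN values are made up:

#include <rte_pmd_ixgbe.h>

static void
example_vf_vlan_insert(void)
{
	/* Tag all TX traffic from VF 0 on port 0 with VLAN 100. */
	rte_pmd_ixgbe_set_vf_vlan_insert(0, 0, 100);

	/* Passing vlan_id 0 clears VMVIR and disables insertion. */
	rte_pmd_ixgbe_set_vf_vlan_insert(0, 0, 0);
}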
+
+int
+rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+{
+	struct ixgbe_hw *hw;
+	uint32_t ctrl;
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (on > 1)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+	/* enable or disable VMDQ loopback */
+	if (on)
+		ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
+	else
+		ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+
+	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+	struct ixgbe_hw *hw;
+	uint32_t reg_value;
+	int i;
+	int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (on > 1)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	for (i = 0; i <= num_queues; i++) {
+		reg_value = IXGBE_QDE_WRITE |
+				(i << IXGBE_QDE_IDX_SHIFT) |
+				(on & IXGBE_QDE_ENABLE);
+		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
+	}
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+{
+	struct ixgbe_hw *hw;
+	uint32_t reg_value;
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	/* only support VFs 0 to 63 */
+	if ((vf >= pci_dev->max_vfs) || (vf > 63))
+		return -EINVAL;
+
+	if (on > 1)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
+	if (on)
+		reg_value |= IXGBE_SRRCTL_DROP_EN;
+	else
+		reg_value &= ~IXGBE_SRRCTL_DROP_EN;
+
+	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+	struct ixgbe_hw *hw;
+	uint16_t queues_per_pool;
+	uint32_t q;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	if (on > 1)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+	/* The PF has 128 queue pairs and in SRIOV configuration
+	 * those queues will be assigned to VFs, so RXDCTL
+	 * registers will be dealing with queues which will be
+	 * assigned to VFs.
+	 * Let's say we have SRIOV configured with 31 VFs; then the
+	 * first 124 queues 0-123 will be allocated to VFs and only
+	 * the last 4 queues 124-127 will be assigned to the PF.
+	 */
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+				  ETH_16_POOLS;
+	else
+		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+				  ETH_64_POOLS;
+
+	for (q = 0; q < queues_per_pool; q++)
+		(*dev->dev_ops->vlan_strip_queue_set)(dev,
+				q + vf * queues_per_pool, on);
+	return 0;
+}
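
The queue arithmetic above can be sanity-checked with a standalone sketch; the values assume the non-82598 case of the else branch (128 RX queues split across ETH_64_POOLS):

#include <stdio.h>

int
main(void)
{
	const unsigned int max_rx_queues = 128;	/* non-82598 ixgbe */
	const unsigned int pools = 64;		/* ETH_64_POOLS */
	unsigned int queues_per_pool = max_rx_queues / pools;
	unsigned int vf = 3;

	/* With 64 pools of 2 queues each, VF 3 owns queues 6 and 7. */
	printf("VF %u: queues %u-%u\n", vf,
	       vf * queues_per_pool,
	       (vf + 1) * queues_per_pool - 1);
	return 0;
}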
+
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
+			    uint16_t rx_mask, uint8_t on)
+{
+	int val = 0;
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+	struct ixgbe_hw *hw;
+	uint32_t vmolr;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	if (on > 1)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		PMD_INIT_LOG(ERR, "setting VF receive mode should be done"
+			     " on 82599 hardware and newer");
+		return -ENOTSUP;
+	}
+	if (ixgbe_vt_check(hw) < 0)
+		return -ENOTSUP;
+
+	val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+	if (on)
+		vmolr |= val;
+	else
+		vmolr &= ~val;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+	uint32_t reg, addr;
+	uint32_t val;
+	const uint8_t bit1 = 0x1;
+	struct ixgbe_hw *hw;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	if (on > 1)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (ixgbe_vt_check(hw) < 0)
+		return -ENOTSUP;
+
+	/* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+	if (vf >= 32) {
+		addr = IXGBE_VFRE(1);
+		val = bit1 << (vf - 32);
+	} else {
+		addr = IXGBE_VFRE(0);
+		val = bit1 << vf;
+	}
+
+	reg = IXGBE_READ_REG(hw, addr);
+
+	if (on)
+		reg |= val;
+	else
+		reg &= ~val;
+
+	IXGBE_WRITE_REG(hw, addr, reg);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct rte_pci_device *pci_dev;
+	uint32_t reg, addr;
+	uint32_t val;
+	const uint8_t bit1 = 0x1;
+
+	struct ixgbe_hw *hw;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (vf >= pci_dev->max_vfs)
+		return -EINVAL;
+
+	if (on > 1)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	if (ixgbe_vt_check(hw) < 0)
+		return -ENOTSUP;
+
+	/* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+	if (vf >= 32) {
+		addr = IXGBE_VFTE(1);
+		val = bit1 << (vf - 32);
+	} else {
+		addr = IXGBE_VFTE(0);
+		val = bit1 << vf;
+	}
+
+	reg = IXGBE_READ_REG(hw, addr);
+
+	if (on)
+		reg |= val;
+	else
+		reg &= ~val;
+
+	IXGBE_WRITE_REG(hw, addr, reg);
+
+	return 0;
+}
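
Combined with rte_pmd_ixgbe_set_vf_rx() above, a hypothetical caller can open both traffic directions for a VF; the port and VF numbers are illustrative:

#include <rte_pmd_ixgbe.h>

static void
example_enable_vf_traffic(void)
{
	/* Open both directions for VF 2 on port 0; each call sets
	 * the VF's bit in PFVFRE/PFVFTE respectively.
	 */
	rte_pmd_ixgbe_set_vf_rx(0, 2, 1);
	rte_pmd_ixgbe_set_vf_tx(0, 2, 1);
}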
+
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+				 uint64_t vf_mask, uint8_t vlan_on)
+{
+	struct rte_eth_dev *dev;
+	int ret = 0;
+	uint16_t vf_idx;
+	struct ixgbe_hw *hw;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	if (ixgbe_vt_check(hw) < 0)
+		return -ENOTSUP;
+
+	for (vf_idx = 0; vf_idx < 64; vf_idx++) {
+		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+			ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
+						   vlan_on, false);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	return ret;
+}
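
A sketch of the vf_mask bit convention (bit N selects VF N); the port, VLAN and mask values are placeholders:

#include <rte_pmd_ixgbe.h>

static void
example_vf_vlan_filter(void)
{
	/* Allow VLAN 10 for VFs 0 and 1 on port 0 (mask bits 0-1). */
	rte_pmd_ixgbe_set_vf_vlan_filter(0, 10, 0x3, 1);

	/* Revoke VLAN 10 for VF 1 only (mask bit 1). */
	rte_pmd_ixgbe_set_vf_vlan_filter(0, 10, 0x2, 0);
}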
+
+int
+rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
+				uint16_t tx_rate, uint64_t q_msk)
+{
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
+}
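
A minimal rate-limit call might look as follows; the 1000 Mbps cap and the two-queue mask are arbitrary examples:

#include <rte_pmd_ixgbe.h>

static void
example_vf_rate_limit(void)
{
	/* Cap VF 0 on port 0 at 1000 Mbps on the first two queues
	 * of its pool (q_msk bits 0 and 1).
	 */
	rte_pmd_ixgbe_set_vf_rate_limit(0, 0, 1000, 0x3);
}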
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/**
+	 * Workaround:
+	 * As no ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/**
+	 * Workaround:
+	 * As no ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/**
+	 * Workaround:
+	 * As no ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/**
+	 * Workaround:
+	 * As no ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
+
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
+}
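
For orientation, a hedged end-to-end sketch of the MACsec bring-up sequence these APIs form; the all-zero key, station addresses and index/AN/PN values are placeholders, not a recommended configuration:

#include <stdint.h>
#include <rte_pmd_ixgbe.h>

static void
example_macsec_setup(uint8_t port)
{
	/* Placeholder 128-bit key and station addresses; a real
	 * deployment provisions these via MKA or out of band.
	 */
	uint8_t key[16] = { 0 };
	uint8_t local_mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x10 };
	uint8_t peer_mac[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x20 };

	rte_pmd_ixgbe_macsec_enable(port, 1 /* encrypt */, 1 /* replay */);
	rte_pmd_ixgbe_macsec_config_txsc(port, local_mac);
	rte_pmd_ixgbe_macsec_config_rxsc(port, peer_mac, 1 /* port id */);
	rte_pmd_ixgbe_macsec_select_txsa(port, 0 /* idx */, 0 /* AN */, 1, key);
	rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, 1, key);
}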
+
+int
+rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+			      uint8_t tc_num,
+			      uint8_t *bw_weight)
+{
+	struct rte_eth_dev *dev;
+	struct ixgbe_dcb_config *dcb_config;
+	struct ixgbe_dcb_tc_config *tc;
+	struct rte_eth_conf *eth_conf;
+	struct ixgbe_bw_conf *bw_conf;
+	uint8_t i;
+	uint8_t nb_tcs;
+	uint16_t sum;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_ixgbe_supported(dev))
+		return -ENOTSUP;
+
+	if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
+		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+			    IXGBE_DCB_MAX_TRAFFIC_CLASS);
+		return -EINVAL;
+	}
+
+	dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
+	eth_conf = &dev->data->dev_conf;
+
+	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+		    ETH_32_POOLS)
+			nb_tcs = ETH_4_TCS;
+		else
+			nb_tcs = ETH_8_TCS;
+	} else {
+		nb_tcs = 1;
+	}
+
+	if (nb_tcs != tc_num) {
+		PMD_DRV_LOG(ERR,
+			    "Weight should be set for all %d enabled TCs.",
+			    nb_tcs);
+		return -EINVAL;
+	}
+
+	sum = 0;
+	for (i = 0; i < nb_tcs; i++)
+		sum += bw_weight[i];
+	if (sum != 100) {
+		PMD_DRV_LOG(ERR,
+			    "The sum of the TC weights should be 100.");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < nb_tcs; i++) {
+		tc = &dcb_config->tc_config[i];
+		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
+	}
+	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		tc = &dcb_config->tc_config[i];
+		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+	}
+
+	bw_conf->tc_num = nb_tcs;
+
+	return 0;
+}
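
Finally, a usage sketch for the TC bandwidth API under the checks above; the 40/30/20/10 split is arbitrary but sums to the required 100:

#include <stdint.h>
#include <rte_pmd_ixgbe.h>

static void
example_tc_bw(uint8_t port)
{
	/* With 4 TCs enabled, the weights must sum to exactly 100. */
	uint8_t bw_weight[4] = { 40, 30, 20, 10 };

	rte_pmd_ixgbe_set_tc_bw_alloc(port, 4, bw_weight);
}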
-- 
1.9.3

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH 0/2] move private APIs to a specific files
  2017-04-11  8:31 [PATCH 0/2] move private APIs to a specific files Wenzhuo Lu
  2017-04-11  8:31 ` [PATCH 1/2] net/i40e: move private APIs to a specific file Wenzhuo Lu
  2017-04-11  8:31 ` [PATCH 2/2] net/ixgbe: " Wenzhuo Lu
@ 2017-04-11 12:10 ` Ferruh Yigit
  2 siblings, 0 replies; 4+ messages in thread
From: Ferruh Yigit @ 2017-04-11 12:10 UTC (permalink / raw)
  To: Wenzhuo Lu, dev

On 4/11/2017 9:31 AM, Wenzhuo Lu wrote:
> There're some private APIs on ixgbe and i40e. Create specific
> files for them and move the related code to the new files.
> 
> Wenzhuo Lu (2):
>   net/i40e: move private APIs to a specific file
>   net/ixgbe: move private APIs to a specific file

Series applied to dpdk-next-net/master, thanks.

Thanks Wenzhuo!

^ permalink raw reply	[flat|nested] 4+ messages in thread
