All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Ananyev, Konstantin" <konstantin.ananyev@intel.com>
To: "Dai, Wei" <wei.dai@intel.com>, "Lu, Wenzhuo" <wenzhuo.lu@intel.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
Date: Wed, 14 Mar 2018 21:47:48 +0000	[thread overview]
Message-ID: <2601191342CEEE43887BDE71AB9772589E28FDBA@irsmsx105.ger.corp.intel.com> (raw)
In-Reply-To: <1520427990-73471-4-git-send-email-wei.dai@intel.com>

Hi Wei,

> -----Original Message-----
> From: Dai, Wei
> Sent: Wednesday, March 7, 2018 1:06 PM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> Subject: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> 
> Ethdev Rx offloads API has changed since:
> commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
> This commit support the new Rx offloads API.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c          |  93 +++++++++--------
>  drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
>  drivers/net/ixgbe/ixgbe_rxtx.c            | 163 ++++++++++++++++++++++++++----
>  drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
>  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
>  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
>  6 files changed, 205 insertions(+), 66 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 8bb67ba..9437f05 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
>  static int
>  ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
>  {
> +	struct rte_eth_rxmode *rxmode;
> +	rxmode = &dev->data->dev_conf.rxmode;
> +
>  	if (mask & ETH_VLAN_STRIP_MASK) {
>  		ixgbe_vlan_hw_strip_config(dev);
>  	}
> 
>  	if (mask & ETH_VLAN_FILTER_MASK) {
> -		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
> +		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
>  			ixgbe_vlan_hw_filter_enable(dev);
>  		else
>  			ixgbe_vlan_hw_filter_disable(dev);
>  	}
> 
>  	if (mask & ETH_VLAN_EXTEND_MASK) {
> -		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
> +		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
>  			ixgbe_vlan_hw_extend_enable(dev);
>  		else
>  			ixgbe_vlan_hw_extend_disable(dev);
> @@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
>  		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
>  	struct ixgbe_adapter *adapter =
>  		(struct ixgbe_adapter *)dev->data->dev_private;
> +	struct rte_eth_dev_info dev_info;
> +	uint64_t rx_offloads;
>  	int ret;
> 
>  	PMD_INIT_FUNC_TRACE();
> @@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
>  		return ret;
>  	}
> 
> +	ixgbe_dev_info_get(dev, &dev_info);
> +	rx_offloads = dev->data->dev_conf.rxmode.offloads;
> +	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
> +		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
> +			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +			    rx_offloads, dev_info.rx_offload_capa);
> +		return -ENOTSUP;
> +	}
> +
>  	/* set flag to update link status after init */
>  	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
> 
> @@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  	else
>  		dev_info->max_vmdq_pools = ETH_64_POOLS;
>  	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
> -	dev_info->rx_offload_capa =
> -		DEV_RX_OFFLOAD_VLAN_STRIP |
> -		DEV_RX_OFFLOAD_IPV4_CKSUM |
> -		DEV_RX_OFFLOAD_UDP_CKSUM  |
> -		DEV_RX_OFFLOAD_TCP_CKSUM  |
> -		DEV_RX_OFFLOAD_CRC_STRIP;
> -
> -	/*
> -	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
> -	 * mode.
> -	 */
> -	if ((hw->mac.type == ixgbe_mac_82599EB ||
> -	     hw->mac.type == ixgbe_mac_X540) &&
> -	    !RTE_ETH_DEV_SRIOV(dev).active)
> -		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
> -
> -	if (hw->mac.type == ixgbe_mac_82599EB ||
> -	    hw->mac.type == ixgbe_mac_X540)
> -		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
> -
> -	if (hw->mac.type == ixgbe_mac_X550 ||
> -	    hw->mac.type == ixgbe_mac_X550EM_x ||
> -	    hw->mac.type == ixgbe_mac_X550EM_a)
> -		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
> +	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
> +	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
> +				     dev_info->rx_queue_offload_capa);
> 
>  	dev_info->tx_offload_capa =
>  		DEV_TX_OFFLOAD_VLAN_INSERT |
> @@ -3675,10 +3668,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
> 
>  #ifdef RTE_LIBRTE_SECURITY
> -	if (dev->security_ctx) {
> -		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
> +	if (dev->security_ctx)
>  		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
> -	}
>  #endif
> 
>  	dev_info->default_rxconf = (struct rte_eth_rxconf) {
> @@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  		},
>  		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
>  		.rx_drop_en = 0,
> +		.offloads = 0,
>  	};
> 
>  	dev_info->default_txconf = (struct rte_eth_txconf) {
> @@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
>  		dev_info->max_vmdq_pools = ETH_16_POOLS;
>  	else
>  		dev_info->max_vmdq_pools = ETH_64_POOLS;
> -	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
> -				DEV_RX_OFFLOAD_IPV4_CKSUM |
> -				DEV_RX_OFFLOAD_UDP_CKSUM  |
> -				DEV_RX_OFFLOAD_TCP_CKSUM  |
> -				DEV_RX_OFFLOAD_CRC_STRIP;
> +	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
> +	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
> +				     dev_info->rx_queue_offload_capa);
>  	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
>  				DEV_TX_OFFLOAD_IPV4_CKSUM  |
>  				DEV_TX_OFFLOAD_UDP_CKSUM   |
> @@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
>  		},
>  		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
>  		.rx_drop_en = 0,
> +		.offloads = 0,
>  	};
> 
>  	dev_info->default_txconf = (struct rte_eth_txconf) {
> @@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
> 
>  	/* switch to jumbo mode if needed */
>  	if (frame_size > ETHER_MAX_LEN) {
> -		dev->data->dev_conf.rxmode.jumbo_frame = 1;
> +		dev->data->dev_conf.rxmode.offloads |=
> +			DEV_RX_OFFLOAD_JUMBO_FRAME;
>  		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
>  	} else {
> -		dev->data->dev_conf.rxmode.jumbo_frame = 0;
> +		dev->data->dev_conf.rxmode.offloads &=
> +			~DEV_RX_OFFLOAD_JUMBO_FRAME;
>  		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
>  	}
>  	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
> @@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
>  	struct rte_eth_conf *conf = &dev->data->dev_conf;
>  	struct ixgbe_adapter *adapter =
>  			(struct ixgbe_adapter *)dev->data->dev_private;
> +	struct rte_eth_dev_info dev_info;
> +	uint64_t rx_offloads;
> 
>  	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
>  		     dev->data->port_id);
> 
> +	ixgbevf_dev_info_get(dev, &dev_info);
> +	rx_offloads = dev->data->dev_conf.rxmode.offloads;
> +	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
> +		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
> +			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +			    rx_offloads, dev_info.rx_offload_capa);
> +		return -ENOTSUP;
> +	}
> +
>  	/*
>  	 * VF has no ability to enable/disable HW CRC
>  	 * Keep the persistent behavior the same as Host PF
>  	 */
>  #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
> -	if (!conf->rxmode.hw_strip_crc) {
> +	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
>  		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
> -		conf->rxmode.hw_strip_crc = 1;
> +		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
>  	}
>  #else
> -	if (conf->rxmode.hw_strip_crc) {
> +	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
>  		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
> -		conf->rxmode.hw_strip_crc = 0;
> +		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
>  	}
>  #endif
> 
> @@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
>  			   uint16_t queue_idx, uint16_t tx_rate)
>  {
>  	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct rte_eth_rxmode *rxmode;
>  	uint32_t rf_dec, rf_int;
>  	uint32_t bcnrc_val;
>  	uint16_t link_speed = dev->data->dev_link.link_speed;
> @@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
>  		bcnrc_val = 0;
>  	}
> 
> +	rxmode = &dev->data->dev_conf.rxmode;
>  	/*
>  	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
>  	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
>  	 * set as 0x4.
>  	 */
> -	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
> -		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
> -				IXGBE_MAX_JUMBO_FRAME_SIZE))
> +	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
> +	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
>  		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
>  			IXGBE_MMW_SIZE_JUMBO_FRAME);
>  	else
> @@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
>  	/* refuse mtu that requires the support of scattered packets when this
>  	 * feature has not been enabled before.
>  	 */
> -	if (!rx_conf->enable_scatter &&
> +	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
>  	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
>  	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
>  		return -EINVAL;
> diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
> index 176ec0f..29e4728 100644
> --- a/drivers/net/ixgbe/ixgbe_ipsec.c
> +++ b/drivers/net/ixgbe/ixgbe_ipsec.c
> @@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
>  {
>  	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>  	uint32_t reg;
> +	uint64_t rx_offloads;
> 
> +	rx_offloads = dev->data->dev_conf.rxmode.offloads;
>  	/* sanity checks */
> -	if (dev->data->dev_conf.rxmode.enable_lro) {
> +	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
>  		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
>  		return -1;
>  	}
> -	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
> +	if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
>  		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
>  		return -1;
>  	}
> @@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
>  	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
>  	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
> 
> -	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
> +	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
>  		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
>  		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
>  		if (reg != 0) {
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
> index 5c45eb4..a5d4822 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
> @@ -2769,6 +2769,98 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
>  #endif
>  }
> 
> +static int
> +ixgbe_is_vf(struct rte_eth_dev *dev)
> +{
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> +	switch (hw->mac.type) {
> +	case ixgbe_mac_82599_vf:
> +	case ixgbe_mac_X540_vf:
> +	case ixgbe_mac_X550_vf:
> +	case ixgbe_mac_X550EM_x_vf:
> +	case ixgbe_mac_X550EM_a_vf:
> +		return 1;
> +	default:
> +		return 0;
> +	}
> +}
> +
> +uint64_t
> +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
> +{
> +	uint64_t offloads;
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> +	offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;

As far as I can see, in ixgbe all header_split code is enabled only if RTE_HEADER_SPLIT_ENABLE is on.
It is off by default and I doubt anyone is really using it these days.
So I think the best thing would be not to advertise DEV_RX_OFFLOAD_HEADER_SPLIT for ixgbe at all,
and probably remove the related code.
If you'd prefer to keep it, then at least we should set that capability only
under #ifdef RTE_HEADER_SPLIT_ENABLE.
Another thing - it should be per port, not per queue.
Though I think it is better to just remove it completely.

> +	if (hw->mac.type != ixgbe_mac_82598EB)
> +		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> +
> +	return offloads;
> +}
> +
> +uint64_t
> +ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
> +{
> +	uint64_t offloads;
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> +	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
> +		   DEV_RX_OFFLOAD_UDP_CKSUM   |
> +		   DEV_RX_OFFLOAD_TCP_CKSUM   |
> +		   DEV_RX_OFFLOAD_CRC_STRIP   |
> +		   DEV_RX_OFFLOAD_JUMBO_FRAME |
> +		   DEV_RX_OFFLOAD_SCATTER;
> +
> +	if (hw->mac.type == ixgbe_mac_82598EB)
> +		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> +
> +	if (ixgbe_is_vf(dev) == 0)
> +		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
> +			     DEV_RX_OFFLOAD_VLAN_EXTEND);
> +
> +	/*
> +	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
> +	 * mode.
> +	 */
> +	if ((hw->mac.type == ixgbe_mac_82599EB ||
> +	     hw->mac.type == ixgbe_mac_X540) &&
> +	    !RTE_ETH_DEV_SRIOV(dev).active)
> +		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
> +
> +	if (hw->mac.type == ixgbe_mac_82599EB ||
> +	    hw->mac.type == ixgbe_mac_X540)
> +		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
> +
> +	if (hw->mac.type == ixgbe_mac_X550 ||
> +	    hw->mac.type == ixgbe_mac_X550EM_x ||
> +	    hw->mac.type == ixgbe_mac_X550EM_a)
> +		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
> +
> +#ifdef RTE_LIBRTE_SECURITY

I don't think you need that ifdef here.

> +	if (dev->security_ctx)
> +		offloads |= DEV_RX_OFFLOAD_SECURITY;
> +#endif
> +
> +	return offloads;
> +}
> +
> +static int
> +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
> +{
> +	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> +	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> +	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> +
> +	if ((requested & (queue_supported | port_supported)) != requested)
> +		return 0;
> +
> +	if ((port_offloads ^ requested) & port_supported)

Could you explain a bit more what you are checking here?
As I can see:
 (port_offloads ^ requested) - that's the diff between the already set and newly
requested offloads.
Then you check whether that diff contains offloads supported by the port,
and if so you return an error?
Konstantin

> +		return 0;
> +
> +	return 1;
> +}
> +

  reply	other threads:[~2018-03-14 21:47 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-02-27 16:01 [PATCH 0/4] ixgbe: convert to new offloads API Wei Dai
2018-02-27 16:01 ` [PATCH 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-02-27 16:01 ` [PATCH 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-02-27 16:01 ` [PATCH 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-02-27 16:01 ` [PATCH 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-14 23:18   ` Ananyev, Konstantin
2018-03-19  6:24     ` Dai, Wei
2018-03-07 13:06 ` [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
2018-03-07 13:06   ` [PATCH v2 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-07 13:06   ` [PATCH v2 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-07 13:06   ` [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-14 21:47     ` Ananyev, Konstantin [this message]
2018-03-19  3:15       ` Dai, Wei
2018-03-20 11:53         ` Ananyev, Konstantin
2018-03-21 14:03           ` Dai, Wei
2018-03-07 13:06   ` [PATCH v2 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-19  7:04   ` [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
2018-03-19  7:04     ` [PATCH v3 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-19  7:04     ` [PATCH v3 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-19  7:04     ` [PATCH v3 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-19  7:04     ` [PATCH v3 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-22  3:40     ` [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
2018-03-22  3:41       ` [PATCH v4 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-22  3:41       ` [PATCH v4 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-22  3:41       ` [PATCH v4 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-22  3:41       ` [PATCH v4 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-04-02 13:27       ` [PATCH v4 0/4] net/ixgbe: convert to new " Zhang, Qi Z
2018-04-03 15:14         ` Zhang, Helin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=2601191342CEEE43887BDE71AB9772589E28FDBA@irsmsx105.ger.corp.intel.com \
    --to=konstantin.ananyev@intel.com \
    --cc=dev@dpdk.org \
    --cc=wei.dai@intel.com \
    --cc=wenzhuo.lu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.