* [Intel-wired-lan] [PATCH net-next 0/2] Add I210 AVB support
@ 2015-10-28  5:33 Gangfeng Huang
  2015-10-28  5:33 ` [Intel-wired-lan] [PATCH net-next 1/2] igb: add function to set I210 transmit mode Gangfeng Huang
  2015-10-28  5:33 ` [Intel-wired-lan] [PATCH net-next 2/2] igb: add a character device to support AVB Gangfeng Huang
  0 siblings, 2 replies; 3+ messages in thread
From: Gangfeng Huang @ 2015-10-28  5:33 UTC (permalink / raw)
  To: intel-wired-lan

The Intel Ethernet Server Adapter I210 supports IEEE 802.1Qav Audio-Video
Bridging (AVB) for users requiring tightly controlled media stream
synchronization, buffering, and reservation. 802.1Qav is the part of the
AVB specification suite that guarantees bounded latency and latency
variation for time-sensitive traffic.

Reference:
https://github.com/AVnu/Open-AVB/tree/master/kmod/igb

Gangfeng Huang (2):
  igb: add function to set I210 transmit mode
  igb: add a character device to support AVB

 drivers/net/ethernet/intel/igb/Makefile        |    2 +-
 drivers/net/ethernet/intel/igb/e1000_defines.h |   22 +
 drivers/net/ethernet/intel/igb/e1000_regs.h    |    7 +
 drivers/net/ethernet/intel/igb/igb.h           |   19 +-
 drivers/net/ethernet/intel/igb/igb_cdev.c      |  511 ++++++++++++++++++++++++
 drivers/net/ethernet/intel/igb/igb_cdev.h      |   45 +++
 drivers/net/ethernet/intel/igb/igb_main.c      |  286 ++++++++++++-
 7 files changed, 877 insertions(+), 15 deletions(-)
 create mode 100644 drivers/net/ethernet/intel/igb/igb_cdev.c
 create mode 100644 drivers/net/ethernet/intel/igb/igb_cdev.h

-- 
1.7.9.5


* [Intel-wired-lan] [PATCH net-next 1/2] igb: add function to set I210 transmit mode
  2015-10-28  5:33 [Intel-wired-lan] [PATCH net-next 0/2] Add I210 AVB support Gangfeng Huang
@ 2015-10-28  5:33 ` Gangfeng Huang
  2015-10-28  5:33 ` [Intel-wired-lan] [PATCH net-next 2/2] igb: add a character device to support AVB Gangfeng Huang
  1 sibling, 0 replies; 3+ messages in thread
From: Gangfeng Huang @ 2015-10-28  5:33 UTC (permalink / raw)
  To: intel-wired-lan

The I210 supports two transmit modes, legacy and Qav. The transmit mode
is selected by the QavMode bit of the TQAVCTRL register. Before this
patch the igb driver only supported legacy mode; this patch makes the
transmit mode configurable through sysfs.

Example:
Get the transmit mode:
$ cat /sys/class/net/eth0/qav_mode
0
Set the transmit mode to Qav mode:
$ echo 1 > /sys/class/net/eth0/qav_mode

Tested:
Using /sys/class/net/eth0/qav_mode,
 1) switched back and forth between Qav mode and legacy mode
 2) sent/received packets in both modes
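
For reference, the mode can also be toggled from a program. Below is a
minimal C sketch, not part of this patch; the sysfs path and the "eth0"
interface name are assumptions:

  #include <stdio.h>

  static const char *qav_path = "/sys/class/net/eth0/qav_mode";

  /* Write the requested mode (0=legacy, 1=Qav), then read it back.
   * Returns the mode now in effect, or -1 on error.
   * Writing requires CAP_NET_ADMIN.
   */
  static int set_qav_mode(int mode)
  {
          int v = -1;
          FILE *f = fopen(qav_path, "w");

          if (!f)
                  return -1;
          fprintf(f, "%d\n", mode);
          fclose(f);

          f = fopen(qav_path, "r");
          if (!f)
                  return -1;
          if (fscanf(f, "%d", &v) != 1)
                  v = -1;
          fclose(f);
          return v;
  }

  int main(void)
  {
          return set_qav_mode(1) == 1 ? 0 : 1;
  }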

Signed-off-by: Gangfeng Huang <gangfeng.huang@ni.com>
---
 drivers/net/ethernet/intel/igb/e1000_defines.h |   21 +++
 drivers/net/ethernet/intel/igb/e1000_regs.h    |    7 +
 drivers/net/ethernet/intel/igb/igb.h           |    5 +
 drivers/net/ethernet/intel/igb/igb_main.c      |  182 +++++++++++++++++++++++-
 4 files changed, 213 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index f8684aa..f09d016 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -359,6 +359,7 @@
 #define MAX_JUMBO_FRAME_SIZE    0x3F00
 
 /* PBA constants */
+#define E1000_PBA_32K 0x0020
 #define E1000_PBA_34K 0x0022
 #define E1000_PBA_64K 0x0040    /* 64KB */
 
@@ -1014,4 +1015,24 @@
 #define E1000_RTTBCNRC_RF_INT_MASK	\
 	(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
 
+/* Queue mode, 0=strict, 1=SR mode */
+#define E1000_TQAVCC_QUEUEMODE         0x80000000
+/* Transmit mode, 0=legacy, 1=QAV */
+#define E1000_TQAVCTRL_TXMODE          0x00000001
+/* Report DMA time of tx packets */
+#define E1000_TQAVCTRL_1588_STAT_EN    0x00000004
+#define E1000_TQAVCTRL_DATA_FETCH_ARB  0x00000010 /* Data fetch arbitration */
+#define E1000_TQAVCTRL_DATA_TRAN_ARB   0x00000100 /* Data tx arbitration */
+#define E1000_TQAVCTRL_DATA_TRAN_TIM   0x00000200 /* Data launch time valid */
+/* Stall SP to guarantee SR */
+#define E1000_TQAVCTRL_SP_WAIT_SR      0x00000400
+#define E1000_TQAVCTRL_FETCH_TM_SHIFT  (16)
+
+#define E1000_TXPBSIZE_TX0PB_SHIFT    0
+#define E1000_TXPBSIZE_TX1PB_SHIFT    6
+#define E1000_TXPBSIZE_TX2PB_SHIFT    12
+#define E1000_TXPBSIZE_TX3PB_SHIFT    18
+
+#define E1000_DTXMXPKTSZ_DEFAULT 0x00000098
+
 #endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 6f0490d..2c73e7f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -135,6 +135,12 @@
 #define E1000_FCRTC	0x02170 /* Flow Control Rx high watermark */
 #define E1000_PCIEMISC	0x05BB8 /* PCIE misc config register */
 
+/* High credit registers where _n can be 0 or 1. */
+#define E1000_TQAVHC(_n)	(0x300C + 0x40 * (_n))
+/* QAV Tx mode control registers where _n can be 0 or 1. */
+#define E1000_TQAVCC(_n)	(0x3004 + 0x40 * (_n))
+#define E1000_TQAVCTRL	0x3570 /* Tx Qav Control registers */
+
 /* TX Rate Limit Registers */
 #define E1000_RTTDQSEL	0x3604 /* Tx Desc Plane Queue Select - WO */
 #define E1000_RTTBCNRM	0x3690 /* Tx BCN Rate-scheduler MMW */
@@ -201,6 +207,7 @@
 #define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
 #define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
 #define E1000_TDFPC    0x03430  /* TX Data FIFO Packet Count - RW */
+#define E1000_DTXMXPKT 0x0355C  /* DMA TX Maximum Packet Size */
 #define E1000_DTXCTL   0x03590  /* DMA TX Control - RW */
 #define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
 #define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c2bd4f9..b84a266 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -132,6 +132,9 @@ struct vf_data_storage {
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
+/* In qav mode, the maximum frame size is 1536 */
+#define IGB_MAX_QAV_FRAME_SIZE 1536
+
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256	256
 #define IGB_RXBUFFER_2048	2048
@@ -463,6 +466,8 @@ struct igb_adapter {
 	int copper_tries;
 	struct e1000_info ei;
 	u16 eee_advert;
+
+	bool qav_mode;
 };
 
 #define IGB_FLAG_HAS_MSI		(1 << 0)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 41e2740..1d00f41 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -176,6 +176,17 @@ static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
 				 struct ifla_vf_info *ivi);
 static void igb_check_vf_rate_limit(struct igb_adapter *);
 
+/* Switch between Qav and legacy transmit mode via sysfs */
+static void igb_setup_qav_mode(struct igb_adapter *adapter);
+static void igb_setup_normal_mode(struct igb_adapter *adapter);
+static ssize_t igb_get_qav_mode(struct device *dev,
+				struct device_attribute *attr, char *buf);
+static ssize_t igb_set_qav_mode(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count);
+static DEVICE_ATTR(qav_mode, S_IRUGO | S_IWUSR,
+		   igb_get_qav_mode, igb_set_qav_mode);
+
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
@@ -1600,6 +1611,11 @@ static void igb_configure(struct igb_adapter *adapter)
 
 	igb_restore_vlan(adapter);
 
+	if (adapter->qav_mode)
+		igb_setup_qav_mode(adapter);
+	else
+		igb_setup_normal_mode(adapter);
+
 	igb_setup_tctl(adapter);
 	igb_setup_mrqc(adapter);
 	igb_setup_rctl(adapter);
@@ -1873,8 +1889,10 @@ void igb_reset(struct igb_adapter *adapter)
 		pba = rd32(E1000_RXPBS);
 		pba &= E1000_RXPBS_SIZE_MASK_82576;
 		break;
-	case e1000_82575:
 	case e1000_i210:
+		pba = (adapter->qav_mode) ? E1000_PBA_32K : E1000_PBA_34K;
+		break;
+	case e1000_82575:
 	case e1000_i211:
 	default:
 		pba = E1000_PBA_34K;
@@ -2286,6 +2304,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw = &adapter->hw;
 	hw->back = adapter;
 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+	adapter->qav_mode = false;
 
 	err = -EIO;
 	hw->hw_addr = pci_iomap(pdev, 0, 0);
@@ -2531,6 +2550,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_register;
 
+	if (hw->mac.type == e1000_i210) {
+		err = sysfs_create_file(&netdev->dev.kobj,
+					&dev_attr_qav_mode.attr);
+		if (err) {
+			netdev_err(netdev, "error creating sysfs file\n");
+			goto err_register;
+		}
+	}
+
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
@@ -2805,6 +2833,9 @@ static void igb_remove(struct pci_dev *pdev)
 	 */
 	igb_release_hw_control(adapter);
 
+	if (hw->mac.type == e1000_i210)
+		sysfs_remove_file(&netdev->dev.kobj, &dev_attr_qav_mode.attr);
+
 	unregister_netdev(netdev);
 
 	igb_clear_interrupt_scheme(adapter);
@@ -2886,7 +2917,12 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
 		break;
 	}
 
-	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+	/* For QAV mode, always enable all queues */
+	if (adapter->qav_mode)
+		adapter->rss_queues = max_rss_queues;
+	else
+		adapter->rss_queues = min_t(u32, max_rss_queues,
+					    num_online_cpus());
 
 	/* Determine if we need to pair queues. */
 	switch (hw->mac.type) {
@@ -5144,6 +5180,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 		return -EINVAL;
 	}
 
+	/* For i210 Qav mode, the max frame is 1536 */
+	if (adapter->qav_mode && max_frame > IGB_MAX_QAV_FRAME_SIZE)
+		return -EINVAL;
+
 #define MAX_STD_JUMBO_FRAME_SIZE 9238
 	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
 		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
@@ -8075,4 +8115,142 @@ int igb_reinit_queues(struct igb_adapter *adapter)
 
 	return err;
 }
+
+static void igb_setup_qav_mode(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32	tqavctrl;
+	u32	tqavcc0, tqavcc1;
+	u32	tqavhc0, tqavhc1;
+	u32	txpbsize;
+
+	/* reconfigure the tx packet buffer allocation: 8/8/4/4 KB */
+	txpbsize = 8 << E1000_TXPBSIZE_TX0PB_SHIFT;
+	txpbsize |= 8 << E1000_TXPBSIZE_TX1PB_SHIFT;
+	txpbsize |= 4 << E1000_TXPBSIZE_TX2PB_SHIFT;
+	txpbsize |= 4 << E1000_TXPBSIZE_TX3PB_SHIFT;
+
+	wr32(E1000_TXPBS, txpbsize);
+
+	/* In Qav mode, the maximum frame size is 1536 bytes */
+	wr32(E1000_DTXMXPKT, IGB_MAX_QAV_FRAME_SIZE / 64);
+
+	/* The I210 implements 4 Tx queues; up to two can be dedicated
+	 * to stream reservation (SR). SP queues use strict priority,
+	 * while SR queues are additionally subject to launch time.
+	 */
+
+	tqavcc0 = E1000_TQAVCC_QUEUEMODE; /* no idle slope */
+	tqavcc1 = E1000_TQAVCC_QUEUEMODE; /* no idle slope */
+	tqavhc0 = 0xFFFFFFFF; /* unlimited credits */
+	tqavhc1 = 0xFFFFFFFF; /* unlimited credits */
+
+	wr32(E1000_TQAVCC(0), tqavcc0);
+	wr32(E1000_TQAVCC(1), tqavcc1);
+	wr32(E1000_TQAVHC(0), tqavhc0);
+	wr32(E1000_TQAVHC(1), tqavhc1);
+
+	tqavctrl = E1000_TQAVCTRL_TXMODE |
+			E1000_TQAVCTRL_DATA_FETCH_ARB |
+			E1000_TQAVCTRL_DATA_TRAN_TIM |
+			E1000_TQAVCTRL_SP_WAIT_SR;
+
+	/* Default to a 10 usec prefetch delta from launch time - roughly
+	 * the time to fetch a 1500 byte frame over a PCIe Gen1 x1 link.
+	 */
+	tqavctrl |= (10 << 5) << E1000_TQAVCTRL_FETCH_TM_SHIFT;
+
+	wr32(E1000_TQAVCTRL, tqavctrl);
+}
+
+static void igb_setup_normal_mode(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+	wr32(E1000_DTXMXPKT, E1000_DTXMXPKTSZ_DEFAULT);
+	wr32(E1000_TQAVCTRL, 0);
+}
+
+static int igb_change_mode(struct igb_adapter *adapter, int request_mode)
+{
+	struct net_device *netdev;
+	int err = 0;
+	int current_mode;
+
+	if (!adapter) {
+		pr_err("igb: change mode on unbound device!\n");
+		return -ENOENT;
+	}
+
+	current_mode = adapter->qav_mode;
+
+	if (request_mode == current_mode)
+		return 0;
+
+	netdev = adapter->netdev;
+
+	rtnl_lock();
+
+	if (netif_running(netdev))
+		igb_close(netdev);
+	else
+		igb_reset(adapter);
+
+	igb_clear_interrupt_scheme(adapter);
+
+	adapter->qav_mode = request_mode;
+
+	igb_init_queue_configuration(adapter);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to allocate memory for queues\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	if (netif_running(netdev))
+		igb_open(netdev);
+
+	rtnl_unlock();
+
+	return err;
+err_out:
+	rtnl_unlock();
+	return err;
+}
+
+static ssize_t igb_get_qav_mode(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", adapter->qav_mode);
+}
+
+static ssize_t igb_set_qav_mode(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	int request_mode, err;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (kstrtoint(buf, 0, &request_mode) < 0)
+		return -EINVAL;
+
+	if (request_mode != 0 && request_mode != 1)
+		return -EINVAL;
+
+	err = igb_change_mode(adapter, request_mode);
+	if (err)
+		return err;
+
+	return len;
+}
 /* igb_main.c */
-- 
1.7.9.5


* [Intel-wired-lan] [PATCH net-next 2/2] igb: add a character device to support AVB
  2015-10-28  5:33 [Intel-wired-lan] [PATCH net-next 0/2] Add I210 AVB support Gangfeng Huang
  2015-10-28  5:33 ` [Intel-wired-lan] [PATCH net-next 1/2] igb: add function to set I210 transmit mode Gangfeng Huang
@ 2015-10-28  5:33 ` Gangfeng Huang
  1 sibling, 0 replies; 3+ messages in thread
From: Gangfeng Huang @ 2015-10-28  5:33 UTC (permalink / raw)
  To: intel-wired-lan

This patch creates a character device for the Intel I210 Ethernet
controller. It can be used to develop Audio/Video Bridging applications,
industrial Ethernet applications that require precise timing control
over frame transmission, or test harnesses for measuring system
latencies and sampling events.

Because the AVB queues (0, 1) are mapped into a user-space application,
typical LAN traffic must be steered away from them. For transmit, the
driver registers an ndo_select_queue handler that maps all kernel
traffic to queue[3]; for receive, it programs the MRQC register so that
all best-effort (BE) traffic lands in Rx queue[3].

This patch is based on the Intel Open-AVB project:
http://github.com/AVnu/Open-AVB/tree/master/kmod/igb
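
To illustrate the intended usage, here is a minimal user-space sketch.
It is not part of this patch; the /dev node name produced by udev for
the igb_tsn class is an assumption, the struct mirrors igb_buf_cmd from
igb_cdev.h, and most error handling is omitted:

  #include <stdint.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <linux/ioctl.h>

  /* user-space mirror of the ioctl ABI in igb_cdev.h */
  #define IGB_BIND       _IOW('E', 200, int)
  #define IGB_MAPRING    _IOW('E', 201, int)
  #define IGB_UNMAPRING  _IOW('E', 202, int)

  struct igb_buf_cmd {
          uint64_t physaddr;
          uint32_t queue;
          uint32_t mmap_size;
          uint32_t flags;
  };

  int main(void)
  {
          uint32_t bar0_len;
          struct igb_buf_cmd ring = { .queue = 0, .flags = 0 /* Tx */ };
          void *regs, *txd;
          int fd = open("/dev/igb_tsn_eth0", O_RDWR); /* name assumed */

          if (fd < 0 || ioctl(fd, IGB_BIND, &bar0_len) < 0)
                  return 1;

          /* mmap offset 0 maps the adapter's BAR 0 register space */
          regs = mmap(NULL, bar0_len, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, 0);

          /* map the descriptor ring of user Tx queue 0; the returned
           * physaddr doubles as the mmap offset for the ring memory
           */
          if (ioctl(fd, IGB_MAPRING, &ring) < 0)
                  return 1;
          txd = mmap(NULL, ring.mmap_size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, (off_t)ring.physaddr);

          /* ... fill descriptors with launch times and run traffic ... */

          munmap(txd, ring.mmap_size);
          ioctl(fd, IGB_UNMAPRING, &ring);
          munmap(regs, bar0_len);
          close(fd);
          return 0;
  }

The kernel keeps queues 2 and 3 for itself, so ordinary LAN traffic
continues to flow through the regular stack while the application owns
queues 0 and 1.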

Signed-off-by: Gangfeng Huang <gangfeng.huang@ni.com>
---
 drivers/net/ethernet/intel/igb/Makefile        |    2 +-
 drivers/net/ethernet/intel/igb/e1000_defines.h |    1 +
 drivers/net/ethernet/intel/igb/igb.h           |   14 +-
 drivers/net/ethernet/intel/igb/igb_cdev.c      |  511 ++++++++++++++++++++++++
 drivers/net/ethernet/intel/igb/igb_cdev.h      |   45 +++
 drivers/net/ethernet/intel/igb/igb_main.c      |  104 ++++-
 6 files changed, 664 insertions(+), 13 deletions(-)
 create mode 100644 drivers/net/ethernet/intel/igb/igb_cdev.c
 create mode 100644 drivers/net/ethernet/intel/igb/igb_cdev.h

diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 5bcb2de..3fee429 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -33,4 +33,4 @@ obj-$(CONFIG_IGB) += igb.o
 
 igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
 	    e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
-	    e1000_i210.o igb_ptp.o igb_hwmon.o
+	    e1000_i210.o igb_ptp.o igb_hwmon.o igb_cdev.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index f09d016..6bf0e56 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -112,6 +112,7 @@
 #define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
 #define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
 
+#define E1000_MRQC_DEF_QUEUE_OFFSET            0x3
 
 /* Management Control */
 #define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index b84a266..f661729 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -38,6 +38,8 @@
 #include <linux/i2c-algo-bit.h>
 #include <linux/pci.h>
 #include <linux/mdio.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
 
 struct igb_adapter;
 
@@ -50,12 +52,12 @@ struct igb_adapter;
 #define IGB_70K_ITR		56
 
 /* TX/RX descriptor defines */
-#define IGB_DEFAULT_TXD		256
+#define IGB_DEFAULT_TXD		1024
 #define IGB_DEFAULT_TX_WORK	128
 #define IGB_MIN_TXD		80
 #define IGB_MAX_TXD		4096
 
-#define IGB_DEFAULT_RXD		256
+#define IGB_DEFAULT_RXD		1024
 #define IGB_MIN_RXD		80
 #define IGB_MAX_RXD		4096
 
@@ -468,6 +470,14 @@ struct igb_adapter {
 	u16 eee_advert;
 
 	bool qav_mode;
+	struct cdev char_dev;
+	struct list_head user_page_list;
+	struct mutex user_page_mutex; /* protect user_page_list */
+	unsigned long tx_uring_init;
+	unsigned long rx_uring_init;
+	struct mutex user_ring_mutex; /* protect tx/rx_uring_init */
+	bool cdev_in_use;
+	struct mutex cdev_mutex; /* protect cdev_in_use */
 };
 
 #define IGB_FLAG_HAS_MSI		(1 << 0)
diff --git a/drivers/net/ethernet/intel/igb/igb_cdev.c b/drivers/net/ethernet/intel/igb/igb_cdev.c
new file mode 100644
index 0000000..df237c6
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_cdev.c
@@ -0,0 +1,511 @@
+#include "igb.h"
+#include "igb_cdev.h"
+
+#include <linux/pagemap.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+
+/* TSN char dev */
+static DECLARE_BITMAP(cdev_minors, IGB_MAX_DEV_NUM);
+
+static int igb_major;
+static struct class *igb_class;
+static const char * const igb_class_name = "igb_tsn";
+static const char * const igb_dev_name = "igb_tsn_%s";
+
+/* user-mode API forward definitions */
+static int igb_open_file(struct inode *inode, struct file *file);
+static int igb_close_file(struct inode *inode, struct file *file);
+static int igb_mmap(struct file *file, struct vm_area_struct *vma);
+static long igb_ioctl_file(struct file *file, unsigned int cmd,
+			   unsigned long arg);
+
+/* user-mode IO API registrations */
+static const struct file_operations igb_fops = {
+		.owner   = THIS_MODULE,
+		.llseek  = no_llseek,
+		.open	= igb_open_file,
+		.release = igb_close_file,
+		.mmap	= igb_mmap,
+		.unlocked_ioctl = igb_ioctl_file,
+};
+
+int igb_tsn_setup_all_tx_resources(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err = 0;
+
+	for (i = 0; i < IGB_USER_TX_QUEUES; i++) {
+		err = igb_setup_tx_resources(adapter->tx_ring[i]);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Allocation for Tx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igb_free_tx_resources(adapter->tx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+int igb_tsn_setup_all_rx_resources(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err = 0;
+
+	for (i = 0; i < IGB_USER_RX_QUEUES; i++) {
+		err = igb_setup_rx_resources(adapter->rx_ring[i]);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Allocation for Rx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igb_free_rx_resources(adapter->rx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+void igb_tsn_free_all_tx_resources(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < IGB_USER_TX_QUEUES; i++)
+		igb_free_tx_resources(adapter->tx_ring[i]);
+}
+
+void igb_tsn_free_all_rx_resources(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < IGB_USER_RX_QUEUES; i++)
+		igb_free_rx_resources(adapter->rx_ring[i]);
+}
+
+static int igb_bind(struct file *file, void __user *argp)
+{
+	struct igb_adapter *adapter;
+	u32 mmap_size;
+
+	adapter = (struct igb_adapter *)file->private_data;
+
+	if (!adapter)
+		return -ENOENT;
+
+	mmap_size = pci_resource_len(adapter->pdev, 0);
+
+	if (copy_to_user(argp, &mmap_size, sizeof(mmap_size)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long igb_mapring(struct file *file, void __user *arg)
+{
+	struct igb_adapter *adapter;
+	struct igb_buf_cmd req;
+	int queue_size;
+	unsigned long *uring_init;
+	struct igb_ring *ring;
+	int err;
+
+	if (copy_from_user(&req, arg, sizeof(req)))
+		return -EFAULT;
+
+	if (req.flags != 0 && req.flags != 1)
+		return -EINVAL;
+
+	adapter = file->private_data;
+	if (!adapter) {
+		pr_err("igb: map ring on unbound device!\n");
+		return -ENOENT;
+	}
+
+	/* Req flags, Tx: 0, Rx: 1 */
+	if (req.flags == 0) {
+		queue_size = IGB_USER_TX_QUEUES;
+		uring_init = &adapter->tx_uring_init;
+	} else {
+		queue_size = IGB_USER_RX_QUEUES;
+		uring_init = &adapter->rx_uring_init;
+	}
+
+	/* Validate the queue index before indexing the ring arrays */
+	if (req.queue >= queue_size)
+		return -EINVAL;
+
+	ring = req.flags ? adapter->rx_ring[req.queue]
+			 : adapter->tx_ring[req.queue];
+
+	mutex_lock(&adapter->user_ring_mutex);
+	if (test_bit(req.queue, uring_init)) {
+		dev_err(&adapter->pdev->dev, "the queue is already in use\n");
+		err = -EBUSY;
+		goto failed;
+	}
+
+	set_pages_uc(virt_to_page(ring->desc), ring->size >> PAGE_SHIFT);
+	set_bit(req.queue, uring_init);
+	mutex_unlock(&adapter->user_ring_mutex);
+
+	req.physaddr = ring->dma;
+	req.mmap_size = ring->size;
+
+	if (copy_to_user(arg, &req, sizeof(req))) {
+		dev_err(&adapter->pdev->dev, "copyout to user failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+failed:
+	mutex_unlock(&adapter->user_ring_mutex);
+	return err;
+}
+
+static long igb_mapbuf(struct file *file, void __user *arg)
+{
+	struct igb_adapter *adapter;
+	struct igb_buf_cmd req;
+	struct page *page;
+	dma_addr_t page_dma;
+	struct igb_user_page *userpage;
+	int err = 0;
+	int direction;
+
+	if (copy_from_user(&req, arg, sizeof(req)))
+		return -EFAULT;
+
+	if (req.flags != 0 && req.flags != 1)
+		return -EINVAL;
+
+	adapter = file->private_data;
+	if (!adapter) {
+		pr_err("igb: map buffer on unbound device!\n");
+		return -ENOENT;
+	}
+
+	userpage = kzalloc(sizeof(*userpage), GFP_KERNEL);
+	if (unlikely(!userpage))
+		return -ENOMEM;
+
+	page = alloc_page(GFP_KERNEL | __GFP_COLD);
+	if (unlikely(!page)) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	direction = req.flags ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	page_dma = dma_map_page(&adapter->pdev->dev, page,
+				0, PAGE_SIZE, direction);
+
+	if (dma_mapping_error(&adapter->pdev->dev, page_dma)) {
+		put_page(page);
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	set_pages_uc(page, 1);
+	userpage->page = page;
+	userpage->page_dma = page_dma;
+	userpage->flags = req.flags;
+
+	mutex_lock(&adapter->user_page_mutex);
+	list_add_tail(&userpage->page_node, &adapter->user_page_list);
+	mutex_unlock(&adapter->user_page_mutex);
+
+	req.physaddr = page_dma;
+	req.mmap_size = PAGE_SIZE;
+
+	if (copy_to_user(arg, &req, sizeof(req))) {
+		dev_err(&adapter->pdev->dev, "copyout to user failed\n");
+		return -EFAULT;
+	}
+	return 0;
+
+failed:
+	kfree(userpage);
+	return err;
+}
+
+static long igb_unmapring(struct file *file, void __user *arg)
+{
+	struct igb_adapter *adapter;
+	struct igb_buf_cmd req;
+	struct igb_ring *ring;
+	int queue_size;
+	unsigned long *uring_init;
+	int err;
+
+	if (copy_from_user(&req, arg, sizeof(req)))
+		return -EFAULT;
+
+	if (req.flags != 0 && req.flags != 1)
+		return -EINVAL;
+
+	adapter = file->private_data;
+	if (!adapter) {
+		pr_err("igb: unmap ring on unbound device!\n");
+		return -ENOENT;
+	}
+
+	if (req.flags == 0) {
+		queue_size = IGB_USER_TX_QUEUES;
+		uring_init = &adapter->tx_uring_init;
+	} else {
+		queue_size = IGB_USER_RX_QUEUES;
+		uring_init = &adapter->rx_uring_init;
+	}
+	/* Validate the queue index before indexing the ring arrays */
+	if (req.queue >= queue_size)
+		return -EINVAL;
+	ring = req.flags ? adapter->rx_ring[req.queue]
+			 : adapter->tx_ring[req.queue];
+
+	mutex_lock(&adapter->user_ring_mutex);
+	if (!test_bit(req.queue, uring_init)) {
+		dev_err(&adapter->pdev->dev,
+			"the ring is already unmap\n");
+		err = -EINVAL;
+		goto failed;
+	}
+
+	set_pages_wb(virt_to_page(ring->desc), ring->size >> PAGE_SHIFT);
+	clear_bit(req.queue, uring_init);
+	mutex_unlock(&adapter->user_ring_mutex);
+
+	return 0;
+failed:
+	mutex_unlock(&adapter->user_ring_mutex);
+	return err;
+}
+
+static void igb_free_page(struct igb_adapter *adapter,
+			  struct igb_user_page *userpage)
+{
+	int direction = userpage->flags ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+	set_pages_wb(userpage->page, 1);
+	dma_unmap_page(&adapter->pdev->dev,
+		       userpage->page_dma,
+		       PAGE_SIZE,
+		       direction);
+
+	put_page(userpage->page);
+	list_del(&userpage->page_node);
+	kfree(userpage);
+	userpage = NULL;
+}
+
+static long igb_unmapbuf(struct file *file, void __user *arg)
+{
+	int err = 0;
+	struct igb_adapter *adapter;
+	struct igb_buf_cmd req;
+	struct igb_user_page *userpage, *tmp;
+
+	if (copy_from_user(&req, arg, sizeof(req)))
+		return -EFAULT;
+
+	adapter = file->private_data;
+	if (!adapter) {
+		pr_err("igb: unmap buffer on unbound device!\n");
+		return -ENOENT;
+	}
+
+	mutex_lock(&adapter->user_page_mutex);
+	if (list_empty(&adapter->user_page_list)) {
+		err = -EINVAL;
+		goto failed;
+	}
+
+	list_for_each_entry_safe(userpage, tmp, &adapter->user_page_list,
+				 page_node) {
+		if (req.physaddr == userpage->page_dma) {
+			igb_free_page(adapter, userpage);
+			break;
+		}
+	}
+	mutex_unlock(&adapter->user_page_mutex);
+
+	return 0;
+failed:
+	mutex_unlock(&adapter->user_page_mutex);
+	return err;
+}
+
+static long igb_ioctl_file(struct file *file, unsigned int cmd,
+			   unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int err;
+
+	switch (cmd) {
+	case IGB_BIND:
+		err = igb_bind(file, argp);
+		break;
+	case IGB_MAPRING:
+		err = igb_mapring(file, argp);
+		break;
+	case IGB_MAPBUF:
+		err = igb_mapbuf(file, argp);
+		break;
+	case IGB_UNMAPRING:
+		err = igb_unmapring(file, argp);
+		break;
+	case IGB_UNMAPBUF:
+		err = igb_unmapbuf(file, argp);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int igb_open_file(struct inode *inode, struct file *file)
+{
+	struct igb_adapter *adapter;
+	int err = 0;
+
+	adapter = container_of(inode->i_cdev, struct igb_adapter, char_dev);
+	if (!adapter)
+		return -ENOENT;
+
+	if (!adapter->qav_mode)
+		return -EPERM;
+
+	mutex_lock(&adapter->cdev_mutex);
+	if (adapter->cdev_in_use) {
+		err = -EBUSY;
+		goto failed;
+	}
+
+	file->private_data = adapter;
+	adapter->cdev_in_use = true;
+	mutex_unlock(&adapter->cdev_mutex);
+
+	return 0;
+failed:
+	mutex_unlock(&adapter->cdev_mutex);
+	return err;
+}
+
+static int igb_close_file(struct inode *inode, struct file *file)
+{
+	struct igb_adapter *adapter = file->private_data;
+
+	if (!adapter)
+		return 0;
+
+	mutex_lock(&adapter->cdev_mutex);
+	if (!adapter->cdev_in_use)
+		goto out;
+
+	mutex_lock(&adapter->user_page_mutex);
+	if (!list_empty(&adapter->user_page_list)) {
+		struct igb_user_page *userpage, *tmp;
+
+		list_for_each_entry_safe(userpage, tmp,
+					 &adapter->user_page_list,
+					 page_node) {
+			igb_free_page(adapter, userpage);
+		}
+	}
+	mutex_unlock(&adapter->user_page_mutex);
+
+	file->private_data = NULL;
+	adapter->cdev_in_use = false;
+	adapter->tx_uring_init = 0;
+	adapter->rx_uring_init = 0;
+
+out:
+	mutex_unlock(&adapter->cdev_mutex);
+	return 0;
+}
+
+static int igb_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct igb_adapter *adapter = file->private_data;
+	unsigned long size  = vma->vm_end - vma->vm_start;
+	dma_addr_t pgoff = vma->vm_pgoff;
+	dma_addr_t physaddr;
+
+	if (!adapter)
+		return -ENODEV;
+
+	if (pgoff == 0)
+		physaddr = pci_resource_start(adapter->pdev, 0) >> PAGE_SHIFT;
+	else
+		physaddr = pgoff;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (remap_pfn_range(vma, vma->vm_start,
+			    physaddr, size, vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
+
+int igb_add_cdev(struct igb_adapter *adapter)
+{
+	int result = 0;
+	dev_t dev_num;
+	int igb_minor;
+
+	igb_minor = find_first_zero_bit(cdev_minors, IGB_MAX_DEV_NUM);
+	if (igb_minor >= IGB_MAX_DEV_NUM)
+		return -EBUSY;
+	set_bit(igb_minor, cdev_minors);
+
+	dev_num = MKDEV(igb_major, igb_minor);
+	cdev_init(&adapter->char_dev, &igb_fops);
+	adapter->char_dev.owner = THIS_MODULE;
+	adapter->char_dev.ops = &igb_fops;
+	result = cdev_add(&adapter->char_dev, dev_num, 1);
+
+	if (result) {
+		dev_err(&adapter->pdev->dev,
+			"igb_tsn: add character device failed\n");
+		return result;
+	}
+
+	device_create(igb_class, NULL, dev_num, NULL, igb_dev_name,
+		      adapter->netdev->name);
+
+	return 0;
+}
+
+void igb_remove_cdev(struct igb_adapter *adapter)
+{
+	device_destroy(igb_class, adapter->char_dev.dev);
+	cdev_del(&adapter->char_dev);
+}
+
+int igb_cdev_init(char *igb_driver_name)
+{
+	dev_t dev_num;
+	int ret;
+
+	ret = alloc_chrdev_region(&dev_num, 0, IGB_MAX_DEV_NUM,
+				  igb_driver_name);
+	if (ret)
+		return ret;
+	igb_major = MAJOR(dev_num);
+
+	igb_class = class_create(THIS_MODULE, igb_class_name);
+	if (IS_ERR(igb_class))
+		pr_info("igb_tsn: create device class failed\n");
+
+	return ret;
+}
+
+void igb_cdev_destroy(void)
+{
+	class_destroy(igb_class);
+	unregister_chrdev_region(MKDEV(igb_major, 0), IGB_MAX_DEV_NUM);
+}
diff --git a/drivers/net/ethernet/intel/igb/igb_cdev.h b/drivers/net/ethernet/intel/igb/igb_cdev.h
new file mode 100644
index 0000000..07fc0b6
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_cdev.h
@@ -0,0 +1,45 @@
+#ifndef _IGB_CDEV_H_
+#define _IGB_CDEV_H_
+
+#include <asm/page.h>
+#include <asm/ioctl.h>
+
+struct igb_adapter;
+/* queues reserved for user mode */
+#define IGB_USER_TX_QUEUES        2
+#define IGB_USER_RX_QUEUES        2
+#define IGB_MAX_DEV_NUM  64
+
+/* TSN char dev ioctls */
+#define IGB_BIND       _IOW('E', 200, int)
+#define IGB_MAPRING    _IOW('E', 201, int)
+#define IGB_UNMAPRING  _IOW('E', 202, int)
+#define IGB_MAPBUF     _IOW('E', 203, int)
+#define IGB_UNMAPBUF   _IOW('E', 204, int)
+
+/* Used with both map/unmap ring & buf ioctls */
+struct igb_buf_cmd {
+	u64		physaddr;
+	u32		queue;
+	u32		mmap_size;
+	u32		flags;
+};
+
+struct igb_user_page {
+	struct list_head page_node;
+	struct page *page;
+	dma_addr_t page_dma;
+	u32 flags;
+};
+
+int igb_tsn_setup_all_tx_resources(struct igb_adapter *);
+int igb_tsn_setup_all_rx_resources(struct igb_adapter *);
+void igb_tsn_free_all_tx_resources(struct igb_adapter *);
+void igb_tsn_free_all_rx_resources(struct igb_adapter *);
+
+int igb_add_cdev(struct igb_adapter *adapter);
+void igb_remove_cdev(struct igb_adapter *adapter);
+int igb_cdev_init(char *igb_driver_name);
+void igb_cdev_destroy(void);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1d00f41..4193e58 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -55,6 +55,7 @@
 #endif
 #include <linux/i2c.h>
 #include "igb.h"
+#include "igb_cdev.h"
 
 #define MAJ 5
 #define MIN 3
@@ -688,6 +689,11 @@ static int __init igb_init_module(void)
 #ifdef CONFIG_IGB_DCA
 	dca_register_notify(&dca_notifier);
 #endif
+
+	ret = igb_cdev_init(igb_driver_name);
+	if (ret)
+		return ret;
+
 	ret = pci_register_driver(&igb_driver);
 	return ret;
 }
@@ -706,6 +712,8 @@ static void __exit igb_exit_module(void)
 	dca_unregister_notify(&dca_notifier);
 #endif
 	pci_unregister_driver(&igb_driver);
+
+	igb_cdev_destroy();
 }
 
 module_exit(igb_exit_module);
@@ -1629,7 +1637,8 @@ static void igb_configure(struct igb_adapter *adapter)
 	 * at least 1 descriptor unused to make sure
 	 * next_to_use != next_to_clean
 	 */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
+	i = adapter->qav_mode ? IGB_USER_RX_QUEUES : 0;
+	for (; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = adapter->rx_ring[i];
 		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
 	}
@@ -2077,10 +2086,24 @@ static int igb_set_features(struct net_device *netdev,
 	return 0;
 }
 
+static u16 igb_select_queue(struct net_device *netdev,
+			    struct sk_buff *skb,
+			    void *accel_priv,
+			    select_queue_fallback_t fallback)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->qav_mode)
+		return adapter->num_tx_queues - 1;
+
+	return fallback(netdev, skb);
+}
+
 static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
 	.ndo_start_xmit		= igb_xmit_frame,
+	.ndo_select_queue	= igb_select_queue,
 	.ndo_get_stats64	= igb_get_stats64,
 	.ndo_set_rx_mode	= igb_set_rx_mode,
 	.ndo_set_mac_address	= igb_set_mac,
@@ -2306,6 +2329,10 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 	adapter->qav_mode = false;
 
+	adapter->tx_uring_init = 0;
+	adapter->rx_uring_init = 0;
+	adapter->cdev_in_use = false;
+
 	err = -EIO;
 	hw->hw_addr = pci_iomap(pdev, 0, 0);
 	if (!hw->hw_addr)
@@ -2559,6 +2586,10 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
+	err = igb_add_cdev(adapter);
+	if (err)
+		goto err_register;
+
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
@@ -2803,6 +2834,8 @@ static void igb_remove(struct pci_dev *pdev)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
+	igb_remove_cdev(adapter);
+
 	pm_runtime_get_noresume(&pdev->dev);
 #ifdef CONFIG_IGB_HWMON
 	igb_sysfs_exit(adapter);
@@ -2985,6 +3018,12 @@ static int igb_sw_init(struct igb_adapter *adapter)
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
 	spin_lock_init(&adapter->stats64_lock);
+
+	INIT_LIST_HEAD(&adapter->user_page_list);
+	mutex_init(&adapter->user_page_mutex);
+	mutex_init(&adapter->user_ring_mutex);
+	mutex_init(&adapter->cdev_mutex);
+
 #ifdef CONFIG_PCI_IOV
 	switch (hw->mac.type) {
 	case e1000_82576:
@@ -3231,7 +3270,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int i, err = 0;
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
+	i = adapter->qav_mode ? IGB_USER_TX_QUEUES : 0;
+	for (; i < adapter->num_tx_queues; i++) {
 		err = igb_setup_tx_resources(adapter->tx_ring[i]);
 		if (err) {
 			dev_err(&pdev->dev,
@@ -3319,7 +3359,8 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 {
 	int i;
 
-	for (i = 0; i < adapter->num_tx_queues; i++)
+	i = adapter->qav_mode ? IGB_USER_TX_QUEUES : 0;
+	for (; i < adapter->num_tx_queues; i++)
 		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
@@ -3374,7 +3415,8 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int i, err = 0;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
+	i = adapter->qav_mode ? IGB_USER_RX_QUEUES : 0;
+	for (; i < adapter->num_rx_queues; i++) {
 		err = igb_setup_rx_resources(adapter->rx_ring[i]);
 		if (err) {
 			dev_err(&pdev->dev,
@@ -3399,6 +3441,15 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 	u32 j, num_rx_queues;
 	u32 rss_key[10];
 
+	/* In TSN mode the kernel only owns queues 2 and 3; by default
+	 * all BE packets are received on queue 3.
+	 */
+	if (adapter->qav_mode) {
+		wr32(E1000_MRQC, (adapter->num_rx_queues - 1)
+		     << E1000_MRQC_DEF_QUEUE_OFFSET);
+		return;
+	}
+
 	netdev_rss_key_fill(rss_key, sizeof(rss_key));
 	for (j = 0; j < 10; j++)
 		wr32(E1000_RSSRK(j), rss_key[j]);
@@ -3474,6 +3525,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 		if (hw->mac.type != e1000_i211)
 			mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
 	}
+
 	igb_vmm_control(adapter);
 
 	wr32(E1000_MRQC, mrqc);
@@ -3701,7 +3753,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
-	for (i = 0; i < adapter->num_rx_queues; i++)
+	i = adapter->qav_mode ? IGB_USER_RX_QUEUES : 0;
+	for (; i < adapter->num_rx_queues; i++)
 		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
@@ -3737,8 +3790,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
 {
 	int i;
-
-	for (i = 0; i < adapter->num_tx_queues; i++)
+	i = adapter->qav_mode ? IGB_USER_TX_QUEUES : 0;
+	for (; i < adapter->num_tx_queues; i++)
 		if (adapter->tx_ring[i])
 			igb_free_tx_resources(adapter->tx_ring[i]);
 }
@@ -3804,7 +3857,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
 {
 	int i;
 
-	for (i = 0; i < adapter->num_tx_queues; i++)
+	i = adapter->qav_mode ? IGB_USER_TX_QUEUES : 0;
+	for (; i < adapter->num_tx_queues; i++)
 		if (adapter->tx_ring[i])
 			igb_clean_tx_ring(adapter->tx_ring[i]);
 }
@@ -3842,7 +3896,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
 {
 	int i;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
+	i = adapter->qav_mode ? IGB_USER_RX_QUEUES : 0;
+	for (; i < adapter->num_rx_queues; i++)
 		if (adapter->rx_ring[i])
 			igb_free_rx_resources(adapter->rx_ring[i]);
 }
@@ -3898,7 +3953,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
 {
 	int i;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
+	i = adapter->qav_mode ? IGB_USER_RX_QUEUES : 0;
+	for (; i < adapter->num_rx_queues; i++)
 		if (adapter->rx_ring[i])
 			igb_clean_rx_ring(adapter->rx_ring[i]);
 }
@@ -6928,6 +6984,11 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	struct sk_buff *skb = rx_ring->skb;
 	unsigned int total_bytes = 0, total_packets = 0;
 	u16 cleaned_count = igb_desc_unused(rx_ring);
+	struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
+
+	/* Don't service user (AVB) queues */
+	if (adapter->qav_mode && rx_ring->queue_index < IGB_USER_RX_QUEUES)
+		return true;
 
 	while (likely(total_packets < budget)) {
 		union e1000_adv_rx_desc *rx_desc;
@@ -7127,6 +7188,9 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	return 0;
 }
 
+#define SIOSTXQUEUESELECT SIOCDEVPRIVATE
+#define SIOSRXQUEUESELECT (SIOCDEVPRIVATE + 1)
+
 /**
  * igb_ioctl -
  * @netdev:
@@ -8188,6 +8252,9 @@ static int igb_change_mode(struct igb_adapter *adapter, int request_mode)
 	if (request_mode == current_mode)
 		return 0;
 
+	if (adapter->cdev_in_use)
+		return -EBUSY;
+
 	netdev = adapter->netdev;
 
 	rtnl_lock();
@@ -8197,6 +8264,11 @@ static int igb_change_mode(struct igb_adapter *adapter, int request_mode)
 	else
 		igb_reset(adapter);
 
+	if (current_mode) {
+		igb_tsn_free_all_rx_resources(adapter);
+		igb_tsn_free_all_tx_resources(adapter);
+	}
+
 	igb_clear_interrupt_scheme(adapter);
 
 	adapter->qav_mode = request_mode;
@@ -8210,12 +8282,23 @@ static int igb_change_mode(struct igb_adapter *adapter, int request_mode)
 		goto err_out;
 	}
 
+	if (request_mode) {
+		err = igb_tsn_setup_all_tx_resources(adapter);
+		if (err)
+			goto err_out;
+		err = igb_tsn_setup_all_rx_resources(adapter);
+		if (err)
+			goto err_tsn_setup_rx;
+	}
+
 	if (netif_running(netdev))
 		igb_open(netdev);
 
 	rtnl_unlock();
 
 	return err;
+err_tsn_setup_rx:
+	igb_tsn_free_all_tx_resources(adapter);
 err_out:
 	rtnl_unlock();
 	return err;
@@ -8253,4 +8336,5 @@ static ssize_t igb_set_qav_mode(struct device *dev,
 
 	return len;
 }
+
 /* igb_main.c */
-- 
1.7.9.5

