netdev.vger.kernel.org archive mirror
* [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver
@ 2022-08-10  8:55 Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 01/16] net: txgbe: Store PCI info Jiawen Wu
                   ` (15 more replies)
  0 siblings, 16 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

This patch series adds support for the WangXun 10 Gigabit Ethernet NIC:
initializing the hardware, establishing the link, and passing traffic.

Jiawen Wu (16):
  net: txgbe: Store PCI info
  net: txgbe: Reset hardware
  net: txgbe: Set MAC address and register netdev
  net: txgbe: Add operations to interact with firmware
  net: txgbe: Identify PHY and SFP module
  net: txgbe: Initialize service task
  net: txgbe: Support to setup link
  net: txgbe: Add interrupt support
  net: txgbe: Handle various event interrupts
  net: txgbe: Configure Rx and Tx unit of the MAC
  net: txgbe: Allocate Rx and Tx resources
  net: txgbe: Add Rx and Tx cleanup routine
  net: txgbe: Add device Rx features
  net: txgbe: Add transmit path to process packets
  net: txgbe: Support to get system network statistics
  net: txgbe: support to respond Tx hang

 .../device_drivers/ethernet/wangxun/txgbe.rst |   83 +
 drivers/net/ethernet/wangxun/txgbe/Makefile   |    4 +-
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  594 ++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 4023 ++++++++++++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |  179 +
 .../net/ethernet/wangxun/txgbe/txgbe_lib.c    |  463 ++
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 5505 ++++++++++++++++-
 .../net/ethernet/wangxun/txgbe/txgbe_phy.c    |  418 ++
 .../net/ethernet/wangxun/txgbe/txgbe_phy.h    |   55 +
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   | 1791 ++++++
 10 files changed, 13088 insertions(+), 27 deletions(-)
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h

-- 
2.27.0



* [RFC PATCH net-next 01/16] net: txgbe: Store PCI info
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-11  1:55   ` Andrew Lunn
  2022-08-10  8:55 ` [RFC PATCH net-next 02/16] net: txgbe: Reset hardware Jiawen Wu
                   ` (14 subsequent siblings)
  15 siblings, 1 reply; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Read PCI config space info and store the bus info (speed, width, type).
Set the LAN ID and check the flash load status.
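
For orientation, the bus info stored here is decoded by masking two
fields out of the PCIe link status word read from config space. A
minimal stand-alone sketch of that decode (the mask and field values
are the ones defined in txgbe_type.h below; the sample link_status
value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define TXGBE_PCI_LINK_WIDTH      0x3F0
#define TXGBE_PCI_LINK_WIDTH_8    0x80
#define TXGBE_PCI_LINK_SPEED      0xF
#define TXGBE_PCI_LINK_SPEED_8000 0x3

int main(void)
{
	uint16_t link_status = 0x1083;	/* hypothetical sample: x8 at 8 GT/s */
	unsigned int width = link_status & TXGBE_PCI_LINK_WIDTH;
	unsigned int speed = link_status & TXGBE_PCI_LINK_SPEED;

	printf("width field 0x%03x (%s), speed field 0x%x (%s)\n",
	       width, width == TXGBE_PCI_LINK_WIDTH_8 ? "x8" : "other",
	       speed, speed == TXGBE_PCI_LINK_SPEED_8000 ? "8 GT/s" : "other");
	return 0;
}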

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/Makefile   |   3 +-
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  12 +
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 173 +++++++++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |  30 ++
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 177 +++++++++-
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   | 334 ++++++++++++++++++
 6 files changed, 727 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h

diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 431303ca75b4..78484c58b78b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -6,4 +6,5 @@
 
 obj-$(CONFIG_TXGBE) += txgbe.o
 
-txgbe-objs := txgbe_main.o
+txgbe-objs := txgbe_main.o \
+              txgbe_hw.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 38ddbde0ed0f..94c43eef0cd6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -17,8 +17,20 @@ struct txgbe_adapter {
 	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
+
+	/* structs defined in txgbe_type.h */
+	struct txgbe_hw hw;
+	u16 msg_enable;
 };
 
+s32 txgbe_init_shared_code(struct txgbe_hw *hw);
+
 extern char txgbe_driver_name[];
 
+#define TXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
+#define TXGBE_FAILED_READ_CFG_WORD  0xffffU
+#define TXGBE_FAILED_READ_CFG_BYTE  0xffU
+
+extern u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg);
+
 #endif /* _TXGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
new file mode 100644
index 000000000000..1baf965e50d7
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include "txgbe_type.h"
+#include "txgbe_hw.h"
+#include "txgbe.h"
+
+/**
+ *  txgbe_set_pci_config_data - Generic store PCI bus info
+ *  @hw: pointer to hardware structure
+ *  @link_status: the link status returned by the PCI config space
+ *
+ *  Stores the PCI bus info (speed, width, type) within the txgbe_hw structure
+ **/
+void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status)
+{
+	if (hw->bus.type == txgbe_bus_type_unknown)
+		hw->bus.type = txgbe_bus_type_pci_express;
+
+	switch (link_status & TXGBE_PCI_LINK_WIDTH) {
+	case TXGBE_PCI_LINK_WIDTH_1:
+		hw->bus.width = txgbe_bus_width_pcie_x1;
+		break;
+	case TXGBE_PCI_LINK_WIDTH_2:
+		hw->bus.width = txgbe_bus_width_pcie_x2;
+		break;
+	case TXGBE_PCI_LINK_WIDTH_4:
+		hw->bus.width = txgbe_bus_width_pcie_x4;
+		break;
+	case TXGBE_PCI_LINK_WIDTH_8:
+		hw->bus.width = txgbe_bus_width_pcie_x8;
+		break;
+	default:
+		hw->bus.width = txgbe_bus_width_unknown;
+		break;
+	}
+
+	switch (link_status & TXGBE_PCI_LINK_SPEED) {
+	case TXGBE_PCI_LINK_SPEED_2500:
+		hw->bus.speed = txgbe_bus_speed_2500;
+		break;
+	case TXGBE_PCI_LINK_SPEED_5000:
+		hw->bus.speed = txgbe_bus_speed_5000;
+		break;
+	case TXGBE_PCI_LINK_SPEED_8000:
+		hw->bus.speed = txgbe_bus_speed_8000;
+		break;
+	default:
+		hw->bus.speed = txgbe_bus_speed_unknown;
+		break;
+	}
+}
+
+/**
+ *  txgbe_get_bus_info - Generic get PCI bus info
+ *  @hw: pointer to hardware structure
+ *
+ *  Gets the PCI bus info (speed, width, type) then calls helper function to
+ *  store this data within the txgbe_hw structure.
+ **/
+s32 txgbe_get_bus_info(struct txgbe_hw *hw)
+{
+	u16 link_status;
+
+	/* Get the negotiated link width and speed from PCI config space */
+	link_status = txgbe_read_pci_cfg_word(hw, TXGBE_PCI_LINK_STATUS);
+
+	txgbe_set_pci_config_data(hw, link_status);
+
+	return 0;
+}
+
+/**
+ *  txgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading memory-mapped registers
+ *  and swaps the port value if requested.
+ **/
+s32 txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw)
+{
+	struct txgbe_bus_info *bus = &hw->bus;
+	u32 reg;
+
+	reg = rd32(hw, TXGBE_CFG_PORT_ST);
+	bus->lan_id = TXGBE_CFG_PORT_ST_LAN_ID(reg);
+
+	/* check for a port swap */
+	reg = rd32(hw, TXGBE_MIS_PWR);
+	if (TXGBE_MIS_PWR_LAN_ID(reg) == TXGBE_MIS_PWR_LAN_ID_1)
+		bus->func = 0;
+	else
+		bus->func = bus->lan_id;
+
+	return 0;
+}
+
+/* cmd_addr is used by some special commands:
+ * 1. as the sector address for the erase sector command
+ * 2. as the flash address for the read/write flash commands
+ */
+u8 fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr)
+{
+	u32 cmd_val = 0;
+	u32 time_out = 0;
+
+	cmd_val = (cmd << SPI_CLK_CMD_OFFSET) |
+		  (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET) | cmd_addr;
+	wr32(hw, SPI_H_CMD_REG_ADDR, cmd_val);
+	while (1) {
+		if (rd32(hw, SPI_H_STA_REG_ADDR) & 0x1)
+			break;
+
+		if (time_out == SPI_TIME_OUT_VALUE)
+			return 1;
+
+		time_out = time_out + 1;
+		usleep_range(10, 20);
+	}
+
+	return 0;
+}
+
+u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr)
+{
+	u8 status = fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr);
+
+	if (status)
+		return (u32)status;
+
+	return rd32(hw, SPI_H_DAT_REG_ADDR);
+}
+
+int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
+{
+	u32 i = 0, reg = 0;
+	int err = 0;
+
+	/* check whether a flash is present */
+	if (!(rd32(hw, TXGBE_SPI_STATUS) &
+	      TXGBE_SPI_STATUS_FLASH_BYPASS)) {
+		/* wait for hw to finish loading from flash */
+		for (i = 0; i < TXGBE_MAX_FLASH_LOAD_POLL_TIME; i++) {
+			reg = rd32(hw, TXGBE_SPI_ILDR_STATUS);
+			if (!(reg & check_bit)) {
+				/* done */
+				break;
+			}
+			msleep(200);
+		}
+		if (i == TXGBE_MAX_FLASH_LOAD_POLL_TIME)
+			err = TXGBE_ERR_FLASH_LOADING_FAILED;
+	}
+	return err;
+}
+
+/**
+ *  txgbe_init_ops - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for sapphire.
+ *  Does not touch the hardware.
+ **/
+s32 txgbe_init_ops(struct txgbe_hw *hw)
+{
+	struct txgbe_mac_info *mac = &hw->mac;
+
+	/* MAC */
+	mac->ops.get_bus_info = txgbe_get_bus_info;
+	mac->ops.set_lan_id = txgbe_set_lan_id_multi_port_pcie;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
new file mode 100644
index 000000000000..fb250c99ddfd
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_HW_H_
+#define _TXGBE_HW_H_
+
+#define SPI_CLK_DIV           2
+
+#define SPI_CMD_READ_DWORD    1  /* SPI read a dword command */
+
+#define SPI_CLK_CMD_OFFSET    28  /* SPI command field offset in Command register */
+#define SPI_CLK_DIV_OFFSET    25  /* SPI clock divide field offset in Command register */
+
+#define SPI_TIME_OUT_VALUE           10000
+#define SPI_H_CMD_REG_ADDR           0x10104  /* SPI Command register address */
+#define SPI_H_DAT_REG_ADDR           0x10108  /* SPI Data register address */
+#define SPI_H_STA_REG_ADDR           0x1010c  /* SPI Status register address */
+
+s32 txgbe_get_bus_info(struct txgbe_hw *hw);
+void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status);
+s32 txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw);
+
+int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit);
+
+s32 txgbe_init_ops(struct txgbe_hw *hw);
+
+u8 fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr);
+u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr);
+
+#endif /* _TXGBE_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index d3b9f73ecba4..d6145eca7b0a 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -10,6 +10,7 @@
 #include <linux/etherdevice.h>
 
 #include "txgbe.h"
+#include "txgbe_hw.h"
 
 char txgbe_driver_name[] = "txgbe";
 
@@ -30,6 +31,130 @@ static const struct pci_device_id txgbe_pci_tbl[] = {
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static void txgbe_check_minimum_link(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev;
+
+	/* Some devices are not connected over PCIe and thus do not negotiate
+	 * speed. These devices do not have valid bus info, and thus any report
+	 * we generate may not be correct.
+	 */
+	if (hw->bus.type == txgbe_bus_type_internal)
+		return;
+
+	pdev = adapter->pdev;
+	pcie_print_link_status(pdev);
+}
+
+/**
+ * txgbe_enumerate_functions - Get the number of ports this device has
+ * @adapter: adapter structure
+ *
+ * This function enumerates the physical functions co-located on a single slot,
+ * in order to determine how many ports a device has. This is most useful in
+ * determining the PCIe bandwidth (GT/s) required for optimal
+ * performance.
+ **/
+static inline int txgbe_enumerate_functions(struct txgbe_adapter *adapter)
+{
+	struct pci_dev *entry, *pdev = adapter->pdev;
+	int physfns = 0;
+
+	list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
+		/* When the devices on the bus don't all match our device ID,
+		 * we can't reliably determine the correct number of
+		 * functions. This can occur if a function has been direct
+		 * attached to a virtual machine using VT-d, for example. In
+		 * this case, simply return -1 to indicate this.
+		 */
+		if (entry->vendor != pdev->vendor ||
+		    entry->device != pdev->device)
+			return -1;
+
+		physfns++;
+	}
+
+	return physfns;
+}
+
+static void txgbe_remove_adapter(struct txgbe_hw *hw)
+{
+	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
+
+	if (!hw->hw_addr)
+		return;
+	hw->hw_addr = NULL;
+	dev_info(&adapter->pdev->dev, "Adapter removed\n");
+}
+
+static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev)
+{
+	u16 value;
+
+	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
+	if (value == TXGBE_FAILED_READ_CFG_WORD) {
+		txgbe_remove_adapter(hw);
+		return true;
+	}
+	return false;
+}
+
+/**
+ *  txgbe_init_shared_code - Initialize the shared code
+ *  @hw: pointer to hardware structure
+ *
+ *  This will assign function pointers and assign the MAC type and PHY code.
+ **/
+s32 txgbe_init_shared_code(struct txgbe_hw *hw)
+{
+	s32 status;
+
+	status = txgbe_init_ops(hw);
+	return status;
+}
+
+/**
+ * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter)
+ * @adapter: board private structure to initialize
+ **/
+static int txgbe_sw_init(struct txgbe_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 ssid = 0;
+	int err = 0;
+
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->revision_id = pdev->revision;
+	hw->oem_svid = pdev->subsystem_vendor;
+	hw->oem_ssid = pdev->subsystem_device;
+
+	if (hw->oem_svid == PCI_VENDOR_ID_WANGXUN) {
+		hw->subsystem_vendor_id = pdev->subsystem_vendor;
+		hw->subsystem_device_id = pdev->subsystem_device;
+	} else {
+		ssid = txgbe_flash_read_dword(hw, 0xfffdc);
+		if (ssid == 0x1) {
+			netif_err(adapter, probe, adapter->netdev,
+				  "read of internal subsystem device id failed\n");
+			return -ENODEV;
+		}
+		hw->subsystem_device_id = (u16)ssid >> 8 | (u16)ssid << 8;
+	}
+
+	err = txgbe_init_shared_code(hw);
+	if (err) {
+		netif_err(adapter, probe, adapter->netdev,
+			  "init_shared_code failed: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
 static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
 	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
@@ -67,8 +192,9 @@ static int txgbe_probe(struct pci_dev *pdev,
 		       const struct pci_device_id __always_unused *ent)
 {
 	struct txgbe_adapter *adapter = NULL;
+	struct txgbe_hw *hw = NULL;
 	struct net_device *netdev;
-	int err;
+	int err, expected_gts;
 
 	err = pci_enable_device_mem(pdev);
 	if (err)
@@ -107,6 +233,8 @@ static int txgbe_probe(struct pci_dev *pdev,
 	adapter = netdev_priv(netdev);
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
+	hw = &adapter->hw;
+	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 
 	adapter->io_addr = devm_ioremap(&pdev->dev,
 					pci_resource_start(pdev, 0),
@@ -115,11 +243,44 @@ static int txgbe_probe(struct pci_dev *pdev,
 		err = -EIO;
 		goto err_pci_release_regions;
 	}
+	hw->hw_addr = adapter->io_addr;
+
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	/* setup the private structure */
+	err = txgbe_sw_init(adapter);
+	if (err)
+		goto err_pci_release_regions;
+
+	TCALL(hw, mac.ops.set_lan_id);
+
+	/* check if flash load is done after hw power up */
+	err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PERST);
+	if (err)
+		goto err_pci_release_regions;
+	err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PWRRST);
+	if (err)
+		goto err_pci_release_regions;
 
 	netdev->features |= NETIF_F_HIGHDMA;
 
+	/* pick up the PCI bus settings for reporting later */
+	TCALL(hw, mac.ops.get_bus_info);
+
 	pci_set_drvdata(pdev, adapter);
 
+	/* calculate the expected PCIe bandwidth required for optimal
+	 * performance. Note that some older parts will never have enough
+	 * bandwidth due to being older generation PCIe parts. We clamp these
+	 * parts to ensure that no warning is displayed, as this could confuse
+	 * users otherwise.
+	 */
+	expected_gts = txgbe_enumerate_functions(adapter) * 10;
+
+	/* don't check link if we failed to enumerate functions */
+	if (expected_gts > 0)
+		txgbe_check_minimum_link(adapter);
+
 	return 0;
 
 err_pci_release_regions:
@@ -150,6 +311,20 @@ static void txgbe_remove(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
+u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg)
+{
+	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
+	u16 value;
+
+	if (TXGBE_REMOVED(hw->hw_addr))
+		return TXGBE_FAILED_READ_CFG_WORD;
+	pci_read_config_word(adapter->pdev, reg, &value);
+	if (value == TXGBE_FAILED_READ_CFG_WORD &&
+	    txgbe_check_cfg_remove(hw, adapter->pdev))
+		return TXGBE_FAILED_READ_CFG_WORD;
+	return value;
+}
+
 static struct pci_driver txgbe_driver = {
 	.name     = txgbe_driver_name,
 	.id_table = txgbe_pci_tbl,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index b2e329f50bae..b769af5e6cbb 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -54,4 +54,338 @@
 /* Revision ID */
 #define TXGBE_SP_MPW  1
 
+/**************** Global Registers ****************************/
+/* chip control Registers */
+#define TXGBE_MIS_RST                   0x1000C
+#define TXGBE_MIS_PWR                   0x10000
+#define TXGBE_MIS_CTL                   0x10004
+#define TXGBE_MIS_PF_SM                 0x10008
+#define TXGBE_MIS_PRB_CTL               0x10010
+#define TXGBE_MIS_ST                    0x10028
+#define TXGBE_MIS_SWSM                  0x1002C
+#define TXGBE_MIS_RST_ST                0x10030
+
+#define TXGBE_MIS_RST_SW_RST            0x00000001U
+#define TXGBE_MIS_RST_LAN0_RST          0x00000002U
+#define TXGBE_MIS_RST_LAN1_RST          0x00000004U
+#define TXGBE_MIS_RST_LAN0_CHG_ETH_MODE 0x20000000U
+#define TXGBE_MIS_RST_LAN1_CHG_ETH_MODE 0x40000000U
+#define TXGBE_MIS_RST_GLOBAL_RST        0x80000000U
+#define TXGBE_MIS_RST_MASK      (TXGBE_MIS_RST_SW_RST | \
+				 TXGBE_MIS_RST_LAN0_RST | \
+				 TXGBE_MIS_RST_LAN1_RST)
+#define TXGBE_MIS_PWR_LAN_ID(_r)        ((0xC0000000U & (_r)) >> 30)
+#define TXGBE_MIS_PWR_LAN_ID_0          (1)
+#define TXGBE_MIS_PWR_LAN_ID_1          (2)
+#define TXGBE_MIS_PWR_LAN_ID_A          (3)
+#define TXGBE_MIS_ST_MNG_INIT_DN        0x00000001U
+#define TXGBE_MIS_ST_MNG_VETO           0x00000100U
+#define TXGBE_MIS_ST_LAN0_ECC           0x00010000U
+#define TXGBE_MIS_ST_LAN1_ECC           0x00020000U
+#define TXGBE_MIS_ST_MNG_ECC            0x00040000U
+#define TXGBE_MIS_ST_PCORE_ECC          0x00080000U
+#define TXGBE_MIS_ST_PCIWRP_ECC         0x00100000U
+#define TXGBE_MIS_SWSM_SMBI             1
+#define TXGBE_MIS_RST_ST_DEV_RST_ST_DONE        0x00000000U
+#define TXGBE_MIS_RST_ST_DEV_RST_ST_REQ         0x00080000U
+#define TXGBE_MIS_RST_ST_DEV_RST_ST_INPROGRESS  0x00100000U
+#define TXGBE_MIS_RST_ST_DEV_RST_ST_MASK        0x00180000U
+#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_MASK      0x00070000U
+#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT     16
+#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST    0x3
+#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST 0x5
+#define TXGBE_MIS_RST_ST_RST_INIT       0x0000FF00U
+#define TXGBE_MIS_RST_ST_RST_INI_SHIFT  8
+#define TXGBE_MIS_RST_ST_RST_TIM        0x000000FFU
+#define TXGBE_MIS_PF_SM_SM              1
+#define TXGBE_MIS_PRB_CTL_LAN0_UP       0x2
+#define TXGBE_MIS_PRB_CTL_LAN1_UP       0x1
+
+/* FMGR Registers */
+#define TXGBE_SPI_ILDR_STATUS           0x10120
+#define TXGBE_SPI_ILDR_STATUS_PERST     0x00000001U /* PCIE_PERST is done */
+#define TXGBE_SPI_ILDR_STATUS_PWRRST    0x00000002U /* Power on reset is done */
+#define TXGBE_SPI_ILDR_STATUS_SW_RESET  0x00000080U /* software reset is done */
+#define TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST 0x00000200U /* lan0 soft reset done */
+#define TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST 0x00000400U /* lan1 soft reset done */
+
+#define TXGBE_MAX_FLASH_LOAD_POLL_TIME  10
+
+#define TXGBE_SPI_CMD                   0x10104
+#define TXGBE_SPI_CMD_CMD(_v)           (((_v) & 0x7) << 28)
+#define TXGBE_SPI_CMD_CLK(_v)           (((_v) & 0x7) << 25)
+#define TXGBE_SPI_CMD_ADDR(_v)          (((_v) & 0xFFFFFF))
+#define TXGBE_SPI_DATA                  0x10108
+#define TXGBE_SPI_DATA_BYPASS           ((0x1) << 31)
+#define TXGBE_SPI_DATA_STATUS(_v)       (((_v) & 0xFF) << 16)
+#define TXGBE_SPI_DATA_OP_DONE          ((0x1))
+
+#define TXGBE_SPI_STATUS                0x1010C
+#define TXGBE_SPI_STATUS_OPDONE         ((0x1))
+#define TXGBE_SPI_STATUS_FLASH_BYPASS   ((0x1) << 31)
+
+#define TXGBE_SPI_USR_CMD               0x10110
+#define TXGBE_SPI_CMDCFG0               0x10114
+#define TXGBE_SPI_CMDCFG1               0x10118
+#define TXGBE_SPI_ECC_CTL               0x10130
+#define TXGBE_SPI_ECC_INJ               0x10134
+#define TXGBE_SPI_ECC_ST                0x10138
+#define TXGBE_SPI_ILDR_SWPTR            0x10124
+
+/* port cfg Registers */
+#define TXGBE_CFG_PORT_CTL              0x14400
+#define TXGBE_CFG_PORT_ST               0x14404
+#define TXGBE_CFG_EX_VTYPE              0x14408
+#define TXGBE_CFG_LED_CTL               0x14424
+#define TXGBE_CFG_VXLAN                 0x14410
+#define TXGBE_CFG_VXLAN_GPE             0x14414
+#define TXGBE_CFG_GENEVE                0x14418
+#define TXGBE_CFG_TEREDO                0x1441C
+#define TXGBE_CFG_TCP_TIME              0x14420
+#define TXGBE_CFG_TAG_TPID(_i)          (0x14430 + ((_i) * 4))
+/* port cfg bit */
+#define TXGBE_CFG_PORT_CTL_PFRSTD       0x00004000U /* Phy Function Reset Done */
+#define TXGBE_CFG_PORT_CTL_D_VLAN       0x00000001U /* double vlan */
+#define TXGBE_CFG_PORT_CTL_ETAG_ETYPE_VLD 0x00000002U
+#define TXGBE_CFG_PORT_CTL_QINQ         0x00000004U
+#define TXGBE_CFG_PORT_CTL_DRV_LOAD     0x00000008U
+#define TXGBE_CFG_PORT_CTL_FORCE_LKUP   0x00000010U /* force link up */
+#define TXGBE_CFG_PORT_CTL_DCB_EN       0x00000400U /* dcb enabled */
+#define TXGBE_CFG_PORT_CTL_NUM_TC_MASK  0x00000800U /* number of TCs */
+#define TXGBE_CFG_PORT_CTL_NUM_TC_4     0x00000000U
+#define TXGBE_CFG_PORT_CTL_NUM_TC_8     0x00000800U
+#define TXGBE_CFG_PORT_CTL_NUM_VT_MASK  0x00003000U /* number of VTs */
+#define TXGBE_CFG_PORT_CTL_NUM_VT_NONE  0x00000000U
+#define TXGBE_CFG_PORT_CTL_NUM_VT_16    0x00001000U
+#define TXGBE_CFG_PORT_CTL_NUM_VT_32    0x00002000U
+#define TXGBE_CFG_PORT_CTL_NUM_VT_64    0x00003000U
+/* Status Bit */
+#define TXGBE_CFG_PORT_ST_LINK_UP       0x00000001U
+#define TXGBE_CFG_PORT_ST_LINK_10G      0x00000002U
+#define TXGBE_CFG_PORT_ST_LINK_1G       0x00000004U
+#define TXGBE_CFG_PORT_ST_LINK_100M     0x00000008U
+#define TXGBE_CFG_PORT_ST_LAN_ID(_r)    ((0x00000100U & (_r)) >> 8)
+#define TXGBE_LINK_UP_TIME              90
+/* LED CTL Bit */
+#define TXGBE_CFG_LED_CTL_LINK_BSY_SEL  0x00000010U
+#define TXGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000008U
+#define TXGBE_CFG_LED_CTL_LINK_1G_SEL   0x00000004U
+#define TXGBE_CFG_LED_CTL_LINK_10G_SEL  0x00000002U
+#define TXGBE_CFG_LED_CTL_LINK_UP_SEL   0x00000001U
+#define TXGBE_CFG_LED_CTL_LINK_OD_SHIFT 16
+/******************************** PCI Bus Info *******************************/
+#define TXGBE_PCI_DEVICE_STATUS         0xAA
+#define TXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING     0x0020
+#define TXGBE_PCI_LINK_STATUS           0xB2
+#define TXGBE_PCI_DEVICE_CONTROL2       0xC8
+#define TXGBE_PCI_LINK_WIDTH            0x3F0
+#define TXGBE_PCI_LINK_WIDTH_1          0x10
+#define TXGBE_PCI_LINK_WIDTH_2          0x20
+#define TXGBE_PCI_LINK_WIDTH_4          0x40
+#define TXGBE_PCI_LINK_WIDTH_8          0x80
+#define TXGBE_PCI_LINK_SPEED            0xF
+#define TXGBE_PCI_LINK_SPEED_2500       0x1
+#define TXGBE_PCI_LINK_SPEED_5000       0x2
+#define TXGBE_PCI_LINK_SPEED_8000       0x3
+#define TXGBE_PCI_HEADER_TYPE_REGISTER  0x0E
+#define TXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define TXGBE_PCI_DEVICE_CONTROL2_16ms  0x0005
+
+#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET    4
+#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_MASK      \
+				(0x0001 << TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET)
+#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_ENABLE    \
+				(0x01 << TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET)
+
+#define TXGBE_PCIDEVCTRL2_TIMEO_MASK    0xf
+#define TXGBE_PCIDEVCTRL2_16_32ms_def   0x0
+#define TXGBE_PCIDEVCTRL2_50_100us      0x1
+#define TXGBE_PCIDEVCTRL2_1_2ms         0x2
+#define TXGBE_PCIDEVCTRL2_16_32ms       0x5
+#define TXGBE_PCIDEVCTRL2_65_130ms      0x6
+#define TXGBE_PCIDEVCTRL2_260_520ms     0x9
+#define TXGBE_PCIDEVCTRL2_1_2s          0xa
+#define TXGBE_PCIDEVCTRL2_4_8s          0xd
+#define TXGBE_PCIDEVCTRL2_17_34s        0xe
+
+/* PCI bus types */
+enum txgbe_bus_type {
+	txgbe_bus_type_unknown = 0,
+	txgbe_bus_type_pci,
+	txgbe_bus_type_pcix,
+	txgbe_bus_type_pci_express,
+	txgbe_bus_type_internal,
+	txgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum txgbe_bus_speed {
+	txgbe_bus_speed_unknown	= 0,
+	txgbe_bus_speed_33	= 33,
+	txgbe_bus_speed_66	= 66,
+	txgbe_bus_speed_100	= 100,
+	txgbe_bus_speed_120	= 120,
+	txgbe_bus_speed_133	= 133,
+	txgbe_bus_speed_2500	= 2500,
+	txgbe_bus_speed_5000	= 5000,
+	txgbe_bus_speed_8000	= 8000,
+	txgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum txgbe_bus_width {
+	txgbe_bus_width_unknown	= 0,
+	txgbe_bus_width_pcie_x1	= 1,
+	txgbe_bus_width_pcie_x2	= 2,
+	txgbe_bus_width_pcie_x4	= 4,
+	txgbe_bus_width_pcie_x8	= 8,
+	txgbe_bus_width_32	= 32,
+	txgbe_bus_width_64	= 64,
+	txgbe_bus_width_reserved
+};
+
+/* Bus parameters */
+struct txgbe_bus_info {
+	enum txgbe_bus_speed speed;
+	enum txgbe_bus_width width;
+	enum txgbe_bus_type type;
+
+	u16 func;
+	u16 lan_id;
+};
+
+/* forward declaration */
+struct txgbe_hw;
+
+struct txgbe_mac_operations {
+	s32 (*get_bus_info)(struct txgbe_hw *hw);
+	s32 (*set_lan_id)(struct txgbe_hw *hw);
+
+};
+
+struct txgbe_mac_info {
+	struct txgbe_mac_operations ops;
+};
+
+struct txgbe_hw {
+	u8 __iomem *hw_addr;
+	struct txgbe_mac_info mac;
+	struct txgbe_bus_info bus;
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+	u16 oem_ssid;
+	u16 oem_svid;
+};
+
+#define TCALL(hw, func, args...) (((hw)->func) \
+		? (hw)->func((hw), ##args) : TXGBE_NOT_IMPLEMENTED)
+
+/* Error Codes */
+#define TXGBE_ERR                                100
+#define TXGBE_NOT_IMPLEMENTED                    0x7FFFFFFF
+/* (-TXGBE_ERR, TXGBE_ERR): reserved for non-txgbe defined error code */
+#define TXGBE_ERR_NOSUPP                        -(TXGBE_ERR + 0)
+#define TXGBE_ERR_EEPROM                        -(TXGBE_ERR + 1)
+#define TXGBE_ERR_EEPROM_CHECKSUM               -(TXGBE_ERR + 2)
+#define TXGBE_ERR_PHY                           -(TXGBE_ERR + 3)
+#define TXGBE_ERR_CONFIG                        -(TXGBE_ERR + 4)
+#define TXGBE_ERR_PARAM                         -(TXGBE_ERR + 5)
+#define TXGBE_ERR_MAC_TYPE                      -(TXGBE_ERR + 6)
+#define TXGBE_ERR_UNKNOWN_PHY                   -(TXGBE_ERR + 7)
+#define TXGBE_ERR_LINK_SETUP                    -(TXGBE_ERR + 8)
+#define TXGBE_ERR_ADAPTER_STOPPED               -(TXGBE_ERR + 9)
+#define TXGBE_ERR_INVALID_MAC_ADDR              -(TXGBE_ERR + 10)
+#define TXGBE_ERR_DEVICE_NOT_SUPPORTED          -(TXGBE_ERR + 11)
+#define TXGBE_ERR_MASTER_REQUESTS_PENDING       -(TXGBE_ERR + 12)
+#define TXGBE_ERR_INVALID_LINK_SETTINGS         -(TXGBE_ERR + 13)
+#define TXGBE_ERR_AUTONEG_NOT_COMPLETE          -(TXGBE_ERR + 14)
+#define TXGBE_ERR_RESET_FAILED                  -(TXGBE_ERR + 15)
+#define TXGBE_ERR_SWFW_SYNC                     -(TXGBE_ERR + 16)
+#define TXGBE_ERR_PHY_ADDR_INVALID              -(TXGBE_ERR + 17)
+#define TXGBE_ERR_I2C                           -(TXGBE_ERR + 18)
+#define TXGBE_ERR_SFP_NOT_SUPPORTED             -(TXGBE_ERR + 19)
+#define TXGBE_ERR_SFP_NOT_PRESENT               -(TXGBE_ERR + 20)
+#define TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT       -(TXGBE_ERR + 21)
+#define TXGBE_ERR_NO_SAN_ADDR_PTR               -(TXGBE_ERR + 22)
+#define TXGBE_ERR_FDIR_REINIT_FAILED            -(TXGBE_ERR + 23)
+#define TXGBE_ERR_EEPROM_VERSION                -(TXGBE_ERR + 24)
+#define TXGBE_ERR_NO_SPACE                      -(TXGBE_ERR + 25)
+#define TXGBE_ERR_OVERTEMP                      -(TXGBE_ERR + 26)
+#define TXGBE_ERR_UNDERTEMP                     -(TXGBE_ERR + 27)
+#define TXGBE_ERR_FC_NOT_NEGOTIATED             -(TXGBE_ERR + 28)
+#define TXGBE_ERR_FC_NOT_SUPPORTED              -(TXGBE_ERR + 29)
+#define TXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -(TXGBE_ERR + 30)
+#define TXGBE_ERR_PBA_SECTION                   -(TXGBE_ERR + 31)
+#define TXGBE_ERR_INVALID_ARGUMENT              -(TXGBE_ERR + 32)
+#define TXGBE_ERR_HOST_INTERFACE_COMMAND        -(TXGBE_ERR + 33)
+#define TXGBE_ERR_OUT_OF_MEM                    -(TXGBE_ERR + 34)
+#define TXGBE_ERR_FEATURE_NOT_SUPPORTED         -(TXGBE_ERR + 36)
+#define TXGBE_ERR_EEPROM_PROTECTED_REGION       -(TXGBE_ERR + 37)
+#define TXGBE_ERR_FDIR_CMD_INCOMPLETE           -(TXGBE_ERR + 38)
+#define TXGBE_ERR_FLASH_LOADING_FAILED          -(TXGBE_ERR + 39)
+#define TXGBE_ERR_XPCS_POWER_UP_FAILED          -(TXGBE_ERR + 40)
+#define TXGBE_ERR_FW_RESP_INVALID               -(TXGBE_ERR + 41)
+#define TXGBE_ERR_PHY_INIT_NOT_DONE             -(TXGBE_ERR + 42)
+#define TXGBE_ERR_TIMEOUT                       -(TXGBE_ERR + 43)
+#define TXGBE_ERR_TOKEN_RETRY                   -(TXGBE_ERR + 44)
+#define TXGBE_ERR_REGISTER                      -(TXGBE_ERR + 45)
+#define TXGBE_ERR_MBX                           -(TXGBE_ERR + 46)
+#define TXGBE_ERR_MNG_ACCESS_FAILED             -(TXGBE_ERR + 47)
+
+/**
+ * register operations
+ **/
+/* read register */
+#define TXGBE_DEAD_READ_RETRIES     10
+#define TXGBE_DEAD_READ_REG         0xdeadbeefU
+#define TXGBE_DEAD_READ_REG64       0xdeadbeefdeadbeefULL
+#define TXGBE_FAILED_READ_REG       0xffffffffU
+#define TXGBE_FAILED_READ_REG64     0xffffffffffffffffULL
+
+static inline bool TXGBE_REMOVED(void __iomem *addr)
+{
+	return unlikely(!addr);
+}
+
+static inline u32
+txgbe_rd32(u8 __iomem *base)
+{
+	return readl(base);
+}
+
+static inline u32
+rd32(struct txgbe_hw *hw, u32 reg)
+{
+	u8 __iomem *base = READ_ONCE(hw->hw_addr);
+	u32 val = TXGBE_FAILED_READ_REG;
+
+	if (unlikely(!base))
+		return val;
+
+	val = txgbe_rd32(base + reg);
+
+	return val;
+}
+
+/* write register */
+static inline void
+txgbe_wr32(u8 __iomem *base, u32 val)
+{
+	writel(val, base);
+}
+
+static inline void
+wr32(struct txgbe_hw *hw, u32 reg, u32 val)
+{
+	u8 __iomem *base = READ_ONCE(hw->hw_addr);
+
+	if (unlikely(!base))
+		return;
+
+	txgbe_wr32(base + reg, val);
+}
+
 #endif /* _TXGBE_TYPE_H_ */
-- 
2.27.0



* [RFC PATCH net-next 02/16] net: txgbe: Reset hardware
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 01/16] net: txgbe: Store PCI info Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 03/16] net: txgbe: Set MAC address and register netdev Jiawen Wu
                   ` (13 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Reset and initialize the hardware by configuring the MAC layer.
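
The reset itself is a read-modify-write of the per-port reset bit in
TXGBE_MIS_RST, posted with a flush read before the delay. A rough
self-contained sketch of that sequence against a fake register file
(the register offset and bits are the ones defined in txgbe_type.h;
the rd32/wr32 stubs and the rest are scaffolding for the example,
not driver code):

#include <stdio.h>
#include <stdint.h>

#define TXGBE_MIS_RST          0x1000C
#define TXGBE_MIS_RST_LAN0_RST 0x00000002U
#define TXGBE_MIS_RST_LAN1_RST 0x00000004U

static uint32_t regs[0x20000 / 4];	/* fake MMIO space */

static uint32_t rd32(uint32_t reg) { return regs[reg / 4]; }
static void wr32(uint32_t reg, uint32_t val) { regs[reg / 4] = val; }

int main(void)
{
	int lan_id = 0;	/* would come from TXGBE_CFG_PORT_ST via set_lan_id */
	uint32_t reset = lan_id == 0 ? TXGBE_MIS_RST_LAN0_RST
				     : TXGBE_MIS_RST_LAN1_RST;

	/* OR the per-port reset bit into MIS_RST, keeping the other bits */
	wr32(TXGBE_MIS_RST, reset | rd32(TXGBE_MIS_RST));
	/* TXGBE_WRITE_FLUSH: a read back posts the write before sleeping */
	(void)rd32(TXGBE_MIS_RST);

	printf("MIS_RST = 0x%08x\n", rd32(TXGBE_MIS_RST));
	return 0;
}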

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  66 +++++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 253 ++++++++++++++++++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |   7 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   |   6 +
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   | 249 +++++++++++++++++
 5 files changed, 581 insertions(+)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 94c43eef0cd6..393f6454f023 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -4,6 +4,8 @@
 #ifndef _TXGBE_H_
 #define _TXGBE_H_
 
+#include <linux/pci.h>
+
 #include "txgbe_type.h"
 
 #define TXGBE_MAX_FDIR_INDICES          63
@@ -25,12 +27,76 @@ struct txgbe_adapter {
 
 s32 txgbe_init_shared_code(struct txgbe_hw *hw);
 
+#define TXGBE_INTR_ALL (~0ULL)
+
+static inline void txgbe_intr_disable(struct txgbe_hw *hw, u64 qmask)
+{
+	u32 mask;
+
+	mask = (qmask & 0xFFFFFFFF);
+	if (mask)
+		wr32(hw, TXGBE_PX_IMS(0), mask);
+	mask = (qmask >> 32);
+	if (mask)
+		wr32(hw, TXGBE_PX_IMS(1), mask);
+}
+
 extern char txgbe_driver_name[];
 
+struct txgbe_msg {
+	u16 msg_enable;
+};
+
+__maybe_unused static struct net_device *txgbe_hw_to_netdev(const struct txgbe_hw *hw)
+{
+	struct txgbe_adapter *adapter =
+		container_of(hw, struct txgbe_adapter, hw);
+	return adapter->netdev;
+}
+
+__maybe_unused static struct txgbe_msg *txgbe_hw_to_msg(const struct txgbe_hw *hw)
+{
+	struct txgbe_adapter *adapter =
+		container_of(hw, struct txgbe_adapter, hw);
+	return (struct txgbe_msg *)&adapter->msg_enable;
+}
+
 #define TXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
 #define TXGBE_FAILED_READ_CFG_WORD  0xffffU
 #define TXGBE_FAILED_READ_CFG_BYTE  0xffU
 
 extern u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg);
 
+enum {
+	TXGBE_ERROR_SOFTWARE,
+	TXGBE_ERROR_POLLING,
+	TXGBE_ERROR_INVALID_STATE,
+	TXGBE_ERROR_UNSUPPORTED,
+	TXGBE_ERROR_ARGUMENT,
+	TXGBE_ERROR_CAUTION,
+};
+
+#define ERROR_REPORT(hw, level, format, arg...) do {                           \
+	switch (level) {                                                       \
+	case TXGBE_ERROR_SOFTWARE:                                             \
+	case TXGBE_ERROR_CAUTION:                                              \
+	case TXGBE_ERROR_POLLING:                                              \
+		netif_warn(txgbe_hw_to_msg(hw), drv, txgbe_hw_to_netdev(hw),   \
+			   format, ## arg);                                    \
+		break;                                                         \
+	case TXGBE_ERROR_INVALID_STATE:                                        \
+	case TXGBE_ERROR_UNSUPPORTED:                                          \
+	case TXGBE_ERROR_ARGUMENT:                                             \
+		netif_err(txgbe_hw_to_msg(hw), hw, txgbe_hw_to_netdev(hw),     \
+			  format, ## arg);                                     \
+		break;                                                         \
+	default:                                                               \
+		break;                                                         \
+	}                                                                      \
+} while (0)
+
+#define ERROR_REPORT1 ERROR_REPORT
+#define ERROR_REPORT2 ERROR_REPORT
+#define ERROR_REPORT3 ERROR_REPORT
+
 #endif /* _TXGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 1baf965e50d7..060f9e4ef65b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -5,6 +5,9 @@
 #include "txgbe_hw.h"
 #include "txgbe.h"
 
+#define TXGBE_SP_MAX_TX_QUEUES  128
+#define TXGBE_SP_MAX_RX_QUEUES  128
+
 /**
  *  txgbe_set_pci_config_data - Generic store PCI bus info
  *  @hw: pointer to hardware structure
@@ -95,6 +98,93 @@ s32 txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw)
 	return 0;
 }
 
+/**
+ *  txgbe_stop_adapter - Generic stop Tx/Rx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within txgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+s32 txgbe_stop_adapter(struct txgbe_hw *hw)
+{
+	u16 i;
+
+	/* Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = true;
+
+	/* Disable the receive unit */
+	TCALL(hw, mac.ops.disable_rx);
+
+	/* Set interrupt mask to stop interrupts from being generated */
+	txgbe_intr_disable(hw, TXGBE_INTR_ALL);
+
+	/* Clear any pending interrupts, flush previous writes */
+	wr32(hw, TXGBE_PX_MISC_IC, 0xffffffff);
+	wr32(hw, TXGBE_BME_CTL, 0x3);
+
+	/* Disable the transmit unit.  Each queue must be disabled. */
+	for (i = 0; i < hw->mac.max_tx_queues; i++) {
+		wr32m(hw, TXGBE_PX_TR_CFG(i),
+		      TXGBE_PX_TR_CFG_SWFLSH | TXGBE_PX_TR_CFG_ENABLE,
+		      TXGBE_PX_TR_CFG_SWFLSH);
+	}
+
+	/* Disable the receive unit by stopping each queue */
+	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+		wr32m(hw, TXGBE_PX_RR_CFG(i),
+		      TXGBE_PX_RR_CFG_RR_EN, 0);
+	}
+
+	/* flush all queues disables */
+	TXGBE_WRITE_FLUSH(hw);
+
+	/* Prevent the PCI-E bus from hanging by disabling PCI-E master
+	 * access and verify no pending requests
+	 */
+	return txgbe_disable_pcie_master(hw);
+}
+
+/**
+ *  txgbe_disable_pcie_master - Disable PCI-express master access
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests. Returns TXGBE_ERR_MASTER_REQUESTS_PENDING if the master disable
+ *  bit has not caused the master requests to be disabled; otherwise returns 0,
+ *  signifying that master requests are disabled.
+ **/
+s32 txgbe_disable_pcie_master(struct txgbe_hw *hw)
+{
+	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
+	s32 status = 0;
+	u32 i;
+
+	/* Disable bus mastering so any future transactions are blocked */
+	pci_clear_master(adapter->pdev);
+
+	/* Exit if master requests are blocked */
+	if (!(rd32(hw, TXGBE_PX_TRANSACTION_PENDING)) ||
+	    TXGBE_REMOVED(hw->hw_addr))
+		goto out;
+
+	/* Poll for master request bit to clear */
+	for (i = 0; i < TXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+		usleep_range(100, 120);
+		if (!(rd32(hw, TXGBE_PX_TRANSACTION_PENDING)))
+			goto out;
+	}
+
+	ERROR_REPORT1(hw, TXGBE_ERROR_POLLING,
+		      "PCIe transaction pending bit did not clear.\n");
+	status = TXGBE_ERR_MASTER_REQUESTS_PENDING;
+out:
+	return status;
+}
+
 /* cmd_addr is used for some special command:
  * 1. to be sector address, when implemented erase sector command
  * 2. to be flash address when implemented read, write flash address
@@ -131,6 +221,67 @@ u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr)
 	return rd32(hw, SPI_H_DAT_REG_ADDR);
 }
 
+/**
+ *  txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
+ *  @hw: pointer to hardware structure
+ *
+ *  Inits the thermal sensor thresholds according to the NVM map and
+ *  saves the threshold and location values into mac.thermal_sensor_data.
+ **/
+s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw)
+{
+	s32 status = 0;
+
+	struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	memset(data, 0, sizeof(struct txgbe_thermal_sensor_data));
+
+	/* Only support thermal sensors attached to SP physical port 0 */
+	if (hw->bus.lan_id)
+		return TXGBE_NOT_IMPLEMENTED;
+
+	wr32(hw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD);
+	wr32(hw, TXGBE_TS_INT_EN,
+	     TXGBE_TS_INT_EN_ALARM_INT_EN | TXGBE_TS_INT_EN_DALARM_INT_EN);
+	wr32(hw, TXGBE_TS_EN, TXGBE_TS_EN_ENA);
+
+	data->sensor.alarm_thresh = 100;
+	wr32(hw, TXGBE_TS_ALARM_THRE, 677);
+	data->sensor.dalarm_thresh = 90;
+	wr32(hw, TXGBE_TS_DALARM_THRE, 614);
+
+	return status;
+}
+
+s32 txgbe_disable_rx(struct txgbe_hw *hw)
+{
+	u32 pfdtxgswc;
+	u32 rxctrl;
+
+	rxctrl = rd32(hw, TXGBE_RDB_PB_CTL);
+	if (rxctrl & TXGBE_RDB_PB_CTL_RXEN) {
+		pfdtxgswc = rd32(hw, TXGBE_PSR_CTL);
+		if (pfdtxgswc & TXGBE_PSR_CTL_SW_EN) {
+			pfdtxgswc &= ~TXGBE_PSR_CTL_SW_EN;
+			wr32(hw, TXGBE_PSR_CTL, pfdtxgswc);
+			hw->mac.set_lben = true;
+		} else {
+			hw->mac.set_lben = false;
+		}
+		rxctrl &= ~TXGBE_RDB_PB_CTL_RXEN;
+		wr32(hw, TXGBE_RDB_PB_CTL, rxctrl);
+
+		if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) ||
+		      ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) {
+			/* disable mac receiver */
+			wr32m(hw, TXGBE_MAC_RX_CFG,
+			      TXGBE_MAC_RX_CFG_RE, 0);
+		}
+	}
+
+	return 0;
+}
+
 int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
 {
 	u32 i = 0, reg = 0;
@@ -166,8 +317,110 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	struct txgbe_mac_info *mac = &hw->mac;
 
 	/* MAC */
+	mac->ops.stop_adapter = txgbe_stop_adapter;
 	mac->ops.get_bus_info = txgbe_get_bus_info;
 	mac->ops.set_lan_id = txgbe_set_lan_id_multi_port_pcie;
+	mac->ops.reset_hw = txgbe_reset_hw;
+
+	/* RAR */
+	mac->ops.disable_rx = txgbe_disable_rx;
+
+	mac->max_rx_queues      = TXGBE_SP_MAX_RX_QUEUES;
+	mac->max_tx_queues      = TXGBE_SP_MAX_TX_QUEUES;
+
+	/* Manageability interface */
+	mac->ops.init_thermal_sensor_thresh =
+				      txgbe_init_thermal_sensor_thresh;
+
+	return 0;
+}
+
+int txgbe_reset_misc(struct txgbe_hw *hw)
+{
+	int i;
+
+	/* receive packets larger than 2048 bytes (jumbo enable) */
+	wr32m(hw, TXGBE_MAC_RX_CFG,
+	      TXGBE_MAC_RX_CFG_JE, TXGBE_MAC_RX_CFG_JE);
+
+	/* clear counters on read */
+	wr32m(hw, TXGBE_MMC_CONTROL,
+	      TXGBE_MMC_CONTROL_RSTONRD, TXGBE_MMC_CONTROL_RSTONRD);
+
+	wr32m(hw, TXGBE_MAC_RX_FLOW_CTRL,
+	      TXGBE_MAC_RX_FLOW_CTRL_RFE, TXGBE_MAC_RX_FLOW_CTRL_RFE);
+
+	wr32(hw, TXGBE_MAC_PKT_FLT, TXGBE_MAC_PKT_FLT_PR);
+
+	wr32m(hw, TXGBE_MIS_RST_ST,
+	      TXGBE_MIS_RST_ST_RST_INIT, 0x1E00);
+
+	/* errata 4: initialize mng flex tbl and wakeup flex tbl */
+	wr32(hw, TXGBE_PSR_MNG_FLEX_SEL, 0);
+	for (i = 0; i < 16; i++) {
+		wr32(hw, TXGBE_PSR_MNG_FLEX_DW_L(i), 0);
+		wr32(hw, TXGBE_PSR_MNG_FLEX_DW_H(i), 0);
+		wr32(hw, TXGBE_PSR_MNG_FLEX_MSK(i), 0);
+	}
+	wr32(hw, TXGBE_PSR_LAN_FLEX_SEL, 0);
+	for (i = 0; i < 16; i++) {
+		wr32(hw, TXGBE_PSR_LAN_FLEX_DW_L(i), 0);
+		wr32(hw, TXGBE_PSR_LAN_FLEX_DW_H(i), 0);
+		wr32(hw, TXGBE_PSR_LAN_FLEX_MSK(i), 0);
+	}
+
+	/* set pause frame dst mac addr */
+	wr32(hw, TXGBE_RDB_PFCMACDAL, 0xC2000001);
+	wr32(hw, TXGBE_RDB_PFCMACDAH, 0x0180);
+
+	txgbe_init_thermal_sensor_thresh(hw);
 
 	return 0;
 }
+
+/**
+ *  txgbe_reset_hw - Perform hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks
+ *  and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ *  reset.
+ **/
+s32 txgbe_reset_hw(struct txgbe_hw *hw)
+{
+	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
+	u32 reset = 0;
+	s32 status;
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	status = TCALL(hw, mac.ops.stop_adapter);
+	if (status != 0)
+		goto reset_hw_out;
+
+	if (hw->bus.lan_id == 0)
+		reset = TXGBE_MIS_RST_LAN0_RST;
+	else
+		reset = TXGBE_MIS_RST_LAN1_RST;
+
+	wr32(hw, TXGBE_MIS_RST,
+	     reset | rd32(hw, TXGBE_MIS_RST));
+	TXGBE_WRITE_FLUSH(hw);
+	usleep_range(10, 100);
+
+	if (hw->bus.lan_id == 0)
+		status = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST);
+	else
+		status = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST);
+
+	if (status != 0)
+		goto reset_hw_out;
+
+	status = txgbe_reset_misc(hw);
+	if (status != 0)
+		goto reset_hw_out;
+
+	pci_set_master(adapter->pdev);
+
+reset_hw_out:
+	return status;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index fb250c99ddfd..e56fe21250c3 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -19,9 +19,16 @@
 s32 txgbe_get_bus_info(struct txgbe_hw *hw);
 void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status);
 s32 txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw);
+s32 txgbe_stop_adapter(struct txgbe_hw *hw);
 
+s32 txgbe_disable_pcie_master(struct txgbe_hw *hw);
+
+s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw);
+s32 txgbe_disable_rx(struct txgbe_hw *hw);
 int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit);
 
+int txgbe_reset_misc(struct txgbe_hw *hw);
+s32 txgbe_reset_hw(struct txgbe_hw *hw);
 s32 txgbe_init_ops(struct txgbe_hw *hw);
 
 u8 fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index d6145eca7b0a..cb950d52a51d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -262,6 +262,12 @@ static int txgbe_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_pci_release_regions;
 
+	err = TCALL(hw, mac.ops.reset_hw);
+	if (err) {
+		dev_err(&pdev->dev, "HW Init failed: %d\n", err);
+		goto err_pci_release_regions;
+	}
+
 	netdev->features |= NETIF_F_HIGHDMA;
 
 	/* pick up the PCI bus settings for reporting later */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index b769af5e6cbb..ae3407a30d9e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -101,6 +101,35 @@
 #define TXGBE_MIS_PRB_CTL_LAN0_UP       0x2
 #define TXGBE_MIS_PRB_CTL_LAN1_UP       0x1
 
+/* Sensors for PVT (Process Voltage Temperature) */
+#define TXGBE_TS_CTL                    0x10300
+#define TXGBE_TS_EN                     0x10304
+#define TXGBE_TS_ST                     0x10308
+#define TXGBE_TS_ALARM_THRE             0x1030C
+#define TXGBE_TS_DALARM_THRE            0x10310
+#define TXGBE_TS_INT_EN                 0x10314
+#define TXGBE_TS_ALARM_ST               0x10318
+#define TXGBE_TS_ALARM_ST_DALARM        0x00000002U
+#define TXGBE_TS_ALARM_ST_ALARM         0x00000001U
+
+#define TXGBE_TS_CTL_EVAL_MD            0x80000000U
+#define TXGBE_TS_EN_ENA                 0x00000001U
+#define TXGBE_TS_ST_DATA_OUT_MASK       0x000003FFU
+#define TXGBE_TS_ALARM_THRE_MASK        0x000003FFU
+#define TXGBE_TS_DALARM_THRE_MASK       0x000003FFU
+#define TXGBE_TS_INT_EN_DALARM_INT_EN   0x00000002U
+#define TXGBE_TS_INT_EN_ALARM_INT_EN    0x00000001U
+
+struct txgbe_thermal_diode_data {
+	s16 temp;
+	s16 alarm_thresh;
+	s16 dalarm_thresh;
+};
+
+struct txgbe_thermal_sensor_data {
+	struct txgbe_thermal_diode_data sensor;
+};
+
 /* FMGR Registers */
 #define TXGBE_SPI_ILDR_STATUS           0x10120
 #define TXGBE_SPI_ILDR_STATUS_PERST     0x00000001U /* PCIE_PERST is done */
@@ -173,6 +202,176 @@
 #define TXGBE_CFG_LED_CTL_LINK_10G_SEL  0x00000002U
 #define TXGBE_CFG_LED_CTL_LINK_UP_SEL   0x00000001U
 #define TXGBE_CFG_LED_CTL_LINK_OD_SHIFT 16
+
+/***************************** RDB registers *********************************/
+/* receive packet buffer */
+#define TXGBE_RDB_PB_WRAP           0x19004
+#define TXGBE_RDB_PB_SZ(_i)         (0x19020 + ((_i) * 4))
+#define TXGBE_RDB_PB_CTL            0x19000
+#define TXGBE_RDB_UP2TC             0x19008
+#define TXGBE_RDB_PB_SZ_SHIFT       10
+#define TXGBE_RDB_PB_SZ_MASK        0x000FFC00U
+/* statistic */
+#define TXGBE_RDB_MPCNT(_i)         (0x19040 + ((_i) * 4)) /* 8 of these */
+#define TXGBE_RDB_LXONTXC           0x1921C
+#define TXGBE_RDB_LXOFFTXC          0x19218
+#define TXGBE_RDB_PXON2OFFCNT(_i)   (0x19280 + ((_i) * 4)) /* 8 of these */
+#define TXGBE_RDB_PXONTXC(_i)       (0x192E0 + ((_i) * 4)) /* 8 of these */
+#define TXGBE_RDB_PXOFFTXC(_i)      (0x192C0 + ((_i) * 4)) /* 8 of these */
+#define TXGBE_RDB_PFCMACDAL         0x19210
+#define TXGBE_RDB_PFCMACDAH         0x19214
+#define TXGBE_RDB_TXSWERR           0x1906C
+#define TXGBE_RDB_TXSWERR_TB_FREE   0x3FF
+/* Receive Config masks */
+#define TXGBE_RDB_PB_CTL_RXEN           (0x80000000) /* Enable Receiver */
+#define TXGBE_RDB_PB_CTL_DISABLED       0x1
+
+/******************************* PSR Registers *******************************/
+/* psr control */
+#define TXGBE_PSR_CTL                   0x15000
+#define TXGBE_PSR_VLAN_CTL              0x15088
+#define TXGBE_PSR_VM_CTL                0x151B0
+/* Header split receive */
+#define TXGBE_PSR_CTL_SW_EN             0x00040000U
+#define TXGBE_PSR_CTL_RSC_DIS           0x00010000U
+#define TXGBE_PSR_CTL_RSC_ACK           0x00020000U
+#define TXGBE_PSR_CTL_PCSD              0x00002000U
+#define TXGBE_PSR_CTL_IPPCSE            0x00001000U
+#define TXGBE_PSR_CTL_BAM               0x00000400U
+#define TXGBE_PSR_CTL_UPE               0x00000200U
+#define TXGBE_PSR_CTL_MPE               0x00000100U
+#define TXGBE_PSR_CTL_MFE               0x00000080U
+#define TXGBE_PSR_CTL_MO                0x00000060U
+#define TXGBE_PSR_CTL_TPE               0x00000010U
+#define TXGBE_PSR_CTL_MO_SHIFT          5
+/* Management */
+#define TXGBE_PSR_MNG_FIT_CTL           0x15820
+/* Management Bit Fields and Masks */
+#define TXGBE_PSR_MNG_FIT_CTL_MPROXYE    0x40000000U /* Management Proxy Enable*/
+#define TXGBE_PSR_MNG_FIT_CTL_RCV_TCO_EN 0x00020000U /* Rcv TCO packet enable */
+#define TXGBE_PSR_MNG_FIT_CTL_EN_BMC2OS  0x10000000U /* Ena BMC2OS and OS2BMC traffic */
+#define TXGBE_PSR_MNG_FIT_CTL_EN_BMC2OS_SHIFT   28
+
+#define TXGBE_PSR_MNG_FLEX_SEL  0x1582C
+#define TXGBE_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16))
+#define TXGBE_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16))
+#define TXGBE_PSR_MNG_FLEX_MSK(_i)  (0x15A08 + ((_i) * 16))
+
+#define TXGBE_PSR_LAN_FLEX_SEL  0x15B8C
+#define TXGBE_PSR_LAN_FLEX_DW_L(_i)     (0x15C00 + ((_i) * 16))
+#define TXGBE_PSR_LAN_FLEX_DW_H(_i)     (0x15C04 + ((_i) * 16))
+#define TXGBE_PSR_LAN_FLEX_MSK(_i)      (0x15C08 + ((_i) * 16))
+#define TXGBE_PSR_LAN_FLEX_CTL  0x15CFC
+
+/************************************* ETH MAC *****************************/
+#define TXGBE_MAC_TX_CFG                0x11000
+#define TXGBE_MAC_RX_CFG                0x11004
+#define TXGBE_MAC_PKT_FLT               0x11008
+#define TXGBE_MAC_PKT_FLT_PR            (0x1) /* promiscuous mode */
+#define TXGBE_MAC_PKT_FLT_RA            (0x80000000) /* receive all */
+#define TXGBE_MAC_WDG_TIMEOUT           0x1100C
+#define TXGBE_MAC_RX_FLOW_CTRL          0x11090
+#define TXGBE_MAC_ADDRESS0_HIGH         0x11300
+#define TXGBE_MAC_ADDRESS0_LOW          0x11304
+
+#define TXGBE_MAC_TX_CFG_TE             0x00000001U
+#define TXGBE_MAC_TX_CFG_SPEED_MASK     0x60000000U
+#define TXGBE_MAC_TX_CFG_SPEED_10G      0x00000000U
+#define TXGBE_MAC_TX_CFG_SPEED_1G       0x60000000U
+#define TXGBE_MAC_RX_CFG_RE             0x00000001U
+#define TXGBE_MAC_RX_CFG_JE             0x00000100U
+#define TXGBE_MAC_RX_CFG_LM             0x00000400U
+#define TXGBE_MAC_WDG_TIMEOUT_PWE       0x00000100U
+#define TXGBE_MAC_WDG_TIMEOUT_WTO_MASK  0x0000000FU
+#define TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA 2
+
+#define TXGBE_MAC_RX_FLOW_CTRL_RFE      0x00000001U /* receive fc enable */
+#define TXGBE_MAC_RX_FLOW_CTRL_PFCE     0x00000100U /* pfc enable */
+
+/* statistic */
+#define TXGBE_MAC_LXONRXC               0x11E0C
+#define TXGBE_MAC_LXOFFRXC              0x11988
+#define TXGBE_MAC_PXONRXC(_i)           (0x11E30 + ((_i) * 4)) /* 8 of these */
+#define TXGBE_MAC_PXOFFRXC              0x119DC
+#define TXGBE_RX_BC_FRAMES_GOOD_LOW     0x11918
+#define TXGBE_RX_CRC_ERROR_FRAMES_LOW   0x11928
+#define TXGBE_RX_LEN_ERROR_FRAMES_LOW   0x11978
+#define TXGBE_RX_UNDERSIZE_FRAMES_GOOD  0x11938
+#define TXGBE_RX_OVERSIZE_FRAMES_GOOD   0x1193C
+#define TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW 0x11900
+#define TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW 0x1181C
+#define TXGBE_TX_MC_FRAMES_GOOD_LOW     0x1182C
+#define TXGBE_TX_BC_FRAMES_GOOD_LOW     0x11824
+#define TXGBE_MMC_CONTROL               0x11800
+#define TXGBE_MMC_CONTROL_RSTONRD       0x4 /* reset on read */
+#define TXGBE_MMC_CONTROL_UP            0x700
+
+/********************************* BAR registers ***************************/
+/* Interrupt Registers */
+#define TXGBE_BME_CTL				0x12020
+#define TXGBE_PX_MISC_IC                        0x100
+#define TXGBE_PX_MISC_ICS                       0x104
+#define TXGBE_PX_MISC_IEN                       0x108
+#define TXGBE_PX_MISC_IVAR                      0x4FC
+#define TXGBE_PX_GPIE                           0x118
+#define TXGBE_PX_ISB_ADDR_L                     0x160
+#define TXGBE_PX_ISB_ADDR_H                     0x164
+#define TXGBE_PX_TCP_TIMER                      0x170
+#define TXGBE_PX_ITRSEL                         0x180
+#define TXGBE_PX_IC(_i)                         (0x120 + (_i) * 4)
+#define TXGBE_PX_ICS(_i)                        (0x130 + (_i) * 4)
+#define TXGBE_PX_IMS(_i)                        (0x140 + (_i) * 4)
+#define TXGBE_PX_IMC(_i)                        (0x150 + (_i) * 4)
+#define TXGBE_PX_IVAR(_i)                       (0x500 + (_i) * 4)
+#define TXGBE_PX_ITR(_i)                        (0x200 + (_i) * 4)
+#define TXGBE_PX_TRANSACTION_PENDING            0x168
+#define TXGBE_PX_INTA                           0x110
+
+/* transmit DMA Registers */
+#define TXGBE_PX_TR_BAL(_i)     (0x03000 + ((_i) * 0x40))
+#define TXGBE_PX_TR_BAH(_i)     (0x03004 + ((_i) * 0x40))
+#define TXGBE_PX_TR_WP(_i)      (0x03008 + ((_i) * 0x40))
+#define TXGBE_PX_TR_RP(_i)      (0x0300C + ((_i) * 0x40))
+#define TXGBE_PX_TR_CFG(_i)     (0x03010 + ((_i) * 0x40))
+/* Transmit Config masks */
+#define TXGBE_PX_TR_CFG_ENABLE          (1) /* Ena specific Tx Queue */
+#define TXGBE_PX_TR_CFG_TR_SIZE_SHIFT   1 /* tx desc number per ring */
+#define TXGBE_PX_TR_CFG_SWFLSH          BIT(26) /* Tx Desc. wr-bk flushing */
+#define TXGBE_PX_TR_CFG_WTHRESH_SHIFT   16 /* shift to WTHRESH bits */
+#define TXGBE_PX_TR_CFG_THRE_SHIFT      8
+
+/* Receive DMA Registers */
+#define TXGBE_PX_RR_BAL(_i)             (0x01000 + ((_i) * 0x40))
+#define TXGBE_PX_RR_BAH(_i)             (0x01004 + ((_i) * 0x40))
+#define TXGBE_PX_RR_WP(_i)              (0x01008 + ((_i) * 0x40))
+#define TXGBE_PX_RR_RP(_i)              (0x0100C + ((_i) * 0x40))
+#define TXGBE_PX_RR_CFG(_i)             (0x01010 + ((_i) * 0x40))
+/* PX_RR_CFG bit definitions */
+#define TXGBE_PX_RR_CFG_RR_SIZE_SHIFT           1
+#define TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT          2 /* so many KBs */
+#define TXGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT      6 /* 64byte resolution (>> 6)
+						   * + at bit 8 offset (<< 12)
+						   *  = (<< 6)
+						   */
+#define TXGBE_PX_RR_CFG_DROP_EN         0x40000000U
+#define TXGBE_PX_RR_CFG_VLAN            0x80000000U
+#define TXGBE_PX_RR_CFG_RSC             0x20000000U
+#define TXGBE_PX_RR_CFG_CNTAG           0x10000000U
+#define TXGBE_PX_RR_CFG_RSC_CNT_MD      0x08000000U
+#define TXGBE_PX_RR_CFG_SPLIT_MODE      0x04000000U
+#define TXGBE_PX_RR_CFG_STALL           0x02000000U
+#define TXGBE_PX_RR_CFG_MAX_RSCBUF_1    0x00000000U
+#define TXGBE_PX_RR_CFG_MAX_RSCBUF_4    0x00800000U
+#define TXGBE_PX_RR_CFG_MAX_RSCBUF_8    0x01000000U
+#define TXGBE_PX_RR_CFG_MAX_RSCBUF_16   0x01800000U
+#define TXGBE_PX_RR_CFG_RR_THER         0x00070000U
+#define TXGBE_PX_RR_CFG_RR_THER_SHIFT   16
+
+#define TXGBE_PX_RR_CFG_RR_HDR_SZ       0x0000F000U
+#define TXGBE_PX_RR_CFG_RR_BUF_SZ       0x00000F00U
+#define TXGBE_PX_RR_CFG_RR_SZ           0x0000007EU
+#define TXGBE_PX_RR_CFG_RR_EN           0x00000001U
+
 /******************************** PCI Bus Info *******************************/
 #define TXGBE_PCI_DEVICE_STATUS         0xAA
 #define TXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING     0x0020
@@ -208,6 +407,9 @@
 #define TXGBE_PCIDEVCTRL2_4_8s          0xd
 #define TXGBE_PCIDEVCTRL2_17_34s        0xe
 
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define TXGBE_PCI_MASTER_DISABLE_TIMEOUT        800
+
 /* PCI bus types */
 enum txgbe_bus_type {
 	txgbe_bus_type_unknown = 0,
@@ -258,13 +460,24 @@ struct txgbe_bus_info {
 struct txgbe_hw;
 
 struct txgbe_mac_operations {
+	s32 (*reset_hw)(struct txgbe_hw *hw);
+	s32 (*stop_adapter)(struct txgbe_hw *hw);
 	s32 (*get_bus_info)(struct txgbe_hw *hw);
 	s32 (*set_lan_id)(struct txgbe_hw *hw);
 
+	/* RAR */
+	s32 (*disable_rx)(struct txgbe_hw *hw);
+
+	/* Manageability interface */
+	s32 (*init_thermal_sensor_thresh)(struct txgbe_hw *hw);
 };
 
 struct txgbe_mac_info {
 	struct txgbe_mac_operations ops;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	struct txgbe_thermal_sensor_data  thermal_sensor_data;
+	bool set_lben;
 };
 
 struct txgbe_hw {
@@ -276,6 +489,7 @@ struct txgbe_hw {
 	u16 subsystem_device_id;
 	u16 subsystem_vendor_id;
 	u8 revision_id;
+	bool adapter_stopped;
 	u16 oem_ssid;
 	u16 oem_svid;
 };
@@ -370,6 +584,22 @@ rd32(struct txgbe_hw *hw, u32 reg)
 	return val;
 }
 
+static inline u32
+rd32m(struct txgbe_hw *hw, u32 reg, u32 mask)
+{
+	u8 __iomem *base = READ_ONCE(hw->hw_addr);
+	u32 val = TXGBE_FAILED_READ_REG;
+
+	if (unlikely(!base))
+		return val;
+
+	val = txgbe_rd32(base + reg);
+	if (unlikely(val == TXGBE_FAILED_READ_REG))
+		return val;
+
+	return val & mask;
+}
+
 /* write register */
 static inline void
 txgbe_wr32(u8 __iomem *base, u32 val)
@@ -388,4 +618,23 @@ wr32(struct txgbe_hw *hw, u32 reg, u32 val)
 	txgbe_wr32(base + reg, val);
 }
 
+static inline void
+wr32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field)
+{
+	u8 __iomem *base = READ_ONCE(hw->hw_addr);
+	u32 val;
+
+	if (unlikely(!base))
+		return;
+
+	val = txgbe_rd32(base + reg);
+	if (unlikely(val == TXGBE_FAILED_READ_REG))
+		return;
+
+	val = ((val & ~mask) | (field & mask));
+	txgbe_wr32(base + reg, val);
+}
+
+#define TXGBE_WRITE_FLUSH(H) rd32(H, TXGBE_MIS_PWR)
+
 #endif /* _TXGBE_TYPE_H_ */
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread
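
The masked accessors rd32m() and wr32m() added above are the series'
read-modify-write primitives; both re-read hw_addr under READ_ONCE()
and bail out when a read returns TXGBE_FAILED_READ_REG, so callers get
surprise-removal safety for free.  A minimal sketch of the intended
calling pattern, built only on the helpers and register masks this
patch defines (illustrative, not part of the series;
txgbe_flush_tx_ring() and the self-clearing SWFLSH semantics are
assumptions):

	/* Hypothetical helper, not in the patch: request a descriptor
	 * write-back flush on one Tx ring and poll until the (assumed)
	 * self-clearing SWFLSH bit drops.
	 */
	static void txgbe_flush_tx_ring(struct txgbe_hw *hw, u8 reg_idx)
	{
		int wait = 10;

		/* set SWFLSH without disturbing the other config bits */
		wr32m(hw, TXGBE_PX_TR_CFG(reg_idx),
		      TXGBE_PX_TR_CFG_SWFLSH, TXGBE_PX_TR_CFG_SWFLSH);
		TXGBE_WRITE_FLUSH(hw);

		while (--wait && rd32m(hw, TXGBE_PX_TR_CFG(reg_idx),
				       TXGBE_PX_TR_CFG_SWFLSH))
			usleep_range(100, 200);
	}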

* [RFC PATCH net-next 03/16] net: txgbe: Set MAC address and register netdev
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 01/16] net: txgbe: Store PCI info Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 02/16] net: txgbe: Reset hardware Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 04/16] net: txgbe: Add operations to interact with firmware Jiawen Wu
                   ` (12 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Add MAC address related operations, and register netdev.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  38 ++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 360 ++++++++++++++++++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |  14 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 303 ++++++++++++++-
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   |  63 +++
 5 files changed, 774 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 393f6454f023..a10792612c2e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -4,15 +4,31 @@
 #ifndef _TXGBE_H_
 #define _TXGBE_H_
 
+#include <net/ip.h>
 #include <linux/pci.h>
+#include <linux/etherdevice.h>
 
 #include "txgbe_type.h"
 
+struct txgbe_ring {
+	u8 reg_idx;
+} ____cacheline_internodealigned_in_smp;
+
 #define TXGBE_MAX_FDIR_INDICES          63
 
 #define TXGBE_MAX_RX_QUEUES   (TXGBE_MAX_FDIR_INDICES + 1)
 #define TXGBE_MAX_TX_QUEUES   (TXGBE_MAX_FDIR_INDICES + 1)
 
+struct txgbe_mac_addr {
+	u8 addr[ETH_ALEN];
+	u16 state; /* bitmask */
+	u64 pools;
+};
+
+#define TXGBE_MAC_STATE_DEFAULT         0x1
+#define TXGBE_MAC_STATE_MODIFIED        0x2
+#define TXGBE_MAC_STATE_IN_USE          0x4
+
 /* board specific private data structure */
 struct txgbe_adapter {
 	u8 __iomem *io_addr;    /* Mainly for iounmap use */
@@ -20,12 +36,31 @@ struct txgbe_adapter {
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 
+	/* Tx fast path data */
+	int num_tx_queues;
+
+	/* TX */
+	struct txgbe_ring *tx_ring[TXGBE_MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
+
 	/* structs defined in txgbe_type.h */
 	struct txgbe_hw hw;
 	u16 msg_enable;
+
+	bool netdev_registered;
+
+	struct txgbe_mac_addr *mac_table;
+
 };
 
+/* needed by txgbe_main.c */
+void txgbe_assign_netdev_ops(struct net_device *netdev);
+
+int txgbe_open(struct net_device *netdev);
+int txgbe_close(struct net_device *netdev);
+void txgbe_down(struct txgbe_adapter *adapter);
+void txgbe_reset(struct txgbe_adapter *adapter);
 s32 txgbe_init_shared_code(struct txgbe_hw *hw);
+void txgbe_disable_device(struct txgbe_adapter *adapter);
 
 #define TXGBE_INTR_ALL (~0ULL)
 
@@ -61,6 +96,9 @@ __maybe_unused static struct txgbe_msg *txgbe_hw_to_msg(const struct txgbe_hw *h
 	return (struct txgbe_msg *)&adapter->msg_enable;
 }
 
+#define txgbe_dbg(hw, fmt, arg...) \
+	netdev_dbg(txgbe_hw_to_netdev(hw), fmt, ##arg)
+
 #define TXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
 #define TXGBE_FAILED_READ_CFG_WORD  0xffffU
 #define TXGBE_FAILED_READ_CFG_BYTE  0xffU
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 060f9e4ef65b..ef44da54c954 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -7,6 +7,50 @@
 
 #define TXGBE_SP_MAX_TX_QUEUES  128
 #define TXGBE_SP_MAX_RX_QUEUES  128
+#define TXGBE_SP_RAR_ENTRIES    128
+
+s32 txgbe_init_hw(struct txgbe_hw *hw)
+{
+	s32 status;
+
+	/* Reset the hardware */
+	status = TCALL(hw, mac.ops.reset_hw);
+
+	if (status == 0) {
+		/* Start the HW */
+		status = TCALL(hw, mac.ops.start_hw);
+	}
+
+	return status;
+}
+
+/**
+ *  txgbe_get_mac_addr - Generic get MAC address
+ *  @hw: pointer to hardware structure
+ *  @mac_addr: Adapter MAC address
+ *
+ *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
+ *  A reset of the adapter must be performed prior to calling this function
+ *  in order for the MAC address to have been loaded from the EEPROM into RAR0
+ **/
+s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr)
+{
+	u32 rar_high;
+	u32 rar_low;
+	u16 i;
+
+	wr32(hw, TXGBE_PSR_MAC_SWC_IDX, 0);
+	rar_high = rd32(hw, TXGBE_PSR_MAC_SWC_AD_H);
+	rar_low = rd32(hw, TXGBE_PSR_MAC_SWC_AD_L);
+
+	for (i = 0; i < 2; i++)
+		mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);
+
+	for (i = 0; i < 4; i++)
+		mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8);
+
+	return 0;
+}
 
 /**
  *  txgbe_set_pci_config_data - Generic store PCI bus info
@@ -148,6 +192,166 @@ s32 txgbe_stop_adapter(struct txgbe_hw *hw)
 	return txgbe_disable_pcie_master(hw);
 }
 
+/**
+ *  txgbe_set_rar - Set Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @pools: VMDq "set" or "pool" index
+ *  @enable_addr: set flag that address is active
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
+		  u32 enable_addr)
+{
+	u32 rar_low, rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries) {
+		ERROR_REPORT2(hw, TXGBE_ERROR_ARGUMENT,
+			      "RAR index %d is out of range.\n", index);
+		return TXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	/* select the MAC address */
+	wr32(hw, TXGBE_PSR_MAC_SWC_IDX, index);
+
+	/* setup VMDq pool mapping */
+	wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
+	wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, pools >> 32);
+
+	/* HW expects these in little endian so we reverse the byte
+	 * order from network order (big endian) to little endian
+	 *
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
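+	 *
+	 * Illustrative example: addr 00:11:22:33:44:55 packs to
+	 * rar_low = 0x22334455 and rar_high = 0x0011.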
+	 */
+	rar_low = ((u32)addr[5] |
+		  ((u32)addr[4] << 8) |
+		  ((u32)addr[3] << 16) |
+		  ((u32)addr[2] << 24));
+	rar_high = ((u32)addr[1] |
+		   ((u32)addr[0] << 8));
+	if (enable_addr != 0)
+		rar_high |= TXGBE_PSR_MAC_SWC_AD_H_AV;
+
+	wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, rar_low);
+	wr32m(hw, TXGBE_PSR_MAC_SWC_AD_H,
+	      (TXGBE_PSR_MAC_SWC_AD_H_AD(~0) |
+	       TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
+	       TXGBE_PSR_MAC_SWC_AD_H_AV),
+	      rar_high);
+
+	return 0;
+}
+
+/**
+ *  txgbe_clear_rar - Remove Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *
+ *  Clears an ethernet address from a receive address register.
+ **/
+s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index)
+{
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries) {
+		ERROR_REPORT2(hw, TXGBE_ERROR_ARGUMENT,
+			      "RAR index %d is out of range.\n", index);
+		return TXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	/* Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
+	wr32(hw, TXGBE_PSR_MAC_SWC_IDX, index);
+
+	wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 0);
+	wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 0);
+
+	wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0);
+	wr32m(hw, TXGBE_PSR_MAC_SWC_AD_H,
+	      (TXGBE_PSR_MAC_SWC_AD_H_AD(~0) |
+	       TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
+	       TXGBE_PSR_MAC_SWC_AD_H_AV),
+	      0);
+
+	return 0;
+}
+
+/**
+ *  txgbe_init_rx_addrs - Initializes receive address filters.
+ *  @hw: pointer to hardware structure
+ *
+ *  Places the MAC address in receive address register 0 and clears the rest
+ *  of the receive address registers. Clears the multicast table. Assumes
+ *  the receiver is in reset when the routine is called.
+ **/
+s32 txgbe_init_rx_addrs(struct txgbe_hw *hw)
+{
+	u32 i;
+	u32 rar_entries = hw->mac.num_rar_entries;
+	u32 psrctl;
+
+	/* If the current mac address is valid, assume it is a software override
+	 * to the permanent address.
+	 * Otherwise, use the permanent address from the eeprom.
+	 */
+	if (!is_valid_ether_addr(hw->mac.addr)) {
+		/* Get the MAC address from the RAR0 for later reference */
+		TCALL(hw, mac.ops.get_mac_addr, hw->mac.addr);
+
+		txgbe_dbg(hw, "Keeping Current RAR0 Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
+			  hw->mac.addr[0], hw->mac.addr[1],
+			  hw->mac.addr[2], hw->mac.addr[3],
+			  hw->mac.addr[4], hw->mac.addr[5]);
+	} else {
+		/* Setup the receive address. */
+		txgbe_dbg(hw, "Overriding MAC Address in RAR[0]\n");
+		txgbe_dbg(hw, "New MAC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
+			  hw->mac.addr[0], hw->mac.addr[1],
+			  hw->mac.addr[2], hw->mac.addr[3],
+			  hw->mac.addr[4], hw->mac.addr[5]);
+
+		TCALL(hw, mac.ops.set_rar, 0, hw->mac.addr, 0,
+		      TXGBE_PSR_MAC_SWC_AD_H_AV);
+
+		/* clear VMDq pool/queue selection for RAR 0 */
+		TCALL(hw, mac.ops.clear_vmdq, 0, TXGBE_CLEAR_VMDQ_ALL);
+	}
+	hw->addr_ctrl.overflow_promisc = 0;
+
+	hw->addr_ctrl.rar_used_count = 1;
+
+	/* Zero out the other receive addresses. */
+	txgbe_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+	for (i = 1; i < rar_entries; i++) {
+		wr32(hw, TXGBE_PSR_MAC_SWC_IDX, i);
+		wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0);
+		wr32(hw, TXGBE_PSR_MAC_SWC_AD_H, 0);
+	}
+
+	/* Clear the MTA */
+	hw->addr_ctrl.mta_in_use = 0;
+	psrctl = rd32(hw, TXGBE_PSR_CTL);
+	psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE);
+	psrctl |= hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT;
+	wr32(hw, TXGBE_PSR_CTL, psrctl);
+	txgbe_dbg(hw, " Clearing MTA\n");
+	for (i = 0; i < hw->mac.mcft_size; i++)
+		wr32(hw, TXGBE_PSR_MC_TBL(i), 0);
+
+	TCALL(hw, mac.ops.init_uta_tables);
+
+	return 0;
+}
+
 /**
  *  txgbe_disable_pcie_master - Disable PCI-express master access
  *  @hw: pointer to hardware structure
@@ -185,6 +389,97 @@ s32 txgbe_disable_pcie_master(struct txgbe_hw *hw)
 	return status;
 }
 
+/**
+ *  txgbe_get_san_mac_addr - SAN MAC address retrieval from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Reads the SAN MAC address.
+ **/
+s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr)
+{
+	u8 i;
+
+	/* No addresses available in this EEPROM.  It's not an
+	 * error though, so just wipe the local address and return.
+	 */
+	for (i = 0; i < 6; i++)
+		san_mac_addr[i] = 0xFF;
+	return 0;
+}
+
+/**
+ *  txgbe_clear_vmdq - Disassociate a VMDq pool index from a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to disassociate
+ *  @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 __maybe_unused vmdq)
+{
+	u32 mpsar_lo, mpsar_hi;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		ERROR_REPORT2(hw, TXGBE_ERROR_ARGUMENT,
+			      "RAR index %d is out of range.\n", rar);
+		return TXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar);
+	mpsar_lo = rd32(hw, TXGBE_PSR_MAC_SWC_VM_L);
+	mpsar_hi = rd32(hw, TXGBE_PSR_MAC_SWC_VM_H);
+
+	if (TXGBE_REMOVED(hw->hw_addr))
+		goto done;
+
+	if (!mpsar_lo && !mpsar_hi)
+		goto done;
+
+	/* was that the last pool using this rar? */
+	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+		TCALL(hw, mac.ops.clear_rar, rar);
+done:
+	return 0;
+}
+
+/**
+ *  txgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @vmdq: VMDq pool index
+ **/
+s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq)
+{
+	u32 rar = hw->mac.san_mac_rar_index;
+
+	wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar);
+	if (vmdq < 32) {
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 1 << vmdq);
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 0);
+	} else {
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 0);
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 1 << (vmdq - 32));
+	}
+
+	return 0;
+}
+
+/**
+ *  txgbe_init_uta_tables - Initialize the Unicast Table Array
+ *  @hw: pointer to hardware structure
+ **/
+s32 txgbe_init_uta_tables(struct txgbe_hw *hw)
+{
+	int i;
+
+	txgbe_dbg(hw, " Clearing UTA\n");
+
+	for (i = 0; i < 128; i++)
+		wr32(hw, TXGBE_PSR_UC_TBL(i), 0);
+
+	return 0;
+}
+
 /* cmd_addr is used for some special command:
  * 1. to be sector address, when implemented erase sector command
  * 2. to be flash address when implemented read, write flash address
@@ -317,14 +612,25 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	struct txgbe_mac_info *mac = &hw->mac;
 
 	/* MAC */
+	mac->ops.init_hw = txgbe_init_hw;
+	mac->ops.get_mac_addr = txgbe_get_mac_addr;
 	mac->ops.stop_adapter = txgbe_stop_adapter;
 	mac->ops.get_bus_info = txgbe_get_bus_info;
 	mac->ops.set_lan_id = txgbe_set_lan_id_multi_port_pcie;
 	mac->ops.reset_hw = txgbe_reset_hw;
+	mac->ops.start_hw = txgbe_start_hw;
+	mac->ops.get_san_mac_addr = txgbe_get_san_mac_addr;
 
 	/* RAR */
+	mac->ops.set_rar = txgbe_set_rar;
+	mac->ops.clear_rar = txgbe_clear_rar;
+	mac->ops.init_rx_addrs = txgbe_init_rx_addrs;
+	mac->ops.clear_vmdq = txgbe_clear_vmdq;
 	mac->ops.disable_rx = txgbe_disable_rx;
+	mac->ops.set_vmdq_san_mac = txgbe_set_vmdq_san_mac;
+	mac->ops.init_uta_tables = txgbe_init_uta_tables;
 
+	mac->num_rar_entries    = TXGBE_SP_RAR_ENTRIES;
 	mac->max_rx_queues      = TXGBE_SP_MAX_RX_QUEUES;
 	mac->max_tx_queues      = TXGBE_SP_MAX_TX_QUEUES;
 
@@ -419,8 +725,62 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
 	if (status != 0)
 		goto reset_hw_out;
 
+	/* Store the permanent mac address */
+	TCALL(hw, mac.ops.get_mac_addr, hw->mac.perm_addr);
+
+	/* Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table.  Also reset num_rar_entries to 128,
+	 * since we modify this value when programming the SAN MAC address.
+	 */
+	hw->mac.num_rar_entries = 128;
+	TCALL(hw, mac.ops.init_rx_addrs);
+
+	/* Store the permanent SAN mac address */
+	TCALL(hw, mac.ops.get_san_mac_addr, hw->mac.san_addr);
+
+	/* Add the SAN MAC address to the RAR only if it's a valid address */
+	if (is_valid_ether_addr(hw->mac.san_addr)) {
+		TCALL(hw, mac.ops.set_rar, hw->mac.num_rar_entries - 1,
+		      hw->mac.san_addr, 0, TXGBE_PSR_MAC_SWC_AD_H_AV);
+
+		/* Save the SAN MAC RAR index */
+		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+		/* Reserve the last RAR for the SAN MAC address */
+		hw->mac.num_rar_entries--;
+	}
+
 	pci_set_master(adapter->pdev);
 
 reset_hw_out:
 	return status;
 }
+
+/**
+ *  txgbe_start_hw - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware using the generic start_hw function,
+ *  then performs revision-specific operations, if any.
+ **/
+s32 txgbe_start_hw(struct txgbe_hw *hw)
+{
+	int ret_val = 0;
+	u32 i;
+
+	/* Clear the rate limiters */
+	for (i = 0; i < hw->mac.max_tx_queues; i++) {
+		wr32(hw, TXGBE_TDM_RP_IDX, i);
+		wr32(hw, TXGBE_TDM_RP_RATE, 0);
+	}
+	TXGBE_WRITE_FLUSH(hw);
+
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = false;
+
+	/* We need to run link autotry after the driver loads */
+	hw->mac.autotry_restart = true;
+
+	return ret_val;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index e56fe21250c3..6b17942c4670 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -16,13 +16,27 @@
 #define SPI_H_DAT_REG_ADDR           0x10108  /* SPI Data register address */
 #define SPI_H_STA_REG_ADDR           0x1010c  /* SPI Status register address */
 
+s32 txgbe_init_hw(struct txgbe_hw *hw);
+s32 txgbe_start_hw(struct txgbe_hw *hw);
+s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr);
 s32 txgbe_get_bus_info(struct txgbe_hw *hw);
 void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status);
 s32 txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw);
 s32 txgbe_stop_adapter(struct txgbe_hw *hw);
 
+s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
+		  u32 enable_addr);
+s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index);
+s32 txgbe_init_rx_addrs(struct txgbe_hw *hw);
+
 s32 txgbe_disable_pcie_master(struct txgbe_hw *hw);
 
+s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
+
+s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq);
+s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq);
+s32 txgbe_init_uta_tables(struct txgbe_hw *hw);
+
 s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw);
 s32 txgbe_disable_rx(struct txgbe_hw *hw);
 int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index cb950d52a51d..21b63856db49 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -100,6 +100,131 @@ static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev)
 	return false;
 }
 
+static void txgbe_sync_mac_table(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	int i;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) {
+			if (adapter->mac_table[i].state &
+					TXGBE_MAC_STATE_IN_USE) {
+				TCALL(hw, mac.ops.set_rar, i,
+				      adapter->mac_table[i].addr,
+				      adapter->mac_table[i].pools,
+				      TXGBE_PSR_MAC_SWC_AD_H_AV);
+			} else {
+				TCALL(hw, mac.ops.clear_rar, i);
+			}
+			adapter->mac_table[i].state &=
+				~(TXGBE_MAC_STATE_MODIFIED);
+		}
+	}
+}
+
+/* this function destroys the first RAR entry */
+static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter,
+					 u8 *addr)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+
+	memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+	adapter->mac_table[0].pools = 1ULL;
+	adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT |
+				       TXGBE_MAC_STATE_IN_USE);
+	TCALL(hw, mac.ops.set_rar, 0, adapter->mac_table[0].addr,
+	      adapter->mac_table[0].pools,
+	      TXGBE_PSR_MAC_SWC_AD_H_AV);
+}
+
+static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 i;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
+		adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
+		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+		adapter->mac_table[i].pools = 0;
+	}
+	txgbe_sync_mac_table(adapter);
+}
+
+void txgbe_reset(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct txgbe_hw *hw = &adapter->hw;
+	u8 old_addr[ETH_ALEN];
+	int err;
+
+	if (TXGBE_REMOVED(hw->hw_addr))
+		return;
+
+	err = TCALL(hw, mac.ops.init_hw);
+	switch (err) {
+	case 0:
+		break;
+	case TXGBE_ERR_MASTER_REQUESTS_PENDING:
+		dev_err(&adapter->pdev->dev, "master disable timed out\n");
+		break;
+	default:
+		dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+	}
+
+	/* do not flush user set addresses */
+	memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+	txgbe_flush_sw_mac_table(adapter);
+	txgbe_mac_set_default_filter(adapter, old_addr);
+
+	/* update SAN MAC vmdq pool selection */
+	TCALL(hw, mac.ops.set_vmdq_san_mac, 0);
+}
+
+void txgbe_disable_device(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 i;
+
+	txgbe_disable_pcie_master(hw);
+	/* disable receives */
+	TCALL(hw, mac.ops.disable_rx);
+
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	if (hw->bus.lan_id == 0)
+		wr32m(hw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN0_UP, 0);
+	else if (hw->bus.lan_id == 1)
+		wr32m(hw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN1_UP, 0);
+	else
+		dev_err(&adapter->pdev->dev,
+			"%s: invalid bus lan id %d\n",
+			__func__, hw->bus.lan_id);
+
+	if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) ||
+	      ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) {
+		/* disable mac transmitter */
+		wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, 0);
+	}
+	/* disable transmits in the hardware now that interrupts are off */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
+		wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH);
+	}
+
+	/* Disable the Tx DMA engine */
+	wr32m(hw, TXGBE_TDM_CTL, TXGBE_TDM_CTL_TE, 0);
+}
+
+void txgbe_down(struct txgbe_adapter *adapter)
+{
+	txgbe_disable_device(adapter);
+	txgbe_reset(adapter);
+}
+
 /**
  *  txgbe_init_shared_code - Initialize the shared code
  *  @hw: pointer to hardware structure
@@ -151,6 +276,63 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
 			  "init_shared_code failed: %d\n", err);
 		return err;
 	}
+	adapter->mac_table = kzalloc(sizeof(*adapter->mac_table) *
+				     hw->mac.num_rar_entries,
+				     GFP_ATOMIC);
+	if (!adapter->mac_table) {
+		err = TXGBE_ERR_OUT_OF_MEM;
+		netif_err(adapter, probe, adapter->netdev,
+			  "mac_table allocation failed: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).
+ **/
+int txgbe_open(struct net_device *netdev)
+{
+	netif_carrier_off(netdev);
+
+	return 0;
+}
+
+/**
+ * txgbe_close_suspend - actions necessary to both suspend and close flows
+ * @adapter: the private adapter struct
+ *
+ * This function should contain the necessary work common to both suspending
+ * and closing of the device.
+ */
+static void txgbe_close_suspend(struct txgbe_adapter *adapter)
+{
+	txgbe_disable_device(adapter);
+}
+
+/**
+ * txgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the driver's control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+int txgbe_close(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	txgbe_down(adapter);
 
 	return 0;
 }
@@ -162,6 +344,11 @@ static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
 
 	netif_device_detach(netdev);
 
+	rtnl_lock();
+	if (netif_running(netdev))
+		txgbe_close_suspend(adapter);
+	rtnl_unlock();
+
 	pci_disable_device(pdev);
 }
 
@@ -177,6 +364,62 @@ static void txgbe_shutdown(struct pci_dev *pdev)
 	}
 }
 
+/**
+ * txgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
+ * netdev->dev_addr_list
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int txgbe_add_sanmac_netdev(struct net_device *dev)
+{
+	int err = 0;
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	struct txgbe_hw *hw = &adapter->hw;
+
+	if (is_valid_ether_addr(hw->mac.san_addr)) {
+		rtnl_lock();
+		err = dev_addr_add(dev, hw->mac.san_addr,
+				   NETDEV_HW_ADDR_T_SAN);
+		rtnl_unlock();
+
+		/* update SAN MAC vmdq pool selection */
+		TCALL(hw, mac.ops.set_vmdq_san_mac, 0);
+	}
+	return err;
+}
+
+/**
+ * txgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
+ * netdev->dev_addr_list
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int txgbe_del_sanmac_netdev(struct net_device *dev)
+{
+	int err = 0;
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	struct txgbe_mac_info *mac = &adapter->hw.mac;
+
+	if (is_valid_ether_addr(mac->san_addr)) {
+		rtnl_lock();
+		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+		rtnl_unlock();
+	}
+	return err;
+}
+
+static const struct net_device_ops txgbe_netdev_ops = {
+	.ndo_open               = txgbe_open,
+	.ndo_stop               = txgbe_close,
+};
+
+void txgbe_assign_netdev_ops(struct net_device *dev)
+{
+	dev->netdev_ops = &txgbe_netdev_ops;
+}
+
 /**
  * txgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -245,35 +488,62 @@ static int txgbe_probe(struct pci_dev *pdev,
 	}
 	hw->hw_addr = adapter->io_addr;
 
+	txgbe_assign_netdev_ops(netdev);
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
 	/* setup the private structure */
 	err = txgbe_sw_init(adapter);
 	if (err)
-		goto err_pci_release_regions;
+		goto err_free_mac_table;
 
 	TCALL(hw, mac.ops.set_lan_id);
 
 	/* check if flash load is done after hw power up */
 	err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PERST);
 	if (err)
-		goto err_pci_release_regions;
+		goto err_free_mac_table;
 	err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PWRRST);
 	if (err)
-		goto err_pci_release_regions;
+		goto err_free_mac_table;
 
 	err = TCALL(hw, mac.ops.reset_hw);
 	if (err) {
 		dev_err(&pdev->dev, "HW Init failed: %d\n", err);
-		goto err_pci_release_regions;
+		goto err_free_mac_table;
 	}
 
 	netdev->features |= NETIF_F_HIGHDMA;
 
+	eth_hw_addr_set(netdev, hw->mac.perm_addr);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		dev_err(&pdev->dev, "invalid MAC address\n");
+		err = -EIO;
+		goto err_free_mac_table;
+	}
+
+	txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
+	/* reset the hardware with the new settings */
+	err = TCALL(hw, mac.ops.start_hw);
+	if (err) {
+		dev_err(&pdev->dev, "HW init failed\n");
+		goto err_free_mac_table;
+	}
+
 	/* pick up the PCI bus settings for reporting later */
 	TCALL(hw, mac.ops.get_bus_info);
 
+	strcpy(netdev->name, "eth%d");
+	err = register_netdev(netdev);
+	if (err)
+		goto err_free_mac_table;
+
 	pci_set_drvdata(pdev, adapter);
+	adapter->netdev_registered = true;
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
 
 	/* calculate the expected PCIe bandwidth required for optimal
 	 * performance. Note that some older parts will never have enough
@@ -287,8 +557,18 @@ static int txgbe_probe(struct pci_dev *pdev,
 	if (expected_gts > 0)
 		txgbe_check_minimum_link(adapter);
 
+	netif_info(adapter, probe, netdev, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		   netdev->dev_addr[0], netdev->dev_addr[1],
+		   netdev->dev_addr[2], netdev->dev_addr[3],
+		   netdev->dev_addr[4], netdev->dev_addr[5]);
+
+	/* add san mac addr to netdev */
+	txgbe_add_sanmac_netdev(netdev);
+
 	return 0;
 
+err_free_mac_table:
+	kfree(adapter->mac_table);
 err_pci_release_regions:
 	pci_disable_pcie_error_reporting(pdev);
 	pci_release_selected_regions(pdev,
@@ -309,9 +589,24 @@ static int txgbe_probe(struct pci_dev *pdev,
  **/
 static void txgbe_remove(struct pci_dev *pdev)
 {
+	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev;
+
+	netdev = adapter->netdev;
+
+	/* remove the added san mac */
+	txgbe_del_sanmac_netdev(netdev);
+
+	if (adapter->netdev_registered) {
+		unregister_netdev(netdev);
+		adapter->netdev_registered = false;
+	}
+
 	pci_release_selected_regions(pdev,
 				     pci_select_bars(pdev, IORESOURCE_MEM));
 
+	kfree(adapter->mac_table);
+
 	pci_disable_pcie_error_reporting(pdev);
 
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index ae3407a30d9e..5baf328138a5 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -203,6 +203,23 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_CFG_LED_CTL_LINK_UP_SEL   0x00000001U
 #define TXGBE_CFG_LED_CTL_LINK_OD_SHIFT 16
 
+/*********************** Transmit DMA registers **************************/
+/* transmit global control */
+#define TXGBE_TDM_CTL           0x18000
+/* TDM CTL BIT */
+#define TXGBE_TDM_CTL_TE        0x1 /* Transmit Enable */
+#define TXGBE_TDM_CTL_PADDING   0x2 /* Padding byte number for ipsec ESP */
+#define TXGBE_TDM_CTL_VT_SHIFT  16  /* VLAN EtherType */
+
+#define TXGBE_TDM_RP_CTL        0x18400
+#define TXGBE_TDM_RP_CTL_RST    ((0x1) << 0)
+#define TXGBE_TDM_RP_CTL_RPEN   ((0x1) << 2)
+#define TXGBE_TDM_RP_CTL_RLEN   ((0x1) << 3)
+#define TXGBE_TDM_RP_IDX        0x1820C
+#define TXGBE_TDM_RP_RATE       0x18404
+#define TXGBE_TDM_RP_RATE_MIN(v) ((0x3FFF & (v)))
+#define TXGBE_TDM_RP_RATE_MAX(v) ((0x3FFF & (v)) << 16)
+
 /***************************** RDB registers *********************************/
 /* receive packet buffer */
 #define TXGBE_RDB_PB_WRAP           0x19004
@@ -244,6 +261,22 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_PSR_CTL_MO                0x00000060U
 #define TXGBE_PSR_CTL_TPE               0x00000010U
 #define TXGBE_PSR_CTL_MO_SHIFT          5
+/* mcast/ucast overflow tbl */
+#define TXGBE_PSR_MC_TBL(_i)    (0x15200  + ((_i) * 4))
+#define TXGBE_PSR_UC_TBL(_i)    (0x15400 + ((_i) * 4))
+
+/* mac switcher */
+#define TXGBE_PSR_MAC_SWC_AD_L  0x16200
+#define TXGBE_PSR_MAC_SWC_AD_H  0x16204
+#define TXGBE_PSR_MAC_SWC_VM_L  0x16208
+#define TXGBE_PSR_MAC_SWC_VM_H  0x1620C
+#define TXGBE_PSR_MAC_SWC_IDX   0x16210
+/* RAH */
+#define TXGBE_PSR_MAC_SWC_AD_H_AD(v)       (((v) & 0xFFFF))
+#define TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(v)   (((v) & 0x1) << 30)
+#define TXGBE_PSR_MAC_SWC_AD_H_AV       0x80000000U
+#define TXGBE_CLEAR_VMDQ_ALL            0xFFFFFFFFU
+
 /* Management */
 #define TXGBE_PSR_MNG_FIT_CTL           0x15820
 /* Management Bit Fields and Masks */
@@ -372,6 +405,8 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_PX_RR_CFG_RR_SZ           0x0000007EU
 #define TXGBE_PX_RR_CFG_RR_EN           0x00000001U
 
+#define TXGBE_ETH_LENGTH_OF_ADDRESS     6
+
 /******************************** PCI Bus Info *******************************/
 #define TXGBE_PCI_DEVICE_STATUS         0xAA
 #define TXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING     0x0020
@@ -446,6 +481,14 @@ enum txgbe_bus_width {
 	txgbe_bus_width_reserved
 };
 
+struct txgbe_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool user_set_promisc;
+};
+
 /* Bus parameters */
 struct txgbe_bus_info {
 	enum txgbe_bus_speed speed;
@@ -460,13 +503,24 @@ struct txgbe_bus_info {
 struct txgbe_hw;
 
 struct txgbe_mac_operations {
+	s32 (*init_hw)(struct txgbe_hw *hw);
 	s32 (*reset_hw)(struct txgbe_hw *hw);
+	s32 (*start_hw)(struct txgbe_hw *hw);
+	s32 (*get_mac_addr)(struct txgbe_hw *hw, u8 *mac_addr);
+	s32 (*get_san_mac_addr)(struct txgbe_hw *hw, u8 *san_mac_addr);
 	s32 (*stop_adapter)(struct txgbe_hw *hw);
 	s32 (*get_bus_info)(struct txgbe_hw *hw);
 	s32 (*set_lan_id)(struct txgbe_hw *hw);
 
 	/* RAR */
+	s32 (*set_rar)(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
+		       u32 enable_addr);
+	s32 (*clear_rar)(struct txgbe_hw *hw, u32 index);
 	s32 (*disable_rx)(struct txgbe_hw *hw);
+	s32 (*set_vmdq_san_mac)(struct txgbe_hw *hw, u32 vmdq);
+	s32 (*clear_vmdq)(struct txgbe_hw *hw, u32 rar, u32 vmdq);
+	s32 (*init_rx_addrs)(struct txgbe_hw *hw);
+	s32 (*init_uta_tables)(struct txgbe_hw *hw);
 
 	/* Manageability interface */
 	s32 (*init_thermal_sensor_thresh)(struct txgbe_hw *hw);
@@ -474,8 +528,16 @@ struct txgbe_mac_operations {
 
 struct txgbe_mac_info {
 	struct txgbe_mac_operations ops;
+	u8 addr[TXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8 perm_addr[TXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8 san_addr[TXGBE_ETH_LENGTH_OF_ADDRESS];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 num_rar_entries;
 	u32 max_tx_queues;
 	u32 max_rx_queues;
+	u8  san_mac_rar_index;
+	bool autotry_restart;
 	struct txgbe_thermal_sensor_data  thermal_sensor_data;
 	bool set_lben;
 };
@@ -483,6 +545,7 @@ struct txgbe_mac_info {
 struct txgbe_hw {
 	u8 __iomem *hw_addr;
 	struct txgbe_mac_info mac;
+	struct txgbe_addr_filter_info addr_ctrl;
 	struct txgbe_bus_info bus;
 	u16 device_id;
 	u16 vendor_id;
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread
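
The mac_table introduced in this patch shadows the hardware RAR array:
an entry's TXGBE_MAC_STATE_IN_USE bit says whether the slot should be
programmed, TXGBE_MAC_STATE_MODIFIED marks it dirty, and
txgbe_sync_mac_table() walks the table pushing only dirty entries to
hardware.  A sketch of how a later filter helper could claim a free
slot under that scheme (illustrative only; txgbe_add_mac_filter() is a
hypothetical name, not part of this patch):

	/* Hypothetical: claim the first free RAR slot for addr/pool and
	 * let txgbe_sync_mac_table() program it.  Sketch built on the
	 * txgbe_mac_addr state bits this patch defines in txgbe.h.
	 */
	static int txgbe_add_mac_filter(struct txgbe_adapter *adapter,
					const u8 *addr, u16 pool)
	{
		struct txgbe_hw *hw = &adapter->hw;
		u32 i;

		for (i = 0; i < hw->mac.num_rar_entries; i++) {
			if (adapter->mac_table[i].state &
			    TXGBE_MAC_STATE_IN_USE)
				continue;

			memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
			adapter->mac_table[i].pools = BIT_ULL(pool);
			adapter->mac_table[i].state |=
				TXGBE_MAC_STATE_MODIFIED |
				TXGBE_MAC_STATE_IN_USE;
			txgbe_sync_mac_table(adapter);
			return i;
		}

		return -ENOMEM;
	}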

* [RFC PATCH net-next 04/16] net: txgbe: Add operations to interact with firmware
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (2 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 03/16] net: txgbe: Set MAC address and register netdev Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 05/16] net: txgbe: Identify PHY and SFP module Jiawen Wu
                   ` (11 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Add firmware interaction to get EEPROM information.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |   1 +
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 965 +++++++++++++++++-
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |  26 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   |  94 +-
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   | 181 ++++
 5 files changed, 1256 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index a10792612c2e..d0ea817e2f42 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -46,6 +46,7 @@ struct txgbe_adapter {
 	struct txgbe_hw hw;
 	u16 msg_enable;
 
+	char eeprom_id[32];
 	bool netdev_registered;
 
 	struct txgbe_mac_addr *mac_table;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index ef44da54c954..34a7c8dad0e4 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -9,6 +9,9 @@
 #define TXGBE_SP_MAX_RX_QUEUES  128
 #define TXGBE_SP_RAR_ENTRIES    128
 
+static s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw);
+static void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw);
+
 s32 txgbe_init_hw(struct txgbe_hw *hw)
 {
 	s32 status;
@@ -24,6 +27,118 @@ s32 txgbe_init_hw(struct txgbe_hw *hw)
 	return status;
 }
 
+/**
+ *  txgbe_read_pba_string - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ **/
+s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num,
+			  u32 pba_num_size)
+{
+	s32 ret_val;
+	u16 data;
+	u16 pba_ptr;
+	u16 offset;
+	u16 length;
+
+	if (!pba_num) {
+		txgbe_dbg(hw, "PBA string buffer was null\n");
+		return TXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	ret_val = TCALL(hw, eeprom.ops.read,
+			hw->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR,
+			&data);
+	if (ret_val) {
+		txgbe_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+
+	ret_val = TCALL(hw, eeprom.ops.read,
+			hw->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR,
+			&pba_ptr);
+	if (ret_val) {
+		txgbe_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+
+	/* if data is not the ptr guard, the PBA is stored in legacy format,
+	 * which means pba_ptr is actually our second data word for the PBA
+	 * number and we can decode it into an ASCII string
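+	 *
+	 * Illustrative example: data = 0x1234 with pba_ptr = 0x5678
+	 * decodes to "123456-078"; the '0' at offset 7 is fixed by the
+	 * legacy layout.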
+	 */
+	if (data != TXGBE_PBANUM_PTR_GUARD) {
+		txgbe_dbg(hw, "NVM PBA number is not stored as string\n");
+
+		/* we will need 11 characters to store the PBA */
+		if (pba_num_size < 11) {
+			txgbe_dbg(hw, "PBA string buffer too small\n");
+			return TXGBE_ERR_NO_SPACE;
+		}
+
+		/* extract hex string from data and pba_ptr */
+		pba_num[0] = (data >> 12) & 0xF;
+		pba_num[1] = (data >> 8) & 0xF;
+		pba_num[2] = (data >> 4) & 0xF;
+		pba_num[3] = data & 0xF;
+		pba_num[4] = (pba_ptr >> 12) & 0xF;
+		pba_num[5] = (pba_ptr >> 8) & 0xF;
+		pba_num[6] = '-';
+		pba_num[7] = 0;
+		pba_num[8] = (pba_ptr >> 4) & 0xF;
+		pba_num[9] = pba_ptr & 0xF;
+
+		/* put a null character on the end of our string */
+		pba_num[10] = '\0';
+
+		/* switch all the data but the '-' to hex char */
+		for (offset = 0; offset < 10; offset++) {
+			if (pba_num[offset] < 0xA)
+				pba_num[offset] += '0';
+			else if (pba_num[offset] < 0x10)
+				pba_num[offset] += 'A' - 0xA;
+		}
+
+		return 0;
+	}
+
+	ret_val = TCALL(hw, eeprom.ops.read, pba_ptr, &length);
+	if (ret_val) {
+		txgbe_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+
+	if (length == 0xFFFF || length == 0) {
+		txgbe_dbg(hw, "NVM PBA number section invalid length\n");
+		return TXGBE_ERR_PBA_SECTION;
+	}
+
+	/* check if pba_num buffer is big enough */
+	if (pba_num_size  < (((u32)length * 2) - 1)) {
+		txgbe_dbg(hw, "PBA string buffer too small\n");
+		return TXGBE_ERR_NO_SPACE;
+	}
+
+	/* trim pba length from start of string */
+	pba_ptr++;
+	length--;
+
+	for (offset = 0; offset < length; offset++) {
+		ret_val = TCALL(hw, eeprom.ops.read, pba_ptr + offset, &data);
+		if (ret_val) {
+			txgbe_dbg(hw, "NVM Read Error\n");
+			return ret_val;
+		}
+		pba_num[offset * 2] = (u8)(data >> 8);
+		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+	}
+	pba_num[offset * 2] = '\0';
+
+	return 0;
+}
+
 /**
  *  txgbe_get_mac_addr - Generic get MAC address
  *  @hw: pointer to hardware structure
@@ -192,6 +307,69 @@ s32 txgbe_stop_adapter(struct txgbe_hw *hw)
 	return txgbe_disable_pcie_master(hw);
 }
 
+/**
+ *  txgbe_get_eeprom_semaphore - Get hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
+ **/
+static s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw)
+{
+	s32 status = TXGBE_ERR_EEPROM;
+	u32 timeout = 2000;
+	u32 i;
+	u32 swsm;
+
+	/* Get SMBI software semaphore between device drivers first */
+	for (i = 0; i < timeout; i++) {
+		/* If the SMBI bit is 0 when we read it, then the bit will be
+		 * set and we have the semaphore
+		 */
+		swsm = rd32(hw, TXGBE_MIS_SWSM);
+		if (!(swsm & TXGBE_MIS_SWSM_SMBI)) {
+			status = 0;
+			break;
+		}
+		usleep_range(50, 100);
+	}
+
+	if (i == timeout) {
+		txgbe_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
+
+		/* this release is particularly important because our attempts
+		 * above to get the semaphore may have succeeded, and if there
+		 * was a timeout, we should unconditionally clear the semaphore
+		 * bits to free the driver to make progress
+		 */
+		txgbe_release_eeprom_semaphore(hw);
+
+		usleep_range(50, 100);
+		/* one last try
+		 * If the SMBI bit is 0 when we read it, then the bit will be
+		 * set and we have the semaphore
+		 */
+		swsm = rd32(hw, TXGBE_MIS_SWSM);
+		if (!(swsm & TXGBE_MIS_SWSM_SMBI))
+			status = 0;
+	}
+
+	return status;
+}
+
+/**
+ *  txgbe_release_eeprom_semaphore - Release hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  This function clears hardware semaphore bits.
+ **/
+static void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw)
+{
+	if (txgbe_check_mng_access(hw)) {
+		wr32m(hw, TXGBE_MIS_SWSM, TXGBE_MIS_SWSM_SMBI, 0);
+		TXGBE_WRITE_FLUSH(hw);
+	}
+}
+
 /**
  *  txgbe_set_rar - Set Rx address register
  *  @hw: pointer to hardware structure
@@ -389,17 +567,139 @@ s32 txgbe_disable_pcie_master(struct txgbe_hw *hw)
 	return status;
 }
 
+/**
+ *  txgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
+ *
+ *  Acquires the SWFW semaphore through the GSSR register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask)
+{
+	u32 gssr = 0;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	u32 timeout = 200;
+	u32 i;
+
+	for (i = 0; i < timeout; i++) {
+		/* SW NVM semaphore bit is used for access to all
+		 * SW_FW_SYNC bits (not just NVM)
+		 */
+		if (txgbe_get_eeprom_semaphore(hw))
+			return TXGBE_ERR_SWFW_SYNC;
+
+		if (txgbe_check_mng_access(hw)) {
+			gssr = rd32(hw, TXGBE_MNG_SWFW_SYNC);
+			if (gssr & (fwmask | swmask)) {
+				/* Resource is currently in use by FW or SW */
+				txgbe_release_eeprom_semaphore(hw);
+				usleep_range(5000, 6000);
+			} else {
+				gssr |= swmask;
+				wr32(hw, TXGBE_MNG_SWFW_SYNC, gssr);
+				txgbe_release_eeprom_semaphore(hw);
+				return 0;
+			}
+		}
+	}
+
+	/* If time expired clear the bits holding the lock and retry */
+	if (gssr & (fwmask | swmask))
+		txgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
+
+	usleep_range(5000, 6000);
+	return TXGBE_ERR_SWFW_SYNC;
+}
+
+/**
+ *  txgbe_release_swfw_sync - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
+ *
+ *  Releases the SWFW semaphore through the GSSR register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask)
+{
+	txgbe_get_eeprom_semaphore(hw);
+	if (txgbe_check_mng_access(hw))
+		wr32m(hw, TXGBE_MNG_SWFW_SYNC, mask, 0);
+
+	txgbe_release_eeprom_semaphore(hw);
+
+	return 0;
+}
+
+/**
+ *  txgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_offset: SAN MAC address offset
+ *
+ *  This function will read the EEPROM location for the SAN MAC address
+ *  pointer, and returns the value at that location.  This is used in both
+ *  get and set mac_addr routines.
+ **/
+static s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw,
+					 u16 *san_mac_offset)
+{
+	s32 ret_val;
+
+	/* First read the EEPROM pointer to see if the MAC addresses are
+	 * available.
+	 */
+	ret_val = TCALL(hw, eeprom.ops.read,
+			hw->eeprom.sw_region_offset + TXGBE_SAN_MAC_ADDR_PTR,
+			san_mac_offset);
+	if (ret_val) {
+		ERROR_REPORT2(hw, TXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed",
+			      TXGBE_SAN_MAC_ADDR_PTR);
+	}
+
+	return ret_val;
+}
+
 /**
  *  txgbe_get_san_mac_addr - SAN MAC address retrieval from the EEPROM
  *  @hw: pointer to hardware structure
  *  @san_mac_addr: SAN MAC address
  *
- *  Reads the SAN MAC address.
+ *  Reads the SAN MAC address from the EEPROM.
  **/
 s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr)
 {
+	u16 san_mac_data, san_mac_offset;
+	s32 ret_val;
 	u8 i;
 
+	/* First read the EEPROM pointer to see if the MAC addresses are
+	 * available.  If they're not, no point in calling set_lan_id() here.
+	 */
+	ret_val = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+		goto san_mac_addr_out;
+
+	/* apply the port offset to the address offset */
+	if (hw->bus.func)
+		san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT1_OFFSET;
+	else
+		san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT0_OFFSET;
+	for (i = 0; i < 3; i++) {
+		ret_val = TCALL(hw, eeprom.ops.read, san_mac_offset,
+				&san_mac_data);
+		if (ret_val) {
+			ERROR_REPORT2(hw, TXGBE_ERROR_INVALID_STATE,
+				      "eeprom read at offset %d failed",
+				      san_mac_offset);
+			goto san_mac_addr_out;
+		}
+		san_mac_addr[i * 2] = (u8)(san_mac_data);
+		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+		san_mac_offset++;
+	}
+	return 0;
+
+san_mac_addr_out:
 	/* No addresses available in this EEPROM.  It's not an
 	 * error though, so just wipe the local address and return.
 	 */
@@ -480,6 +780,324 @@ s32 txgbe_init_uta_tables(struct txgbe_hw *hw)
 	return 0;
 }
 
+/**
+ *  txgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function will read the EEPROM from the alternative SAN MAC address
+ *  block to check for support of the alternative WWNN/WWPN prefixes.
+ **/
+s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
+			 u16 *wwpn_prefix)
+{
+	u16 offset, caps;
+	u16 alt_san_mac_blk_offset;
+
+	/* clear output first */
+	*wwnn_prefix = 0xFFFF;
+	*wwpn_prefix = 0xFFFF;
+
+	/* check if alternative SAN MAC is supported */
+	offset = hw->eeprom.sw_region_offset + TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+	if (TCALL(hw, eeprom.ops.read, offset, &alt_san_mac_blk_offset))
+		goto wwn_prefix_err;
+
+	if (alt_san_mac_blk_offset == 0 ||
+	    alt_san_mac_blk_offset == 0xFFFF)
+		goto wwn_prefix_out;
+
+	/* check capability in alternative san mac address block */
+	offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+	if (TCALL(hw, eeprom.ops.read, offset, &caps))
+		goto wwn_prefix_err;
+	if (!(caps & TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+		goto wwn_prefix_out;
+
+	/* get the corresponding prefix for WWNN/WWPN */
+	offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+	if (TCALL(hw, eeprom.ops.read, offset, wwnn_prefix)) {
+		ERROR_REPORT2(hw, TXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed", offset);
+	}
+
+	offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+	if (TCALL(hw, eeprom.ops.read, offset, wwpn_prefix))
+		goto wwn_prefix_err;
+
+	goto wwn_prefix_out;
+
+wwn_prefix_err:
+	ERROR_REPORT2(hw, TXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", offset);
+wwn_prefix_out:
+	return 0;
+}
+
+/**
+ *  txgbe_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to EEPROM
+ *  @length: size of EEPROM to calculate a checksum for
+ *  Calculates the checksum over a buffer of the specified length.  The
+ *  calculated checksum is returned.
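+ *
+ *  Illustrative example: for the bytes { 0x01, 0x02, 0x03, 0x04 } the
+ *  sum is 0x0A, so the returned checksum byte is 0xF6 and the complete
+ *  buffer then sums to zero modulo 256.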
+ **/
+u8 txgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+	u32 i;
+	u8 sum = 0;
+
+	if (!buffer)
+		return 0;
+
+	for (i = 0; i < length; i++)
+		sum += buffer[i];
+
+	return (u8)(0 - sum);
+}
+
+/**
+ *  txgbe_host_interface_command - Issue command to manageability block
+ *  @hw: pointer to the HW structure
+ *  @buffer: contains the command to write and where the return status will
+ *   be placed
+ *  @length: length of buffer, must be multiple of 4 bytes
+ *  @timeout: time in ms to wait for command completion
+ *  @return_data: read and return data from the buffer (true) or not (false)
+ *   Needed because FW structures are big endian and decoding of
+ *   these fields can be 8 bit or 16 bit based on command. Decoding
+ *   is not easily understood without making a table of commands.
+ *   So we will leave this up to the caller to read back the data
+ *   in these cases.
+ *
+ *  Communicates with the manageability block.  On success return 0
+ *  else return TXGBE_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer,
+				 u32 length, u32 timeout, bool return_data)
+{
+	u32 hicr, i, bi;
+	u32 hdr_size = sizeof(struct txgbe_hic_hdr);
+	u16 buf_len;
+	u32 dword_len;
+	s32 status = 0;
+	u32 buf[64] = {};
+
+	if (length == 0 || length > TXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+		txgbe_dbg(hw, "Buffer length failure, buffersize=%d.\n", length);
+		return TXGBE_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	if (TCALL(hw, mac.ops.acquire_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB) != 0)
+		return TXGBE_ERR_SWFW_SYNC;
+
+	/* Calculate length in DWORDs. We must be DWORD aligned */
+	if ((length % (sizeof(u32))) != 0) {
+		txgbe_dbg(hw, "Buffer length failure, not aligned to dword");
+		status = TXGBE_ERR_INVALID_ARGUMENT;
+		goto rel_out;
+	}
+
+	dword_len = length >> 2;
+
+	/* The device driver writes the relevant command block
+	 * into the ram area.
+	 */
+	for (i = 0; i < dword_len; i++) {
+		if (txgbe_check_mng_access(hw)) {
+			wr32a(hw, TXGBE_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
+			/* write flush */
+			buf[i] = rd32a(hw, TXGBE_MNG_MBOX, i);
+		} else {
+			status = TXGBE_ERR_MNG_ACCESS_FAILED;
+			goto rel_out;
+		}
+	}
+	/* Setting this bit tells the ARC that a new command is pending. */
+	if (txgbe_check_mng_access(hw)) {
+		wr32m(hw, TXGBE_MNG_MBOX_CTL,
+		      TXGBE_MNG_MBOX_CTL_SWRDY, TXGBE_MNG_MBOX_CTL_SWRDY);
+	} else {
+		status = TXGBE_ERR_MNG_ACCESS_FAILED;
+		goto rel_out;
+	}
+
+	for (i = 0; i < timeout; i++) {
+		if (txgbe_check_mng_access(hw)) {
+			hicr = rd32(hw, TXGBE_MNG_MBOX_CTL);
+			if ((hicr & TXGBE_MNG_MBOX_CTL_FWRDY))
+				break;
+		}
+		usleep_range(1000, 2000);
+	}
+
+	buf[0] = rd32(hw, TXGBE_MNG_MBOX);
+	if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+		txgbe_dbg(hw, "Unknown command.\n");
+		status = TXGBE_ERR_MNG_ACCESS_FAILED;
+		goto rel_out;
+	}
+	/* Check command completion */
+	if (timeout != 0 && i == timeout) {
+		ERROR_REPORT1(hw, TXGBE_ERROR_CAUTION,
+			      "Command has failed with no status valid.\n");
+
+		ERROR_REPORT1(hw, TXGBE_ERROR_CAUTION, "write value:\n");
+		for (i = 0; i < dword_len; i++)
+			ERROR_REPORT1(hw, TXGBE_ERROR_CAUTION, "%x ", buffer[i]);
+		ERROR_REPORT1(hw, TXGBE_ERROR_CAUTION, "read value:\n");
+		for (i = 0; i < dword_len; i++)
+			ERROR_REPORT1(hw, TXGBE_ERROR_CAUTION, "%x ", buf[i]);
+		if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
+			status = TXGBE_ERR_HOST_INTERFACE_COMMAND;
+			goto rel_out;
+		}
+	}
+
+	if (!return_data)
+		goto rel_out;
+
+	/* Calculate length in DWORDs */
+	dword_len = hdr_size >> 2;
+
+	/* first pull in the header so we know the buffer length */
+	for (bi = 0; bi < dword_len; bi++) {
+		if (txgbe_check_mng_access(hw)) {
+			buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, bi);
+			le32_to_cpus(&buffer[bi]);
+		} else {
+			status = TXGBE_ERR_MNG_ACCESS_FAILED;
+			goto rel_out;
+		}
+	}
+
+	/* If there is any thing in data position pull it in */
+	buf_len = ((struct txgbe_hic_hdr *)buffer)->buf_len;
+	if (buf_len == 0)
+		goto rel_out;
+
+	if (length < buf_len + hdr_size) {
+		txgbe_dbg(hw, "Buffer not large enough for reply message.\n");
+		status = TXGBE_ERR_HOST_INTERFACE_COMMAND;
+		goto rel_out;
+	}
+
+	/* Calculate length in DWORDs, add 3 for odd lengths */
+	dword_len = (buf_len + 3) >> 2;
+
+	/* Pull in the rest of the buffer (bi is where we left off) */
+	for (; bi <= dword_len; bi++) {
+		if (txgbe_check_mng_access(hw)) {
+			buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, bi);
+			le32_to_cpus(&buffer[bi]);
+		} else {
+			status = TXGBE_ERR_MNG_ACCESS_FAILED;
+			goto rel_out;
+		}
+	}
+
+rel_out:
+	TCALL(hw, mac.ops.release_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB);
+	return status;
+}
+
+/**
+ *  txgbe_set_fw_drv_ver - Sends driver version to firmware
+ *  @hw: pointer to the HW structure
+ *  @maj: driver version major number
+ *  @min: driver version minor number
+ *  @build: driver version build number
+ *  @sub: driver version sub build number
+ *
+ *  Sends driver version number to firmware through the manageability
+ *  block.  On success return 0
+ *  else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ *  semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min,
+			 u8 build, u8 sub)
+{
+	struct txgbe_hic_drv_info fw_cmd;
+	int i;
+	s32 ret_val = 0;
+
+	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+	fw_cmd.port_num = (u8)hw->bus.func;
+	fw_cmd.ver_maj = maj;
+	fw_cmd.ver_min = min;
+	fw_cmd.ver_build = build;
+	fw_cmd.ver_sub = sub;
+	fw_cmd.hdr.checksum = 0;
+	fw_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&fw_cmd,
+						       (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+	fw_cmd.pad = 0;
+	fw_cmd.pad2 = 0;
+
+	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+		ret_val = txgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+						       sizeof(fw_cmd),
+						       TXGBE_HI_COMMAND_TIMEOUT,
+						       true);
+		if (ret_val != 0)
+			continue;
+
+		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+		    FW_CEM_RESP_STATUS_SUCCESS)
+			ret_val = 0;
+		else
+			ret_val = TXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  txgbe_reset_hostif - send reset cmd to fw
+ *  @hw: pointer to hardware structure
+ *
+ *  Sends reset cmd to firmware through the manageability
+ *  block.  On success return 0
+ *  else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ *  semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 txgbe_reset_hostif(struct txgbe_hw *hw)
+{
+	struct txgbe_hic_reset reset_cmd;
+	int i;
+	s32 status = 0;
+
+	reset_cmd.hdr.cmd = FW_RESET_CMD;
+	reset_cmd.hdr.buf_len = FW_RESET_LEN;
+	reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+	reset_cmd.lan_id = hw->bus.lan_id;
+	reset_cmd.reset_type = (u16)hw->reset_type;
+	reset_cmd.hdr.checksum = 0;
+	reset_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&reset_cmd,
+							  (FW_CEM_HDR_LEN +
+							   reset_cmd.hdr.buf_len));
+
+	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+		status = txgbe_host_interface_command(hw, (u32 *)&reset_cmd,
+						      sizeof(reset_cmd),
+						      TXGBE_HI_COMMAND_TIMEOUT,
+						      true);
+		if (status != 0)
+			continue;
+
+		if (reset_cmd.hdr.cmd_or_resp.ret_status ==
+		    FW_CEM_RESP_STATUS_SUCCESS)
+			status = 0;
+		else
+			status = TXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+		break;
+	}
+
+	return status;
+}
+
 /* cmd_addr is used for some special command:
  * 1. to be sector address, when implemented erase sector command
  * 2. to be flash address when implemented read, write flash address
@@ -577,6 +1195,26 @@ s32 txgbe_disable_rx(struct txgbe_hw *hw)
 	return 0;
 }
 
+/**
+ * txgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ */
+bool txgbe_mng_present(struct txgbe_hw *hw)
+{
+	u32 fwsm;
+
+	fwsm = rd32(hw, TXGBE_MIS_ST);
+	return fwsm & TXGBE_MIS_ST_MNG_INIT_DN;
+}
+
+bool txgbe_check_mng_access(struct txgbe_hw *hw)
+{
+	if (!txgbe_mng_present(hw))
+		return false;
+
+	return true;
+}
+
 int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
 {
 	u32 i = 0, reg = 0;
@@ -610,6 +1248,7 @@ int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
 s32 txgbe_init_ops(struct txgbe_hw *hw)
 {
 	struct txgbe_mac_info *mac = &hw->mac;
+	struct txgbe_eeprom_info *eeprom = &hw->eeprom;
 
 	/* MAC */
 	mac->ops.init_hw = txgbe_init_hw;
@@ -617,9 +1256,12 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	mac->ops.stop_adapter = txgbe_stop_adapter;
 	mac->ops.get_bus_info = txgbe_get_bus_info;
 	mac->ops.set_lan_id = txgbe_set_lan_id_multi_port_pcie;
+	mac->ops.acquire_swfw_sync = txgbe_acquire_swfw_sync;
+	mac->ops.release_swfw_sync = txgbe_release_swfw_sync;
 	mac->ops.reset_hw = txgbe_reset_hw;
 	mac->ops.start_hw = txgbe_start_hw;
 	mac->ops.get_san_mac_addr = txgbe_get_san_mac_addr;
+	mac->ops.get_wwn_prefix = txgbe_get_wwn_prefix;
 
 	/* RAR */
 	mac->ops.set_rar = txgbe_set_rar;
@@ -634,7 +1276,16 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	mac->max_rx_queues      = TXGBE_SP_MAX_RX_QUEUES;
 	mac->max_tx_queues      = TXGBE_SP_MAX_TX_QUEUES;
 
+	/* EEPROM */
+	eeprom->ops.init_params = txgbe_init_eeprom_params;
+	eeprom->ops.calc_checksum = txgbe_calc_eeprom_checksum;
+	eeprom->ops.read = txgbe_read_ee_hostif;
+	eeprom->ops.read_buffer = txgbe_read_ee_hostif_buffer;
+	eeprom->ops.validate_checksum = txgbe_validate_eeprom_checksum;
+
 	/* Manageability interface */
+	mac->ops.set_fw_drv_ver = txgbe_set_fw_drv_ver;
+
 	mac->ops.init_thermal_sensor_thresh =
 				      txgbe_init_thermal_sensor_thresh;
 
@@ -703,14 +1354,21 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
 	if (status != 0)
 		goto reset_hw_out;
 
-	if (hw->bus.lan_id == 0)
-		reset = TXGBE_MIS_RST_LAN0_RST;
-	else
-		reset = TXGBE_MIS_RST_LAN1_RST;
-
-	wr32(hw, TXGBE_MIS_RST,
-	     reset | rd32(hw, TXGBE_MIS_RST));
-	TXGBE_WRITE_FLUSH(hw);
+	if (txgbe_mng_present(hw)) {
+		if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) ||
+		      ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) {
+			txgbe_reset_hostif(hw);
+		}
+	} else {
+		if (hw->bus.lan_id == 0)
+			reset = TXGBE_MIS_RST_LAN0_RST;
+		else
+			reset = TXGBE_MIS_RST_LAN1_RST;
+
+		wr32(hw, TXGBE_MIS_RST,
+		     reset | rd32(hw, TXGBE_MIS_RST));
+		TXGBE_WRITE_FLUSH(hw);
+	}
 	usleep_range(10, 100);
 
 	if (hw->bus.lan_id == 0)
@@ -750,6 +1408,10 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
 		hw->mac.num_rar_entries--;
 	}
 
+	/* Store the alternative WWNN/WWPN prefix */
+	TCALL(hw, mac.ops.get_wwn_prefix, &hw->mac.wwnn_prefix,
+	      &hw->mac.wwpn_prefix);
+
 	pci_set_master(adapter->pdev);
 
 reset_hw_out:
@@ -784,3 +1446,288 @@ s32 txgbe_start_hw(struct txgbe_hw *hw)
 
 	return ret_val;
 }
+
+/**
+ *  txgbe_init_eeprom_params - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters txgbe_eeprom_info within the
+ *  txgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 txgbe_init_eeprom_params(struct txgbe_hw *hw)
+{
+	struct txgbe_eeprom_info *eeprom = &hw->eeprom;
+	u16 eeprom_size;
+	s32 status = 0;
+	u16 data;
+
+	if (eeprom->type == txgbe_eeprom_uninitialized) {
+		eeprom->semaphore_delay = 10;
+		eeprom->type = txgbe_eeprom_none;
+
+		if (!(rd32(hw, TXGBE_SPI_STATUS) &
+		      TXGBE_SPI_STATUS_FLASH_BYPASS)) {
+			eeprom->type = txgbe_flash;
+
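+			/* 4 KB flash region; word_size is in 16-bit words */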
+			eeprom_size = 4096;
+			eeprom->word_size = eeprom_size >> 1;
+
+			txgbe_dbg(hw, "EEPROM params: type = %d, size = %d\n",
+				  eeprom->type, eeprom->word_size);
+		}
+	}
+
+	status = TCALL(hw, eeprom.ops.read, TXGBE_SW_REGION_PTR, &data);
+	if (status) {
+		txgbe_dbg(hw, "NVM Read Error\n");
+		return status;
+	}
+	eeprom->sw_region_offset = data >> 1;
+
+	return status;
+}
+
+/**
+ *  txgbe_read_ee_hostif_data - Read EEPROM word using a host interface cmd
+ *  assuming that the semaphore is already obtained.
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of the word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset,
+			      u16 *data)
+{
+	s32 status;
+	struct txgbe_hic_read_shadow_ram buffer;
+
+	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+	buffer.hdr.req.buf_lenh = 0;
+	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+	/* convert offset from words to bytes */
+	buffer.address = (__force u32)cpu_to_be32(offset * 2);
+	/* one word */
+	buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
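+	/* the firmware expects address and length big-endian */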
+
+	status = txgbe_host_interface_command(hw, (u32 *)&buffer,
+					      sizeof(buffer),
+					      TXGBE_HI_COMMAND_TIMEOUT, false);
+
+	if (status)
+		return status;
+	if (txgbe_check_mng_access(hw)) {
+		*data = (u16)rd32a(hw, TXGBE_MNG_MBOX, FW_NVM_DATA_OFFSET);
+	} else {
+		status = TXGBE_ERR_MNG_ACCESS_FAILED;
+		return status;
+	}
+
+	return 0;
+}
+
+/**
+ *  txgbe_read_ee_hostif - Read EEPROM word using a host interface cmd
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of the word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset,
+			 u16 *data)
+{
+	s32 status = 0;
+
+	if (TCALL(hw, mac.ops.acquire_swfw_sync,
+		  TXGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) {
+		status = txgbe_read_ee_hostif_data(hw, offset, data);
+		TCALL(hw, mac.ops.release_swfw_sync,
+		      TXGBE_MNG_SWFW_SYNC_SW_FLASH);
+	} else {
+		status = TXGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  txgbe_read_ee_hostif_buffer - Read EEPROM word(s) using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of the first word in the EEPROM to read
+ *  @words: number of words
+ *  @data: word(s) read from the EEPROM
+ *
+ *  Reads 16 bit word(s) from the EEPROM using the hostif.
+ **/
+s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw,
+				u16 offset, u16 words, u16 *data)
+{
+	struct txgbe_hic_read_shadow_ram buffer;
+	u32 current_word = 0;
+	u16 words_to_read;
+	s32 status;
+	u32 i;
+	u32 value = 0;
+
+	/* Take semaphore for the entire operation. */
+	status = TCALL(hw, mac.ops.acquire_swfw_sync,
+		       TXGBE_MNG_SWFW_SYNC_SW_FLASH);
+	if (status) {
+		txgbe_dbg(hw, "EEPROM read buffer - semaphore failed\n");
+		return status;
+	}
+	while (words) {
+		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+		else
+			words_to_read = words;
+
+		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+		buffer.hdr.req.buf_lenh = 0;
+		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+		/* convert offset from words to bytes */
+		buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2);
+		buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);
+
+		status = txgbe_host_interface_command(hw, (u32 *)&buffer,
+						      sizeof(buffer),
+						      TXGBE_HI_COMMAND_TIMEOUT,
+						      false);
+
+		if (status) {
+			txgbe_dbg(hw, "Host interface command failed\n");
+			goto out;
+		}
+
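+		/* Each 32-bit mailbox read returns two 16-bit EEPROM
+		 * words, hence the second increment of i inside the loop.
+		 */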
+		for (i = 0; i < words_to_read; i++) {
+			u32 reg = TXGBE_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) +
+				  2 * i;
+			if (txgbe_check_mng_access(hw)) {
+				value = rd32(hw, reg);
+			} else {
+				status = TXGBE_ERR_MNG_ACCESS_FAILED;
+				goto out;
+			}
+			data[current_word] = (u16)(value & 0xffff);
+			current_word++;
+			i++;
+			if (i < words_to_read) {
+				value >>= 16;
+				data[current_word] = (u16)(value & 0xffff);
+				current_word++;
+			}
+		}
+		words -= words_to_read;
+	}
+
+out:
+	TCALL(hw, mac.ops.release_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_FLASH);
+	return status;
+}
+
+/**
+ *  txgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw)
+{
+	u16 *eeprom_ptrs;
+	u16 checksum = 0;
+	s32 status;
+	u16 i;
+
+	TCALL(hw, eeprom.ops.init_params);
+
+	eeprom_ptrs = vmalloc(TXGBE_EEPROM_LAST_WORD * sizeof(u16));
+	if (!eeprom_ptrs)
+		return TXGBE_ERR_NO_SPACE;
+
+	/* Read pointer area */
+	status = txgbe_read_ee_hostif_buffer(hw, 0,
+					     TXGBE_EEPROM_LAST_WORD,
+					     eeprom_ptrs);
+	if (status) {
+		txgbe_dbg(hw, "Failed to read EEPROM image\n");
+		vfree(eeprom_ptrs);
+		return status;
+	}
+
+	for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
+		if (i != hw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
+			checksum += eeprom_ptrs[i];
+
+	checksum = (u16)TXGBE_EEPROM_SUM - checksum;
+	vfree(eeprom_ptrs);
+
+	return (s32)checksum;
+}
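+
+/* Worked example (illustrative): if every word except the checksum
+ * word sums to 0x1234, the stored checksum must be
+ * 0xBABA - 0x1234 = 0xA886, so that the whole image sums to
+ * TXGBE_EEPROM_SUM.
+ */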
+
+/**
+ *  txgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum.  If the
+ *  caller does not need checksum_val, the value can be NULL.
+ **/
+s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw,
+				   u16 *checksum_val)
+{
+	s32 status;
+	u16 checksum;
+	u16 read_checksum = 0;
+
+	/* Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = TCALL(hw, eeprom.ops.read, 0, &checksum);
+	if (status) {
+		txgbe_dbg(hw, "EEPROM read failed\n");
+		return status;
+	}
+
+	status = TCALL(hw, eeprom.ops.calc_checksum);
+	if (status < 0)
+		return status;
+
+	checksum = (u16)(status & 0xffff);
+
+	status = txgbe_read_ee_hostif(hw, hw->eeprom.sw_region_offset +
+				      TXGBE_EEPROM_CHECKSUM,
+				      &read_checksum);
+	if (status)
+		return status;
+
+	/* Verify read checksum from EEPROM is the same as
+	 * calculated checksum
+	 */
+	if (read_checksum != checksum) {
+		status = TXGBE_ERR_EEPROM_CHECKSUM;
+		ERROR_REPORT1(hw, TXGBE_ERROR_INVALID_STATE,
+			      "Invalid EEPROM checksum\n");
+	}
+
+	/* If the user cares, return the calculated checksum */
+	if (checksum_val)
+		*checksum_val = checksum;
+
+	return status;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index 6b17942c4670..4871429fc0fc 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -18,6 +18,8 @@
 
 s32 txgbe_init_hw(struct txgbe_hw *hw);
 s32 txgbe_start_hw(struct txgbe_hw *hw);
+s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num,
+			  u32 pba_num_size);
 s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr);
 s32 txgbe_get_bus_info(struct txgbe_hw *hw);
 void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status);
@@ -29,6 +31,8 @@ s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
 s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index);
 s32 txgbe_init_rx_addrs(struct txgbe_hw *hw);
 
+s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask);
+s32 txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask);
 s32 txgbe_disable_pcie_master(struct txgbe_hw *hw);
 
 s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
@@ -37,6 +41,19 @@ s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq);
 s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq);
 s32 txgbe_init_uta_tables(struct txgbe_hw *hw);
 
+s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
+			 u16 *wwpn_prefix);
+
+s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min,
+			 u8 build, u8 ver);
+s32 txgbe_reset_hostif(struct txgbe_hw *hw);
+u8 txgbe_calculate_checksum(u8 *buffer, u32 length);
+s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer,
+				 u32 length, u32 timeout, bool return_data);
+
+bool txgbe_mng_present(struct txgbe_hw *hw);
+bool txgbe_check_mng_access(struct txgbe_hw *hw);
+
 s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw);
 s32 txgbe_disable_rx(struct txgbe_hw *hw);
 int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit);
@@ -45,6 +62,15 @@ int txgbe_reset_misc(struct txgbe_hw *hw);
 s32 txgbe_reset_hw(struct txgbe_hw *hw);
 s32 txgbe_init_ops(struct txgbe_hw *hw);
 
+s32 txgbe_init_eeprom_params(struct txgbe_hw *hw);
+s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw);
+s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw,
+				   u16 *checksum_val);
+s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw,
+				u16 offset, u16 words, u16 *data);
+s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset, u16 *data);
+s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset, u16 *data);
+
 u8 fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr);
 u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr);
 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 21b63856db49..948d78d178ae 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -100,6 +100,20 @@ static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev)
 	return false;
 }
 
+static void txgbe_release_hw_control(struct txgbe_adapter *adapter)
+{
+	/* Let firmware take over control of hw */
+	wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL,
+	      TXGBE_CFG_PORT_CTL_DRV_LOAD, 0);
+}
+
+static void txgbe_get_hw_control(struct txgbe_adapter *adapter)
+{
+	/* Let firmware know the driver has taken over */
+	wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL,
+	      TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD);
+}
+
 static void txgbe_sync_mac_table(struct txgbe_adapter *adapter)
 {
 	struct txgbe_hw *hw = &adapter->hw;
@@ -151,6 +165,11 @@ static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter)
 	txgbe_sync_mac_table(adapter);
 }
 
+static void txgbe_up_complete(struct txgbe_adapter *adapter)
+{
+	txgbe_get_hw_control(adapter);
+}
+
 void txgbe_reset(struct txgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -300,8 +319,12 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
  **/
 int txgbe_open(struct net_device *netdev)
 {
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
 	netif_carrier_off(netdev);
 
+	txgbe_up_complete(adapter);
+
 	return 0;
 }
 
@@ -334,6 +357,8 @@ int txgbe_close(struct net_device *netdev)
 
 	txgbe_down(adapter);
 
+	txgbe_release_hw_control(adapter);
+
 	return 0;
 }
 
@@ -349,6 +374,8 @@ static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		txgbe_close_suspend(adapter);
 	rtnl_unlock();
 
+	txgbe_release_hw_control(adapter);
+
 	pci_disable_device(pdev);
 }
 
@@ -439,6 +466,12 @@ static int txgbe_probe(struct pci_dev *pdev,
 	struct net_device *netdev;
 	int err, expected_gts;
 
+	u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
+	u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
+	u16 build = 0, major = 0, patch = 0;
+	u8 part_str[TXGBE_PBANUM_LENGTH];
+	u32 etrack_id = 0;
+
 	err = pci_enable_device_mem(pdev);
 	if (err)
 		return err;
@@ -514,6 +547,14 @@ static int txgbe_probe(struct pci_dev *pdev,
 
 	netdev->features |= NETIF_F_HIGHDMA;
 
+	/* make sure the EEPROM is good */
+	if (TCALL(hw, eeprom.ops.validate_checksum, NULL)) {
+		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
+		wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_SW_RST);
+		err = -EIO;
+		goto err_free_mac_table;
+	}
+
 	eth_hw_addr_set(netdev, hw->mac.perm_addr);
 
 	if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -524,11 +565,48 @@ static int txgbe_probe(struct pci_dev *pdev,
 
 	txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
 
+	/* Save off the EEPROM version number and Option ROM version which
+	 * together make a unique identifier for the EEPROM
+	 */
+	TCALL(hw, eeprom.ops.read,
+	      hw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
+	      &eeprom_verh);
+	TCALL(hw, eeprom.ops.read,
+	      hw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
+	      &eeprom_verl);
+	etrack_id = (eeprom_verh << 16) | eeprom_verl;
+
+	TCALL(hw, eeprom.ops.read,
+	      hw->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, &offset);
+
+	/* Make sure the offset to the iSCSI boot block is valid */
+	if (offset != 0x0 && offset != 0xffff) {
+		TCALL(hw, eeprom.ops.read, offset + 0x84, &eeprom_cfg_blkh);
+		TCALL(hw, eeprom.ops.read, offset + 0x83, &eeprom_cfg_blkl);
+
+		/* Only display the Option ROM version if one exists */
+		if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
+			major = eeprom_cfg_blkl >> 8;
+			build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
+			patch = eeprom_cfg_blkh & 0x00ff;
+
+			snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+				 "0x%08x, %d.%d.%d", etrack_id, major, build,
+				 patch);
+		} else {
+			snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+				 "0x%08x", etrack_id);
+		}
+	} else {
+		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+			 "0x%08x", etrack_id);
+	}
+
 	/* reset the hardware with the new settings */
 	err = TCALL(hw, mac.ops.start_hw);
 	if (err) {
 		dev_err(&pdev->dev, "HW init failed\n");
-		goto err_free_mac_table;
+		goto err_release_hw;
 	}
 
 	/* pick up the PCI bus settings for reporting later */
@@ -537,7 +615,7 @@ static int txgbe_probe(struct pci_dev *pdev,
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
-		goto err_free_mac_table;
+		goto err_release_hw;
 
 	pci_set_drvdata(pdev, adapter);
 	adapter->netdev_registered = true;
@@ -557,16 +635,26 @@ static int txgbe_probe(struct pci_dev *pdev,
 	if (expected_gts > 0)
 		txgbe_check_minimum_link(adapter);
 
+	/* First try to read PBA as a string */
+	err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH);
+	if (err)
+		strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH);
+
 	netif_info(adapter, probe, netdev, "%02x:%02x:%02x:%02x:%02x:%02x\n",
 		   netdev->dev_addr[0], netdev->dev_addr[1],
 		   netdev->dev_addr[2], netdev->dev_addr[3],
 		   netdev->dev_addr[4], netdev->dev_addr[5]);
 
+	/* firmware requires blank driver version */
+	TCALL(hw, mac.ops.set_fw_drv_ver, 0xFF, 0xFF, 0xFF, 0xFF);
+
 	/* add san mac addr to netdev */
 	txgbe_add_sanmac_netdev(netdev);
 
 	return 0;
 
+err_release_hw:
+	txgbe_release_hw_control(adapter);
 err_free_mac_table:
 	kfree(adapter->mac_table);
 err_pci_release_regions:
@@ -602,6 +690,8 @@ static void txgbe_remove(struct pci_dev *pdev)
 		adapter->netdev_registered = false;
 	}
 
+	txgbe_release_hw_control(adapter);
+
 	pci_release_selected_regions(pdev,
 				     pci_select_bars(pdev, IORESOURCE_MEM));
 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 5baf328138a5..b6abda615340 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -295,6 +295,30 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_PSR_LAN_FLEX_DW_H(_i)     (0x15C04 + ((_i) * 16))
 #define TXGBE_PSR_LAN_FLEX_MSK(_i)      (0x15C08 + ((_i) * 16))
 #define TXGBE_PSR_LAN_FLEX_CTL  0x15CFC
+/************************************** MNG ********************************/
+#define TXGBE_MNG_FW_SM         0x1E000
+#define TXGBE_MNG_SW_SM         0x1E004
+#define TXGBE_MNG_SWFW_SYNC     0x1E008
+#define TXGBE_MNG_MBOX          0x1E100
+#define TXGBE_MNG_MBOX_CTL      0x1E044
+#define TXGBE_MNG_OS2BMC_CNT    0x1E094
+#define TXGBE_MNG_BMC2OS_CNT    0x1E090
+
+/* Firmware Semaphore Register */
+#define TXGBE_MNG_FW_SM_MODE_MASK       0xE
+#define TXGBE_MNG_FW_SM_TS_ENABLED      0x1
+/* SW Semaphore Register bitmasks */
+#define TXGBE_MNG_SW_SM_SM              0x00000001U /* software Semaphore */
+
+/* SW_FW_SYNC definitions */
+#define TXGBE_MNG_SWFW_SYNC_SW_PHY      0x0001
+#define TXGBE_MNG_SWFW_SYNC_SW_FLASH    0x0008
+#define TXGBE_MNG_SWFW_SYNC_SW_MB       0x0004
+
+#define TXGBE_MNG_MBOX_CTL_SWRDY        0x1
+#define TXGBE_MNG_MBOX_CTL_SWACK        0x2
+#define TXGBE_MNG_MBOX_CTL_FWRDY        0x4
+#define TXGBE_MNG_MBOX_CTL_FWACK        0x8
 
 /************************************* ETH MAC *****************************/
 #define TXGBE_MAC_TX_CFG                0x11000
@@ -405,8 +429,40 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_PX_RR_CFG_RR_SZ           0x0000007EU
 #define TXGBE_PX_RR_CFG_RR_EN           0x00000001U
 
+/* Part Number String Length */
+#define TXGBE_PBANUM_LENGTH     32
+
+/* Checksum and EEPROM pointers */
+#define TXGBE_PBANUM_PTR_GUARD          0xFAFA
+#define TXGBE_EEPROM_CHECKSUM           0x2F
+#define TXGBE_EEPROM_SUM                0xBABA
+#define TXGBE_EEPROM_LAST_WORD          0x800
+#define TXGBE_FW_PTR                    0x0F
+#define TXGBE_PBANUM0_PTR               0x05
+#define TXGBE_PBANUM1_PTR               0x06
+#define TXGBE_SW_REGION_PTR             0x1C
+
+#define TXGBE_SAN_MAC_ADDR_PTR          0x18
+#define TXGBE_DEVICE_CAPS               0x1C
+#define TXGBE_EEPROM_VERSION_L          0x1D
+#define TXGBE_EEPROM_VERSION_H          0x1E
+#define TXGBE_ISCSI_BOOT_CONFIG         0x07
+
+#define TXGBE_SERIAL_NUMBER_MAC_ADDR    0x11
+
 #define TXGBE_ETH_LENGTH_OF_ADDRESS     6
 
+#define TXGBE_SAN_MAC_ADDR_PORT0_OFFSET         0x0
+#define TXGBE_SAN_MAC_ADDR_PORT1_OFFSET         0x3
+#define TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR          0x17 /* Alt. SAN MAC block */
+#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET      0x0 /* Alt SAN MAC capability */
+#define TXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET     0x1 /* Alt SAN MAC 0 offset */
+#define TXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET     0x4 /* Alt SAN MAC 1 offset */
+#define TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET      0x7 /* Alt WWNN prefix offset */
+#define TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET      0x8 /* Alt WWPN prefix offset */
+#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC      0x0 /* Alt SAN MAC exists */
+#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN      0x1 /* Alt WWN base exists */
+
 /******************************** PCI Bus Info *******************************/
 #define TXGBE_PCI_DEVICE_STATUS         0xAA
 #define TXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING     0x0020
@@ -442,8 +498,91 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_PCIDEVCTRL2_4_8s          0xd
 #define TXGBE_PCIDEVCTRL2_17_34s        0xe
 
+/****************** Manageability Host Interface defines *********************/
+#define TXGBE_HI_MAX_BLOCK_BYTE_LENGTH  256 /* Num of bytes in range */
+#define TXGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */
+#define TXGBE_HI_COMMAND_TIMEOUT        5000 /* Process HI command limit */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN                  0x4
+#define FW_CEM_CMD_DRIVER_INFO          0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN      0x5
+#define FW_CEM_CMD_RESERVED             0x0
+#define FW_CEM_MAX_RETRIES              3
+#define FW_CEM_RESP_STATUS_SUCCESS      0x1
+#define FW_READ_SHADOW_RAM_CMD          0x31
+#define FW_READ_SHADOW_RAM_LEN          0x6
+#define FW_DEFAULT_CHECKSUM             0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET              3
+#define FW_MAX_READ_BUFFER_SIZE         244
+#define FW_RESET_CMD                    0xDF
+#define FW_RESET_LEN                    0x2
+
+/* Host Interface Command Structures */
+struct txgbe_hic_hdr {
+	u8 cmd;
+	u8 buf_len;
+	union {
+		u8 cmd_resv;
+		u8 ret_status;
+	} cmd_or_resp;
+	u8 checksum;
+};
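+
+/* The header checksum is computed over FW_CEM_HDR_LEN + buf_len bytes
+ * of the command with the checksum field zeroed first (see
+ * txgbe_reset_hostif()).
+ */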
+
+struct txgbe_hic_hdr2_req {
+	u8 cmd;
+	u8 buf_lenh;
+	u8 buf_lenl;
+	u8 checksum;
+};
+
+struct txgbe_hic_hdr2_rsp {
+	u8 cmd;
+	u8 buf_lenl;
+	u8 buf_lenh_status;     /* 7-5: high bits of buf_len, 4-0: status */
+	u8 checksum;
+};
+
+union txgbe_hic_hdr2 {
+	struct txgbe_hic_hdr2_req req;
+	struct txgbe_hic_hdr2_rsp rsp;
+};
+
+struct txgbe_hic_drv_info {
+	struct txgbe_hic_hdr hdr;
+	u8 port_num;
+	u8 ver_sub;
+	u8 ver_build;
+	u8 ver_min;
+	u8 ver_maj;
+	u8 pad; /* end spacing to ensure length is a multiple of dwords */
+	u16 pad2; /* end spacing to ensure length is a multiple of dwords */
+};
+
+/* These need to be dword aligned */
+struct txgbe_hic_read_shadow_ram {
+	union txgbe_hic_hdr2 hdr;
+	u32 address;
+	u16 length;
+	u16 pad2;
+	u16 data;
+	u16 pad3;
+};
+
+struct txgbe_hic_reset {
+	struct txgbe_hic_hdr hdr;
+	u16 lan_id;
+	u16 reset_type;
+};
+
 /* Number of 100 microseconds we wait for PCI Express master disable */
 #define TXGBE_PCI_MASTER_DISABLE_TIMEOUT        800
+
+enum txgbe_eeprom_type {
+	txgbe_eeprom_uninitialized = 0,
+	txgbe_eeprom_spi,
+	txgbe_flash,
+	txgbe_eeprom_none /* No NVM support */
+};
 
 /* PCI bus types */
 enum txgbe_bus_type {
@@ -502,15 +641,29 @@ struct txgbe_bus_info {
 /* forward declaration */
 struct txgbe_hw;
 
+/* Function pointer table */
+struct txgbe_eeprom_operations {
+	s32 (*init_params)(struct txgbe_hw *hw);
+	s32 (*read)(struct txgbe_hw *hw, u16 offset, u16 *data);
+	s32 (*read_buffer)(struct txgbe_hw *hw,
+			   u16 offset, u16 words, u16 *data);
+	s32 (*validate_checksum)(struct txgbe_hw *hw, u16 *checksum_val);
+	s32 (*calc_checksum)(struct txgbe_hw *hw);
+};
+
 struct txgbe_mac_operations {
 	s32 (*init_hw)(struct txgbe_hw *hw);
 	s32 (*reset_hw)(struct txgbe_hw *hw);
 	s32 (*start_hw)(struct txgbe_hw *hw);
 	s32 (*get_mac_addr)(struct txgbe_hw *hw, u8 *mac_addr);
 	s32 (*get_san_mac_addr)(struct txgbe_hw *hw, u8 *san_mac_addr);
+	s32 (*get_wwn_prefix)(struct txgbe_hw *hw, u16 *wwnn_prefix,
+			      u16 *wwpn_prefix);
 	s32 (*stop_adapter)(struct txgbe_hw *hw);
 	s32 (*get_bus_info)(struct txgbe_hw *hw);
 	s32 (*set_lan_id)(struct txgbe_hw *hw);
+	s32 (*acquire_swfw_sync)(struct txgbe_hw *hw, u32 mask);
+	s32 (*release_swfw_sync)(struct txgbe_hw *hw, u32 mask);
 
 	/* RAR */
 	s32 (*set_rar)(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
@@ -523,14 +676,28 @@ struct txgbe_mac_operations {
 	s32 (*init_uta_tables)(struct txgbe_hw *hw);
 
 	/* Manageability interface */
+	s32 (*set_fw_drv_ver)(struct txgbe_hw *hw, u8 maj, u8 min,
+			      u8 build, u8 ver);
 	s32 (*init_thermal_sensor_thresh)(struct txgbe_hw *hw);
 };
 
+struct txgbe_eeprom_info {
+	struct txgbe_eeprom_operations ops;
+	enum txgbe_eeprom_type type;
+	u32 semaphore_delay;
+	u16 word_size;
+	u16 sw_region_offset;
+};
+
 struct txgbe_mac_info {
 	struct txgbe_mac_operations ops;
 	u8 addr[TXGBE_ETH_LENGTH_OF_ADDRESS];
 	u8 perm_addr[TXGBE_ETH_LENGTH_OF_ADDRESS];
 	u8 san_addr[TXGBE_ETH_LENGTH_OF_ADDRESS];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16 wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16 wwpn_prefix;
 	s32 mc_filter_type;
 	u32 mcft_size;
 	u32 num_rar_entries;
@@ -542,10 +709,17 @@ struct txgbe_mac_info {
 	bool set_lben;
 };
 
+enum txgbe_reset_type {
+	TXGBE_LAN_RESET = 0,
+	TXGBE_SW_RESET,
+	TXGBE_GLOBAL_RESET
+};
+
 struct txgbe_hw {
 	u8 __iomem *hw_addr;
 	struct txgbe_mac_info mac;
 	struct txgbe_addr_filter_info addr_ctrl;
+	struct txgbe_eeprom_info eeprom;
 	struct txgbe_bus_info bus;
 	u16 device_id;
 	u16 vendor_id;
@@ -553,6 +727,7 @@ struct txgbe_hw {
 	u16 subsystem_vendor_id;
 	u8 revision_id;
 	bool adapter_stopped;
+	enum txgbe_reset_type reset_type;
 	u16 oem_ssid;
 	u16 oem_svid;
 };
@@ -647,6 +822,9 @@ rd32(struct txgbe_hw *hw, u32 reg)
 	return val;
 }
 
+#define rd32a(a, reg, offset) ( \
+	rd32((a), (reg) + ((offset) << 2)))
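+/* reads the 32-bit array register at reg + 4 * offset */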
+
 static inline u32
 rd32m(struct txgbe_hw *hw, u32 reg, u32 mask)
 {
@@ -681,6 +859,9 @@ wr32(struct txgbe_hw *hw, u32 reg, u32 val)
 	txgbe_wr32(base + reg, val);
 }
 
+#define wr32a(a, reg, off, val) \
+	wr32((a), (reg) + ((off) << 2), (val))
+
 static inline void
 wr32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field)
 {
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 05/16] net: txgbe: Identify PHY and SFP module
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (3 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 04/16] net: txgbe: Add operations to interact with firmware Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 06/16] net: txgbe: Initialize service task Jiawen Wu
                   ` (10 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Add support for getting the media type and identifying the physical
layer module, with I2C access to SFP module EEPROMs.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
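Note: a rough sketch of how the pieces added here fit together
(illustrative only; names are from this series):

	/* txgbe_probe() -> mac.ops.reset_hw -> phy.ops.init */
	txgbe_init_i2c(hw);                  /* program the I2C master */
	err = TCALL(hw, phy.ops.identify);   /* media type, then SFP ID */
	if (err == TXGBE_ERR_SFP_NOT_SUPPORTED)
		return err;                  /* probe refuses to load */
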
 .../device_drivers/ethernet/wangxun/txgbe.rst |  38 ++
 drivers/net/ethernet/wangxun/txgbe/Makefile   |   2 +-
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 172 ++++++++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |   6 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   |  33 +-
 .../net/ethernet/wangxun/txgbe/txgbe_phy.c    | 373 ++++++++++++++++++
 .../net/ethernet/wangxun/txgbe/txgbe_phy.h    |  52 +++
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   | 200 ++++++++++
 8 files changed, 874 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h

diff --git a/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst b/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
index eaa87dbe8848..037d8538e848 100644
--- a/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
+++ b/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
@@ -11,9 +11,47 @@ Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
 Contents
 ========
 
+- Identifying Your Adapter
 - Support
 
 
+Identifying Your Adapter
+========================
+The driver is compatible with WangXun Sapphire dual-port Ethernet adapters.
+
+SFP+ Devices with Pluggable Optics
+----------------------------------
+The following is a list of 3rd party SFP+ modules that have been tested and verified.
+
++----------+----------------------+----------------------+
+| Supplier | Type                 | Part Numbers         |
++==========+======================+======================+
+| Avago    | SFP+                 | AFBR-709SMZ          |
++----------+----------------------+----------------------+
+| F-tone   | SFP+                 | FTCS-851X-02D        |
++----------+----------------------+----------------------+
+| Finisar  | SFP+                 | FTLX8574D3BCL        |
++----------+----------------------+----------------------+
+| Hasense  | SFP+                 | AFBR-709SMZ          |
++----------+----------------------+----------------------+
+| HGTECH   | SFP+                 | MTRS-01X11-G         |
++----------+----------------------+----------------------+
+| HP       | SFP+                 | SR SFP+ 456096-001   |
++----------+----------------------+----------------------+
+| Huawei   | SFP+                 | AFBR-709SMZ          |
++----------+----------------------+----------------------+
+| Intel    | SFP+                 | FTLX8571D3BCV-IT     |
++----------+----------------------+----------------------+
+| JDSU     | SFP+                 | PLRXPL-SC-S43        |
++----------+----------------------+----------------------+
+| SONT     | SFP+                 | XP-8G10-01           |
++----------+----------------------+----------------------+
+| Trixon   | SFP+                 | TPS-TGM3-85DCR       |
++----------+----------------------+----------------------+
+| WTD      | SFP+                 | RTXM228-551          |
++----------+----------------------+----------------------+
+
+
 Support
 =======
 If you got any problem, contact Wangxun support team via support@trustnetic.com
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 78484c58b78b..875704a29c4c 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -7,4 +7,4 @@
 obj-$(CONFIG_TXGBE) += txgbe.o
 
 txgbe-objs := txgbe_main.o \
-              txgbe_hw.o
+              txgbe_hw.o txgbe_phy.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 34a7c8dad0e4..240c19c20e2c 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -3,6 +3,7 @@
 
 #include "txgbe_type.h"
 #include "txgbe_hw.h"
+#include "txgbe_phy.h"
 #include "txgbe.h"
 
 #define TXGBE_SP_MAX_TX_QUEUES  128
@@ -12,6 +13,22 @@
 static s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw);
 static void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw);
 
+u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr)
+{
+	unsigned int offset;
+	u32 data;
+
+	/* Set the LAN port indicator to offset[1] */
+	/* 1st, write the offset to IDA_ADDR register */
+	offset = TXGBE_XPCS_IDA_ADDR;
+	wr32(hw, offset, addr);
+
+	/* 2nd, read the data from IDA_DATA register */
+	offset = TXGBE_XPCS_IDA_DATA;
+	data = rd32(hw, offset);
+
+	return data;
+}
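+
+/* Write counterpart (sketch, assuming the same indirect IDA window as
+ * the read above: latch the address, then write the data register).
+ */
+void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data)
+{
+	/* 1st, write the offset to IDA_ADDR register */
+	wr32(hw, TXGBE_XPCS_IDA_ADDR, addr);
+
+	/* 2nd, write the data to IDA_DATA register */
+	wr32(hw, TXGBE_XPCS_IDA_DATA, data);
+}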
+
 s32 txgbe_init_hw(struct txgbe_hw *hw)
 {
 	s32 status;
@@ -1238,6 +1255,25 @@ int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
 	return err;
 }
 
+/**
+ *  txgbe_init_phy_ops - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during init_shared_code because the PHY/SFP type was
+ *  not known. Perform the SFP init if necessary.
+ **/
+s32 txgbe_init_phy_ops(struct txgbe_hw *hw)
+{
+	s32 ret_val = 0;
+
+	txgbe_init_i2c(hw);
+	/* Identify the PHY or SFP module */
+	ret_val = TCALL(hw, phy.ops.identify);
+
+	return ret_val;
+}
+
 /**
  *  txgbe_init_ops - Inits func ptrs and MAC type
  *  @hw: pointer to hardware structure
@@ -1248,8 +1284,16 @@ int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
 s32 txgbe_init_ops(struct txgbe_hw *hw)
 {
 	struct txgbe_mac_info *mac = &hw->mac;
+	struct txgbe_phy_info *phy = &hw->phy;
 	struct txgbe_eeprom_info *eeprom = &hw->eeprom;
 
+	/* PHY */
+	phy->ops.read_i2c_byte = txgbe_read_i2c_byte;
+	phy->ops.read_i2c_eeprom = txgbe_read_i2c_eeprom;
+	phy->ops.identify_sfp = txgbe_identify_module;
+	phy->ops.identify = txgbe_identify_phy;
+	phy->ops.init = txgbe_init_phy_ops;
+
 	/* MAC */
 	mac->ops.init_hw = txgbe_init_hw;
 	mac->ops.get_mac_addr = txgbe_get_mac_addr;
@@ -1259,6 +1303,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	mac->ops.acquire_swfw_sync = txgbe_acquire_swfw_sync;
 	mac->ops.release_swfw_sync = txgbe_release_swfw_sync;
 	mac->ops.reset_hw = txgbe_reset_hw;
+	mac->ops.get_media_type = txgbe_get_media_type;
 	mac->ops.start_hw = txgbe_start_hw;
 	mac->ops.get_san_mac_addr = txgbe_get_san_mac_addr;
 	mac->ops.get_wwn_prefix = txgbe_get_wwn_prefix;
@@ -1292,10 +1337,51 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	return 0;
 }
 
+/**
+ *  txgbe_get_media_type - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+enum txgbe_media_type txgbe_get_media_type(struct txgbe_hw *hw)
+{
+	enum txgbe_media_type media_type;
+	u8 device_type = hw->subsystem_device_id & 0xF0;
+
+	switch (device_type) {
+	case TXGBE_ID_MAC_XAUI:
+	case TXGBE_ID_MAC_SGMII:
+	case TXGBE_ID_KR_KX_KX4:
+		/* Default device ID is mezzanine card KX/KX4 */
+		media_type = txgbe_media_type_backplane;
+		break;
+	case TXGBE_ID_SFP:
+		media_type = txgbe_media_type_fiber;
+		break;
+	case TXGBE_ID_XAUI:
+	case TXGBE_ID_SGMII:
+		media_type = txgbe_media_type_copper;
+		break;
+	case TXGBE_ID_SFI_XAUI:
+		if (hw->bus.lan_id == 0)
+			media_type = txgbe_media_type_fiber;
+		else
+			media_type = txgbe_media_type_copper;
+		break;
+	default:
+		media_type = txgbe_media_type_unknown;
+		break;
+	}
+
+	return media_type;
+}
+
 int txgbe_reset_misc(struct txgbe_hw *hw)
 {
 	int i;
 
+	txgbe_init_i2c(hw);
+
 	/* receive packets that size > 2048 */
 	wr32m(hw, TXGBE_MAC_RX_CFG,
 	      TXGBE_MAC_RX_CFG_JE, TXGBE_MAC_RX_CFG_JE);
@@ -1349,11 +1435,31 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
 	u32 reset = 0;
 	s32 status;
 
+	u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl, sr_an_mmd_adv_reg2;
+	u32 vr_xs_or_pcs_mmd_digi_ctl1, curr_vr_xs_or_pcs_mmd_digi_ctl1;
+	u32 curr_sr_an_mmd_ctl, curr_sr_an_mmd_adv_reg2;
+	u32 curr_sr_pcs_ctl, curr_sr_pma_mmd_ctl1;
+
 	/* Call adapter stop to disable tx/rx and clear interrupts */
 	status = TCALL(hw, mac.ops.stop_adapter);
 	if (status != 0)
 		goto reset_hw_out;
 
+	/* Identify PHY and related function pointers */
+	status = TCALL(hw, phy.ops.init);
+
+	if (status == TXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto reset_hw_out;
+
+	/* Remember internal phy regs from before we reset */
+	curr_sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2);
+	curr_sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1);
+	curr_sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL);
+	curr_sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw,
+						  TXGBE_SR_AN_MMD_ADV_REG2);
+	curr_vr_xs_or_pcs_mmd_digi_ctl1 =
+		txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1);
+
 	if (txgbe_mng_present(hw)) {
 		if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) ||
 		      ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) {
@@ -1383,6 +1489,38 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
 	if (status != 0)
 		goto reset_hw_out;
 
+	/* On the first reset, capture the current (default) values as
+	 * the original link settings.  On subsequent resets, preserve
+	 * the values read just before the reset, since the reset
+	 * operation sets the registers back to defaults.
+	 */
+	sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2);
+	sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1);
+	sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL);
+	sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2);
+	vr_xs_or_pcs_mmd_digi_ctl1 =
+		txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1);
+
+	if (!hw->mac.orig_link_settings_stored) {
+		hw->mac.orig_sr_pcs_ctl2 = sr_pcs_ctl;
+		hw->mac.orig_sr_pma_mmd_ctl1 = sr_pma_mmd_ctl1;
+		hw->mac.orig_sr_an_mmd_ctl = sr_an_mmd_ctl;
+		hw->mac.orig_sr_an_mmd_adv_reg2 = sr_an_mmd_adv_reg2;
+		hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 =
+						vr_xs_or_pcs_mmd_digi_ctl1;
+		hw->mac.orig_link_settings_stored = true;
+	} else {
+		hw->mac.orig_sr_pcs_ctl2 = curr_sr_pcs_ctl;
+		hw->mac.orig_sr_pma_mmd_ctl1 = curr_sr_pma_mmd_ctl1;
+		hw->mac.orig_sr_an_mmd_ctl = curr_sr_an_mmd_ctl;
+		hw->mac.orig_sr_an_mmd_adv_reg2 =
+					curr_sr_an_mmd_adv_reg2;
+		hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 =
+					curr_vr_xs_or_pcs_mmd_digi_ctl1;
+	}
+
+	/* make sure PHY power is up */
+	msleep(100);
+
 	/* Store the permanent mac address */
 	TCALL(hw, mac.ops.get_mac_addr, hw->mac.perm_addr);
 
@@ -1431,6 +1569,9 @@ s32 txgbe_start_hw(struct txgbe_hw *hw)
 	int ret_val = 0;
 	u32 i;
 
+	/* Set the media type */
+	hw->phy.media_type = TCALL(hw, mac.ops.get_media_type);
+
 	/* Clear the rate limiters */
 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
 		wr32(hw, TXGBE_TDM_RP_IDX, i);
@@ -1447,6 +1588,37 @@ s32 txgbe_start_hw(struct txgbe_hw *hw)
 	return ret_val;
 }
 
+/**
+ *  txgbe_identify_phy - Get physical layer module
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the physical layer module found on the current adapter.
+ *  If PHY already detected, maintains current PHY type in hw struct,
+ *  otherwise executes the PHY detection routine.
+ **/
+s32 txgbe_identify_phy(struct txgbe_hw *hw)
+{
+	/* Detect PHY if not unknown - returns success if already detected. */
+	s32 status = TXGBE_ERR_PHY_ADDR_INVALID;
+
+	if (!hw->phy.phy_semaphore_mask)
+		hw->phy.phy_semaphore_mask = TXGBE_MNG_SWFW_SYNC_SW_PHY;
+
+	hw->phy.media_type = TCALL(hw, mac.ops.get_media_type);
+	if (hw->phy.media_type == txgbe_media_type_fiber) {
+		status = txgbe_identify_module(hw);
+	} else {
+		hw->phy.type = txgbe_phy_none;
+		status = 0;
+	}
+
+	/* Return error if SFP module has been detected but is not supported */
+	if (hw->phy.type == txgbe_phy_sfp_unsupported)
+		return TXGBE_ERR_SFP_NOT_SUPPORTED;
+
+	return status;
+}
+
 /**
  *  txgbe_init_eeprom_params - Initialize EEPROM params
  *  @hw: pointer to hardware structure
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index 4871429fc0fc..eaa1a6fe4dd7 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -58,8 +58,11 @@ s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw);
 s32 txgbe_disable_rx(struct txgbe_hw *hw);
 int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit);
 
+enum txgbe_media_type txgbe_get_media_type(struct txgbe_hw *hw);
 int txgbe_reset_misc(struct txgbe_hw *hw);
 s32 txgbe_reset_hw(struct txgbe_hw *hw);
+s32 txgbe_identify_phy(struct txgbe_hw *hw);
+s32 txgbe_init_phy_ops(struct txgbe_hw *hw);
 s32 txgbe_init_ops(struct txgbe_hw *hw);
 
 s32 txgbe_init_eeprom_params(struct txgbe_hw *hw);
@@ -70,6 +73,9 @@ s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw,
 				u16 offset, u16 words, u16 *data);
 s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset, u16 *data);
 s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset, u16 *data);
+u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr);
+void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data);
+void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data);
 
 u8 fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr);
 u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 948d78d178ae..da5193c871b3 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -11,6 +11,7 @@
 
 #include "txgbe.h"
 #include "txgbe_hw.h"
+#include "txgbe_phy.h"
 
 char txgbe_driver_name[] = "txgbe";
 
@@ -31,6 +32,8 @@ static const struct pci_device_id txgbe_pci_tbl[] = {
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static bool txgbe_is_sfp(struct txgbe_hw *hw);
+
 static void txgbe_check_minimum_link(struct txgbe_adapter *adapter)
 {
 	struct txgbe_hw *hw = &adapter->hw;
@@ -165,6 +168,16 @@ static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter)
 	txgbe_sync_mac_table(adapter);
 }
 
+static bool txgbe_is_sfp(struct txgbe_hw *hw)
+{
+	switch (hw->phy.media_type) {
+	case txgbe_media_type_fiber:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static void txgbe_up_complete(struct txgbe_adapter *adapter)
 {
 	txgbe_get_hw_control(adapter);
@@ -183,6 +196,8 @@ void txgbe_reset(struct txgbe_adapter *adapter)
 	err = TCALL(hw, mac.ops.init_hw);
 	switch (err) {
 	case 0:
+	case TXGBE_ERR_SFP_NOT_PRESENT:
+	case TXGBE_ERR_SFP_NOT_SUPPORTED:
 		break;
 	case TXGBE_ERR_MASTER_REQUESTS_PENDING:
 		dev_err(&adapter->pdev->dev, "master disable timed out\n");
@@ -540,7 +555,15 @@ static int txgbe_probe(struct pci_dev *pdev,
 		goto err_free_mac_table;
 
 	err = TCALL(hw, mac.ops.reset_hw);
-	if (err) {
+	if (err == TXGBE_ERR_SFP_NOT_PRESENT) {
+		err = 0;
+	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
+		dev_err(&pdev->dev,
+			"failed to load because an unsupported SFP+ module type was detected.\n");
+		dev_err(&pdev->dev,
+			"Reload the driver after installing a supported module.\n");
+		goto err_free_mac_table;
+	} else if (err) {
 		dev_err(&pdev->dev, "HW Init failed: %d\n", err);
 		goto err_free_mac_table;
 	}
@@ -639,6 +662,14 @@ static int txgbe_probe(struct pci_dev *pdev,
 	err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH);
 	if (err)
 		strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH);
+	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
+		netif_info(adapter, probe, netdev,
+			   "PHY: %d, SFP+: %d, PBA No: %s\n",
+			   hw->phy.type, hw->phy.sfp_type, part_str);
+	else
+		netif_info(adapter, probe, netdev,
+			   "PHY: %d, PBA No: %s\n",
+			   hw->phy.type, part_str);
 
 	netif_info(adapter, probe, netdev, "%02x:%02x:%02x:%02x:%02x:%02x\n",
 		   netdev->dev_addr[0], netdev->dev_addr[1],
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
new file mode 100644
index 000000000000..f3099103110b
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include "txgbe_phy.h"
+
+/**
+ *  txgbe_identify_module - Identifies module type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines HW type and calls appropriate function.
+ **/
+s32 txgbe_identify_module(struct txgbe_hw *hw)
+{
+	s32 status = TXGBE_ERR_SFP_NOT_PRESENT;
+
+	switch (hw->phy.media_type) {
+	case txgbe_media_type_fiber:
+		status = txgbe_identify_sfp_module(hw);
+		break;
+
+	default:
+		hw->phy.sfp_type = txgbe_sfp_type_not_present;
+		status = TXGBE_ERR_SFP_NOT_PRESENT;
+		break;
+	}
+
+	return status;
+}
+
+/**
+ *  txgbe_identify_sfp_module - Identifies SFP modules
+ *  @hw: pointer to hardware structure
+ *
+ *  Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 txgbe_identify_sfp_module(struct txgbe_hw *hw)
+{
+	s32 status = TXGBE_ERR_PHY_ADDR_INVALID;
+	u32 vendor_oui = 0;
+	u8 identifier = 0;
+	u8 comp_codes_1g = 0;
+	u8 comp_codes_10g = 0;
+	u8 oui_bytes[3] = {0, 0, 0};
+	u8 cable_tech = 0;
+	u8 cable_spec = 0;
+
+	/* LAN ID is needed for I2C access */
+	txgbe_init_i2c(hw);
+	status = TCALL(hw, phy.ops.read_i2c_eeprom,
+		       TXGBE_SFF_IDENTIFIER,
+		       &identifier);
+
+	if (status != 0)
+		goto err_read_i2c_eeprom;
+
+	if (identifier != TXGBE_SFF_IDENTIFIER_SFP) {
+		hw->phy.type = txgbe_phy_sfp_unsupported;
+		status = TXGBE_ERR_SFP_NOT_SUPPORTED;
+	} else {
+		status = TCALL(hw, phy.ops.read_i2c_eeprom,
+			       TXGBE_SFF_1GBE_COMP_CODES,
+			       &comp_codes_1g);
+
+		if (status != 0)
+			goto err_read_i2c_eeprom;
+
+		status = TCALL(hw, phy.ops.read_i2c_eeprom,
+			       TXGBE_SFF_10GBE_COMP_CODES,
+			       &comp_codes_10g);
+
+		if (status != 0)
+			goto err_read_i2c_eeprom;
+		status = TCALL(hw, phy.ops.read_i2c_eeprom,
+			       TXGBE_SFF_CABLE_TECHNOLOGY,
+			       &cable_tech);
+
+		if (status != 0)
+			goto err_read_i2c_eeprom;
+
+		 /* ID Module
+		  * =========
+		  * 0   SFP_DA_CU
+		  * 1   SFP_SR
+		  * 2   SFP_LR
+		  * 3   SFP_DA_CORE0
+		  * 4   SFP_DA_CORE1
+		  * 5   SFP_SR/LR_CORE0
+		  * 6   SFP_SR/LR_CORE1
+		  * 7   SFP_act_lmt_DA_CORE0
+		  * 8   SFP_act_lmt_DA_CORE1
+		  * 9   SFP_1g_cu_CORE0
+		  * 10  SFP_1g_cu_CORE1
+		  * 11  SFP_1g_sx_CORE0
+		  * 12  SFP_1g_sx_CORE1
+		  */
+		{
+			if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						     txgbe_sfp_type_da_cu_core0;
+				else
+					hw->phy.sfp_type =
+						     txgbe_sfp_type_da_cu_core1;
+			} else if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) {
+				TCALL(hw, phy.ops.read_i2c_eeprom,
+				      TXGBE_SFF_CABLE_SPEC_COMP,
+				      &cable_spec);
+				if (cable_spec &
+				    TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+					if (hw->bus.lan_id == 0)
+						hw->phy.sfp_type =
+						txgbe_sfp_type_da_act_lmt_core0;
+					else
+						hw->phy.sfp_type =
+						txgbe_sfp_type_da_act_lmt_core1;
+				} else {
+					hw->phy.sfp_type =
+							txgbe_sfp_type_unknown;
+				}
+			} else if (comp_codes_10g &
+				   (TXGBE_SFF_10GBASESR_CAPABLE |
+				    TXGBE_SFF_10GBASELR_CAPABLE)) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						      txgbe_sfp_type_srlr_core0;
+				else
+					hw->phy.sfp_type =
+						      txgbe_sfp_type_srlr_core1;
+			} else if (comp_codes_1g & TXGBE_SFF_1GBASET_CAPABLE) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						txgbe_sfp_type_1g_cu_core0;
+				else
+					hw->phy.sfp_type =
+						txgbe_sfp_type_1g_cu_core1;
+			} else if (comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						txgbe_sfp_type_1g_sx_core0;
+				else
+					hw->phy.sfp_type =
+						txgbe_sfp_type_1g_sx_core1;
+			} else if (comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						txgbe_sfp_type_1g_lx_core0;
+				else
+					hw->phy.sfp_type =
+						txgbe_sfp_type_1g_lx_core1;
+			} else {
+				hw->phy.sfp_type = txgbe_sfp_type_unknown;
+			}
+		}
+
+		/* Determine if the SFP+ PHY is dual speed or not. */
+		hw->phy.multispeed_fiber = false;
+		if (((comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) &&
+		     (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)) ||
+		    ((comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) &&
+		     (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE)))
+			hw->phy.multispeed_fiber = true;
+
+		/* Determine PHY vendor */
+		if (hw->phy.type != txgbe_phy_nl) {
+			status = TCALL(hw, phy.ops.read_i2c_eeprom,
+				       TXGBE_SFF_VENDOR_OUI_BYTE0,
+				       &oui_bytes[0]);
+
+			if (status != 0)
+				goto err_read_i2c_eeprom;
+
+			status = TCALL(hw, phy.ops.read_i2c_eeprom,
+				       TXGBE_SFF_VENDOR_OUI_BYTE1,
+				       &oui_bytes[1]);
+
+			if (status != 0)
+				goto err_read_i2c_eeprom;
+
+			status = TCALL(hw, phy.ops.read_i2c_eeprom,
+				       TXGBE_SFF_VENDOR_OUI_BYTE2,
+				       &oui_bytes[2]);
+
+			if (status != 0)
+				goto err_read_i2c_eeprom;
+
+			vendor_oui =
+			  ((oui_bytes[0] << TXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+			   (oui_bytes[1] << TXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+			   (oui_bytes[2] << TXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+			switch (vendor_oui) {
+			case TXGBE_SFF_VENDOR_OUI_TYCO:
+				if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE)
+					hw->phy.type =
+						    txgbe_phy_sfp_passive_tyco;
+				break;
+			case TXGBE_SFF_VENDOR_OUI_FTL:
+				if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE)
+					hw->phy.type = txgbe_phy_sfp_ftl_active;
+				else
+					hw->phy.type = txgbe_phy_sfp_ftl;
+				break;
+			case TXGBE_SFF_VENDOR_OUI_AVAGO:
+				hw->phy.type = txgbe_phy_sfp_avago;
+				break;
+			case TXGBE_SFF_VENDOR_OUI_INTEL:
+				hw->phy.type = txgbe_phy_sfp_intel;
+				break;
+			default:
+				if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE)
+					hw->phy.type =
+						 txgbe_phy_sfp_passive_unknown;
+				else if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE)
+					hw->phy.type =
+						txgbe_phy_sfp_active_unknown;
+				else
+					hw->phy.type = txgbe_phy_sfp_unknown;
+				break;
+			}
+		}
+
+		/* Allow any DA cable vendor */
+		if (cable_tech & (TXGBE_SFF_DA_PASSIVE_CABLE |
+		    TXGBE_SFF_DA_ACTIVE_CABLE)) {
+			status = 0;
+			goto out;
+		}
+
+		/* Verify supported 1G SFP modules */
+		if (comp_codes_10g == 0 &&
+		    !(hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 ||
+		      hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 ||
+		      hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 ||
+		      hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 ||
+		      hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 ||
+		      hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1)) {
+			hw->phy.type = txgbe_phy_sfp_unsupported;
+			status = TXGBE_ERR_SFP_NOT_SUPPORTED;
+			goto out;
+		}
+	}
+
+out:
+	return status;
+
+err_read_i2c_eeprom:
+	hw->phy.sfp_type = txgbe_sfp_type_not_present;
+	if (hw->phy.type != txgbe_phy_nl)
+		hw->phy.type = txgbe_phy_unknown;
+
+	return TXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+s32 txgbe_init_i2c(struct txgbe_hw *hw)
+{
+	wr32(hw, TXGBE_I2C_ENABLE, 0);
+
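+	/* Master-only, standard-speed mode; the SS_SCL_HCNT/LCNT counts
+	 * below set the SCL timing (the resulting bus rate depends on
+	 * the I2C reference clock).
+	 */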
+	wr32(hw, TXGBE_I2C_CON,
+	     (TXGBE_I2C_CON_MASTER_MODE |
+	      TXGBE_I2C_CON_SPEED(1) |
+	      TXGBE_I2C_CON_RESTART_EN |
+	      TXGBE_I2C_CON_SLAVE_DISABLE));
+	/* Default address is 0xA0; bit 0 selects read/write */
+	wr32(hw, TXGBE_I2C_TAR, TXGBE_I2C_SLAVE_ADDR);
+	wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 600);
+	wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 600);
+	wr32(hw, TXGBE_I2C_RX_TL, 0); /* 1byte for rx full signal */
+	wr32(hw, TXGBE_I2C_TX_TL, 4);
+	wr32(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT, 0xFFFFFF);
+	wr32(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT, 0xFFFFFF);
+
+	wr32(hw, TXGBE_I2C_INTR_MASK, 0);
+	wr32(hw, TXGBE_I2C_ENABLE, 1);
+	return 0;
+}
+
+/**
+ *  txgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+			  u8 *eeprom_data)
+{
+	return TCALL(hw, phy.ops.read_i2c_byte, byte_offset,
+		     TXGBE_I2C_EEPROM_DEV_ADDR,
+		     eeprom_data);
+}
+
+/**
+ *  txgbe_read_i2c_byte_int - Reads 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @dev_addr: device address
+ *  @data: value read
+ *  @lock: true to take and release the semaphore
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ **/
+static s32 txgbe_read_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset,
+				   u8 __maybe_unused dev_addr, u8 *data, bool lock)
+{
+	s32 status = 0;
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+
+	if (lock && TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask) != 0)
+		return TXGBE_ERR_SWFW_SYNC;
+
+	/* wait tx empty */
+	status = txgbe_po32m(hw, TXGBE_I2C_RAW_INTR_STAT,
+			     TXGBE_I2C_INTR_STAT_TX_EMPTY,
+			     TXGBE_I2C_INTR_STAT_TX_EMPTY,
+			     TXGBE_I2C_TIMEOUT, 10);
+	if (status != 0)
+		goto out;
+
+	/* read data */
+	wr32(hw, TXGBE_I2C_DATA_CMD,
+	     byte_offset | TXGBE_I2C_DATA_CMD_STOP);
+	wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ);
+
+	/* wait for read complete */
+	status = txgbe_po32m(hw, TXGBE_I2C_RAW_INTR_STAT,
+			     TXGBE_I2C_INTR_STAT_RX_FULL,
+			     TXGBE_I2C_INTR_STAT_RX_FULL,
+			     TXGBE_I2C_TIMEOUT, 10);
+	if (status != 0)
+		goto out;
+
+	*data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD);
+
+out:
+	if (lock)
+		TCALL(hw, mac.ops.release_swfw_sync, swfw_mask);
+	return status;
+}
+
+/**
+ *  txgbe_switch_i2c_slave_addr - Switch I2C slave address
+ *  @hw: pointer to hardware structure
+ *  @dev_addr: slave address to switch to
+ **/
+s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr)
+{
+	wr32(hw, TXGBE_I2C_ENABLE, 0);
+	wr32(hw, TXGBE_I2C_TAR, dev_addr >> 1);
+	wr32(hw, TXGBE_I2C_ENABLE, 1);
+	return 0;
+}
+
+/**
+ *  txgbe_read_i2c_byte - Reads 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @dev_addr: device address
+ *  @data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ **/
+s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
+			u8 dev_addr, u8 *data)
+{
+	txgbe_switch_i2c_slave_addr(hw, dev_addr);
+
+	return txgbe_read_i2c_byte_int(hw, byte_offset, dev_addr,
+				       data, true);
+}
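+
+/* Usage sketch (illustrative): reading the SFF identifier byte from a
+ * module EEPROM at the conventional 0xA0 address:
+ *
+ *	u8 id;
+ *	s32 err;
+ *
+ *	err = txgbe_read_i2c_byte(hw, TXGBE_SFF_IDENTIFIER,
+ *				  TXGBE_I2C_EEPROM_DEV_ADDR, &id);
+ *	if (!err && id == TXGBE_SFF_IDENTIFIER_SFP)
+ *		... the module is SFP/SFP+ ...
+ */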
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
new file mode 100644
index 000000000000..7e172885f536
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_PHY_H_
+#define _TXGBE_PHY_H_
+
+#include "txgbe.h"
+
+#define TXGBE_I2C_EEPROM_DEV_ADDR       0xA0
+
+/* EEPROM byte offsets */
+#define TXGBE_SFF_IDENTIFIER            0x0
+#define TXGBE_SFF_IDENTIFIER_SFP        0x3
+#define TXGBE_SFF_VENDOR_OUI_BYTE0      0x25
+#define TXGBE_SFF_VENDOR_OUI_BYTE1      0x26
+#define TXGBE_SFF_VENDOR_OUI_BYTE2      0x27
+#define TXGBE_SFF_1GBE_COMP_CODES       0x6
+#define TXGBE_SFF_10GBE_COMP_CODES      0x3
+#define TXGBE_SFF_CABLE_TECHNOLOGY      0x8
+#define TXGBE_SFF_CABLE_SPEC_COMP       0x3C
+
+/* Bitmasks */
+#define TXGBE_SFF_DA_PASSIVE_CABLE      0x4
+#define TXGBE_SFF_DA_ACTIVE_CABLE       0x8
+#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING       0x4
+#define TXGBE_SFF_1GBASESX_CAPABLE      0x1
+#define TXGBE_SFF_1GBASELX_CAPABLE      0x2
+#define TXGBE_SFF_1GBASET_CAPABLE       0x8
+#define TXGBE_SFF_10GBASESR_CAPABLE     0x10
+#define TXGBE_SFF_10GBASELR_CAPABLE     0x20
+/* Bit-shift macros */
+#define TXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT        24
+#define TXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT        16
+#define TXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT        8
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define TXGBE_SFF_VENDOR_OUI_TYCO       0x00407600
+#define TXGBE_SFF_VENDOR_OUI_FTL        0x00906500
+#define TXGBE_SFF_VENDOR_OUI_AVAGO      0x00176A00
+#define TXGBE_SFF_VENDOR_OUI_INTEL      0x001B2100
+
+s32 txgbe_identify_module(struct txgbe_hw *hw);
+s32 txgbe_identify_sfp_module(struct txgbe_hw *hw);
+s32 txgbe_init_i2c(struct txgbe_hw *hw);
+s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr);
+s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
+			u8 dev_addr, u8 *data);
+
+s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+			  u8 *eeprom_data);
+
+#endif /* _TXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index b6abda615340..5539da638c09 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -54,6 +54,25 @@
 /* Revision ID */
 #define TXGBE_SP_MPW  1
 
+/* ETH PHY Registers */
+#define TXGBE_SR_XS_PCS_MMD_STATUS1             0x30001
+#define TXGBE_SR_PCS_CTL2                       0x30007
+#define TXGBE_SR_PMA_MMD_CTL1                   0x10000
+#define TXGBE_SR_AN_MMD_CTL                     0x70000
+#define TXGBE_SR_AN_MMD_ADV_REG1                0x70010
+#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE(_v)      ((0x3 & (_v)) << 10)
+#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM      0x400
+#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM      0x800
+#define TXGBE_SR_AN_MMD_ADV_REG2                0x70011
+#define TXGBE_SR_AN_MMD_LP_ABL1                 0x70013
+#define TXGBE_VR_AN_KR_MODE_CL                  0x78003
+#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1        0x38000
+#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS      0x38010
+
+#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R        0x0
+#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X        0x1
+#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK     0x3
+
 /**************** Global Registers ****************************/
 /* chip control Registers */
 #define TXGBE_MIS_RST                   0x1000C
@@ -161,6 +180,66 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_SPI_ECC_ST                0x10138
 #define TXGBE_SPI_ILDR_SWPTR            0x10124
 
+/************************* Port Registers ************************************/
+/* I2C registers */
+#define TXGBE_I2C_CON                   0x14900 /* I2C Control */
+#define TXGBE_I2C_CON_SLAVE_DISABLE     ((1 << 6))
+#define TXGBE_I2C_CON_RESTART_EN        ((1 << 5))
+#define TXGBE_I2C_CON_10BITADDR_MASTER  ((1 << 4))
+#define TXGBE_I2C_CON_10BITADDR_SLAVE   ((1 << 3))
+#define TXGBE_I2C_CON_SPEED(_v)         (((_v) & 0x3) << 1)
+#define TXGBE_I2C_CON_MASTER_MODE       ((1 << 0))
+#define TXGBE_I2C_TAR                   0x14904 /* I2C Target Address */
+#define TXGBE_I2C_DATA_CMD              0x14910 /* I2C Rx/Tx Data Buf and Cmd */
+#define TXGBE_I2C_DATA_CMD_STOP         ((1 << 9))
+#define TXGBE_I2C_DATA_CMD_READ         ((1 << 8) | TXGBE_I2C_DATA_CMD_STOP)
+#define TXGBE_I2C_DATA_CMD_WRITE        ((0 << 8) | TXGBE_I2C_DATA_CMD_STOP)
+#define TXGBE_I2C_SS_SCL_HCNT           0x14914 /* Standard speed I2C Clock SCL High Count */
+#define TXGBE_I2C_SS_SCL_LCNT           0x14918 /* Standard speed I2C Clock SCL Low Count */
+#define TXGBE_I2C_FS_SCL_HCNT           0x1491C
+#define TXGBE_I2C_FS_SCL_LCNT           0x14920
+#define TXGBE_I2C_HS_SCL_HCNT           0x14924 /* High speed I2C Clock SCL High Count */
+#define TXGBE_I2C_HS_SCL_LCNT           0x14928 /* High speed I2C Clock SCL Low Count */
+#define TXGBE_I2C_INTR_STAT             0x1492C /* I2C Interrupt Status */
+#define TXGBE_I2C_RAW_INTR_STAT         0x14934 /* I2C Raw Interrupt Status */
+#define TXGBE_I2C_INTR_STAT_RX_FULL     ((0x1) << 2)
+#define TXGBE_I2C_INTR_STAT_TX_EMPTY    ((0x1) << 4)
+#define TXGBE_I2C_INTR_MASK             0x14930 /* I2C Interrupt Mask */
+#define TXGBE_I2C_RX_TL                 0x14938 /* I2C Receive FIFO Threshold */
+#define TXGBE_I2C_TX_TL                 0x1493C /* I2C TX FIFO Threshold */
+#define TXGBE_I2C_CLR_INTR              0x14940 /* Clear Combined and Individual Int */
+#define TXGBE_I2C_CLR_RX_UNDER          0x14944 /* Clear RX_UNDER Interrupt */
+#define TXGBE_I2C_CLR_RX_OVER           0x14948 /* Clear RX_OVER Interrupt */
+#define TXGBE_I2C_CLR_TX_OVER           0x1494C /* Clear TX_OVER Interrupt */
+#define TXGBE_I2C_CLR_RD_REQ            0x14950 /* Clear RD_REQ Interrupt */
+#define TXGBE_I2C_CLR_TX_ABRT           0x14954 /* Clear TX_ABRT Interrupt */
+#define TXGBE_I2C_CLR_RX_DONE           0x14958 /* Clear RX_DONE Interrupt */
+#define TXGBE_I2C_CLR_ACTIVITY          0x1495C /* Clear ACTIVITY Interrupt */
+#define TXGBE_I2C_CLR_STOP_DET          0x14960 /* Clear STOP_DET Interrupt */
+#define TXGBE_I2C_CLR_START_DET         0x14964 /* Clear START_DET Interrupt */
+#define TXGBE_I2C_CLR_GEN_CALL          0x14968 /* Clear GEN_CALL Interrupt */
+#define TXGBE_I2C_ENABLE                0x1496C /* I2C Enable */
+#define TXGBE_I2C_STATUS                0x14970 /* I2C Status register */
+#define TXGBE_I2C_STATUS_MST_ACTIVITY   ((1U << 5))
+#define TXGBE_I2C_TXFLR                 0x14974 /* Transmit FIFO Level Reg */
+#define TXGBE_I2C_RXFLR                 0x14978 /* Receive FIFO Level Reg */
+#define TXGBE_I2C_SDA_HOLD              0x1497C /* SDA hold time length reg */
+#define TXGBE_I2C_TX_ABRT_SOURCE        0x14980 /* I2C TX Abort Status Reg */
+#define TXGBE_I2C_SDA_SETUP             0x14994 /* I2C SDA Setup Register */
+#define TXGBE_I2C_ENABLE_STATUS         0x1499C /* I2C Enable Status Register */
+#define TXGBE_I2C_FS_SPKLEN             0x149A0 /* SS and FS spike suppression limit */
+#define TXGBE_I2C_HS_SPKLEN             0x149A4 /* HS spike suppression limit */
+#define TXGBE_I2C_SCL_STUCK_TIMEOUT     0x149AC /* I2C SCL stuck at low timeout register */
+#define TXGBE_I2C_SDA_STUCK_TIMEOUT     0x149B0 /* I2C SDA stuck at low timeout */
+#define TXGBE_I2C_CLR_SCL_STUCK_DET     0x149B4 /* Clear SCL Stuck at Low Detect Interrupt */
+#define TXGBE_I2C_DEVICE_ID             0x149b8 /* I2C Device ID */
+#define TXGBE_I2C_COMP_PARAM_1          0x149f4 /* Component Parameter Reg */
+#define TXGBE_I2C_COMP_VERSION          0x149f8 /* Component Version ID */
+#define TXGBE_I2C_COMP_TYPE             0x149fc /* DesignWare Component Type Reg */
+
+#define TXGBE_I2C_SLAVE_ADDR            (0xA0 >> 1)
+#define TXGBE_I2C_THERMAL_SENSOR_ADDR   0xF8
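+
+/* Hypothetical usage sketch (illustration only, not part of this patch):
+ * the SFP module EEPROM sits behind the DesignWare I2C block above at
+ * the 7-bit address TXGBE_I2C_SLAVE_ADDR; byte 0x0 of that EEPROM is
+ * the SFF identifier:
+ *
+ *	u8 identifier;
+ *
+ *	txgbe_read_i2c_eeprom(hw, 0x0, &identifier);
+ */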
+
 /* port cfg Registers */
 #define TXGBE_CFG_PORT_CTL              0x14400
 #define TXGBE_CFG_PORT_ST               0x14404
@@ -295,6 +374,12 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_PSR_LAN_FLEX_DW_H(_i)     (0x15C04 + ((_i) * 16))
 #define TXGBE_PSR_LAN_FLEX_MSK(_i)      (0x15C08 + ((_i) * 16))
 #define TXGBE_PSR_LAN_FLEX_CTL  0x15CFC
+/************************************** ETH PHY ******************************/
+#define TXGBE_XPCS_IDA_ADDR    0x13000
+#define TXGBE_XPCS_IDA_DATA    0x13004
+#define TXGBE_ETHPHY_IDA_ADDR  0x13008
+#define TXGBE_ETHPHY_IDA_DATA  0x1300C
+
 /************************************** MNG ********************************/
 #define TXGBE_MNG_FW_SM         0x1E000
 #define TXGBE_MNG_SW_SM         0x1E004
@@ -584,6 +669,67 @@ enum txgbe_eeprom_type {
 	txgbe_eeprom_none /* No NVM support */
 };
 
+enum txgbe_phy_type {
+	txgbe_phy_unknown = 0,
+	txgbe_phy_none,
+	txgbe_phy_tn,
+	txgbe_phy_aq,
+	txgbe_phy_cu_unknown,
+	txgbe_phy_qt,
+	txgbe_phy_xaui,
+	txgbe_phy_nl,
+	txgbe_phy_sfp_passive_tyco,
+	txgbe_phy_sfp_passive_unknown,
+	txgbe_phy_sfp_active_unknown,
+	txgbe_phy_sfp_avago,
+	txgbe_phy_sfp_ftl,
+	txgbe_phy_sfp_ftl_active,
+	txgbe_phy_sfp_unknown,
+	txgbe_phy_sfp_intel,
+	txgbe_phy_sfp_unsupported, /* Enforce bit set with unsupported module */
+	txgbe_phy_generic
+};
+
+/* SFP+ module type IDs:
+ *
+ * ID   Module Type
+ * =============
+ * 0    SFP_DA_CU
+ * 1    SFP_SR
+ * 2    SFP_LR
+ * 3    SFP_DA_CU_CORE0
+ * 4    SFP_DA_CU_CORE1
+ * 5    SFP_SR/LR_CORE0
+ * 6    SFP_SR/LR_CORE1
+ */
+enum txgbe_sfp_type {
+	txgbe_sfp_type_da_cu = 0,
+	txgbe_sfp_type_sr = 1,
+	txgbe_sfp_type_lr = 2,
+	txgbe_sfp_type_da_cu_core0 = 3,
+	txgbe_sfp_type_da_cu_core1 = 4,
+	txgbe_sfp_type_srlr_core0 = 5,
+	txgbe_sfp_type_srlr_core1 = 6,
+	txgbe_sfp_type_da_act_lmt_core0 = 7,
+	txgbe_sfp_type_da_act_lmt_core1 = 8,
+	txgbe_sfp_type_1g_cu_core0 = 9,
+	txgbe_sfp_type_1g_cu_core1 = 10,
+	txgbe_sfp_type_1g_sx_core0 = 11,
+	txgbe_sfp_type_1g_sx_core1 = 12,
+	txgbe_sfp_type_1g_lx_core0 = 13,
+	txgbe_sfp_type_1g_lx_core1 = 14,
+	txgbe_sfp_type_not_present = 0xFFFE,
+	txgbe_sfp_type_unknown = 0xFFFF
+};
+
+enum txgbe_media_type {
+	txgbe_media_type_unknown = 0,
+	txgbe_media_type_fiber,
+	txgbe_media_type_copper,
+	txgbe_media_type_backplane,
+	txgbe_media_type_virtual
+};
+
 /* PCI bus types */
 enum txgbe_bus_type {
 	txgbe_bus_type_unknown = 0,
@@ -655,6 +801,7 @@ struct txgbe_mac_operations {
 	s32 (*init_hw)(struct txgbe_hw *hw);
 	s32 (*reset_hw)(struct txgbe_hw *hw);
 	s32 (*start_hw)(struct txgbe_hw *hw);
+	enum txgbe_media_type (*get_media_type)(struct txgbe_hw *hw);
 	s32 (*get_mac_addr)(struct txgbe_hw *hw, u8 *mac_addr);
 	s32 (*get_san_mac_addr)(struct txgbe_hw *hw, u8 *san_mac_addr);
 	s32 (*get_wwn_prefix)(struct txgbe_hw *hw, u16 *wwnn_prefix,
@@ -681,6 +828,16 @@ struct txgbe_mac_operations {
 	s32 (*init_thermal_sensor_thresh)(struct txgbe_hw *hw);
 };
 
+struct txgbe_phy_operations {
+	s32 (*identify)(struct txgbe_hw *hw);
+	s32 (*identify_sfp)(struct txgbe_hw *hw);
+	s32 (*init)(struct txgbe_hw *hw);
+	s32 (*read_i2c_byte)(struct txgbe_hw *hw, u8 byte_offset,
+			     u8 dev_addr, u8 *data);
+	s32 (*read_i2c_eeprom)(struct txgbe_hw *hw, u8 byte_offset,
+			       u8 *eeprom_data);
+};
+
 struct txgbe_eeprom_info {
 	struct txgbe_eeprom_operations ops;
 	enum txgbe_eeprom_type type;
@@ -703,12 +860,27 @@ struct txgbe_mac_info {
 	u32 num_rar_entries;
 	u32 max_tx_queues;
 	u32 max_rx_queues;
+	u32 orig_sr_pcs_ctl2;
+	u32 orig_sr_pma_mmd_ctl1;
+	u32 orig_sr_an_mmd_ctl;
+	u32 orig_sr_an_mmd_adv_reg2;
+	u32 orig_vr_xs_or_pcs_mmd_digi_ctl1;
 	u8  san_mac_rar_index;
+	bool orig_link_settings_stored;
 	bool autotry_restart;
 	struct txgbe_thermal_sensor_data  thermal_sensor_data;
 	bool set_lben;
 };
 
+struct txgbe_phy_info {
+	struct txgbe_phy_operations ops;
+	enum txgbe_phy_type type;
+	enum txgbe_sfp_type sfp_type;
+	enum txgbe_media_type media_type;
+	u32 phy_semaphore_mask;
+	bool multispeed_fiber;
+};
+
 enum txgbe_reset_type {
 	TXGBE_LAN_RESET = 0,
 	TXGBE_SW_RESET,
@@ -719,6 +891,7 @@ struct txgbe_hw {
 	u8 __iomem *hw_addr;
 	struct txgbe_mac_info mac;
 	struct txgbe_addr_filter_info addr_ctrl;
+	struct txgbe_phy_info phy;
 	struct txgbe_eeprom_info eeprom;
 	struct txgbe_bus_info bus;
 	u16 device_id;
@@ -879,6 +1052,33 @@ wr32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field)
 	txgbe_wr32(base + reg, val);
 }
 
+/* poll register */
+#define TXGBE_I2C_TIMEOUT  1000
+static inline s32
+txgbe_po32m(struct txgbe_hw *hw, u32 reg, u32 mask,
+	    u32 field, int usecs, int count)
+{
+	int loop;
+
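+	/* Default to polling roughly once every 10us when no explicit
+	 * iteration count is given, then spread the total usecs budget
+	 * evenly across the iterations.
+	 */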
+	loop = (count ? count : (usecs + 9) / 10);
+	usecs = (loop ? (usecs + loop - 1) / loop : 0);
+
+	do {
+		u32 value = rd32(hw, reg);
+
+		if ((value & mask) == (field & mask))
+			break;
+
+		if (loop-- <= 0)
+			break;
+
+		udelay(usecs);
+	} while (true);
+
+	return loop >= 0 ? 0 : TXGBE_ERR_TIMEOUT;
+}
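+
+/* Hypothetical usage sketch (illustration only, not part of this patch):
+ * wait up to TXGBE_I2C_TIMEOUT microseconds for the I2C TX FIFO to
+ * drain:
+ *
+ *	s32 err = txgbe_po32m(hw, TXGBE_I2C_RAW_INTR_STAT,
+ *			      TXGBE_I2C_INTR_STAT_TX_EMPTY,
+ *			      TXGBE_I2C_INTR_STAT_TX_EMPTY,
+ *			      TXGBE_I2C_TIMEOUT, 0);
+ */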
+
 #define TXGBE_WRITE_FLUSH(H) rd32(H, TXGBE_MIS_PWR)
 
 #endif /* _TXGBE_TYPE_H_ */
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 06/16] net: txgbe: Initialize service task
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (4 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 05/16] net: txgbe: Identify PHY and SFP module Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 07/16] net: txgbe: Support to setup link Jiawen Wu
                   ` (9 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Set up the work queue, and initialize the service task to process
subtasks that will be added in subsequent patches.
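
At this point the task only reacts to surprise removal; later patches in
the series hook their subtasks into it. The scheduling flow, roughly:

	txgbe_service_timer()                  /* re-arms itself every 2s */
	  txgbe_service_event_schedule()       /* test_and_set SCHED bit */
	    queue_work(txgbe_wq, &adapter->service_task)
	      txgbe_service_task()             /* run the subtasks */
	        txgbe_service_event_complete() /* clear SCHED bit */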

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  17 +++
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 124 +++++++++++++++++-
 2 files changed, 137 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index d0ea817e2f42..397241df4078 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -36,6 +36,8 @@ struct txgbe_adapter {
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 
+	unsigned long state;
+
 	/* Tx fast path data */
 	int num_tx_queues;
 
@@ -46,6 +48,9 @@ struct txgbe_adapter {
 	struct txgbe_hw hw;
 	u16 msg_enable;
 
+	struct timer_list service_timer;
+	struct work_struct service_task;
+
 	char eeprom_id[32];
 	bool netdev_registered;
 
@@ -53,7 +58,19 @@ struct txgbe_adapter {
 
 };
 
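+/* Driver state bits, set and tested atomically in adapter->state. */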
+enum txgbe_state_t {
+	__TXGBE_TESTING,
+	__TXGBE_RESETTING,
+	__TXGBE_DOWN,
+	__TXGBE_HANGING,
+	__TXGBE_DISABLED,
+	__TXGBE_REMOVING,
+	__TXGBE_SERVICE_SCHED,
+	__TXGBE_SERVICE_INITED,
+};
+
 /* needed by txgbe_main.c */
+void txgbe_service_event_schedule(struct txgbe_adapter *adapter);
 void txgbe_assign_netdev_ops(struct net_device *netdev);
 
 int txgbe_open(struct net_device *netdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index da5193c871b3..30bac8a049df 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -32,6 +32,8 @@ static const struct pci_device_id txgbe_pci_tbl[] = {
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static struct workqueue_struct *txgbe_wq;
+
 static bool txgbe_is_sfp(struct txgbe_hw *hw);
 
 static void txgbe_check_minimum_link(struct txgbe_adapter *adapter)
@@ -81,6 +83,24 @@ static inline int txgbe_enumerate_functions(struct txgbe_adapter *adapter)
 	return physfns;
 }
 
+void txgbe_service_event_schedule(struct txgbe_adapter *adapter)
+{
+	if (!test_bit(__TXGBE_DOWN, &adapter->state) &&
+	    !test_bit(__TXGBE_REMOVING, &adapter->state) &&
+	    !test_and_set_bit(__TXGBE_SERVICE_SCHED, &adapter->state))
+		queue_work(txgbe_wq, &adapter->service_task);
+}
+
+static void txgbe_service_event_complete(struct txgbe_adapter *adapter)
+{
+	if (WARN_ON(!test_bit(__TXGBE_SERVICE_SCHED, &adapter->state)))
+		return;
+
+	/* flush memory to make sure state is correct before next watchdog */
+	smp_mb__before_atomic();
+	clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state);
+}
+
 static void txgbe_remove_adapter(struct txgbe_hw *hw)
 {
 	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
@@ -89,6 +109,8 @@ static void txgbe_remove_adapter(struct txgbe_hw *hw)
 		return;
 	hw->hw_addr = NULL;
 	dev_info(&adapter->pdev->dev, "Adapter removed\n");
+	if (test_bit(__TXGBE_SERVICE_INITED, &adapter->state))
+		txgbe_service_event_schedule(adapter);
 }
 
 static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev)
@@ -221,6 +243,10 @@ void txgbe_disable_device(struct txgbe_adapter *adapter)
 	struct txgbe_hw *hw = &adapter->hw;
 	u32 i;
 
+	/* signal that we are down to the interrupt handler */
+	if (test_and_set_bit(__TXGBE_DOWN, &adapter->state))
+		return; /* do nothing if already down */
+
 	txgbe_disable_pcie_master(hw);
 	/* disable receives */
 	TCALL(hw, mac.ops.disable_rx);
@@ -228,6 +254,8 @@ void txgbe_disable_device(struct txgbe_adapter *adapter)
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
+	del_timer_sync(&adapter->service_timer);
+
 	if (hw->bus.lan_id == 0)
 		wr32m(hw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN0_UP, 0);
 	else if (hw->bus.lan_id == 1)
@@ -320,6 +348,8 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
 		return err;
 	}
 
+	set_bit(__TXGBE_DOWN, &adapter->state);
+
 	return 0;
 }
 
@@ -391,7 +421,8 @@ static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
 
 	txgbe_release_hw_control(adapter);
 
-	pci_disable_device(pdev);
+	if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state))
+		pci_disable_device(pdev);
 }
 
 static void txgbe_shutdown(struct pci_dev *pdev)
@@ -406,6 +437,41 @@ static void txgbe_shutdown(struct pci_dev *pdev)
 	}
 }
 
+static void txgbe_service_timer(struct timer_list *t)
+{
+	struct txgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+	unsigned long next_event_offset;
+
+	next_event_offset = HZ * 2;
+
+	/* Reset the timer */
+	mod_timer(&adapter->service_timer, next_event_offset + jiffies);
+
+	txgbe_service_event_schedule(adapter);
+}
+
+/**
+ * txgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void txgbe_service_task(struct work_struct *work)
+{
+	struct txgbe_adapter *adapter = container_of(work,
+						     struct txgbe_adapter,
+						     service_task);
+	if (TXGBE_REMOVED(adapter->hw.hw_addr)) {
+		if (!test_bit(__TXGBE_DOWN, &adapter->state)) {
+			rtnl_lock();
+			txgbe_down(adapter);
+			rtnl_unlock();
+		}
+		txgbe_service_event_complete(adapter);
+		return;
+	}
+
+	txgbe_service_event_complete(adapter);
+}
+
 /**
  * txgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
  * netdev->dev_addr_list
@@ -479,6 +545,7 @@ static int txgbe_probe(struct pci_dev *pdev,
 	struct txgbe_adapter *adapter = NULL;
 	struct txgbe_hw *hw = NULL;
 	struct net_device *netdev;
+	bool disable_dev = false;
 	int err, expected_gts;
 
 	u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
@@ -537,6 +604,7 @@ static int txgbe_probe(struct pci_dev *pdev,
 	hw->hw_addr = adapter->io_addr;
 
 	txgbe_assign_netdev_ops(netdev);
+	netdev->watchdog_timeo = 5 * HZ;
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
 	/* setup the private structure */
@@ -588,6 +656,12 @@ static int txgbe_probe(struct pci_dev *pdev,
 
 	txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
 
+	timer_setup(&adapter->service_timer, txgbe_service_timer, 0);
+
+	INIT_WORK(&adapter->service_task, txgbe_service_task);
+	set_bit(__TXGBE_SERVICE_INITED, &adapter->state);
+	clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state);
+
 	/* Save off EEPROM version number and Option Rom version which
 	 * together make a unique identify for the eeprom
 	 */
@@ -689,11 +763,13 @@ static int txgbe_probe(struct pci_dev *pdev,
 err_free_mac_table:
 	kfree(adapter->mac_table);
 err_pci_release_regions:
+	disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state);
 	pci_disable_pcie_error_reporting(pdev);
 	pci_release_selected_regions(pdev,
 				     pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_disable_dev:
-	pci_disable_device(pdev);
+	if (!adapter || disable_dev)
+		pci_disable_device(pdev);
 	return err;
 }
 
@@ -710,8 +786,11 @@ static void txgbe_remove(struct pci_dev *pdev)
 {
 	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev;
+	bool disable_dev;
 
 	netdev = adapter->netdev;
+	set_bit(__TXGBE_REMOVING, &adapter->state);
+	cancel_work_sync(&adapter->service_task);
 
 	/* remove the added san mac */
 	txgbe_del_sanmac_netdev(netdev);
@@ -727,10 +806,12 @@ static void txgbe_remove(struct pci_dev *pdev)
 				     pci_select_bars(pdev, IORESOURCE_MEM));
 
 	kfree(adapter->mac_table);
+	disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state);
 
 	pci_disable_pcie_error_reporting(pdev);
 
-	pci_disable_device(pdev);
+	if (disable_dev)
+		pci_disable_device(pdev);
 }
 
 u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg)
@@ -755,7 +836,42 @@ static struct pci_driver txgbe_driver = {
 	.shutdown = txgbe_shutdown,
 };
 
-module_pci_driver(txgbe_driver);
+/**
+ * txgbe_init_module - Driver Registration Routine
+ *
+ * txgbe_init_module is the first routine called when the driver is
+ * loaded. It creates the driver workqueue and registers with the
+ * PCI subsystem.
+ **/
+static int __init txgbe_init_module(void)
+{
+	int ret;
+
+	txgbe_wq = create_singlethread_workqueue(txgbe_driver_name);
+	if (!txgbe_wq) {
+		pr_err("%s: Failed to create workqueue\n", txgbe_driver_name);
+		return -ENOMEM;
+	}
+
+	ret = pci_register_driver(&txgbe_driver);
+	if (ret)
+		destroy_workqueue(txgbe_wq);
+
+	return ret;
+}
+
+module_init(txgbe_init_module);
+
+/**
+ * txgbe_exit_module - Driver Exit Cleanup Routine
+ *
+ * txgbe_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit txgbe_exit_module(void)
+{
+	pci_unregister_driver(&txgbe_driver);
+	if (txgbe_wq)
+		destroy_workqueue(txgbe_wq);
+}
+
+module_exit(txgbe_exit_module);
 
 MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
 MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 07/16] net: txgbe: Support to setup link
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (5 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 06/16] net: txgbe: Initialize service task Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 08/16] net: txgbe: Add interrupt support Jiawen Wu
                   ` (8 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Get link capabilities, set up the MAC and PHY link, and support
enabling and disabling the Tx laser.
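
For multispeed fiber modules, the MAC link is brought up by walking the
supported speeds from highest to lowest in software, since 10G SFI has
no speed autonegotiation; the Tx laser is flapped between attempts so
the link partner restarts its autotry session. In outline (see
txgbe_setup_mac_link_multispeed_fiber()):

	for each speed in {10GbE full, 1GbE full}:
		skip the speed if the link is already up at it
		setup_mac_link(speed)
		flap_tx_laser()
		poll check_link(), up to ~500ms at 10G
		stop at the first speed that links up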

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 .../device_drivers/ethernet/wangxun/txgbe.rst |    5 +
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |   25 +
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 1314 ++++++++++++++++-
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |   21 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   |  359 ++++-
 .../net/ethernet/wangxun/txgbe/txgbe_phy.c    |   23 +
 .../net/ethernet/wangxun/txgbe/txgbe_phy.h    |    2 +
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   |  201 +++
 8 files changed, 1946 insertions(+), 4 deletions(-)

diff --git a/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst b/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
index 037d8538e848..3c7656057c69 100644
--- a/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
+++ b/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
@@ -51,6 +51,11 @@ The following is a list of 3rd party SFP+ modules that have been tested and veri
 | WTD      | SFP+                 | RTXM228-551          |
 +----------+----------------------+----------------------+
 
+Laser turns off for SFP+ when ifconfig ethX down
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"ifconfig ethX down" turns off the laser for SFP+ fiber adapters.
+"ifconfig ethX up" turns on the laser.
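+
+For example::
+
+    ifconfig ethX down    # SFP+ laser turns off
+    ifconfig ethX up      # SFP+ laser turns on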
+
 
 Support
 =======
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 397241df4078..0f06efbcfef5 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -7,6 +7,7 @@
 #include <net/ip.h>
 #include <linux/pci.h>
 #include <linux/etherdevice.h>
+#include <linux/timecounter.h>
 
 #include "txgbe_type.h"
 
@@ -29,6 +30,22 @@ struct txgbe_mac_addr {
 #define TXGBE_MAC_STATE_MODIFIED        0x2
 #define TXGBE_MAC_STATE_IN_USE          0x4
 
+/* default to trying for four seconds */
+#define TXGBE_TRY_LINK_TIMEOUT  (4 * HZ)
+#define TXGBE_SFP_POLL_JIFFIES  (2 * HZ)        /* SFP poll every 2 seconds */
+
+/* txgbe_adapter.flags */
+#define TXGBE_FLAG_NEED_LINK_UPDATE             BIT(0)
+#define TXGBE_FLAG_NEED_LINK_CONFIG             BIT(1)
+
+/* txgbe_adapter.flags2 */
+#define TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED     BIT(0)
+#define TXGBE_FLAG2_SFP_NEEDS_RESET             BIT(1)
+
 /* board specific private data structure */
 struct txgbe_adapter {
 	u8 __iomem *io_addr;    /* Mainly for iounmap use */
@@ -38,6 +55,8 @@ struct txgbe_adapter {
 
 	unsigned long state;
 
+	u32 flags;
+	u32 flags2;
 	/* Tx fast path data */
 	int num_tx_queues;
 
@@ -48,6 +67,11 @@ struct txgbe_adapter {
 	struct txgbe_hw hw;
 	u16 msg_enable;
 
+	u32 link_speed;
+	bool link_up;
+	unsigned long sfp_poll_time;
+	unsigned long link_check_timeout;
+
 	struct timer_list service_timer;
 	struct work_struct service_task;
 
@@ -67,6 +91,7 @@ enum txgbe_state_t {
 	__TXGBE_REMOVING,
 	__TXGBE_SERVICE_SCHED,
 	__TXGBE_SERVICE_INITED,
+	__TXGBE_IN_SFP_INIT,
 };
 
 /* needed by txgbe_main.c */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 240c19c20e2c..89a67b158fa5 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -29,6 +29,34 @@ u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr)
 	return data;
 }
 
+void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data)
+{
+	unsigned int offset;
+
+	/* Set the LAN port indicator to offset[1] */
+	/* 1st, write the register address to the IDA_ADDR register */
+	offset = TXGBE_ETHPHY_IDA_ADDR;
+	wr32(hw, offset, addr);
+
+	/* 2nd, write the data to the IDA_DATA register */
+	offset = TXGBE_ETHPHY_IDA_DATA;
+	wr32(hw, offset, data);
+}
+
+void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data)
+{
+	unsigned int offset;
+
+	/* Set the LAN port indicator to offset[1] */
+	/* 1st, write the register address to the IDA_ADDR register */
+	offset = TXGBE_XPCS_IDA_ADDR;
+	wr32(hw, offset, addr);
+
+	/* 2nd, write the data to the IDA_DATA register */
+	offset = TXGBE_XPCS_IDA_DATA;
+	wr32(hw, offset, data);
+}
+
 s32 txgbe_init_hw(struct txgbe_hw *hw)
 {
 	s32 status;
@@ -1104,11 +1132,12 @@ s32 txgbe_reset_hostif(struct txgbe_hw *hw)
 			continue;
 
 		if (reset_cmd.hdr.cmd_or_resp.ret_status ==
-		    FW_CEM_RESP_STATUS_SUCCESS)
+		    FW_CEM_RESP_STATUS_SUCCESS) {
 			status = 0;
-		else
+			hw->link_status = TXGBE_LINK_STATUS_NONE;
+		} else {
 			status = TXGBE_ERR_HOST_INTERFACE_COMMAND;
-
+		}
 		break;
 	}
 
@@ -1232,6 +1261,141 @@ bool txgbe_check_mng_access(struct txgbe_hw *hw)
 	return true;
 }
 
+/**
+ *  txgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Set the link speed in the MAC and/or PHY register and restarts link.
+ **/
+s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
+					  u32 speed,
+					  bool autoneg_wait_to_complete)
+{
+	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+	u32 highest_link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+	s32 status = 0;
+	u32 speedcnt = 0;
+	u32 i = 0;
+	bool autoneg, link_up = false;
+
+	/* Mask off requested but non-supported speeds */
+	status = TCALL(hw, mac.ops.get_link_capabilities,
+		       &link_speed, &autoneg);
+	if (status != 0)
+		return status;
+
+	speed &= link_speed;
+
+	/* Try each speed one by one, highest priority first.  We do this in
+	 * software because 10Gb fiber doesn't support speed autonegotiation.
+	 */
+	if (speed & TXGBE_LINK_SPEED_10GB_FULL) {
+		speedcnt++;
+		highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		status = TCALL(hw, mac.ops.check_link,
+			       &link_speed, &link_up, false);
+		if (status != 0)
+			return status;
+
+		if (link_speed == TXGBE_LINK_SPEED_10GB_FULL && link_up)
+			goto out;
+
+		/* Allow module to change analog characteristics (1G->10G) */
+		msleep(40);
+
+		status = TCALL(hw, mac.ops.setup_mac_link,
+			       TXGBE_LINK_SPEED_10GB_FULL,
+			       autoneg_wait_to_complete);
+		if (status != 0)
+			return status;
+
+		/* Flap the Tx laser if it has not already been done */
+		TCALL(hw, mac.ops.flap_tx_laser);
+
+		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
+		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+		 * attempted.  sapphire uses the same timing for 10g SFI.
+		 */
+		for (i = 0; i < 5; i++) {
+			/* Wait for the link partner to also set speed */
+			msleep(100);
+
+			/* If we have link, just jump out */
+			status = TCALL(hw, mac.ops.check_link,
+				       &link_speed, &link_up, false);
+			if (status != 0)
+				return status;
+
+			if (link_up)
+				goto out;
+		}
+	}
+
+	if (speed & TXGBE_LINK_SPEED_1GB_FULL) {
+		speedcnt++;
+		if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN)
+			highest_link_speed = TXGBE_LINK_SPEED_1GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		status = TCALL(hw, mac.ops.check_link,
+			       &link_speed, &link_up, false);
+		if (status != 0)
+			return status;
+
+		if (link_speed == TXGBE_LINK_SPEED_1GB_FULL && link_up)
+			goto out;
+
+		/* Allow module to change analog characteristics (10G->1G) */
+		msleep(40);
+
+		status = TCALL(hw, mac.ops.setup_mac_link,
+			       TXGBE_LINK_SPEED_1GB_FULL,
+			       autoneg_wait_to_complete);
+		if (status != 0)
+			return status;
+
+		/* Flap the Tx laser if it has not already been done */
+		TCALL(hw, mac.ops.flap_tx_laser);
+
+		/* Wait for the link partner to also set speed */
+		msleep(100);
+
+		/* If we have link, just jump out */
+		status = TCALL(hw, mac.ops.check_link,
+			       &link_speed, &link_up, false);
+		if (status != 0)
+			return status;
+
+		if (link_up)
+			goto out;
+	}
+
+	/* We didn't get link.  Configure back to the highest speed we tried
+	 * (if there was more than one).  We call ourselves back with just the
+	 * single highest speed that the user requested.
+	 */
+	if (speedcnt > 1)
+		status = txgbe_setup_mac_link_multispeed_fiber(hw,
+							       highest_link_speed,
+							       autoneg_wait_to_complete);
+
+out:
+	/* Set autoneg_advertised value based on input link speed */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & TXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL;
+
+	if (speed & TXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL;
+
+	return status;
+}
+
 int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
 {
 	u32 i = 0, reg = 0;
@@ -1255,6 +1419,34 @@ int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
 	return err;
 }
 
+void txgbe_init_mac_link_ops(struct txgbe_hw *hw)
+{
+	struct txgbe_mac_info *mac = &hw->mac;
+
+	/* enable the laser control functions for SFP+ fiber
+	 * and MNG not enabled
+	 */
+	if (hw->phy.media_type == txgbe_media_type_fiber) {
+		mac->ops.disable_tx_laser = txgbe_disable_tx_laser_multispeed_fiber;
+		mac->ops.enable_tx_laser = txgbe_enable_tx_laser_multispeed_fiber;
+		mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber;
+	} else {
+		mac->ops.disable_tx_laser = NULL;
+		mac->ops.enable_tx_laser = NULL;
+		mac->ops.flap_tx_laser = NULL;
+	}
+
+	if (hw->phy.multispeed_fiber) {
+		/* Set up dual speed SFP+ support */
+		mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber;
+		mac->ops.setup_mac_link = txgbe_setup_mac_link;
+		mac->ops.set_rate_select_speed = txgbe_set_hard_rate_select_speed;
+	} else {
+		mac->ops.setup_link = txgbe_setup_mac_link;
+		mac->ops.set_rate_select_speed = txgbe_set_hard_rate_select_speed;
+	}
+}
+
 /**
  *  txgbe_init_phy_ops - PHY/SFP specific init
  *  @hw: pointer to hardware structure
@@ -1270,7 +1462,13 @@ s32 txgbe_init_phy_ops(struct txgbe_hw *hw)
 	txgbe_init_i2c(hw);
 	/* Identify the PHY or SFP module */
 	ret_val = TCALL(hw, phy.ops.identify);
+	if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto init_phy_ops_out;
 
+	/* Setup function pointers based on detected SFP module and speeds */
+	txgbe_init_mac_link_ops(hw);
+
+init_phy_ops_out:
 	return ret_val;
 }
 
@@ -1317,6 +1515,9 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	mac->ops.set_vmdq_san_mac = txgbe_set_vmdq_san_mac;
 	mac->ops.init_uta_tables = txgbe_init_uta_tables;
 
+	/* Link */
+	mac->ops.get_link_capabilities = txgbe_get_link_capabilities;
+	mac->ops.check_link = txgbe_check_mac_link;
 	mac->num_rar_entries    = TXGBE_SP_RAR_ENTRIES;
 	mac->max_rx_queues      = TXGBE_SP_MAX_RX_QUEUES;
 	mac->max_tx_queues      = TXGBE_SP_MAX_TX_QUEUES;
@@ -1337,6 +1538,124 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	return 0;
 }
 
+/**
+ *  txgbe_get_link_capabilities - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: true when autoneg or autotry is enabled
+ **/
+s32 txgbe_get_link_capabilities(struct txgbe_hw *hw,
+				u32 *speed,
+				bool *autoneg)
+{
+	s32 status = 0;
+	u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl;
+	u32 sr_an_mmd_adv_reg2;
+
+	/* Check if 1G SFP module. */
+	if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 ||
+	    hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 ||
+	    hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 ||
+	    hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 ||
+	    hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 ||
+	    hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1) {
+		*speed = TXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = false;
+	} else if (hw->phy.multispeed_fiber) {
+		*speed = TXGBE_LINK_SPEED_10GB_FULL |
+			  TXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+	/* SFP */
+	} else if (hw->phy.media_type == txgbe_media_type_fiber) {
+		*speed = TXGBE_LINK_SPEED_10GB_FULL;
+		*autoneg = false;
+	/* SGMII */
+	} else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_SGMII) {
+		*speed = TXGBE_LINK_SPEED_1GB_FULL |
+			TXGBE_LINK_SPEED_100_FULL |
+			TXGBE_LINK_SPEED_10_FULL;
+		*autoneg = false;
+		hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_T |
+				TXGBE_PHYSICAL_LAYER_100BASE_TX;
+	/* MAC XAUI */
+	} else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_XAUI) {
+		*speed = TXGBE_LINK_SPEED_10GB_FULL;
+		*autoneg = false;
+		hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+	/* MAC SGMII */
+	} else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) {
+		*speed = TXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = false;
+		hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_KX;
+	} else { /* KR KX KX4 */
+		/* Determine link capabilities based on the stored value,
+		 * which represents EEPROM defaults.  If value has not
+		 * been stored, use the current register values.
+		 */
+		if (hw->mac.orig_link_settings_stored) {
+			sr_pcs_ctl = hw->mac.orig_sr_pcs_ctl2;
+			sr_pma_mmd_ctl1 = hw->mac.orig_sr_pma_mmd_ctl1;
+			sr_an_mmd_ctl = hw->mac.orig_sr_an_mmd_ctl;
+			sr_an_mmd_adv_reg2 = hw->mac.orig_sr_an_mmd_adv_reg2;
+		} else {
+			sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2);
+			sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw,
+							  TXGBE_SR_PMA_MMD_CTL1);
+			sr_an_mmd_ctl = txgbe_rd32_epcs(hw,
+							TXGBE_SR_AN_MMD_CTL);
+			sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw,
+							     TXGBE_SR_AN_MMD_ADV_REG2);
+		}
+
+		if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) ==
+				TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X &&
+		    (sr_pma_mmd_ctl1 & TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK) ==
+				TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G &&
+		    (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) {
+			/* 1G or KX - no backplane auto-negotiation */
+			*speed = TXGBE_LINK_SPEED_1GB_FULL;
+			*autoneg = false;
+			hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		} else if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) ==
+				TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X &&
+			   (sr_pma_mmd_ctl1 & TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK) ==
+				TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G &&
+			   (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) {
+			*speed = TXGBE_LINK_SPEED_10GB_FULL;
+			*autoneg = false;
+			hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+		} else if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) ==
+				TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R &&
+			   (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) {
+			/* 10 GbE serial link (KR -no backplane auto-negotiation) */
+			*speed = TXGBE_LINK_SPEED_10GB_FULL;
+			*autoneg = false;
+			hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR;
+		} else if (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) {
+			/* KX/KX4/KR backplane auto-negotiation enable */
+			*speed = TXGBE_LINK_SPEED_UNKNOWN;
+			if (sr_an_mmd_adv_reg2 & TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR)
+				*speed |= TXGBE_LINK_SPEED_10GB_FULL;
+			if (sr_an_mmd_adv_reg2 & TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4)
+				*speed |= TXGBE_LINK_SPEED_10GB_FULL;
+			if (sr_an_mmd_adv_reg2 & TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX)
+				*speed |= TXGBE_LINK_SPEED_1GB_FULL;
+			*autoneg = true;
+			hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR |
+					TXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+					TXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		} else {
+			status = TXGBE_ERR_LINK_SETUP;
+			goto out;
+		}
+	}
+
+out:
+	return status;
+}
+
 /**
  *  txgbe_get_media_type - Get media type
  *  @hw: pointer to hardware structure
@@ -1376,12 +1695,949 @@ enum txgbe_media_type txgbe_get_media_type(struct txgbe_hw *hw)
 	return media_type;
 }
 
+/**
+ *  txgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively shutting down the Tx
+ *  laser on the PHY, effectively halting physical link.
+ **/
+s32 txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+{
+	u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR);
+
+	if (hw->phy.media_type != txgbe_media_type_fiber)
+		return 0;
+
+	/* Blocked by MNG FW so bail */
+	if (txgbe_check_reset_blocked(hw))
+		return 0;
+
+	/* Disable Tx laser; allow 100us to go dark per spec */
+	esdp_reg |= TXGBE_GPIO_DR_1 | TXGBE_GPIO_DR_0;
+	wr32(hw, TXGBE_GPIO_DR, esdp_reg);
+	TXGBE_WRITE_FLUSH(hw);
+	usleep_range(100, 200);
+
+	return 0;
+}
+
+/**
+ *  txgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively turning on the Tx
+ *  laser on the PHY, effectively starting physical link.
+ **/
+s32 txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+{
+	if (TCALL(hw, mac.ops.get_media_type) != txgbe_media_type_fiber)
+		return 0;
+
+	/* Enable Tx laser; allow 100ms to light up */
+	wr32m(hw, TXGBE_GPIO_DR,
+	      TXGBE_GPIO_DR_0 | TXGBE_GPIO_DR_1, 0);
+	TXGBE_WRITE_FLUSH(hw);
+	msleep(100);
+
+	return 0;
+}
+
+/**
+ *  txgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  When the driver changes the link speeds that it can support,
+ *  it sets autotry_restart to true to indicate that we need to
+ *  initiate a new autotry session with the link partner.  To do
+ *  so, we set the speed then disable and re-enable the Tx laser, to
+ *  alert the link partner that it also needs to restart autotry on its
+ *  end.  This is consistent with true clause 37 autoneg, which also
+ *  involves a loss of signal.
+ **/
+s32 txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+{
+	/* Blocked by MNG FW so bail */
+	if (txgbe_check_reset_blocked(hw))
+		return 0;
+
+	if (hw->mac.autotry_restart) {
+		txgbe_disable_tx_laser_multispeed_fiber(hw);
+		txgbe_enable_tx_laser_multispeed_fiber(hw);
+		hw->mac.autotry_restart = false;
+	}
+
+	return 0;
+}
+
+/**
+ *  txgbe_set_hard_rate_select_speed - Set module link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: link speed to set
+ *
+ *  Set module link speed via RS0/RS1 rate select pins.
+ */
+s32 txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw,
+				     u32 speed)
+{
+	u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR);
+
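+	/* GPIO data bits 4 and 5 drive the module's RS0/RS1 rate-select
+	 * pins: both high selects the 10G data rate, both low selects 1G.
+	 */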
+	switch (speed) {
+	case TXGBE_LINK_SPEED_10GB_FULL:
+		esdp_reg |= TXGBE_GPIO_DR_5 | TXGBE_GPIO_DR_4;
+		break;
+	case TXGBE_LINK_SPEED_1GB_FULL:
+		esdp_reg &= ~(TXGBE_GPIO_DR_5 | TXGBE_GPIO_DR_4);
+		break;
+	default:
+		txgbe_dbg(hw, "Invalid fixed module speed\n");
+		return 0;
+	}
+
+	wr32(hw, TXGBE_GPIO_DDR,
+	     TXGBE_GPIO_DDR_5 | TXGBE_GPIO_DDR_4 |
+	     TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_0);
+
+	wr32(hw, TXGBE_GPIO_DR, esdp_reg);
+
+	TXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw)
+{
+	u32 value;
+
+	txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0x3002);
+	/* for sgmii + external phy, set to 0x0105 (phy sgmii mode) */
+	/* for sgmii direct link, set to 0x010c (mac sgmii mode) */
+	if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII ||
+	    hw->phy.media_type == txgbe_media_type_fiber) {
+		txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x010c);
+	} else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_SGMII ||
+		   (hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) {
+		txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x0105);
+	}
+	txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_DIGI_CTL, 0x0200);
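+	/* MII control: enable autoneg (bit 12) and restart it (bit 9) */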
+	value = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL);
+	value = (value & ~0x1200) | (0x1 << 12) | (0x1 << 9);
+	txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, value);
+	return 0;
+}
+
+s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg)
+{
+	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
+	s32 status = 0;
+	u32 i;
+
+	/* 1. Wait xpcs power-up good */
+	for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) {
+		if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) &
+		    TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) ==
+		    TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD)
+			break;
+		msleep(20);
+	}
+	if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) {
+		status = TXGBE_ERR_XPCS_POWER_UP_FAILED;
+		goto out;
+	}
+	dev_info(&adapter->pdev->dev, "Setting link to KR\n");
+
+	txgbe_wr32_epcs(hw, 0x78001, 0x7);
+
+	/* 2. Disable xpcs AN-73 */
+	txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000);
+	txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x1);
+
+	/* 3. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register */
+	/* Bit[10:0](MPLLA_BANDWIDTH) = 11'd123 (default: 11'd16) */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3,
+			TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR);
+
+	/* 4. Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register */
+	/* Bit[12:8](RX_VREF_CTRL) = 5'hF (default: 5'h11) */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00);
+
+	/* 5. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register */
+	/* Bit[15:8](VGA1/2_GAIN_0) = 8'h77,
+	 * Bit[7:5](CTLE_POLE_0) = 3'h2
+	 * Bit[4:0](CTLE_BOOST_0) = 4'hA
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774A);
+
+	/* 6. Set VR_MII_Gen5_12G_RX_GENCTRL3 Register */
+	/* Bit[2:0](LOS_TRSHLD_0) = 3'h4 (default: 3) */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, 0x0004);
+	/* 7. Initialize the mode by setting VR XS or PCS MMD Digital */
+	/* Control1 Register Bit[15](VR_RST) */
+	txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000);
+	/* wait phy initialization done */
+	for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) {
+		if ((txgbe_rd32_epcs(hw,
+				     TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) &
+		    TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0)
+			break;
+		msleep(100);
+	}
+	if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) {
+		status = TXGBE_ERR_PHY_INIT_NOT_DONE;
+		goto out;
+	}
+
+out:
+	return status;
+}
+
+s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg)
+{
+	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
+	s32 status = 0;
+	u32 value;
+	u32 i;
+
+	/* check link status, if already set, skip setting it again */
+	if (hw->link_status == TXGBE_LINK_STATUS_KX4)
+		goto out;
+
+	dev_info(&adapter->pdev->dev, "Setting link to KX4\n");
+
+	/* 1. Wait xpcs power-up good */
+	for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) {
+		if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) &
+			TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) ==
+			TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD)
+			break;
+		msleep(20);
+	}
+	if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) {
+		status = TXGBE_ERR_XPCS_POWER_UP_FAILED;
+		goto out;
+	}
+
+	wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE);
+
+	/* 2. Disable xpcs AN-73 */
+	if (!autoneg)
+		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0);
+	else
+		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000);
+
+	/* Disable PHY MPLLA for eth mode change (after ECO) */
+	txgbe_wr32_ephy(hw, 0x4, 0x250A);
+	TXGBE_WRITE_FLUSH(hw);
+	msleep(20);
+
+	/* Set the eth change_mode bit first in mis_rst register
+	 * for corresponding LAN port
+	 */
+	if (hw->bus.lan_id == 0)
+		wr32(hw, TXGBE_MIS_RST,
+		     TXGBE_MIS_RST_LAN0_CHG_ETH_MODE);
+	else
+		wr32(hw, TXGBE_MIS_RST,
+		     TXGBE_MIS_RST_LAN1_CHG_ETH_MODE);
+
+	/* Set SR PCS Control2 Register Bits[1:0] = 2'b01
+	 * PCS_TYPE_SEL: non KR
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2,
+			TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X);
+	/* Set SR PMA MMD Control1 Register Bit[13] = 1'b1  SS13: 10G speed */
+	txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1,
+			TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G);
+
+	value = (0xf5f0 & ~0x7F0) | (0x5 << 8) | (0x7 << 5) | 0xF0;
+	txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value);
+
+	if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_XAUI)
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00);
+	else
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00);
+
+	for (i = 0; i < 4; i++) {
+		if (i == 0)
+			value = (0x45 & ~0xFFFF) | (0x7 << 12) |
+				(0x7 << 8) | 0x6;
+		else
+			value = (0xff06 & ~0xFFFF) | (0x7 << 12) |
+				(0x7 << 8) | 0x6;
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value);
+	}
+
+	value = 0x0 & ~0x7777;
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value);
+
+	txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0);
+
+	value = (0x6db & ~0xFFF) | (0x1 << 9) | (0x1 << 6) | (0x1 << 3) | 0x1;
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA */
+	/* Control 0 Register Bit[7:0] = 8'd40  MPLLA_MULTIPLIER */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0,
+			TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER);
+	/* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA */
+	/* Control 3 Register Bit[10:0] = 11'd86  MPLLA_BANDWIDTH */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3,
+			TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */
+	/* Calibration Load 0 Register  Bit[12:0] = 13'd1360 VCO_LD_VAL_0 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, TXGBE_PHY_VCO_CAL_LD0_OTHER);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */
+	/* Calibration Load 1 Register  Bit[12:0] = 13'd1360 VCO_LD_VAL_1 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1, TXGBE_PHY_VCO_CAL_LD0_OTHER);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */
+	/* Calibration Load 2 Register  Bit[12:0] = 13'd1360 VCO_LD_VAL_2 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2, TXGBE_PHY_VCO_CAL_LD0_OTHER);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */
+	/* Calibration Load 3 Register  Bit[12:0] = 13'd1360 VCO_LD_VAL_3 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3, TXGBE_PHY_VCO_CAL_LD0_OTHER);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */
+	/* Calibration Reference 0 Register Bit[5:0] = 6'd34 VCO_REF_LD_0/1 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x2222);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */
+	/* Calibration Reference 1 Register Bit[5:0] = 6'd34 VCO_REF_LD_2/3 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, 0x2222);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE */
+	/* Enable Register Bit[7:0] = 8'd0  AFE_EN_0/3_1, DFE_EN_0/3_1 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, 0x0);
+
+	/* Set  VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx */
+	/* Equalization Control 4 Register Bit[3:0] = 4'd0 CONT_ADAPT_0/3_1 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, 0x00F0);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate */
+	/* Control Register Bit[14:12], Bit[10:8], Bit[6:4], Bit[2:0],
+	 * all rates to 3'b010  TX0/1/2/3_RATE
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0x2222);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate */
+	/* Control Register Bit[13:12], Bit[9:8], Bit[5:4], Bit[1:0],
+	 * all rates to 2'b10  RX0/1/2/3_RATE
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0x2222);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General */
+	/* Control 2 Register Bit[15:8] = 2'b01  TX0/1/2/3_WIDTH: 10bits */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x5500);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General */
+	/* Control 2 Register Bit[15:8] = 2'b01  RX0/1/2/3_WIDTH: 10bits */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x5500);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control
+	 * 2 Register Bit[10:8] = 3'b010
+	 * MPLLA_DIV16P5_CLK_EN=0, MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2,
+			TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10);
+
+	txgbe_wr32_epcs(hw, 0x1f0000, 0x0);
+	txgbe_wr32_epcs(hw, 0x1f8001, 0x0);
+	txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_DIGI_CTL, 0x0);
+
+	/* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1
+	 * Register Bit[15](VR_RST)
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000);
+
+	/* wait phy initialization done */
+	for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) {
+		if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) &
+			TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0)
+			break;
+		msleep(100);
+	}
+
+	/* if success, set link status */
+	hw->link_status = TXGBE_LINK_STATUS_KX4;
+
+	if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) {
+		status = TXGBE_ERR_PHY_INIT_NOT_DONE;
+		goto out;
+	}
+
+out:
+	return status;
+}
+
+s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, u32 speed, bool autoneg)
+{
+	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
+	s32 status = 0;
+	u32 wdata = 0;
+	u32 value;
+	u32 i;
+
+	/* check link status, if already set, skip setting it again */
+	if (hw->link_status == TXGBE_LINK_STATUS_KX)
+		goto out;
+
+	dev_info(&adapter->pdev->dev, "Setting link to KX, speed = 0x%x\n", speed);
+
+	/* 1. Wait xpcs power-up good */
+	for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) {
+		if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) &
+			TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) ==
+			TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD)
+			break;
+		msleep(20);
+	}
+	if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) {
+		status = TXGBE_ERR_XPCS_POWER_UP_FAILED;
+		goto out;
+	}
+
+	wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE);
+
+	/* 2. Disable xpcs AN-73 */
+	if (!autoneg)
+		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0);
+	else
+		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000);
+
+	/* Disable PHY MPLLA for eth mode change (after ECO) */
+	txgbe_wr32_ephy(hw, 0x4, 0x240A);
+	TXGBE_WRITE_FLUSH(hw);
+	msleep(20);
+
+	/* Set the eth change_mode bit first in mis_rst register */
+	/* for corresponding LAN port */
+	if (hw->bus.lan_id == 0)
+		wr32(hw, TXGBE_MIS_RST,
+		     TXGBE_MIS_RST_LAN0_CHG_ETH_MODE);
+	else
+		wr32(hw, TXGBE_MIS_RST,
+		     TXGBE_MIS_RST_LAN1_CHG_ETH_MODE);
+
+	/* Set SR PCS Control2 Register Bits[1:0] = 2'b01
+	 * PCS_TYPE_SEL: non KR
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2,
+			TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X);
+
+	/* Set SR PMA MMD Control1 Register Bit[13] = 1'b0 SS13: 1G speed */
+	txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1,
+			TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G);
+
+	/* Set SR MII MMD Control Register to corresponding speed: {Bit[6],
+	 * Bit[13]}=[2'b00,2'b01,2'b10]->[10M,100M,1G]
+	 */
+	if (speed == TXGBE_LINK_SPEED_100_FULL)
+		wdata = 0x2100;
+	else if (speed == TXGBE_LINK_SPEED_1GB_FULL)
+		wdata = 0x0140;
+	else if (speed == TXGBE_LINK_SPEED_10_FULL)
+		wdata = 0x0100;
+	txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, wdata);
+
+	value = (0xf5f0 & ~0x710) | (0x5 << 8) | 0x10;
+	txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value);
+
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00);
+
+	for (i = 0; i < 4; i++) {
+		if (i)
+			value = 0xff06;
+		else
+			value = (0x45 & ~0xFFFF) | (0x7 << 12) |
+				(0x7 << 8) | 0x6;
+
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value);
+	}
+
+	value = 0x0 & ~0x7;
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value);
+
+	txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0);
+
+	value = (0x6db & ~0x7) | 0x4;
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control
+	 * 0 Register Bit[7:0] = 8'd32  MPLLA_MULTIPLIER
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0,
+			TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX);
+
+	/* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control
+	 * 3 Register Bit[10:0] = 11'd70  MPLLA_BANDWIDTH
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3,
+			TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+	 * Calibration Load 0 Register  Bit[12:0] = 13'd1344  VCO_LD_VAL_0
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0,
+			TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX);
+
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1, 0x549);
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2, 0x549);
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3, 0x549);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+	 * Calibration Reference 0 Register Bit[5:0] = 6'd42  VCO_REF_LD_0
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0,
+			TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX);
+
+	txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, 0x2929);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE
+	 * Enable Register Bit[4], Bit[0] = 1'b0  AFE_EN_0, DFE_EN_0
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, 0x0);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx
+	 * Equalization Control 4 Register Bit[0] = 1'b0  CONT_ADAPT_0
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, 0x0010);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate
+	 * Control Register Bit[2:0] = 3'b011  TX0_RATE
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL,
+			TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate
+	 * Control Register Bit[2:0] = 3'b011 RX0_RATE
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL,
+			TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General
+	 * Control 2 Register Bit[9:8] = 2'b01  TX0_WIDTH: 10bits
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2,
+			TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General
+	 * Control 2 Register Bit[9:8] = 2'b01  RX0_WIDTH: 10bits
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2,
+			TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER);
+
+	/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control
+	 * 2 Register Bit[10:8] = 3'b010	MPLLA_DIV16P5_CLK_EN=0,
+	 * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2,
+			TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10);
+
+	/* VR MII MMD AN Control Register Bit[8] = 1'b1 MII_CTRL */
+	/* Set to 8bit MII (required in 10M/100M SGMII) */
+	txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x0100);
+
+	/* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1
+	 * Register Bit[15](VR_RST)
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000);
+	/* wait phy initialization done */
+	for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) {
+		if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) &
+			TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0)
+			break;
+		msleep(100);
+	}
+
+	/* if success, set link status */
+	hw->link_status = TXGBE_LINK_STATUS_KX;
+
+	if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) {
+		status = TXGBE_ERR_PHY_INIT_NOT_DONE;
+		goto out;
+	}
+
+	dev_info(&adapter->pdev->dev, "Set KX TX_EQ MAIN:24 PRE:4 POST:16\n");
+	/* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register
+	 * Bit[13:8](TX_EQ_MAIN) = 6'd24, Bit[5:0](TX_EQ_PRE) = 6'd4
+	 */
+	value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0);
+	value = (value & ~0x3F3F) | (24 << 8) | 4;
+	txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+	/* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register
+	 * Bit[6](TX_EQ_OVR_RIDE) = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd16
+	 */
+	value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1);
+	value = (value & ~0x7F) | 16 | (1 << 6);
+	txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+
+out:
+	return status;
+}
+
+static s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, u32 speed)
+{
+	u32 i;
+	s32 status = 0;
+	u32 value = 0;
+
+	/* Set the module link speed */
+	TCALL(hw, mac.ops.set_rate_select_speed, speed);
+
+	/* 1. Wait xpcs power-up good */
+	for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) {
+		if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) &
+			TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) ==
+			TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD)
+			break;
+		msleep(20);
+	}
+	if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) {
+		status = TXGBE_ERR_XPCS_POWER_UP_FAILED;
+		goto out;
+	}
+
+	wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE);
+
+	/* 2. Disable xpcs AN-73 */
+	txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0);
+
+	/* Disable PHY MPLLA for eth mode change (after ECO) */
+	txgbe_wr32_ephy(hw, 0x4, 0x243A);
+	TXGBE_WRITE_FLUSH(hw);
+	msleep(20);
+	/* Set the eth change_mode bit first in mis_rst register
+	 * for corresponding LAN port
+	 */
+	if (hw->bus.lan_id == 0)
+		wr32(hw, TXGBE_MIS_RST,
+		     TXGBE_MIS_RST_LAN0_CHG_ETH_MODE);
+	else
+		wr32(hw, TXGBE_MIS_RST,
+		     TXGBE_MIS_RST_LAN1_CHG_ETH_MODE);
+
+	if (speed == TXGBE_LINK_SPEED_10GB_FULL) {
+		/* Set SR PCS Control2 Register Bits[1:0] = 2'b00
+		 * PCS_TYPE_SEL: KR
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, 0);
+		value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1);
+		value = value | 0x2000;
+		txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, value);
+		/* Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL0 Register Bit[7:0] = 8'd33
+		 * MPLLA_MULTIPLIER
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0021);
+		/* 3. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register
+		 * Bit[10:0](MPLLA_BANDWIDTH) = 11'd0
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0);
+		value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1);
+		value = (value & ~0x700) | 0x500;
+		txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value);
+		/* 4.Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register
+		 * Bit[12:8](RX_VREF_CTRL) = 5'hF
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00);
+		/* Set VR_XS_PMA_Gen5_12G_VCO_CAL_LD0 Register
+		 * Bit[12:0] = 13'd1353 VCO_LD_VAL_0
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0549);
+		/* Set VR_XS_PMA_Gen5_12G_VCO_CAL_REF0 Register Bit[5:0] = 6'd41
+		 * VCO_REF_LD_0
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x0029);
+		/* Set VR_XS_PMA_Gen5_12G_TX_RATE_CTRL Register
+		 * Bit[2:0] = 3'b000 TX0_RATE
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0);
+		/* Set VR_XS_PMA_Gen5_12G_RX_RATE_CTRL Register
+		 * Bit[2:0] = 3'b000 RX0_RATE
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0);
+		/* Set VR_XS_PMA_Gen5_12G_TX_GENCTRL2 Register Bit[9:8] = 2'b11
+		 * TX0_WIDTH: 20bits
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0300);
+		/* Set VR_XS_PMA_Gen5_12G_RX_GENCTRL2 Register Bit[9:8] = 2'b11
+		 * RX0_WIDTH: 20bits
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0300);
+		/* Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL2 Register
+		 * Bit[10:8] = 3'b110 MPLLA_DIV16P5_CLK_EN=1,
+		 * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0600);
+
+		if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 ||
+		    hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) {
+			/* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register
+			 * Bit[15:8](VGA1/2_GAIN_0) = 8'h77, Bit[7:5]
+			 * (CTLE_POLE_0) = 3'h2, Bit[4:0](CTLE_BOOST_0) = 5'hF
+			 */
+			txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F);
+		} else {
+			/* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register
+			 * Bit[15:8] (VGA1/2_GAIN_0) = 8'h00,
+			 * Bit[7:5](CTLE_POLE_0) = 3'h2,
+			 * Bit[4:0](CTLE_BOOST_0) = 5'h5
+			 */
+			value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0);
+			value = (value & ~0xFFFF) | (2 << 5) | 0x05;
+			txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value);
+		}
+		value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0);
+		value &= ~0x7;
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value);
+
+		if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 ||
+		    hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) {
+			/* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register
+			 * Bit[7:0](DFE_TAP1_0) = 8'd20
+			 */
+			txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0014);
+			value = txgbe_rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE);
+			value = (value & ~0x11) | 0x11;
+			txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value);
+		} else {
+			/* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register
+			 * Bit[7:0](DFE_TAP1_0) = 8'hBE
+			 */
+			txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0xBE);
+			/* 9. Set VR_MII_Gen5_12G_AFE_DFE_EN_CTRL Register
+			 * Bit[4](DFE_EN_0) = 1'b0, Bit[0](AFE_EN_0) = 1'b0
+			 */
+			value = txgbe_rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE);
+			value = (value & ~0x11) | 0x0;
+			txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value);
+		}
+		value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL);
+		value = value & ~0x1;
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value);
+	} else {
+		/* Set SR PCS Control2 Register Bits[1:0] = 2'b01
+		 * PCS_TYPE_SEL: X
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, 0x1);
+		/* Set SR PMA MMD Control1 Register Bit[13] = 1'b0
+		 * SS13: 1G speed
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, 0x0000);
+		/* Set SR MII MMD Control Register to corresponding speed: */
+		txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, 0x0140);
+
+		value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1);
+		value = (value & ~0x710) | 0x500;
+		txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value);
+		/* 4. Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register
+		 * Bit[12:8](RX_VREF_CTRL) = 5'hF
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00);
+		/* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register
+		 * Bit[13:8](TX_EQ_MAIN) = 6'd24, Bit[5:0](TX_EQ_PRE) = 6'd4
+		 */
+		value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0);
+		value = (value & ~0x3F3F) | (24 << 8) | 4;
+		txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+		/* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register
+		 * Bit[6](TX_EQ_OVR_RIDE) = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd16
+		 */
+		value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1);
+		value = (value & ~0x7F) | 16 | (1 << 6);
+		txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+		if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 ||
+		    hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) {
+			txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F);
+		} else {
+			/* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register
+			 * Bit[15:8] (VGA1/2_GAIN_0) = 8'h77,
+			 * Bit[7:5](CTLE_POLE_0) = 3'h0,
+			 * Bit[4:0](CTLE_BOOST_0) = 5'h6
+			 */
+			value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0);
+			value = (value & ~0xFFFF) | 0x7706;
+			txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value);
+		}
+		value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0);
+		value = (value & ~0x7) | 0x0;
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value);
+		/* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register
+		 * Bit[7:0](DFE_TAP1_0) = 8'd00
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0);
+		/* Set VR_XS_PMA_Gen5_12G_RX_GENCTRL3 Register
+		 * Bit[2:0] LOS_TRSHLD_0 = 4
+		 */
+		value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3);
+		value = (value & ~0x7) | 0x4;
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value);
+		/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY
+		 * MPLLA Control 0 Register Bit[7:0] = 8'd32  MPLLA_MULTIPLIER
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0020);
+		/* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA
+		 * Control 3 Register Bit[10:0] = 11'd70  MPLLA_BANDWIDTH
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0x0046);
+		/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+		 * Calibration Load 0 Register
+		 * Bit[12:0] = 13'd1344  VCO_LD_VAL_0
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0540);
+		/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+		 * Calibration Reference 0 Register
+		 * Bit[5:0] = 6'd42 VCO_REF_LD_0
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x002A);
+		/* Set VR XS, PMA, MII Synopsys Enterprise Gen5 12G PHY AFE-DFE
+		 * Enable Register Bit[4], Bit[0] = 1'b0  AFE_EN_0, DFE_EN_0
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, 0x0);
+		/* Set  VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx
+		 * Equalization Control 4 Register Bit[0] = 1'b0  CONT_ADAPT_0
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, 0x0010);
+		/* Set VR XS, PMA, MII Synopsys Enterprise Gen5 12G PHY Tx Rate
+		 * Control Register Bit[2:0] = 3'b011  TX0_RATE
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0x0003);
+		/* Set VR XS, PMA, MII Synopsys Enterprise Gen5 12G PHY Rx Rate
+		 * Control Register Bit[2:0] = 3'b011
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0x0003);
+		/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY
+		 * Tx General Control 2 Register
+		 * Bit[9:8] = 2'b01  TX0_WIDTH: 10bits
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0100);
+		/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY
+		 * Rx General Control 2 Register
+		 * Bit[9:8] = 2'b01  RX0_WIDTH: 10bits
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0100);
+		/* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA
+		 * Control 2 Register Bit[10:8] = 3'b010 MPLLA_DIV16P5_CLK_EN=0,
+		 * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0
+		 */
+		txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0200);
+		/* VR MII MMD AN Control Register Bit[8] = 1'b1 MII_CTRL */
+		txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x0100);
+	}
+	/* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1
+	 * Register Bit[15](VR_RST)
+	 */
+	txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000);
+	/* wait phy initialization done */
+	for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) {
+		if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) &
+			TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0)
+			break;
+		msleep(100);
+	}
+	if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME)
+		status = TXGBE_ERR_PHY_INIT_NOT_DONE;
+
+out:
+	return status;
+}
+
+/**
+ *  txgbe_setup_mac_link - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Set the link speed.
+ **/
+s32 txgbe_setup_mac_link(struct txgbe_hw *hw,
+			 u32 speed,
+			 bool __maybe_unused autoneg_wait_to_complete)
+{
+	u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN;
+	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+	u16 sub_dev_id = hw->subsystem_device_id;
+	bool autoneg = false;
+	bool link_up = false;
+	s32 status = 0;
+
+	/* Check to see if speed passed in is supported. */
+	status = TCALL(hw, mac.ops.get_link_capabilities,
+		       &link_capabilities, &autoneg);
+	if (status)
+		goto out;
+
+	speed &= link_capabilities;
+
+	if (speed == TXGBE_LINK_SPEED_UNKNOWN) {
+		status = TXGBE_ERR_LINK_SETUP;
+		goto out;
+	}
+
+	if (!(((sub_dev_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) ||
+	      ((sub_dev_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_XAUI) ||
+	      ((sub_dev_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_SGMII))) {
+		status = TCALL(hw, mac.ops.check_link,
+			       &link_speed, &link_up, false);
+		if (status != 0)
+			goto out;
+		if (link_speed == speed && link_up)
+			goto out;
+	}
+
+	if ((sub_dev_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) {
+		if (!autoneg) {
+			switch (hw->phy.link_mode) {
+			case TXGBE_PHYSICAL_LAYER_10GBASE_KR:
+				txgbe_set_link_to_kr(hw, autoneg);
+				break;
+			case TXGBE_PHYSICAL_LAYER_10GBASE_KX4:
+				txgbe_set_link_to_kx4(hw, autoneg);
+				break;
+			case TXGBE_PHYSICAL_LAYER_1000BASE_KX:
+				txgbe_set_link_to_kx(hw, speed, autoneg);
+				break;
+			default:
+				status = TXGBE_ERR_PHY;
+				goto out;
+			}
+		} else {
+			txgbe_set_link_to_kr(hw, autoneg);
+		}
+	} else if ((sub_dev_id & TXGBE_DEV_MASK) == TXGBE_ID_XAUI ||
+		   (sub_dev_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_XAUI ||
+		   (sub_dev_id & TXGBE_DEV_MASK) == TXGBE_ID_SGMII ||
+		   (sub_dev_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_SGMII) {
+		if (speed == TXGBE_LINK_SPEED_10GB_FULL) {
+			txgbe_set_link_to_kx4(hw, 0);
+		} else {
+			txgbe_set_link_to_kx(hw, speed, 0);
+			txgbe_set_sgmii_an37_ability(hw);
+		}
+	} else if (hw->phy.media_type == txgbe_media_type_fiber) {
+		txgbe_set_link_to_sfi(hw, speed);
+		if (speed == TXGBE_LINK_SPEED_1GB_FULL)
+			txgbe_set_sgmii_an37_ability(hw);
+	}
+
+out:
+	return status;
+}
+
 int txgbe_reset_misc(struct txgbe_hw *hw)
 {
+	u32 value;
 	int i;
 
 	txgbe_init_i2c(hw);
 
+	value = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2);
+	if ((value & 0x3) != TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X)
+		hw->link_status = TXGBE_LINK_STATUS_NONE;
+
 	/* receive packets that size > 2048 */
 	wr32m(hw, TXGBE_MAC_RX_CFG,
 	      TXGBE_MAC_RX_CFG_JE, TXGBE_MAC_RX_CFG_JE);
@@ -1903,3 +3159,55 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw,
 
 	return status;
 }
+
+/**
+ *  txgbe_check_mac_link - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true when link is up
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed,
+			 bool *link_up, bool link_up_wait_to_complete)
+{
+	u32 links_reg = 0;
+	u32 i;
+
+	if (link_up_wait_to_complete) {
+		for (i = 0; i < TXGBE_LINK_UP_TIME; i++) {
+			links_reg = rd32(hw, TXGBE_CFG_PORT_ST);
+			if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) {
+				*link_up = true;
+				break;
+			}
+			*link_up = false;
+			msleep(100);
+		}
+	} else {
+		links_reg = rd32(hw, TXGBE_CFG_PORT_ST);
+		if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP)
+			*link_up = true;
+		else
+			*link_up = false;
+	}
+
+	if (*link_up) {
+		if ((links_reg & TXGBE_CFG_PORT_ST_LINK_10G) ==
+				TXGBE_CFG_PORT_ST_LINK_10G)
+			*speed = TXGBE_LINK_SPEED_10GB_FULL;
+		else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_1G) ==
+				TXGBE_CFG_PORT_ST_LINK_1G)
+			*speed = TXGBE_LINK_SPEED_1GB_FULL;
+		else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_100M) ==
+				TXGBE_CFG_PORT_ST_LINK_100M)
+			*speed = TXGBE_LINK_SPEED_100_FULL;
+		else
+			*speed = TXGBE_LINK_SPEED_10_FULL;
+	} else {
+		*speed = TXGBE_LINK_SPEED_UNKNOWN;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index eaa1a6fe4dd7..d620c88f6318 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -56,9 +56,23 @@ bool txgbe_check_mng_access(struct txgbe_hw *hw);
 
 s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw);
 s32 txgbe_disable_rx(struct txgbe_hw *hw);
+s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
+					  u32 speed,
+					  bool autoneg_wait_to_complete);
 int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit);
 
+s32 txgbe_get_link_capabilities(struct txgbe_hw *hw,
+				u32 *speed, bool *autoneg);
 enum txgbe_media_type txgbe_get_media_type(struct txgbe_hw *hw);
+s32 txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw);
+s32 txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw);
+s32 txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw);
+s32 txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, u32 speed);
+s32 txgbe_setup_mac_link(struct txgbe_hw *hw, u32 speed,
+			 bool autoneg_wait_to_complete);
+s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed,
+			 bool *link_up, bool link_up_wait_to_complete);
+void txgbe_init_mac_link_ops(struct txgbe_hw *hw);
 int txgbe_reset_misc(struct txgbe_hw *hw);
 s32 txgbe_reset_hw(struct txgbe_hw *hw);
 s32 txgbe_identify_phy(struct txgbe_hw *hw);
@@ -77,6 +91,13 @@ u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr);
 void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data);
 void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data);
 
+s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg);
+s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg);
+
+s32 txgbe_set_link_to_kx(struct txgbe_hw *hw,
+			 u32 speed,
+			 bool autoneg);
+
 u8 fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr);
 u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr);
 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 30bac8a049df..7f5225004e28 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -200,9 +200,71 @@ static bool txgbe_is_sfp(struct txgbe_hw *hw)
 	}
 }
 
+static bool txgbe_is_backplane(struct txgbe_hw *hw)
+{
+	switch (hw->phy.media_type) {
+	case txgbe_media_type_backplane:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * txgbe_sfp_link_config - set up SFP+ link
+ * @adapter: pointer to private adapter struct
+ **/
+static void txgbe_sfp_link_config(struct txgbe_adapter *adapter)
+{
+	/* We are assuming the worst case scenario here, and that
+	 * is that an SFP was inserted/removed after the reset
+	 * but before SFP detection was enabled.  As such the best
+	 * solution is to just start searching as soon as we start up.
+	 */
+
+	adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET;
+	adapter->sfp_poll_time = 0;
+}
+
 static void txgbe_up_complete(struct txgbe_adapter *adapter)
 {
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 links_reg;
+
 	txgbe_get_hw_control(adapter);
+
+	/* enable the optics for SFP+ fiber */
+	TCALL(hw, mac.ops.enable_tx_laser);
+
+	/* make sure to complete pre-operations */
+	smp_mb__before_atomic();
+	clear_bit(__TXGBE_DOWN, &adapter->state);
+
+	if (txgbe_is_sfp(hw)) {
+		txgbe_sfp_link_config(adapter);
+	} else if (txgbe_is_backplane(hw)) {
+		adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
+		txgbe_service_event_schedule(adapter);
+	}
+
+	links_reg = rd32(hw, TXGBE_CFG_PORT_ST);
+	if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) {
+		if (links_reg & TXGBE_CFG_PORT_ST_LINK_10G) {
+			wr32(hw, TXGBE_MAC_TX_CFG,
+			     (rd32(hw, TXGBE_MAC_TX_CFG) &
+			      ~TXGBE_MAC_TX_CFG_SPEED_MASK) |
+			     TXGBE_MAC_TX_CFG_SPEED_10G);
+		} else if (links_reg & (TXGBE_CFG_PORT_ST_LINK_1G | TXGBE_CFG_PORT_ST_LINK_100M)) {
+			wr32(hw, TXGBE_MAC_TX_CFG,
+			     (rd32(hw, TXGBE_MAC_TX_CFG) &
+			      ~TXGBE_MAC_TX_CFG_SPEED_MASK) |
+			     TXGBE_MAC_TX_CFG_SPEED_1G);
+		}
+	}
+
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	wr32m(hw, TXGBE_CFG_PORT_CTL,
+	      TXGBE_CFG_PORT_CTL_PFRSTD, TXGBE_CFG_PORT_CTL_PFRSTD);
 }
 
 void txgbe_reset(struct txgbe_adapter *adapter)
@@ -214,6 +276,13 @@ void txgbe_reset(struct txgbe_adapter *adapter)
 
 	if (TXGBE_REMOVED(hw->hw_addr))
 		return;
+	/* lock SFP init bit to prevent race conditions with the watchdog */
+	while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state))
+		usleep_range(1000, 2000);
+
+	/* clear all SFP and link config related flags while holding SFP_INIT */
+	adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET;
+	adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
 
 	err = TCALL(hw, mac.ops.init_hw);
 	switch (err) {
@@ -228,6 +297,7 @@ void txgbe_reset(struct txgbe_adapter *adapter)
 		dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
 	}
 
+	clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state);
 	/* do not flush user set addresses */
 	memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
 	txgbe_flush_sw_mac_table(adapter);
@@ -254,6 +324,8 @@ void txgbe_disable_device(struct txgbe_adapter *adapter)
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
+	adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
+
 	del_timer_sync(&adapter->service_timer);
 
 	if (hw->bus.lan_id == 0)
@@ -283,8 +355,14 @@ void txgbe_disable_device(struct txgbe_adapter *adapter)
 
 void txgbe_down(struct txgbe_adapter *adapter)
 {
+	struct txgbe_hw *hw = &adapter->hw;
+
 	txgbe_disable_device(adapter);
 	txgbe_reset(adapter);
+
+	if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))
+		/* power down the optics for SFP+ fiber */
+		TCALL(&adapter->hw, mac.ops.disable_tx_laser);
 }
 
 /**
@@ -382,7 +460,11 @@ int txgbe_open(struct net_device *netdev)
  */
 static void txgbe_close_suspend(struct txgbe_adapter *adapter)
 {
+	struct txgbe_hw *hw = &adapter->hw;
+
 	txgbe_disable_device(adapter);
+	if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))
+		TCALL(hw, mac.ops.disable_tx_laser);
 }
 
 /**
@@ -437,12 +519,264 @@ static void txgbe_shutdown(struct pci_dev *pdev)
 	}
 }
 
+/**
+ * txgbe_watchdog_update_link - update the link status
+ * @adapter: pointer to the device adapter structure
+ **/
+static void txgbe_watchdog_update_link(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 link_speed = adapter->link_speed;
+	bool link_up = adapter->link_up;
+	u32 reg;
+	u32 i;
+
+	if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE))
+		return;
+
+	link_speed = TXGBE_LINK_SPEED_10GB_FULL;
+	link_up = true;
+	TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false);
+
+	if (link_up || time_after(jiffies, (adapter->link_check_timeout +
+		TXGBE_TRY_LINK_TIMEOUT))) {
+		adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
+	}
+
+	for (i = 0; i < 3; i++) {
+		TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false);
+		msleep(20);
+	}
+
+	adapter->link_up = link_up;
+	adapter->link_speed = link_speed;
+
+	if (link_up) {
+		if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) {
+			wr32(hw, TXGBE_MAC_TX_CFG,
+			     (rd32(hw, TXGBE_MAC_TX_CFG) &
+			      ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE |
+			     TXGBE_MAC_TX_CFG_SPEED_10G);
+		} else if (link_speed & (TXGBE_LINK_SPEED_1GB_FULL |
+			   TXGBE_LINK_SPEED_100_FULL | TXGBE_LINK_SPEED_10_FULL)) {
+			wr32(hw, TXGBE_MAC_TX_CFG,
+			     (rd32(hw, TXGBE_MAC_TX_CFG) &
+			      ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE |
+			     TXGBE_MAC_TX_CFG_SPEED_1G);
+		}
+
+		/* Reconfigure MAC RX */
+		reg = rd32(hw, TXGBE_MAC_RX_CFG);
+		wr32(hw, TXGBE_MAC_RX_CFG, reg);
+		wr32(hw, TXGBE_MAC_PKT_FLT, TXGBE_MAC_PKT_FLT_PR);
+		reg = rd32(hw, TXGBE_MAC_WDG_TIMEOUT);
+		wr32(hw, TXGBE_MAC_WDG_TIMEOUT, reg);
+	}
+}
+
+/**
+ * txgbe_watchdog_link_is_up - update netif_carrier status and
+ *                             print link up message
+ * @adapter: pointer to the device adapter structure
+ **/
+static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 link_speed = adapter->link_speed;
+	const char *speed_str;
+
+	/* only continue if link was previously down */
+	if (netif_carrier_ok(netdev))
+		return;
+
+	switch (link_speed) {
+	case TXGBE_LINK_SPEED_10GB_FULL:
+		speed_str = "10 Gbps";
+		break;
+	case TXGBE_LINK_SPEED_1GB_FULL:
+		speed_str = "1 Gbps";
+		break;
+	case TXGBE_LINK_SPEED_100_FULL:
+		speed_str = "100 Mbps";
+		break;
+	case TXGBE_LINK_SPEED_10_FULL:
+		speed_str = "10 Mbps";
+		break;
+	default:
+		speed_str = "unknown speed";
+		break;
+	}
+
+	netif_info(adapter, drv, netdev,
+		   "NIC Link is Up %s\n", speed_str);
+
+	netif_carrier_on(netdev);
+}
+
+/**
+ * txgbe_watchdog_link_is_down - update netif_carrier status and
+ *                               print link down message
+ * @adapter: pointer to the adapter structure
+ **/
+static void txgbe_watchdog_link_is_down(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	adapter->link_up = false;
+	adapter->link_speed = 0;
+
+	/* only continue if link was up previously */
+	if (!netif_carrier_ok(netdev))
+		return;
+
+	netif_info(adapter, drv, netdev, "NIC Link is Down\n");
+	netif_carrier_off(netdev);
+}
+
+/**
+ * txgbe_watchdog_subtask - check and bring link up
+ * @adapter: pointer to the device adapter structure
+ **/
+static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter)
+{
+	/* if interface is down do nothing */
+	if (test_bit(__TXGBE_DOWN, &adapter->state) ||
+	    test_bit(__TXGBE_REMOVING, &adapter->state) ||
+	    test_bit(__TXGBE_RESETTING, &adapter->state))
+		return;
+
+	txgbe_watchdog_update_link(adapter);
+
+	if (adapter->link_up)
+		txgbe_watchdog_link_is_up(adapter);
+	else
+		txgbe_watchdog_link_is_down(adapter);
+}
+
+/**
+ * txgbe_sfp_detection_subtask - poll for SFP+ cable
+ * @adapter: the txgbe adapter structure
+ **/
+static void txgbe_sfp_detection_subtask(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	struct txgbe_mac_info *mac = &hw->mac;
+	s32 err;
+
+	/* not searching for SFP so there is nothing to do here */
+	if (!(adapter->flags2 & TXGBE_FLAG2_SFP_NEEDS_RESET))
+		return;
+
+	if (adapter->sfp_poll_time &&
+	    time_after(adapter->sfp_poll_time, jiffies))
+		return; /* If not yet time to poll for SFP */
+
+	/* someone else is in init, wait until next service event */
+	if (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state))
+		return;
+
+	adapter->sfp_poll_time = jiffies + TXGBE_SFP_POLL_JIFFIES - 1;
+
+	err = TCALL(hw, phy.ops.identify_sfp);
+	if (err == TXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto sfp_out;
+
+	if (err == TXGBE_ERR_SFP_NOT_PRESENT) {
+		/* If no cable is present, then we need to reset
+		 * the next time we find a good cable.
+		 */
+		adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET;
+	}
+
+	/* exit on error */
+	if (err)
+		goto sfp_out;
+
+	/* exit if reset not needed */
+	if (!(adapter->flags2 & TXGBE_FLAG2_SFP_NEEDS_RESET))
+		goto sfp_out;
+
+	adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET;
+
+	if (hw->phy.multispeed_fiber) {
+		/* Set up dual speed SFP+ support */
+		mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber;
+		mac->ops.setup_mac_link = txgbe_setup_mac_link;
+		mac->ops.set_rate_select_speed = txgbe_set_hard_rate_select_speed;
+	} else {
+		mac->ops.setup_link = txgbe_setup_mac_link;
+		mac->ops.set_rate_select_speed = txgbe_set_hard_rate_select_speed;
+		hw->phy.autoneg_advertised = 0;
+	}
+
+	adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
+	netif_info(adapter, probe, adapter->netdev,
+		   "detected SFP+: %d\n", hw->phy.sfp_type);
+
+sfp_out:
+	clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state);
+
+	if (err == TXGBE_ERR_SFP_NOT_SUPPORTED && adapter->netdev_registered)
+		dev_err(&adapter->pdev->dev,
+			"failed to initialize because an unsupported SFP+ module type was detected.\n");
+}
+
+/**
+ * txgbe_sfp_link_config_subtask - set up link SFP after module install
+ * @adapter: the txgbe adapter structure
+ **/
+static void txgbe_sfp_link_config_subtask(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u8 device_type = hw->subsystem_device_id & 0xF0;
+	bool autoneg = false;
+	u32 speed;
+
+	if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_CONFIG))
+		return;
+
+	/* someone else is in init, wait until next service event */
+	if (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state))
+		return;
+
+	adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
+
+	if (device_type == TXGBE_ID_MAC_SGMII) {
+		speed = TXGBE_LINK_SPEED_1GB_FULL;
+	} else {
+		speed = hw->phy.autoneg_advertised;
+		if (!speed && hw->mac.ops.get_link_capabilities) {
+			TCALL(hw, mac.ops.get_link_capabilities, &speed, &autoneg);
+			/* set up the highest link speed when autoneg is off */
+			if (!autoneg) {
+				if (speed & TXGBE_LINK_SPEED_10GB_FULL)
+					speed = TXGBE_LINK_SPEED_10GB_FULL;
+			}
+		}
+	}
+
+	TCALL(hw, mac.ops.setup_link, speed, false);
+
+	adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+	clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state);
+}
+
 static void txgbe_service_timer(struct timer_list *t)
 {
 	struct txgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+	struct txgbe_hw *hw = &adapter->hw;
 	unsigned long next_event_offset;
 
-	next_event_offset = HZ * 2;
+	/* poll faster when waiting for link */
+	if (adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
+		if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4)
+			next_event_offset = HZ;
+		else
+			next_event_offset = HZ / 10;
+	} else {
+		next_event_offset = HZ * 2;
+	}
 
 	/* Reset the timer */
 	mod_timer(&adapter->service_timer, next_event_offset + jiffies);
@@ -469,6 +803,10 @@ static void txgbe_service_task(struct work_struct *work)
 		return;
 	}
 
+	txgbe_sfp_detection_subtask(adapter);
+	txgbe_sfp_link_config_subtask(adapter);
+	txgbe_watchdog_subtask(adapter);
+
 	txgbe_service_event_complete(adapter);
 }
 
@@ -717,6 +1055,10 @@ static int txgbe_probe(struct pci_dev *pdev,
 	pci_set_drvdata(pdev, adapter);
 	adapter->netdev_registered = true;
 
+	if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))
+		/* power down the optics for SFP+ fiber */
+		TCALL(hw, mac.ops.disable_tx_laser);
+
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
@@ -732,6 +1074,11 @@ static int txgbe_probe(struct pci_dev *pdev,
 	if (expected_gts > 0)
 		txgbe_check_minimum_link(adapter);
 
+	if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)
+		netif_info(adapter, probe, netdev, "NCSI : support");
+	else
+		netif_info(adapter, probe, netdev, "NCSI : unsupported");
+
 	/* First try to read PBA as a string */
 	err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH);
 	if (err)
@@ -756,6 +1103,16 @@ static int txgbe_probe(struct pci_dev *pdev,
 	/* add san mac addr to netdev */
 	txgbe_add_sanmac_netdev(netdev);
 
+	netif_info(adapter, probe, netdev,
+		   "WangXun(R) 10 Gigabit Network Connection\n");
+
+	/* setup link for SFP devices with MNG FW, else wait for TXGBE_UP */
+	if (txgbe_mng_present(hw) && txgbe_is_sfp(hw) &&
+	    ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))
+		TCALL(hw, mac.ops.setup_link,
+		      TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL,
+		      true);
+
 	return 0;
 
 err_release_hw:
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index f3099103110b..be0185570b62 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -3,6 +3,29 @@
 
 #include "txgbe_phy.h"
 
+/**
+ * txgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability.  For MACs that don't
+ * have this bit, just return false since the link cannot be blocked
+ * via this method.
+ **/
+s32 txgbe_check_reset_blocked(struct txgbe_hw *hw)
+{
+	u32 mmngc;
+
+	mmngc = rd32(hw, TXGBE_MIS_ST);
+	if (mmngc & TXGBE_MIS_ST_MNG_VETO) {
+		ERROR_REPORT1(hw, TXGBE_ERROR_SOFTWARE,
+			      "MNG_VETO bit detected.\n");
+		return true;
+	}
+
+	return false;
+}
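+
+/* Callers are expected to consult txgbe_check_reset_blocked() above
+ * before resetting the PHY, so manageability traffic is not disturbed.
+ * An illustrative sketch (not code from this patch):
+ *
+ *	if (txgbe_check_reset_blocked(hw))
+ *		return 0;
+ */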
+
 /**
  *  txgbe_identify_module - Identifies module type
  *  @hw: pointer to hardware structure
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 7e172885f536..bb34e2dce2f8 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -39,6 +39,8 @@
 #define TXGBE_SFF_VENDOR_OUI_AVAGO      0x00176A00
 #define TXGBE_SFF_VENDOR_OUI_INTEL      0x001B2100
 
+s32 txgbe_check_reset_blocked(struct txgbe_hw *hw);
+
 s32 txgbe_identify_module(struct txgbe_hw *hw);
 s32 txgbe_identify_sfp_module(struct txgbe_hw *hw);
 s32 txgbe_init_i2c(struct txgbe_hw *hw);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 5539da638c09..a8f9a8af980e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -58,6 +58,14 @@
 #define TXGBE_SR_XS_PCS_MMD_STATUS1             0x30001
 #define TXGBE_SR_PCS_CTL2                       0x30007
 #define TXGBE_SR_PMA_MMD_CTL1                   0x10000
+#define TXGBE_SR_MII_MMD_CTL                    0x1F0000
+#define TXGBE_SR_MII_MMD_DIGI_CTL               0x1F8000
+#define TXGBE_SR_MII_MMD_AN_CTL                 0x1F8001
+#define TXGBE_SR_MII_MMD_AN_ADV                 0x1F0004
+#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE(_v)       ((0x3 & (_v)) << 7)
+#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM       0x80
+#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM       0x100
+#define TXGBE_SR_MII_MMD_LP_BABL                0x1F0005
 #define TXGBE_SR_AN_MMD_CTL                     0x70000
 #define TXGBE_SR_AN_MMD_ADV_REG1                0x70010
 #define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE(_v)      ((0x3 & (_v)) << 10)
@@ -68,10 +76,127 @@
 #define TXGBE_VR_AN_KR_MODE_CL                  0x78003
 #define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1        0x38000
 #define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS      0x38010
+#define TXGBE_PHY_MPLLA_CTL0                    0x18071
+#define TXGBE_PHY_MPLLA_CTL3                    0x18077
+#define TXGBE_PHY_MISC_CTL0                     0x18090
+#define TXGBE_PHY_VCO_CAL_LD0                   0x18092
+#define TXGBE_PHY_VCO_CAL_LD1                   0x18093
+#define TXGBE_PHY_VCO_CAL_LD2                   0x18094
+#define TXGBE_PHY_VCO_CAL_LD3                   0x18095
+#define TXGBE_PHY_VCO_CAL_REF0                  0x18096
+#define TXGBE_PHY_VCO_CAL_REF1                  0x18097
+#define TXGBE_PHY_RX_AD_ACK                     0x18098
+#define TXGBE_PHY_AFE_DFE_ENABLE                0x1805D
+#define TXGBE_PHY_DFE_TAP_CTL0                  0x1805E
+#define TXGBE_PHY_RX_EQ_ATT_LVL0                0x18057
+#define TXGBE_PHY_RX_EQ_CTL0                    0x18058
+#define TXGBE_PHY_RX_EQ_CTL                     0x1805C
+#define TXGBE_PHY_TX_EQ_CTL0                    0x18036
+#define TXGBE_PHY_TX_EQ_CTL1                    0x18037
+#define TXGBE_PHY_TX_RATE_CTL                   0x18034
+#define TXGBE_PHY_RX_RATE_CTL                   0x18054
+#define TXGBE_PHY_TX_GEN_CTL2                   0x18032
+#define TXGBE_PHY_RX_GEN_CTL2                   0x18052
+#define TXGBE_PHY_RX_GEN_CTL3                   0x18053
+#define TXGBE_PHY_MPLLA_CTL2                    0x18073
+#define TXGBE_PHY_RX_POWER_ST_CTL               0x18055
+#define TXGBE_PHY_TX_POWER_ST_CTL               0x18035
+#define TXGBE_PHY_TX_GENCTRL1                   0x18031
 
 #define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R        0x0
 #define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X        0x1
 #define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK     0x3
+#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G      0x0
+#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G     0x2000
+#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK    0x2000
+#define TXGBE_SR_PMA_MMD_CTL1_LB_EN             0x1
+#define TXGBE_SR_MII_MMD_CTL_AN_EN              0x1000
+#define TXGBE_SR_MII_MMD_CTL_RESTART_AN         0x0200
+#define TXGBE_SR_AN_MMD_CTL_RESTART_AN          0x0200
+#define TXGBE_SR_AN_MMD_CTL_ENABLE              0x1000
+#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4    0x40
+#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX     0x20
+#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR     0x80
+#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_MASK   0xFFFF
+#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_ENABLE 0x1000
+#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST 0x8000
+#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK            0x1C
+#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD      0x10
+
+#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX              32
+#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_10GBASER_KR             33
+#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER                   40
+#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_MASK                    0xFF
+#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX           0x56
+#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR          0x7B
+#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER                0x56
+#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_MASK                 0x7FF
+#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0                       0x1
+#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1                     0xE
+#define TXGBE_PHY_MISC_CTL0_RX_VREF_CTRL                        0x1F00
+#define TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX                        1344
+#define TXGBE_PHY_VCO_CAL_LD0_10GBASER_KR                       1353
+#define TXGBE_PHY_VCO_CAL_LD0_OTHER                             1360
+#define TXGBE_PHY_VCO_CAL_LD0_MASK                              0x1000
+#define TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX                   42
+#define TXGBE_PHY_VCO_CAL_REF0_LD0_10GBASER_KR                  41
+#define TXGBE_PHY_VCO_CAL_REF0_LD0_OTHER                        34
+#define TXGBE_PHY_VCO_CAL_REF0_LD0_MASK                         0x3F
+#define TXGBE_PHY_AFE_DFE_ENABLE_DFE_EN0                        0x10
+#define TXGBE_PHY_AFE_DFE_ENABLE_AFE_EN0                        0x1
+#define TXGBE_PHY_AFE_DFE_ENABLE_MASK                           0xFF
+#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT0                         0x1
+#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT_MASK                     0xF
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_10GBASER_KR              0x0
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_RXAUI                    0x1
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX               0x3
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_OTHER                    0x2
+#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_OTHER                    0x20
+#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_OTHER                    0x200
+#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_OTHER                    0x2000
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_MASK                     0x7
+#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_MASK                     0x70
+#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_MASK                     0x700
+#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_MASK                     0x7000
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_10GBASER_KR              0x0
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_RXAUI                    0x1
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX               0x3
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_OTHER                    0x2
+#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_OTHER                    0x20
+#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_OTHER                    0x200
+#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_OTHER                    0x2000
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_MASK                     0x7
+#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_MASK                     0x70
+#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_MASK                     0x700
+#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_MASK                     0x7000
+#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR             0x200
+#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR_RXAUI       0x300
+#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER                   0x100
+#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_MASK                    0x300
+#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_OTHER                   0x400
+#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_MASK                    0xC00
+#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_OTHER                   0x1000
+#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_MASK                    0x3000
+#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_OTHER                   0x4000
+#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_MASK                    0xC000
+#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR             0x200
+#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR_RXAUI       0x300
+#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER                   0x100
+#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_MASK                    0x300
+#define TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_OTHER                   0x400
+#define TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_MASK                    0xC00
+#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_OTHER                   0x1000
+#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_MASK                    0x3000
+#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_OTHER                   0x4000
+#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_MASK                    0xC000
+
+#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_8                       0x100
+#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10                      0x200
+#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_16P5                    0x400
+#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_MASK                    0x700
+
+#define TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME  100
+#define TXGBE_PHY_INIT_DONE_POLLING_TIME        100
 
 /**************** Global Registers ****************************/
 /* chip control Registers */
@@ -282,6 +407,27 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_CFG_LED_CTL_LINK_UP_SEL   0x00000001U
 #define TXGBE_CFG_LED_CTL_LINK_OD_SHIFT 16
 
+/* GPIO Registers */
+#define TXGBE_GPIO_DR                   0x14800
+#define TXGBE_GPIO_DDR                  0x14804
+/*GPIO bit */
+#define TXGBE_GPIO_DR_0         0x00000001U /* SDP0 Data Value */
+#define TXGBE_GPIO_DR_1         0x00000002U /* SDP1 Data Value */
+#define TXGBE_GPIO_DR_2         0x00000004U /* SDP2 Data Value */
+#define TXGBE_GPIO_DR_3         0x00000008U /* SDP3 Data Value */
+#define TXGBE_GPIO_DR_4         0x00000010U /* SDP4 Data Value */
+#define TXGBE_GPIO_DR_5         0x00000020U /* SDP5 Data Value */
+#define TXGBE_GPIO_DR_6         0x00000040U /* SDP6 Data Value */
+#define TXGBE_GPIO_DR_7         0x00000080U /* SDP7 Data Value */
+#define TXGBE_GPIO_DDR_0        0x00000001U /* SDP0 IO direction */
+#define TXGBE_GPIO_DDR_1        0x00000002U /* SDP1 IO direction */
+#define TXGBE_GPIO_DDR_2        0x00000004U /* SDP2 IO direction */
+#define TXGBE_GPIO_DDR_3        0x00000008U /* SDP3 IO direction */
+#define TXGBE_GPIO_DDR_4        0x00000010U /* SDP4 IO direction */
+#define TXGBE_GPIO_DDR_5        0x00000020U /* SDP5 IO direction */
+#define TXGBE_GPIO_DDR_6        0x00000040U /* SDP6 IO direction */
+#define TXGBE_GPIO_DDR_7        0x00000080U /* SDP7 IO direction */
+
 /*********************** Transmit DMA registers **************************/
 /* transmit global control */
 #define TXGBE_TDM_CTL           0x18000
@@ -662,6 +808,38 @@ struct txgbe_hic_reset {
 
 /* Number of 100 microseconds we wait for PCI Express master disable */
 #define TXGBE_PCI_MASTER_DISABLE_TIMEOUT        800
+
+/* Autonegotiation advertised speeds */
+typedef u32 txgbe_autoneg_advertised;
+/* Link speed */
+#define TXGBE_LINK_SPEED_UNKNOWN        0
+#define TXGBE_LINK_SPEED_100_FULL       1
+#define TXGBE_LINK_SPEED_1GB_FULL       2
+#define TXGBE_LINK_SPEED_10GB_FULL      4
+#define TXGBE_LINK_SPEED_10_FULL        8
+#define TXGBE_LINK_SPEED_AUTONEG  (TXGBE_LINK_SPEED_100_FULL | \
+				   TXGBE_LINK_SPEED_1GB_FULL | \
+				   TXGBE_LINK_SPEED_10GB_FULL | \
+				   TXGBE_LINK_SPEED_10_FULL)
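+
+/* The speed values above are single-bit flags, so an advertised set can be
+ * intersected with the hardware capabilities by masking.  An illustrative
+ * sketch mirroring the check in txgbe_setup_mac_link(), not new code:
+ *
+ *	speed &= link_capabilities;
+ *	if (speed == TXGBE_LINK_SPEED_UNKNOWN)
+ *		return TXGBE_ERR_LINK_SETUP;
+ */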
+
+/* Physical layer type */
+typedef u32 txgbe_physical_layer;
+#define TXGBE_PHYSICAL_LAYER_UNKNOWN            0
+#define TXGBE_PHYSICAL_LAYER_10GBASE_T          0x0001
+#define TXGBE_PHYSICAL_LAYER_1000BASE_T         0x0002
+#define TXGBE_PHYSICAL_LAYER_100BASE_TX         0x0004
+#define TXGBE_PHYSICAL_LAYER_SFP_PLUS_CU        0x0008
+#define TXGBE_PHYSICAL_LAYER_10GBASE_LR         0x0010
+#define TXGBE_PHYSICAL_LAYER_10GBASE_LRM        0x0020
+#define TXGBE_PHYSICAL_LAYER_10GBASE_SR         0x0040
+#define TXGBE_PHYSICAL_LAYER_10GBASE_KX4        0x0080
+#define TXGBE_PHYSICAL_LAYER_1000BASE_KX        0x0200
+#define TXGBE_PHYSICAL_LAYER_1000BASE_BX        0x0400
+#define TXGBE_PHYSICAL_LAYER_10GBASE_KR         0x0800
+#define TXGBE_PHYSICAL_LAYER_10GBASE_XAUI       0x1000
+#define TXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA      0x2000
+#define TXGBE_PHYSICAL_LAYER_1000BASE_SX        0x4000
+
 enum txgbe_eeprom_type {
 	txgbe_eeprom_uninitialized = 0,
 	txgbe_eeprom_spi,
@@ -812,6 +990,20 @@ struct txgbe_mac_operations {
 	s32 (*acquire_swfw_sync)(struct txgbe_hw *hw, u32 mask);
 	s32 (*release_swfw_sync)(struct txgbe_hw *hw, u32 mask);
 
+	/* Link */
+	s32 (*disable_tx_laser)(struct txgbe_hw *hw);
+	s32 (*enable_tx_laser)(struct txgbe_hw *hw);
+	s32 (*flap_tx_laser)(struct txgbe_hw *hw);
+	s32 (*setup_link)(struct txgbe_hw *hw, u32 speed,
+			  bool autoneg_wait_to_complete);
+	s32 (*setup_mac_link)(struct txgbe_hw *hw, u32 speed,
+			      bool autoneg_wait_to_complete);
+	s32 (*check_link)(struct txgbe_hw *hw, u32 *speed,
+			  bool *link_up, bool link_up_wait_to_complete);
+	s32 (*get_link_capabilities)(struct txgbe_hw *hw, u32 *speed,
+				     bool *autoneg);
+	s32 (*set_rate_select_speed)(struct txgbe_hw *hw, u32 speed);
+
 	/* RAR */
 	s32 (*set_rar)(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
 		       u32 enable_addr);
@@ -878,7 +1070,9 @@ struct txgbe_phy_info {
 	enum txgbe_sfp_type sfp_type;
 	enum txgbe_media_type media_type;
 	u32 phy_semaphore_mask;
+	txgbe_autoneg_advertised autoneg_advertised;
 	bool multispeed_fiber;
+	txgbe_physical_layer link_mode;
 };
 
 enum txgbe_reset_type {
@@ -887,6 +1081,12 @@ enum txgbe_reset_type {
 	TXGBE_GLOBAL_RESET
 };
 
+enum txgbe_link_status {
+	TXGBE_LINK_STATUS_NONE = 0,
+	TXGBE_LINK_STATUS_KX,
+	TXGBE_LINK_STATUS_KX4
+};
+
 struct txgbe_hw {
 	u8 __iomem *hw_addr;
 	struct txgbe_mac_info mac;
@@ -901,6 +1101,7 @@ struct txgbe_hw {
 	u8 revision_id;
 	bool adapter_stopped;
 	enum txgbe_reset_type reset_type;
+	enum txgbe_link_status link_status;
 	u16 oem_ssid;
 	u16 oem_svid;
 };
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 08/16] net: txgbe: Add interrupt support
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (6 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 07/16] net: txgbe: Support to setup link Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 09/16] net: txgbe: Handle various event interrupts Jiawen Wu
                   ` (7 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Determine proper interrupt scheme based on kernel support and
hardware queue count. Allocate memory for interrupt vectors.
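
A rough sketch of the MSI-X vector-count policy this patch implements
(a simplified restatement of txgbe_acquire_msix_vectors(), not extra
driver code):

	/* one vector per Tx/Rx queue pair, bounded by CPUs and HW */
	vectors = max(num_rx_queues, num_tx_queues);
	vectors = min(vectors, num_online_cpus());
	vectors += NON_Q_VECTORS;	/* misc causes */
	vectors = min(vectors, hw->mac.max_msix_vectors);

If pci_enable_msix_range() cannot grant at least MIN_MSIX_COUNT of
them, TXGBE_FLAG_MSIX_ENABLED is cleared and the entry table freed.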

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/Makefile   |   3 +-
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    | 128 +++++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c |  35 ++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |   1 +
 .../net/ethernet/wangxun/txgbe/txgbe_lib.c    | 435 ++++++++++++++++
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 480 +++++++++++++++++-
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   | 131 +++++
 7 files changed, 1211 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c

diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 875704a29c4c..0111fda0e784 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -7,4 +7,5 @@
 obj-$(CONFIG_TXGBE) += txgbe.o
 
 txgbe-objs := txgbe_main.o \
-              txgbe_hw.o txgbe_phy.o
+              txgbe_hw.o txgbe_phy.o \
+              txgbe_lib.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 0f06efbcfef5..a4ebc58a984b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -12,6 +12,13 @@
 #include "txgbe_type.h"
 
 struct txgbe_ring {
+	struct txgbe_ring *next;        /* pointer to next ring in q_vector */
+	struct txgbe_q_vector *q_vector; /* backpointer to host q_vector */
+	struct net_device *netdev;      /* netdev ring belongs to */
+	struct device *dev;             /* device for DMA mapping */
+	u16 count;                      /* amount of descriptors */
+
+	u8 queue_index; /* needed for multiqueue queue management */
 	u8 reg_idx;
 } ____cacheline_internodealigned_in_smp;
 
@@ -20,6 +27,52 @@ struct txgbe_ring {
 #define TXGBE_MAX_RX_QUEUES   (TXGBE_MAX_FDIR_INDICES + 1)
 #define TXGBE_MAX_TX_QUEUES   (TXGBE_MAX_FDIR_INDICES + 1)
 
+struct txgbe_ring_container {
+	struct txgbe_ring *ring;        /* pointer to linked list of rings */
+	u16 work_limit;                 /* total work allowed per interrupt */
+	u8 count;                       /* total number of rings in vector */
+};
+
+/* iterator for handling rings in ring container */
+#define txgbe_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos; pos = pos->next)
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct txgbe_q_vector {
+	struct txgbe_adapter *adapter;
+	int cpu;
+	u16 v_idx;      /* index of q_vector within array, also used for
+			 * finding the bit in EICR and friends that
+			 * represents the vector for this ring
+			 */
+	u16 itr;        /* Interrupt throttle rate written to EITR */
+	struct txgbe_ring_container rx, tx;
+
+	cpumask_t affinity_mask;
+	int numa_node;
+	struct rcu_head rcu;    /* to avoid race with update stats on free */
+	char name[IFNAMSIZ + 17];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct txgbe_ring ring[] ____cacheline_internodealigned_in_smp;
+};
+
+/* microsecond values for various ITR rates shifted by 2 to fit itr register
+ * with the first 3 bits reserved 0
+ */
+#define TXGBE_100K_ITR          40
+#define TXGBE_20K_ITR           200
+#define TXGBE_16K_ITR           248
+#define TXGBE_12K_ITR           336
+
+#define TCP_TIMER_VECTOR        0
+#define OTHER_VECTOR    1
+#define NON_Q_VECTORS   (OTHER_VECTOR + TCP_TIMER_VECTOR)
+
+#define TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE       64
+
 struct txgbe_mac_addr {
 	u8 addr[ETH_ALEN];
 	u16 state; /* bitmask */
@@ -30,6 +83,12 @@ struct txgbe_mac_addr {
 #define TXGBE_MAC_STATE_MODIFIED        0x2
 #define TXGBE_MAC_STATE_IN_USE          0x4
 
+#define MAX_MSIX_Q_VECTORS      TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE
+#define MAX_MSIX_COUNT          TXGBE_MAX_MSIX_VECTORS_SAPPHIRE
+
+#define MIN_MSIX_Q_VECTORS      1
+#define MIN_MSIX_COUNT          (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
 /* default to trying for four seconds */
 #define TXGBE_TRY_LINK_TIMEOUT  (4 * HZ)
 #define TXGBE_SFP_POLL_JIFFIES  (2 * HZ)        /* SFP poll every 2 seconds */
@@ -39,6 +98,8 @@ struct txgbe_mac_addr {
  **/
 #define TXGBE_FLAG_NEED_LINK_UPDATE             BIT(0)
 #define TXGBE_FLAG_NEED_LINK_CONFIG             BIT(1)
+#define TXGBE_FLAG_MSI_ENABLED                  BIT(2)
+#define TXGBE_FLAG_MSIX_ENABLED                 BIT(3)
 
 /**
  * txgbe_adapter.flag2
@@ -46,6 +107,14 @@ struct txgbe_mac_addr {
 #define TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED     BIT(0)
 #define TXGBE_FLAG2_SFP_NEEDS_RESET             BIT(1)
 
+enum txgbe_isb_idx {
+	TXGBE_ISB_HEADER,
+	TXGBE_ISB_MISC,
+	TXGBE_ISB_VEC0,
+	TXGBE_ISB_VEC1,
+	TXGBE_ISB_MAX
+};
+
 /* board specific private data structure */
 struct txgbe_adapter {
 	u8 __iomem *io_addr;    /* Mainly for iounmap use */
@@ -59,14 +128,30 @@ struct txgbe_adapter {
 	u32 flags2;
 	/* Tx fast path data */
 	int num_tx_queues;
+	u16 tx_itr_setting;
+
+	/* Rx fast path data */
+	int num_rx_queues;
+	u16 rx_itr_setting;
 
 	/* TX */
 	struct txgbe_ring *tx_ring[TXGBE_MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 
+	/* RX */
+	struct txgbe_ring *rx_ring[TXGBE_MAX_RX_QUEUES];
+	struct txgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+
+	int num_q_vectors;      /* current number of q_vectors for device */
+	int max_q_vectors;      /* upper limit of q_vectors for device */
+	struct msix_entry *msix_entries;
+
 	/* structs defined in txgbe_type.h */
 	struct txgbe_hw hw;
 	u16 msg_enable;
 
+	unsigned int tx_ring_count;
+	unsigned int rx_ring_count;
+
 	u32 link_speed;
 	bool link_up;
 	unsigned long sfp_poll_time;
@@ -80,8 +165,24 @@ struct txgbe_adapter {
 
 	struct txgbe_mac_addr *mac_table;
 
+	/* misc interrupt status block */
+	dma_addr_t isb_dma;
+	u32 *isb_mem;
+	u32 isb_tag[TXGBE_ISB_MAX];
 };
 
+static inline u32 txgbe_misc_isb(struct txgbe_adapter *adapter,
+				 enum txgbe_isb_idx idx)
+{
+	u32 cur_tag = 0;
+
+	cur_tag = adapter->isb_mem[TXGBE_ISB_HEADER];
+
+	adapter->isb_tag[idx] = cur_tag;
+
+	return adapter->isb_mem[idx];
+}
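+
+/* Illustrative use of txgbe_misc_isb() above from a misc interrupt
+ * handler (the names are from this patch, the call site is a sketch):
+ *
+ *	u32 eicr = txgbe_misc_isb(adapter, TXGBE_ISB_MISC);
+ *
+ * Each read also snapshots the header tag into adapter->isb_tag[idx],
+ * presumably so later code can tell whether the status block has been
+ * rewritten since this read.
+ */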
+
 enum txgbe_state_t {
 	__TXGBE_TESTING,
 	__TXGBE_RESETTING,
@@ -98,14 +199,41 @@ enum txgbe_state_t {
 void txgbe_service_event_schedule(struct txgbe_adapter *adapter);
 void txgbe_assign_netdev_ops(struct net_device *netdev);
 
+void txgbe_irq_disable(struct txgbe_adapter *adapter);
+void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush);
 int txgbe_open(struct net_device *netdev);
 int txgbe_close(struct net_device *netdev);
 void txgbe_down(struct txgbe_adapter *adapter);
 void txgbe_reset(struct txgbe_adapter *adapter);
 s32 txgbe_init_shared_code(struct txgbe_hw *hw);
 void txgbe_disable_device(struct txgbe_adapter *adapter);
+int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter);
+void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter);
+void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter);
+void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter);
+void txgbe_write_eitr(struct txgbe_q_vector *q_vector);
 
+/**
+ * Interrupt masking operations. Each bit in PX_ICn corresponds to an
+ * interrupt: disable an interrupt by writing to PX_IMS with the
+ * corresponding bit=1, enable an interrupt by writing to PX_IMC with
+ * the corresponding bit=1, and trigger an interrupt by writing to
+ * PX_ICS with the corresponding bit=1.
+ **/
 #define TXGBE_INTR_ALL (~0ULL)
+#define TXGBE_INTR_MISC(A) (1ULL << (A)->num_q_vectors)
+#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
+#define TXGBE_INTR_Q(i) (1ULL << (i))
+static inline void txgbe_intr_enable(struct txgbe_hw *hw, u64 qmask)
+{
+	u32 mask;
+
+	mask = (qmask & 0xFFFFFFFF);
+	if (mask)
+		wr32(hw, TXGBE_PX_IMC(0), mask);
+	mask = (qmask >> 32);
+	if (mask)
+		wr32(hw, TXGBE_PX_IMC(1), mask);
+}
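+
+/* Example use of txgbe_intr_enable() above (illustrative, not part of
+ * this patch): unmask every queue vector plus the misc vector at once,
+ * as an enable path might do:
+ *
+ *	txgbe_intr_enable(&adapter->hw,
+ *			  TXGBE_INTR_MISC(adapter) | TXGBE_INTR_QALL(adapter));
+ */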
 
 static inline void txgbe_intr_disable(struct txgbe_hw *hw, u64 qmask)
 {
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 89a67b158fa5..8dd0dec41971 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -57,6 +57,40 @@ void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data)
 	wr32(hw, offset, data);
 }
 
+/**
+ *  txgbe_get_pcie_msix_count - Gets MSI-X vector count
+ *  @hw: pointer to hardware structure
+ *
+ *  Read PCIe configuration space, and get the MSI-X vector count from
+ *  the capabilities table.
+ **/
+u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw)
+{
+	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
+	u16 msix_count = 1;
+	u16 max_msix_count;
+	u32 pos;
+
+	max_msix_count = TXGBE_MAX_MSIX_VECTORS_SAPPHIRE;
+	pos = pci_find_capability(adapter->pdev, PCI_CAP_ID_MSIX);
+	if (!pos)
+		return msix_count;
+	pci_read_config_word(adapter->pdev,
+			     pos + PCI_MSIX_FLAGS, &msix_count);
+
+	if (TXGBE_REMOVED(hw->hw_addr))
+		msix_count = 0;
+	msix_count &= TXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+	/* MSI-X count is zero-based in HW */
+	msix_count++;
+
+	if (msix_count > max_msix_count)
+		msix_count = max_msix_count;
+
+	return msix_count;
+}
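+
+/* For example, a Table Size field of 63 in the MSI-X capability reads
+ * back as 64 usable vectors after the zero-based correction above, and
+ * anything larger is clamped to TXGBE_MAX_MSIX_VECTORS_SAPPHIRE.
+ */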
+
 s32 txgbe_init_hw(struct txgbe_hw *hw)
 {
 	s32 status;
@@ -1521,6 +1555,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	mac->num_rar_entries    = TXGBE_SP_RAR_ENTRIES;
 	mac->max_rx_queues      = TXGBE_SP_MAX_RX_QUEUES;
 	mac->max_tx_queues      = TXGBE_SP_MAX_TX_QUEUES;
+	mac->max_msix_vectors   = txgbe_get_pcie_msix_count(hw);
 
 	/* EEPROM */
 	eeprom->ops.init_params = txgbe_init_eeprom_params;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index d620c88f6318..d52c3b5775cc 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -16,6 +16,7 @@
 #define SPI_H_DAT_REG_ADDR           0x10108  /* SPI Data register address */
 #define SPI_H_STA_REG_ADDR           0x1010c  /* SPI Status register address */
 
+u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw);
 s32 txgbe_init_hw(struct txgbe_hw *hw);
 s32 txgbe_start_hw(struct txgbe_hw *hw);
 s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
new file mode 100644
index 000000000000..e7b6316e3b56
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include "txgbe.h"
+
+/**
+ * txgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ *
+ * Note, the order of the various feature calls is important.  It must
+ * start with the "most" features enabled at the same time, then trickle
+ * down to the fewest features turned on at once.
+ **/
+static void txgbe_cache_ring_register(struct txgbe_adapter *adapter)
+{
+	u16 i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i]->reg_idx = i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		adapter->tx_ring[i]->reg_idx = i;
+}
+
+/**
+ * txgbe_set_num_queues: Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ **/
+static void txgbe_set_num_queues(struct txgbe_adapter *adapter)
+{
+	/* Start with base case */
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+}
+
+/**
+ * txgbe_acquire_msix_vectors - acquire MSI-X vectors
+ * @adapter: board private structure
+ *
+ * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
+ * return a negative error code if unable to acquire MSI-X vectors for any
+ * reason.
+ */
+static int txgbe_acquire_msix_vectors(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	int i, vectors, vector_threshold;
+
+	/* We start by asking for one vector per queue pair */
+	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
+
+	/* It is easy to be greedy for MSI-X vectors. However, it really
+	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
+	 * be somewhat conservative and only ask for (roughly) the same number
+	 * of vectors as there are CPUs.
+	 */
+	vectors = min_t(int, vectors, num_online_cpus());
+
+	/* Some vectors are necessary for non-queue interrupts */
+	vectors += NON_Q_VECTORS;
+
+	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
+	 * With features such as RSS and VMDq, we can easily surpass the
+	 * number of Rx and Tx descriptor queues supported by our device.
+	 * Thus, we cap the maximum in the rare cases where the CPU count also
+	 * exceeds our vector limit
+	 */
+	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
+
+	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
+	 * handler, and (2) an Other (Link Status Change, etc.) handler.
+	 */
+	vector_threshold = MIN_MSIX_COUNT;
+
+	adapter->msix_entries = kcalloc(vectors,
+					sizeof(struct msix_entry),
+					GFP_KERNEL);
+	if (!adapter->msix_entries)
+		return -ENOMEM;
+
+	for (i = 0; i < vectors; i++)
+		adapter->msix_entries[i].entry = i;
+
+	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+					vector_threshold, vectors);
+	if (vectors < 0) {
+		/* A negative count of allocated vectors indicates an error in
+		 * acquiring within the specified range of MSI-X vectors
+		 */
+		dev_warn(&adapter->pdev->dev,
+			 "Failed to allocate MSI-X interrupts. Err: %d\n",
+			 vectors);
+
+		adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED;
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+
+		return vectors;
+	}
+
+	/* we successfully allocated some number of vectors within our
+	 * requested range.
+	 */
+	adapter->flags |= TXGBE_FLAG_MSIX_ENABLED;
+
+	/* Adjust for only the vectors we'll use, which is the minimum of
+	 * max_q_vectors and the number of vectors we were allocated.
+	 */
+	vectors -= NON_Q_VECTORS;
+	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
+
+	return 0;
+}
+
+static void txgbe_add_ring(struct txgbe_ring *ring,
+			   struct txgbe_ring_container *head)
+{
+	ring->next = head->ring;
+	head->ring = ring;
+	head->count++;
+}
+
+/**
+ * txgbe_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter,
+				unsigned int v_count, unsigned int v_idx,
+				unsigned int txr_count, unsigned int txr_idx,
+				unsigned int rxr_count, unsigned int rxr_idx)
+{
+	struct txgbe_q_vector *q_vector;
+	struct txgbe_ring *ring;
+	int node = -1;
+	int cpu = -1;
+	int ring_count, size;
+
+	/* note this will allocate space for the ring structure as well! */
+	ring_count = txr_count + rxr_count;
+	size = sizeof(struct txgbe_q_vector) +
+	       (sizeof(struct txgbe_ring) * ring_count);
+
+	/* allocate q_vector and rings */
+	q_vector = kzalloc_node(size, GFP_KERNEL, node);
+	if (!q_vector)
+		q_vector = kzalloc(size, GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	/* setup affinity mask and node */
+	if (cpu != -1)
+		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+	q_vector->numa_node = node;
+
+	/* initialize CPU for DCA */
+	q_vector->cpu = -1;
+
+	/* tie q_vector and adapter together */
+	adapter->q_vector[v_idx] = q_vector;
+	q_vector->adapter = adapter;
+	q_vector->v_idx = v_idx;
+
+	/* initialize pointer to rings */
+	ring = q_vector->ring;
+
+	/* initialize ITR */
+	if (txr_count && !rxr_count) {
+		/* tx only vector */
+		if (adapter->tx_itr_setting == 1)
+			q_vector->itr = TXGBE_12K_ITR;
+		else
+			q_vector->itr = adapter->tx_itr_setting;
+	} else {
+		/* rx or rx/tx vector */
+		if (adapter->rx_itr_setting == 1)
+			q_vector->itr = TXGBE_20K_ITR;
+		else
+			q_vector->itr = adapter->rx_itr_setting;
+	}
+
+	while (txr_count) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		txgbe_add_ring(ring, &q_vector->tx);
+
+		/* apply Tx specific ring traits */
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = txr_idx;
+
+		/* assign ring to adapter */
+		adapter->tx_ring[txr_idx] = ring;
+
+		/* update count and index */
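+		/* the v_count stride interleaves rings across vectors,
+		 * so ring i ends up serviced by vector (i % v_count)
+		 */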
+		txr_count--;
+		txr_idx += v_count;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	while (rxr_count) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Rx values */
+		txgbe_add_ring(ring, &q_vector->rx);
+
+		/* apply Rx specific ring traits */
+		ring->count = adapter->rx_ring_count;
+		ring->queue_index = rxr_idx;
+
+		/* assign ring to adapter */
+		adapter->rx_ring[rxr_idx] = ring;
+
+		/* update count and index */
+		rxr_count--;
+		rxr_idx += v_count;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void txgbe_free_q_vector(struct txgbe_adapter *adapter, int v_idx)
+{
+	struct txgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+	struct txgbe_ring *ring;
+
+	txgbe_for_each_ring(ring, q_vector->tx)
+		adapter->tx_ring[ring->queue_index] = NULL;
+
+	txgbe_for_each_ring(ring, q_vector->rx)
+		adapter->rx_ring[ring->queue_index] = NULL;
+
+	adapter->q_vector[v_idx] = NULL;
+	kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * txgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int txgbe_alloc_q_vectors(struct txgbe_adapter *adapter)
+{
+	unsigned int q_vectors = adapter->num_q_vectors;
+	unsigned int rxr_remaining = adapter->num_rx_queues;
+	unsigned int txr_remaining = adapter->num_tx_queues;
+	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+	int err;
+
+	if (q_vectors >= (rxr_remaining + txr_remaining)) {
+		for (; rxr_remaining; v_idx++) {
+			err = txgbe_alloc_q_vector(adapter, q_vectors, v_idx,
+						   0, 0, 1, rxr_idx);
+			if (err)
+				goto err_out;
+
+			/* update counts and index */
+			rxr_remaining--;
+			rxr_idx++;
+		}
+	}
+
+	for (; v_idx < q_vectors; v_idx++) {
+		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
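+		/* split the remaining rings evenly over the remaining
+		 * vectors; rounding up lets the earlier vectors absorb any
+		 * remainder, so nothing is left unassigned at the end
+		 */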
+		err = txgbe_alloc_q_vector(adapter, q_vectors, v_idx,
+					   tqpv, txr_idx,
+					   rqpv, rxr_idx);
+
+		if (err)
+			goto err_out;
+
+		/* update counts and index */
+		rxr_remaining -= rqpv;
+		txr_remaining -= tqpv;
+		rxr_idx++;
+		txr_idx++;
+	}
+
+	return 0;
+
+err_out:
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		txgbe_free_q_vector(adapter, v_idx);
+
+	return -ENOMEM;
+}
+
+/**
+ * txgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void txgbe_free_q_vectors(struct txgbe_adapter *adapter)
+{
+	int v_idx = adapter->num_q_vectors;
+
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		txgbe_free_q_vector(adapter, v_idx);
+}
+
+void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter)
+{
+	if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) {
+		adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED;
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED) {
+		adapter->flags &= ~TXGBE_FLAG_MSI_ENABLED;
+		pci_disable_msi(adapter->pdev);
+	}
+}
+
+/**
+ * txgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter)
+{
+	int err;
+
+	/* We will try to get MSI-X interrupts first */
+	if (!txgbe_acquire_msix_vectors(adapter))
+		return;
+	adapter->num_q_vectors = 1;
+
+	err = pci_enable_msi(adapter->pdev);
+	if (err)
+		dev_warn(&adapter->pdev->dev,
+			 "Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
+			 err);
+	else
+		adapter->flags |= TXGBE_FLAG_MSI_ENABLED;
+}
+
+/**
+ * txgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ *   - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ *   - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter)
+{
+	int err;
+
+	/* Number of supported queues */
+	txgbe_set_num_queues(adapter);
+
+	/* Set interrupt mode */
+	txgbe_set_interrupt_capability(adapter);
+
+	/* Allocate memory for queues */
+	err = txgbe_alloc_q_vectors(adapter);
+	if (err) {
+		netif_err(adapter, probe, adapter->netdev,
+			  "Unable to allocate memory for queue vectors\n");
+		txgbe_reset_interrupt_capability(adapter);
+		return err;
+	}
+
+	txgbe_cache_ring_register(adapter);
+
+	set_bit(__TXGBE_DOWN, &adapter->state);
+
+	return 0;
+}
+
+/**
+ * txgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter)
+{
+	txgbe_free_q_vectors(adapter);
+	txgbe_reset_interrupt_capability(adapter);
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 7f5225004e28..71954d2d4b9a 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -139,6 +139,355 @@ static void txgbe_get_hw_control(struct txgbe_adapter *adapter)
 	      TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD);
 }
 
+/**
+ * txgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ **/
+static void txgbe_set_ivar(struct txgbe_adapter *adapter, s8 direction,
+			   u16 queue, u16 msix_vector)
+{
+	u32 ivar, index;
+	struct txgbe_hw *hw = &adapter->hw;
+
+	if (direction == -1) {
+		/* other causes */
+		msix_vector |= TXGBE_PX_IVAR_ALLOC_VAL;
+		index = 0;
+		ivar = rd32(&adapter->hw, TXGBE_PX_MISC_IVAR);
+		ivar &= ~(0xFF << index);
+		ivar |= (msix_vector << index);
+		wr32(&adapter->hw, TXGBE_PX_MISC_IVAR, ivar);
+	} else {
+		/* tx or rx causes */
+		msix_vector |= TXGBE_PX_IVAR_ALLOC_VAL;
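+		/* each 32-bit IVAR register holds four 8-bit entries: the
+		 * Rx and Tx causes of an even/odd queue pair.  An even
+		 * queue uses bits 7:0 (Rx) and 15:8 (Tx), an odd queue bits
+		 * 23:16 and 31:24, e.g. Rx of queue 5 maps to bits 23:16 of
+		 * TXGBE_PX_IVAR(2).
+		 */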
+		index = ((16 * (queue & 1)) + (8 * direction));
+		ivar = rd32(hw, TXGBE_PX_IVAR(queue >> 1));
+		ivar &= ~(0xFF << index);
+		ivar |= (msix_vector << index);
+		wr32(hw, TXGBE_PX_IVAR(queue >> 1), ivar);
+	}
+}
+
+/**
+ * txgbe_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * txgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void txgbe_configure_msix(struct txgbe_adapter *adapter)
+{
+	u16 v_idx;
+
+	/* Populate MSIX to EITR Select */
+	wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0);
+
+	/* Populate the IVAR table and set the ITR values to the
+	 * corresponding register.
+	 */
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		struct txgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+		struct txgbe_ring *ring;
+
+		txgbe_for_each_ring(ring, q_vector->rx)
+			txgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+		txgbe_for_each_ring(ring, q_vector->tx)
+			txgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+		txgbe_write_eitr(q_vector);
+	}
+
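+	/* map the "other causes" interrupt to the first vector past the
+	 * queue vectors and give it a fixed throttle value
+	 */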
+	txgbe_set_ivar(adapter, -1, 0, v_idx);
+
+	wr32(&adapter->hw, TXGBE_PX_ITR(v_idx), 1950);
+}
+
+/**
+ * txgbe_write_eitr - write EITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update EITR registers at runtime.  Hardware
+ * specific quirks/differences are taken care of here.
+ */
+void txgbe_write_eitr(struct txgbe_q_vector *q_vector)
+{
+	struct txgbe_adapter *adapter = q_vector->adapter;
+	struct txgbe_hw *hw = &adapter->hw;
+	int v_idx = q_vector->v_idx;
+	u32 itr_reg = q_vector->itr & TXGBE_MAX_EITR;
+
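+	/* assumption: CNT_WDIS (counter write-disable) keeps this write
+	 * from also resetting the internal ITR countdown
+	 */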
+	itr_reg |= TXGBE_PX_ITR_CNT_WDIS;
+
+	wr32(hw, TXGBE_PX_ITR(v_idx), itr_reg);
+}
+
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ * @queues: enable irqs for queues
+ * @flush: flush register write
+ **/
+void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush)
+{
+	u32 mask = 0;
+	struct txgbe_hw *hw = &adapter->hw;
+	u8 device_type = hw->subsystem_device_id & 0xF0;
+
+	/* enable gpio interrupt */
+	if (device_type != TXGBE_ID_MAC_XAUI &&
+	    device_type != TXGBE_ID_MAC_SGMII) {
+		mask |= TXGBE_GPIO_INTEN_2;
+		mask |= TXGBE_GPIO_INTEN_3;
+		mask |= TXGBE_GPIO_INTEN_6;
+	}
+	wr32(&adapter->hw, TXGBE_GPIO_INTEN, mask);
+
+	if (device_type != TXGBE_ID_MAC_XAUI &&
+	    device_type != TXGBE_ID_MAC_SGMII) {
+		mask = TXGBE_GPIO_INTTYPE_LEVEL_2 | TXGBE_GPIO_INTTYPE_LEVEL_3 |
+			TXGBE_GPIO_INTTYPE_LEVEL_6;
+	}
+	wr32(&adapter->hw, TXGBE_GPIO_INTTYPE_LEVEL, mask);
+
+	/* enable misc interrupt */
+	mask = TXGBE_PX_MISC_IEN_MASK;
+
+	wr32(&adapter->hw, TXGBE_PX_MISC_IEN, mask);
+
+	/* unmask interrupt */
+	txgbe_intr_enable(&adapter->hw, TXGBE_INTR_MISC(adapter));
+	if (queues)
+		txgbe_intr_enable(&adapter->hw, TXGBE_INTR_QALL(adapter));
+
+	/* flush configuration */
+	if (flush)
+		TXGBE_WRITE_FLUSH(&adapter->hw);
+}
+
+static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data)
+{
+	struct txgbe_adapter *adapter = data;
+
+	txgbe_misc_isb(adapter, TXGBE_ISB_MISC);
+
+	/* re-enable the original interrupt state, no lsc, no queues */
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		txgbe_irq_enable(adapter, false, false);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t txgbe_msix_clean_rings(int __always_unused irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * txgbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int vector, err;
+	int ri = 0, ti = 0;
+
+	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+		struct txgbe_q_vector *q_vector = adapter->q_vector[vector];
+		struct msix_entry *entry = &adapter->msix_entries[vector];
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-TxRx-%d", netdev->name, ri++);
+			ti++;
+		} else if (q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-rx-%d", netdev->name, ri++);
+		} else if (q_vector->tx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-tx-%d", netdev->name, ti++);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(entry->vector, &txgbe_msix_clean_rings, 0,
+				  q_vector->name, q_vector);
+		if (err) {
+			netif_err(adapter, probe, netdev,
+				  "request_irq failed for MSIX interrupt '%s' Error: %d\n",
+				  q_vector->name, err);
+			goto free_queue_irqs;
+		}
+	}
+
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  txgbe_msix_other, 0, netdev->name, adapter);
+	if (err) {
+		netif_err(adapter, probe, netdev,
+			  "request_irq for msix_other failed: %d\n", err);
+		goto free_queue_irqs;
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (vector) {
+		vector--;
+		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
+				      NULL);
+		free_irq(adapter->msix_entries[vector].vector,
+			 adapter->q_vector[vector]);
+	}
+	adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED;
+	pci_disable_msix(adapter->pdev);
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+	return err;
+}
+
+/**
+ * txgbe_intr - legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+{
+	struct txgbe_adapter *adapter = data;
+	u32 eicr;
+
+	eicr = txgbe_misc_isb(adapter, TXGBE_ISB_VEC0);
+	if (!eicr) {
+		/* shared interrupt alert!
+		 * This was not our interrupt, so re-enable the interrupts
+		 * we masked before reading the ISB.
+		 */
+		if (!test_bit(__TXGBE_DOWN, &adapter->state))
+			txgbe_irq_enable(adapter, true, true);
+		return IRQ_NONE;        /* Not our interrupt */
+	}
+	adapter->isb_mem[TXGBE_ISB_VEC0] = 0;
+	if (!(adapter->flags & TXGBE_FLAG_MSI_ENABLED))
+		wr32(&adapter->hw, TXGBE_PX_INTA, 1);
+
+	txgbe_misc_isb(adapter, TXGBE_ISB_MISC);
+
+	adapter->isb_mem[TXGBE_ISB_MISC] = 0;
+
+	/* re-enable link (maybe) and non-queue interrupts, no flush.
+	 * txgbe_poll will re-enable the queue interrupts
+	 */
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		txgbe_irq_enable(adapter, false, false);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int txgbe_request_irq(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED)
+		err = txgbe_request_msix_irqs(adapter);
+	else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED)
+		err = request_irq(adapter->pdev->irq, &txgbe_intr, 0,
+				  netdev->name, adapter);
+	else
+		err = request_irq(adapter->pdev->irq, &txgbe_intr, IRQF_SHARED,
+				  netdev->name, adapter);
+
+	if (err)
+		netif_err(adapter, probe, adapter->netdev,
+			  "request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+static void txgbe_free_irq(struct txgbe_adapter *adapter)
+{
+	int vector;
+
+	if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) {
+		free_irq(adapter->pdev->irq, adapter);
+		return;
+	}
+
+	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+		struct txgbe_q_vector *q_vector = adapter->q_vector[vector];
+		struct msix_entry *entry = &adapter->msix_entries[vector];
+
+		/* free only the irqs that were actually requested */
+		if (!q_vector->rx.ring && !q_vector->tx.ring)
+			continue;
+
+		/* clear the affinity_mask in the IRQ descriptor */
+		irq_set_affinity_hint(entry->vector, NULL);
+		free_irq(entry->vector, q_vector);
+	}
+
+	free_irq(adapter->msix_entries[vector].vector, adapter);
+}
+
+/**
+ * txgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+void txgbe_irq_disable(struct txgbe_adapter *adapter)
+{
+	wr32(&adapter->hw, TXGBE_PX_MISC_IEN, 0);
+	txgbe_intr_disable(&adapter->hw, TXGBE_INTR_ALL);
+
+	TXGBE_WRITE_FLUSH(&adapter->hw);
+	if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) {
+		int vector;
+
+		for (vector = 0; vector < adapter->num_q_vectors; vector++)
+			synchronize_irq(adapter->msix_entries[vector].vector);
+
+		synchronize_irq(adapter->msix_entries[vector].vector);
+	} else {
+		synchronize_irq(adapter->pdev->irq);
+	}
+}
+
+/**
+ * txgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
+ * @adapter: board private structure
+ **/
+static void txgbe_configure_msi_and_legacy(struct txgbe_adapter *adapter)
+{
+	struct txgbe_q_vector *q_vector = adapter->q_vector[0];
+	struct txgbe_ring *ring;
+
+	txgbe_write_eitr(q_vector);
+
+	txgbe_for_each_ring(ring, q_vector->rx)
+		txgbe_set_ivar(adapter, 0, ring->reg_idx, 0);
+
+	txgbe_for_each_ring(ring, q_vector->tx)
+		txgbe_set_ivar(adapter, 1, ring->reg_idx, 0);
+
+	txgbe_set_ivar(adapter, -1, 0, 1);
+
+	netif_info(adapter, hw, adapter->netdev,
+		   "Legacy interrupt IVAR setup done\n");
+}
+
 static void txgbe_sync_mac_table(struct txgbe_adapter *adapter)
 {
 	struct txgbe_hw *hw = &adapter->hw;
@@ -190,6 +539,21 @@ static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter)
 	txgbe_sync_mac_table(adapter);
 }
 
+static void txgbe_configure_isb(struct txgbe_adapter *adapter)
+{
+	/* set ISB Address */
+	struct txgbe_hw *hw = &adapter->hw;
+
+	wr32(hw, TXGBE_PX_ISB_ADDR_L,
+	     adapter->isb_dma & DMA_BIT_MASK(32));
+	wr32(hw, TXGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32);
+}
+
+static void txgbe_configure(struct txgbe_adapter *adapter)
+{
+	txgbe_configure_isb(adapter);
+}
+
 static bool txgbe_is_sfp(struct txgbe_hw *hw)
 {
 	switch (hw->phy.media_type) {
@@ -226,12 +590,29 @@ static void txgbe_sfp_link_config(struct txgbe_adapter *adapter)
 	adapter->sfp_poll_time = 0;
 }
 
+static void txgbe_setup_gpie(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 gpie = 0;
+
+	if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED)
+		gpie = TXGBE_PX_GPIE_MODEL;
+
+	wr32(hw, TXGBE_PX_GPIE, gpie);
+}
+
 static void txgbe_up_complete(struct txgbe_adapter *adapter)
 {
 	struct txgbe_hw *hw = &adapter->hw;
 	u32 links_reg;
 
 	txgbe_get_hw_control(adapter);
+	txgbe_setup_gpie(adapter);
+
+	if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED)
+		txgbe_configure_msix(adapter);
+	else
+		txgbe_configure_msi_and_legacy(adapter);
 
 	/* enable the optics for SFP+ fiber */
 	TCALL(hw, mac.ops.enable_tx_laser);
@@ -262,6 +643,14 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter)
 		}
 	}
 
+	/* clear any pending interrupts, may auto mask */
+	rd32(hw, TXGBE_PX_IC(0));
+	rd32(hw, TXGBE_PX_IC(1));
+	rd32(hw, TXGBE_PX_MISC_IC);
+	if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI)
+		wr32(hw, TXGBE_GPIO_EOI, TXGBE_GPIO_EOI_6);
+	txgbe_irq_enable(adapter, true, true);
+
 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
 	wr32m(hw, TXGBE_CFG_PORT_CTL,
 	      TXGBE_CFG_PORT_CTL_PFRSTD, TXGBE_CFG_PORT_CTL_PFRSTD);
@@ -324,6 +713,8 @@ void txgbe_disable_device(struct txgbe_adapter *adapter)
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
+	txgbe_irq_disable(adapter);
+
 	adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
 
 	del_timer_sync(&adapter->service_timer);
@@ -426,11 +817,52 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
 		return err;
 	}
 
+	/* enable itr by default in dynamic mode */
+	adapter->rx_itr_setting = 1;
+	adapter->tx_itr_setting = 1;
+
+	adapter->max_q_vectors = TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE;
+
 	set_bit(__TXGBE_DOWN, &adapter->state);
 
 	return 0;
 }
 
+/**
+ * txgbe_setup_isb_resources - allocate interrupt status resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int txgbe_setup_isb_resources(struct txgbe_adapter *adapter)
+{
+	struct device *dev = &adapter->pdev->dev;
+
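+	/* the interrupt status block is coherent DMA memory that the
+	 * hardware writes cause bits into, so handlers can read interrupt
+	 * status from memory rather than via an MMIO register
+	 */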
+	adapter->isb_mem = dma_alloc_coherent(dev,
+					      sizeof(u32) * TXGBE_ISB_MAX,
+					      &adapter->isb_dma,
+					      GFP_KERNEL);
+	if (!adapter->isb_mem)
+		return -ENOMEM;
+	memset(adapter->isb_mem, 0, sizeof(u32) * TXGBE_ISB_MAX);
+	return 0;
+}
+
+/**
+ * txgbe_free_isb_resources - free interrupt status resources
+ * @adapter: board private structure
+ **/
+static void txgbe_free_isb_resources(struct txgbe_adapter *adapter)
+{
+	struct device *dev = &adapter->pdev->dev;
+
+	dma_free_coherent(dev, sizeof(u32) * TXGBE_ISB_MAX,
+			  adapter->isb_mem, adapter->isb_dma);
+	adapter->isb_mem = NULL;
+}
+
 /**
  * txgbe_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -438,17 +870,49 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
  * Returns 0 on success, negative value on failure
  *
  * The open entry point is called when a network interface is made
- * active by the system (IFF_UP).
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
  **/
 int txgbe_open(struct net_device *netdev)
 {
 	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	int err;
 
 	netif_carrier_off(netdev);
 
+	err = txgbe_setup_isb_resources(adapter);
+	if (err)
+		goto err_reset;
+
+	txgbe_configure(adapter);
+
+	err = txgbe_request_irq(adapter);
+	if (err)
+		goto err_free_isb;
+
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+	if (err)
+		goto err_free_irq;
+
+	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+	if (err)
+		goto err_free_irq;
+
 	txgbe_up_complete(adapter);
 
 	return 0;
+
+err_free_irq:
+	txgbe_free_irq(adapter);
+err_free_isb:
+	txgbe_free_isb_resources(adapter);
+err_reset:
+	txgbe_reset(adapter);
+
+	return err;
 }
 
 /**
@@ -465,6 +929,9 @@ static void txgbe_close_suspend(struct txgbe_adapter *adapter)
 	txgbe_disable_device(adapter);
 	if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))
 		TCALL(hw, mac.ops.disable_tx_laser);
+	txgbe_free_irq(adapter);
+
+	txgbe_free_isb_resources(adapter);
 }
 
 /**
@@ -483,6 +950,9 @@ int txgbe_close(struct net_device *netdev)
 	struct txgbe_adapter *adapter = netdev_priv(netdev);
 
 	txgbe_down(adapter);
+	txgbe_free_irq(adapter);
+
+	txgbe_free_isb_resources(adapter);
 
 	txgbe_release_hw_control(adapter);
 
@@ -501,6 +971,8 @@ static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		txgbe_close_suspend(adapter);
 	rtnl_unlock();
 
+	txgbe_clear_interrupt_scheme(adapter);
+
 	txgbe_release_hw_control(adapter);
 
 	if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state))
@@ -1000,6 +1472,10 @@ static int txgbe_probe(struct pci_dev *pdev,
 	set_bit(__TXGBE_SERVICE_INITED, &adapter->state);
 	clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state);
 
+	err = txgbe_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_free_mac_table;
+
 	/* Save off EEPROM version number and Option Rom version which
 	 * together make a unique identify for the eeprom
 	 */
@@ -1116,6 +1592,7 @@ static int txgbe_probe(struct pci_dev *pdev,
 	return 0;
 
 err_release_hw:
+	txgbe_clear_interrupt_scheme(adapter);
 	txgbe_release_hw_control(adapter);
 err_free_mac_table:
 	kfree(adapter->mac_table);
@@ -1157,6 +1634,7 @@ static void txgbe_remove(struct pci_dev *pdev)
 		adapter->netdev_registered = false;
 	}
 
+	txgbe_clear_interrupt_scheme(adapter);
 	txgbe_release_hw_control(adapter);
 
 	pci_release_selected_regions(pdev,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index a8f9a8af980e..690b644962f2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -410,6 +410,12 @@ struct txgbe_thermal_sensor_data {
 /* GPIO Registers */
 #define TXGBE_GPIO_DR                   0x14800
 #define TXGBE_GPIO_DDR                  0x14804
+#define TXGBE_GPIO_CTL                  0x14808
+#define TXGBE_GPIO_INTEN                0x14830
+#define TXGBE_GPIO_INTMASK              0x14834
+#define TXGBE_GPIO_INTTYPE_LEVEL        0x14838
+#define TXGBE_GPIO_INTSTATUS            0x14844
+#define TXGBE_GPIO_EOI                  0x1484C
 /*GPIO bit */
 #define TXGBE_GPIO_DR_0         0x00000001U /* SDP0 Data Value */
 #define TXGBE_GPIO_DR_1         0x00000002U /* SDP1 Data Value */
@@ -427,6 +433,25 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_GPIO_DDR_5        0x00000020U /* SDP5 IO direction */
 #define TXGBE_GPIO_DDR_6        0x00000040U /* SDP6 IO direction */
 #define TXGBE_GPIO_DDR_7        0x00000080U /* SDP7 IO direction */
+#define TXGBE_GPIO_CTL_SW_MODE  0x00000000U /* SDP software mode */
+#define TXGBE_GPIO_INTEN_1      0x00000002U /* SDP1 interrupt enable */
+#define TXGBE_GPIO_INTEN_2      0x00000004U /* SDP2 interrupt enable */
+#define TXGBE_GPIO_INTEN_3      0x00000008U /* SDP3 interrupt enable */
+#define TXGBE_GPIO_INTEN_5      0x00000020U /* SDP5 interrupt enable */
+#define TXGBE_GPIO_INTEN_6      0x00000040U /* SDP6 interrupt enable */
+#define TXGBE_GPIO_INTTYPE_LEVEL_2 0x00000004U /* SDP2 interrupt type level */
+#define TXGBE_GPIO_INTTYPE_LEVEL_3 0x00000008U /* SDP3 interrupt type level */
+#define TXGBE_GPIO_INTTYPE_LEVEL_5 0x00000020U /* SDP5 interrupt type level */
+#define TXGBE_GPIO_INTTYPE_LEVEL_6 0x00000040U /* SDP6 interrupt type level */
+#define TXGBE_GPIO_INTSTATUS_1  0x00000002U /* SDP1 interrupt status */
+#define TXGBE_GPIO_INTSTATUS_2  0x00000004U /* SDP2 interrupt status */
+#define TXGBE_GPIO_INTSTATUS_3  0x00000008U /* SDP3 interrupt status */
+#define TXGBE_GPIO_INTSTATUS_5  0x00000020U /* SDP5 interrupt status */
+#define TXGBE_GPIO_INTSTATUS_6  0x00000040U /* SDP6 interrupt status */
+#define TXGBE_GPIO_EOI_2        0x00000004U /* SDP2 interrupt clear */
+#define TXGBE_GPIO_EOI_3        0x00000008U /* SDP3 interrupt clear */
+#define TXGBE_GPIO_EOI_5        0x00000020U /* SDP5 interrupt clear */
+#define TXGBE_GPIO_EOI_6        0x00000040U /* SDP6 interrupt clear */
 
 /*********************** Transmit DMA registers **************************/
 /* transmit global control */
@@ -615,6 +640,107 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_PX_TRANSACTION_PENDING            0x168
 #define TXGBE_PX_INTA                           0x110
 
+/* Interrupt register bitmasks */
+/* Extended Interrupt Cause Read */
+#define TXGBE_PX_MISC_IC_ETH_LKDN       0x00000100U /* eth link down */
+#define TXGBE_PX_MISC_IC_DEV_RST        0x00000400U /* device reset event */
+#define TXGBE_PX_MISC_IC_TIMESYNC       0x00000800U /* time sync */
+#define TXGBE_PX_MISC_IC_STALL          0x00001000U /* trans or recv path is stalled */
+#define TXGBE_PX_MISC_IC_LINKSEC        0x00002000U /* Tx LinkSec require key exchange */
+#define TXGBE_PX_MISC_IC_RX_MISS        0x00004000U /* Packet Buffer Overrun */
+#define TXGBE_PX_MISC_IC_FLOW_DIR       0x00008000U /* FDir Exception */
+#define TXGBE_PX_MISC_IC_I2C            0x00010000U /* I2C interrupt */
+#define TXGBE_PX_MISC_IC_ETH_EVENT      0x00020000U /* err reported by MAC except eth link down */
+#define TXGBE_PX_MISC_IC_ETH_LK         0x00040000U /* link up */
+#define TXGBE_PX_MISC_IC_ETH_AN         0x00080000U /* link auto-nego done */
+#define TXGBE_PX_MISC_IC_INT_ERR        0x00100000U /* integrity error */
+#define TXGBE_PX_MISC_IC_SPI            0x00200000U /* SPI interface */
+#define TXGBE_PX_MISC_IC_VF_MBOX        0x00800000U /* VF-PF message box */
+#define TXGBE_PX_MISC_IC_GPIO           0x04000000U /* GPIO interrupt */
+#define TXGBE_PX_MISC_IC_PCIE_REQ_ERR   0x08000000U /* pcie request error int */
+#define TXGBE_PX_MISC_IC_OVER_HEAT      0x10000000U /* overheat detection */
+#define TXGBE_PX_MISC_IC_PROBE_MATCH    0x20000000U /* probe match */
+#define TXGBE_PX_MISC_IC_MNG_HOST_MBOX  0x40000000U /* mng mailbox */
+#define TXGBE_PX_MISC_IC_TIMER          0x80000000U /* tcp timer */
+
+/* Extended Interrupt Cause Set */
+#define TXGBE_PX_MISC_ICS_ETH_LKDN      0x00000100U
+#define TXGBE_PX_MISC_ICS_DEV_RST       0x00000400U
+#define TXGBE_PX_MISC_ICS_TIMESYNC      0x00000800U
+#define TXGBE_PX_MISC_ICS_STALL         0x00001000U
+#define TXGBE_PX_MISC_ICS_LINKSEC       0x00002000U
+#define TXGBE_PX_MISC_ICS_RX_MISS       0x00004000U
+#define TXGBE_PX_MISC_ICS_FLOW_DIR      0x00008000U
+#define TXGBE_PX_MISC_ICS_I2C           0x00010000U
+#define TXGBE_PX_MISC_ICS_ETH_EVENT     0x00020000U
+#define TXGBE_PX_MISC_ICS_ETH_LK        0x00040000U
+#define TXGBE_PX_MISC_ICS_ETH_AN        0x00080000U
+#define TXGBE_PX_MISC_ICS_INT_ERR       0x00100000U
+#define TXGBE_PX_MISC_ICS_SPI           0x00200000U
+#define TXGBE_PX_MISC_ICS_VF_MBOX       0x00800000U
+#define TXGBE_PX_MISC_ICS_GPIO          0x04000000U
+#define TXGBE_PX_MISC_ICS_PCIE_REQ_ERR  0x08000000U
+#define TXGBE_PX_MISC_ICS_OVER_HEAT     0x10000000U
+#define TXGBE_PX_MISC_ICS_PROBE_MATCH   0x20000000U
+#define TXGBE_PX_MISC_ICS_MNG_HOST_MBOX 0x40000000U
+#define TXGBE_PX_MISC_ICS_TIMER         0x80000000U
+
+/* Extended Interrupt Enable Set */
+#define TXGBE_PX_MISC_IEN_ETH_LKDN      0x00000100U
+#define TXGBE_PX_MISC_IEN_DEV_RST       0x00000400U
+#define TXGBE_PX_MISC_IEN_TIMESYNC      0x00000800U
+#define TXGBE_PX_MISC_IEN_STALL         0x00001000U
+#define TXGBE_PX_MISC_IEN_LINKSEC       0x00002000U
+#define TXGBE_PX_MISC_IEN_RX_MISS       0x00004000U
+#define TXGBE_PX_MISC_IEN_FLOW_DIR      0x00008000U
+#define TXGBE_PX_MISC_IEN_I2C           0x00010000U
+#define TXGBE_PX_MISC_IEN_ETH_EVENT     0x00020000U
+#define TXGBE_PX_MISC_IEN_ETH_LK        0x00040000U
+#define TXGBE_PX_MISC_IEN_ETH_AN        0x00080000U
+#define TXGBE_PX_MISC_IEN_INT_ERR       0x00100000U
+#define TXGBE_PX_MISC_IEN_SPI           0x00200000U
+#define TXGBE_PX_MISC_IEN_VF_MBOX       0x00800000U
+#define TXGBE_PX_MISC_IEN_GPIO          0x04000000U
+#define TXGBE_PX_MISC_IEN_PCIE_REQ_ERR  0x08000000U
+#define TXGBE_PX_MISC_IEN_OVER_HEAT     0x10000000U
+#define TXGBE_PX_MISC_IEN_PROBE_MATCH   0x20000000U
+#define TXGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U
+#define TXGBE_PX_MISC_IEN_TIMER         0x80000000U
+
+#define TXGBE_PX_MISC_IEN_MASK ( \
+				TXGBE_PX_MISC_IEN_ETH_LKDN | \
+				TXGBE_PX_MISC_IEN_DEV_RST | \
+				TXGBE_PX_MISC_IEN_ETH_EVENT | \
+				TXGBE_PX_MISC_IEN_ETH_LK | \
+				TXGBE_PX_MISC_IEN_ETH_AN | \
+				TXGBE_PX_MISC_IEN_INT_ERR | \
+				TXGBE_PX_MISC_IEN_VF_MBOX | \
+				TXGBE_PX_MISC_IEN_GPIO | \
+				TXGBE_PX_MISC_IEN_MNG_HOST_MBOX | \
+				TXGBE_PX_MISC_IEN_STALL | \
+				TXGBE_PX_MISC_IEN_PCIE_REQ_ERR | \
+				TXGBE_PX_MISC_IEN_TIMER)
+
+/* General purpose Interrupt Enable */
+#define TXGBE_PX_GPIE_MODEL             0x00000001U
+#define TXGBE_PX_GPIE_IMEN              0x00000002U
+#define TXGBE_PX_GPIE_LL_INTERVAL       0x000000F0U
+#define TXGBE_PX_GPIE_RSC_DELAY         0x00000700U
+
+/* Interrupt Vector Allocation Registers */
+#define TXGBE_PX_IVAR_REG_NUM              64
+#define TXGBE_PX_IVAR_ALLOC_VAL            0x80 /* Interrupt Allocation valid */
+
+#define TXGBE_MAX_INT_RATE              500000
+#define TXGBE_MIN_INT_RATE              980
+#define TXGBE_MAX_EITR                  0x00000FF8U
+#define TXGBE_MIN_EITR                  8
+#define TXGBE_PX_ITR_ITR_INT_MASK       0x00000FF8U
+#define TXGBE_PX_ITR_LLI_CREDIT         0x001f0000U
+#define TXGBE_PX_ITR_LLI_MOD            0x00008000U
+#define TXGBE_PX_ITR_CNT_WDIS           0x80000000U
+#define TXGBE_PX_ITR_ITR_CNT            0x0FE00000U
+
 /* transmit DMA Registers */
 #define TXGBE_PX_TR_BAL(_i)     (0x03000 + ((_i) * 0x40))
 #define TXGBE_PX_TR_BAH(_i)     (0x03004 + ((_i) * 0x40))
@@ -680,6 +806,10 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_ISCSI_BOOT_CONFIG         0x07
 
 #define TXGBE_SERIAL_NUMBER_MAC_ADDR    0x11
+#define TXGBE_MAX_MSIX_VECTORS_SAPPHIRE 0x40
+
+/* MSI-X capability fields masks */
+#define TXGBE_PCIE_MSIX_TBL_SZ_MASK     0x7FF
 
 #define TXGBE_ETH_LENGTH_OF_ADDRESS     6
 
@@ -1058,6 +1188,7 @@ struct txgbe_mac_info {
 	u32 orig_sr_an_mmd_adv_reg2;
 	u32 orig_vr_xs_or_pcs_mmd_digi_ctl1;
 	u8  san_mac_rar_index;
+	u16 max_msix_vectors;
 	bool orig_link_settings_stored;
 	bool autotry_restart;
 	struct txgbe_thermal_sensor_data  thermal_sensor_data;
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 09/16] net: txgbe: Handle various event interrupts
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (7 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 08/16] net: txgbe: Add interrupt support Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 10/16] net: txgbe: Configure Rx and Tx unit of the MAC Jiawen Wu
                   ` (6 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Add support for handling event interrupts such as link changes, device
reset requests, ECC errors, SFP module events and over-temperature
alarms.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
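
Note: the interrupt handlers below deliberately do no recovery work in
hard-IRQ context; they latch a cause flag and schedule the service
task.  A condensed sketch of the pattern (all names as introduced in
this patch):

	/* hard-IRQ context: record the cause and defer the work */
	if (eicr & TXGBE_PX_MISC_IC_DEV_RST) {
		adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED;
		txgbe_service_event_schedule(adapter);
	}

	/* later, in process context, the service task acts on it */
	txgbe_reset_subtask(adapter);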
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  10 +
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c |  78 +++--
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 285 +++++++++++++++++-
 .../net/ethernet/wangxun/txgbe/txgbe_phy.c    |  22 ++
 .../net/ethernet/wangxun/txgbe/txgbe_phy.h    |   1 +
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   |   2 +
 6 files changed, 378 insertions(+), 20 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index a4ebc58a984b..ac4a3ec16309 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -50,6 +50,7 @@ struct txgbe_q_vector {
 	u16 itr;        /* Interrupt throttle rate written to EITR */
 	struct txgbe_ring_container rx, tx;
 
+	struct napi_struct napi;
 	cpumask_t affinity_mask;
 	int numa_node;
 	struct rcu_head rcu;    /* to avoid race with update stats on free */
@@ -106,6 +107,10 @@ struct txgbe_mac_addr {
  **/
 #define TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED     BIT(0)
 #define TXGBE_FLAG2_SFP_NEEDS_RESET             BIT(1)
+#define TXGBE_FLAG2_TEMP_SENSOR_EVENT           BIT(2)
+#define TXGBE_FLAG2_PF_RESET_REQUESTED          BIT(3)
+#define TXGBE_FLAG2_RESET_INTR_RECEIVED         BIT(4)
+#define TXGBE_FLAG2_GLOBAL_RESET_REQUESTED      BIT(5)
 
 enum txgbe_isb_idx {
 	TXGBE_ISB_HEADER,
@@ -137,6 +142,8 @@ struct txgbe_adapter {
 	/* TX */
 	struct txgbe_ring *tx_ring[TXGBE_MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 
+	u64 lsc_int;
+
 	/* RX */
 	struct txgbe_ring *rx_ring[TXGBE_MAX_RX_QUEUES];
 	struct txgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
@@ -162,6 +169,7 @@ struct txgbe_adapter {
 
 	char eeprom_id[32];
 	bool netdev_registered;
+	u32 interrupt_event;
 
 	struct txgbe_mac_addr *mac_table;
 
@@ -203,7 +211,9 @@ void txgbe_irq_disable(struct txgbe_adapter *adapter);
 void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush);
 int txgbe_open(struct net_device *netdev);
 int txgbe_close(struct net_device *netdev);
+void txgbe_up(struct txgbe_adapter *adapter);
 void txgbe_down(struct txgbe_adapter *adapter);
+void txgbe_reinit_locked(struct txgbe_adapter *adapter);
 void txgbe_reset(struct txgbe_adapter *adapter);
 s32 txgbe_init_shared_code(struct txgbe_hw *hw);
 void txgbe_disable_device(struct txgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 8dd0dec41971..6a5398b2f80e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -1523,6 +1523,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	phy->ops.read_i2c_byte = txgbe_read_i2c_byte;
 	phy->ops.read_i2c_eeprom = txgbe_read_i2c_eeprom;
 	phy->ops.identify_sfp = txgbe_identify_module;
+	phy->ops.check_overtemp = txgbe_check_overtemp;
 	phy->ops.identify = txgbe_identify_phy;
 	phy->ops.init = txgbe_init_phy_ops;
 
@@ -2725,12 +2726,16 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
 	struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw);
 	u32 reset = 0;
 	s32 status;
+	u32 i;
 
 	u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl, sr_an_mmd_adv_reg2;
 	u32 vr_xs_or_pcs_mmd_digi_ctl1, curr_vr_xs_or_pcs_mmd_digi_ctl1;
 	u32 curr_sr_an_mmd_ctl, curr_sr_an_mmd_adv_reg2;
 	u32 curr_sr_pcs_ctl, curr_sr_pma_mmd_ctl1;
 
+	u32 reset_status = 0;
+	u32 rst_delay = 0;
+
 	/* Call adapter stop to disable tx/rx and clear interrupts */
 	status = TCALL(hw, mac.ops.stop_adapter);
 	if (status != 0)
@@ -2751,30 +2756,67 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
 	curr_vr_xs_or_pcs_mmd_digi_ctl1 =
 		txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1);
 
-	if (txgbe_mng_present(hw)) {
-		if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) ||
-		      ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) {
-			txgbe_reset_hostif(hw);
+	/* Issue a global reset to the MAC.  This needs to be a SW reset if
+	 * the link is up: using a link reset while the link is up might
+	 * reset the PHY while the management engine is using it.  If the
+	 * link is down, or the flag to force a full reset is set, perform
+	 * the full reset.
+	 */
+	if (hw->force_full_reset) {
+		rst_delay = (rd32(hw, TXGBE_MIS_RST_ST) &
+			     TXGBE_MIS_RST_ST_RST_INIT) >>
+			     TXGBE_MIS_RST_ST_RST_INI_SHIFT;
+		if (hw->reset_type == TXGBE_SW_RESET) {
+			for (i = 0; i < rst_delay + 20; i++) {
+				reset_status =
+					rd32(hw, TXGBE_MIS_RST_ST);
+				if (!(reset_status &
+				    TXGBE_MIS_RST_ST_DEV_RST_ST_MASK))
+					break;
+				msleep(100);
+			}
+
+			if (reset_status & TXGBE_MIS_RST_ST_DEV_RST_ST_MASK) {
+				status = TXGBE_ERR_RESET_FAILED;
+				txgbe_dbg(hw, "Global reset polling failed to complete.\n");
+				goto reset_hw_out;
+			}
+			status = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_SW_RESET);
+			if (status != 0)
+				goto reset_hw_out;
+		} else if (hw->reset_type == TXGBE_GLOBAL_RESET) {
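+			/* assumption: a global reset clears PCI config
+			 * space, so wait out the reset window, then restore
+			 * and re-save the config before touching the device
+			 */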
+			msleep(100 * rst_delay + 2000);
+			pci_restore_state(adapter->pdev);
+			pci_save_state(adapter->pdev);
+			pci_wake_from_d3(adapter->pdev, false);
 		}
 	} else {
+		if (txgbe_mng_present(hw)) {
+			if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) ||
+			      ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) {
+				txgbe_reset_hostif(hw);
+			}
+		} else {
+			if (hw->bus.lan_id == 0)
+				reset = TXGBE_MIS_RST_LAN0_RST;
+			else
+				reset = TXGBE_MIS_RST_LAN1_RST;
+
+			wr32(hw, TXGBE_MIS_RST,
+			     reset | rd32(hw, TXGBE_MIS_RST));
+			TXGBE_WRITE_FLUSH(hw);
+		}
+		usleep_range(10, 100);
+
 		if (hw->bus.lan_id == 0)
-			reset = TXGBE_MIS_RST_LAN0_RST;
+			status = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST);
 		else
-			reset = TXGBE_MIS_RST_LAN1_RST;
+			status = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST);
 
-		wr32(hw, TXGBE_MIS_RST,
-		     reset | rd32(hw, TXGBE_MIS_RST));
-		TXGBE_WRITE_FLUSH(hw);
+		if (status != 0)
+			goto reset_hw_out;
 	}
-	usleep_range(10, 100);
-
-	if (hw->bus.lan_id == 0)
-		status = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST);
-	else
-		status = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST);
-
-	if (status != 0)
-		goto reset_hw_out;
 
 	status = txgbe_reset_misc(hw);
 	if (status != 0)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 71954d2d4b9a..bb87cc7c4157 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -15,6 +15,12 @@
 
 char txgbe_driver_name[] = "txgbe";
 
+static const char txgbe_overheat_msg[] =
+	"Network adapter has been stopped because it has over heated."
+	"If the problem persists, restart or power off the system and replace the adapter.";
+static const char txgbe_underheat_msg[] =
+	"Network adapter has been started again, the temperature has been back to normal state";
+
 /* txgbe_pci_tbl - PCI Device ID Table
  *
  * Wildcard entries (PCI_ANY_ID) should come last
@@ -227,6 +233,113 @@ void txgbe_write_eitr(struct txgbe_q_vector *q_vector)
 	wr32(hw, TXGBE_PX_ITR(v_idx), itr_reg);
 }
 
+/**
+ * txgbe_check_overtemp_subtask - check for over temperature
+ * @adapter: pointer to adapter
+ **/
+static void txgbe_check_overtemp_subtask(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 eicr = adapter->interrupt_event;
+	s32 temp_state;
+
+	if (test_bit(__TXGBE_DOWN, &adapter->state))
+		return;
+	if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_EVENT))
+		return;
+
+	adapter->flags2 &= ~TXGBE_FLAG2_TEMP_SENSOR_EVENT;
+
+	/* The temperature warning interrupt is shared by both ports, so
+	 * the event may not have been for this port; verify that the
+	 * over-heat cause bit is actually set before acting on it.
+	 */
+	if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT))
+		return;
+
+	temp_state = TCALL(hw, phy.ops.check_overtemp);
+	if (!temp_state || temp_state == TXGBE_NOT_IMPLEMENTED)
+		return;
+
+	if (temp_state == TXGBE_ERR_UNDERTEMP &&
+	    test_bit(__TXGBE_HANGING, &adapter->state)) {
+		netif_crit(adapter, drv, adapter->netdev,
+			   "%s\n", txgbe_underheat_msg);
+		wr32m(&adapter->hw, TXGBE_RDB_PB_CTL,
+		      TXGBE_RDB_PB_CTL_RXEN, TXGBE_RDB_PB_CTL_RXEN);
+		netif_carrier_on(adapter->netdev);
+		clear_bit(__TXGBE_HANGING, &adapter->state);
+	} else if (temp_state == TXGBE_ERR_OVERTEMP &&
+		!test_and_set_bit(__TXGBE_HANGING, &adapter->state)) {
+		netif_crit(adapter, drv, adapter->netdev,
+			   "%s\n", txgbe_overheat_msg);
+		netif_carrier_off(adapter->netdev);
+		wr32m(&adapter->hw, TXGBE_RDB_PB_CTL,
+		      TXGBE_RDB_PB_CTL_RXEN, 0);
+	}
+
+	adapter->interrupt_event = 0;
+}
+
+static void txgbe_check_overtemp_event(struct txgbe_adapter *adapter, u32 eicr)
+{
+	if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT))
+		return;
+
+	if (!test_bit(__TXGBE_DOWN, &adapter->state)) {
+		adapter->interrupt_event = eicr;
+		adapter->flags2 |= TXGBE_FLAG2_TEMP_SENSOR_EVENT;
+		txgbe_service_event_schedule(adapter);
+	}
+}
+
+static void txgbe_check_sfp_event(struct txgbe_adapter *adapter, u32 eicr)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 eicr_mask = TXGBE_PX_MISC_IC_GPIO;
+	u32 reg;
+
+	if (eicr & eicr_mask) {
+		if (!test_bit(__TXGBE_DOWN, &adapter->state)) {
+			wr32(hw, TXGBE_GPIO_INTMASK, 0xFF);
+			reg = rd32(hw, TXGBE_GPIO_INTSTATUS);
+			if (reg & TXGBE_GPIO_INTSTATUS_2) {
+				adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET;
+				wr32(hw, TXGBE_GPIO_EOI,
+				     TXGBE_GPIO_EOI_2);
+				adapter->sfp_poll_time = 0;
+				txgbe_service_event_schedule(adapter);
+			}
+			if (reg & TXGBE_GPIO_INTSTATUS_3) {
+				adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
+				wr32(hw, TXGBE_GPIO_EOI,
+				     TXGBE_GPIO_EOI_3);
+				txgbe_service_event_schedule(adapter);
+			}
+
+			if (reg & TXGBE_GPIO_INTSTATUS_6) {
+				wr32(hw, TXGBE_GPIO_EOI,
+				     TXGBE_GPIO_EOI_6);
+				adapter->flags |=
+					TXGBE_FLAG_NEED_LINK_CONFIG;
+				txgbe_service_event_schedule(adapter);
+			}
+			wr32(hw, TXGBE_GPIO_INTMASK, 0x0);
+		}
+	}
+}
+
+static void txgbe_check_lsc(struct txgbe_adapter *adapter)
+{
+	adapter->lsc_int++;
+	adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		txgbe_service_event_schedule(adapter);
+}
+
 /**
  * txgbe_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
@@ -258,6 +371,8 @@ void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush)
 	/* enable misc interrupt */
 	mask = TXGBE_PX_MISC_IEN_MASK;
 
+	mask |= TXGBE_PX_MISC_IEN_OVER_HEAT;
+
 	wr32(&adapter->hw, TXGBE_PX_MISC_IEN, mask);
 
 	/* unmask interrupt */
@@ -273,8 +388,37 @@ void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush)
 static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data)
 {
 	struct txgbe_adapter *adapter = data;
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 eicr;
+	u32 ecc;
+
+	eicr = txgbe_misc_isb(adapter, TXGBE_ISB_MISC);
+
+	if (eicr & (TXGBE_PX_MISC_IC_ETH_LK | TXGBE_PX_MISC_IC_ETH_LKDN))
+		txgbe_check_lsc(adapter);
+
+	if (eicr & TXGBE_PX_MISC_IC_INT_ERR) {
+		netif_info(adapter, link, adapter->netdev,
+			   "Received unrecoverable ECC Err, initiating reset.\n");
+		ecc = rd32(hw, TXGBE_MIS_ST);
+		if (((ecc & TXGBE_MIS_ST_LAN0_ECC) && hw->bus.lan_id == 0) ||
+		    ((ecc & TXGBE_MIS_ST_LAN1_ECC) && hw->bus.lan_id == 1))
+			adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED;
+
+		txgbe_service_event_schedule(adapter);
+	}
+	if (eicr & TXGBE_PX_MISC_IC_DEV_RST) {
+		adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED;
+		txgbe_service_event_schedule(adapter);
+	}
+	if ((eicr & TXGBE_PX_MISC_IC_STALL) ||
+	    (eicr & TXGBE_PX_MISC_IC_ETH_EVENT)) {
+		adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED;
+		txgbe_service_event_schedule(adapter);
+	}
 
-	txgbe_misc_isb(adapter, TXGBE_ISB_MISC);
+	txgbe_check_sfp_event(adapter, eicr);
+	txgbe_check_overtemp_event(adapter, eicr);
 
 	/* re-enable the original interrupt state, no lsc, no queues */
 	if (!test_bit(__TXGBE_DOWN, &adapter->state))
@@ -285,6 +429,12 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data)
 
 static irqreturn_t txgbe_msix_clean_rings(int __always_unused irq, void *data)
 {
+	struct txgbe_q_vector *q_vector = data;
+
+	/* the hardware auto-masked this vector's interrupts for us */
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
 	return IRQ_HANDLED;
 }
 
@@ -362,6 +512,8 @@ static int txgbe_request_msix_irqs(struct txgbe_adapter *adapter)
 static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
 {
 	struct txgbe_adapter *adapter = data;
+	struct txgbe_q_vector *q_vector = adapter->q_vector[0];
+	u32 eicr_misc;
 	u32 eicr;
 
 	eicr = txgbe_misc_isb(adapter, TXGBE_ISB_VEC0);
@@ -377,9 +529,27 @@ static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
 	if (!(adapter->flags & TXGBE_FLAG_MSI_ENABLED))
 		wr32(&adapter->hw, TXGBE_PX_INTA, 1);
 
-	txgbe_misc_isb(adapter, TXGBE_ISB_MISC);
+	eicr_misc = txgbe_misc_isb(adapter, TXGBE_ISB_MISC);
+	if (eicr_misc & (TXGBE_PX_MISC_IC_ETH_LK | TXGBE_PX_MISC_IC_ETH_LKDN))
+		txgbe_check_lsc(adapter);
+
+	if (eicr_misc & TXGBE_PX_MISC_IC_INT_ERR) {
+		netif_info(adapter, link, adapter->netdev,
+			   "Received unrecoverable ECC Err, initiating reset.\n");
+		adapter->flags2 |= TXGBE_FLAG2_GLOBAL_RESET_REQUESTED;
+		txgbe_service_event_schedule(adapter);
+	}
+
+	if (eicr_misc & TXGBE_PX_MISC_IC_DEV_RST) {
+		adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED;
+		txgbe_service_event_schedule(adapter);
+	}
+	txgbe_check_sfp_event(adapter, eicr_misc);
+	txgbe_check_overtemp_event(adapter, eicr_misc);
 
 	adapter->isb_mem[TXGBE_ISB_MISC] = 0;
+	/* we would disable interrupts here, but they are auto-disabled */
+	napi_schedule_irqoff(&q_vector->napi);
 
 	/* re-enable link(maybe) and non-queue interrupts, no flush.
 	 * txgbe_poll will re-enable the queue interrupts
@@ -651,11 +821,39 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter)
 		wr32(hw, TXGBE_GPIO_EOI, TXGBE_GPIO_EOI_6);
 	txgbe_irq_enable(adapter, true, true);
 
+	/* bring the link up in the watchdog; this could race with our first
+	 * link-up interrupt but shouldn't be a problem
+	 */
+	adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+
+	mod_timer(&adapter->service_timer, jiffies);
+
 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
 	wr32m(hw, TXGBE_CFG_PORT_CTL,
 	      TXGBE_CFG_PORT_CTL_PFRSTD, TXGBE_CFG_PORT_CTL_PFRSTD);
 }
 
+void txgbe_reinit_locked(struct txgbe_adapter *adapter)
+{
+	/* put off any impending NetWatchDogTimeout */
+	netif_trans_update(adapter->netdev);
+
+	while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+	txgbe_down(adapter);
+	txgbe_up(adapter);
+	clear_bit(__TXGBE_RESETTING, &adapter->state);
+}
+
+void txgbe_up(struct txgbe_adapter *adapter)
+{
+	/* hardware has been reset, we need to reload some things */
+	txgbe_configure(adapter);
+
+	txgbe_up_complete(adapter);
+}
+
 void txgbe_reset(struct txgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -715,6 +913,8 @@ void txgbe_disable_device(struct txgbe_adapter *adapter)
 
 	txgbe_irq_disable(adapter);
 
+	adapter->flags2 &= ~(TXGBE_FLAG2_PF_RESET_REQUESTED |
+			     TXGBE_FLAG2_GLOBAL_RESET_REQUESTED);
 	adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
 
 	del_timer_sync(&adapter->service_timer);
@@ -1256,6 +1456,78 @@ static void txgbe_service_timer(struct timer_list *t)
 	txgbe_service_event_schedule(adapter);
 }
 
+static void txgbe_reset_subtask(struct txgbe_adapter *adapter)
+{
+	u32 reset_flag = 0;
+	u32 value = 0;
+
+	if (!(adapter->flags2 & (TXGBE_FLAG2_PF_RESET_REQUESTED |
+				 TXGBE_FLAG2_GLOBAL_RESET_REQUESTED |
+				 TXGBE_FLAG2_RESET_INTR_RECEIVED)))
+		return;
+
+	/* If we're already down, just bail */
+	if (test_bit(__TXGBE_DOWN, &adapter->state) ||
+	    test_bit(__TXGBE_REMOVING, &adapter->state))
+		return;
+
+	netdev_err(adapter->netdev, "Reset adapter\n");
+
+	rtnl_lock();
+	if (adapter->flags2 & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) {
+		reset_flag |= TXGBE_FLAG2_GLOBAL_RESET_REQUESTED;
+		adapter->flags2 &= ~TXGBE_FLAG2_GLOBAL_RESET_REQUESTED;
+	}
+	if (adapter->flags2 & TXGBE_FLAG2_PF_RESET_REQUESTED) {
+		reset_flag |= TXGBE_FLAG2_PF_RESET_REQUESTED;
+		adapter->flags2 &= ~TXGBE_FLAG2_PF_RESET_REQUESTED;
+	}
+
+	if (adapter->flags2 & TXGBE_FLAG2_RESET_INTR_RECEIVED) {
+		/* If a recovery is already waiting, it takes precedence
+		 * over starting a new reset sequence.
+		 */
+		adapter->flags2 &= ~TXGBE_FLAG2_RESET_INTR_RECEIVED;
+		value = rd32m(&adapter->hw, TXGBE_MIS_RST_ST,
+			      TXGBE_MIS_RST_ST_DEV_RST_TYPE_MASK) >>
+			TXGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT;
+		if (value == TXGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST)
+			adapter->hw.reset_type = TXGBE_SW_RESET;
+		else if (value == TXGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST)
+			adapter->hw.reset_type = TXGBE_GLOBAL_RESET;
+
+		adapter->hw.force_full_reset = true;
+		txgbe_reinit_locked(adapter);
+		adapter->hw.force_full_reset = false;
+		goto unlock;
+	}
+
+	if (reset_flag & TXGBE_FLAG2_PF_RESET_REQUESTED) {
+		/* a PF reset only needs a driver-level down/up cycle */
+		txgbe_reinit_locked(adapter);
+	} else if (reset_flag & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) {
+		/* Request a Global Reset
+		 *
+		 * This will start the chip's countdown to the actual full
+		 * chip reset event, and a warning interrupt to be sent
+		 * to all PFs, including the requestor.  Our handler
+		 * for the warning interrupt will deal with the shutdown
+		 * and recovery of the switch setup.
+		 */
+		pci_save_state(adapter->pdev);
+		if (txgbe_mng_present(&adapter->hw))
+			txgbe_reset_hostif(&adapter->hw);
+		else
+			wr32m(&adapter->hw, TXGBE_MIS_RST,
+			      TXGBE_MIS_RST_GLOBAL_RST,
+			      TXGBE_MIS_RST_GLOBAL_RST);
+	}
+
+unlock:
+	rtnl_unlock();
+}
+
 /**
  * txgbe_service_task - manages and runs subtasks
  * @work: pointer to work_struct containing our data
@@ -1275,13 +1547,21 @@ static void txgbe_service_task(struct work_struct *work)
 		return;
 	}
 
+	txgbe_reset_subtask(adapter);
 	txgbe_sfp_detection_subtask(adapter);
 	txgbe_sfp_link_config_subtask(adapter);
+	txgbe_check_overtemp_subtask(adapter);
 	txgbe_watchdog_subtask(adapter);
 
 	txgbe_service_event_complete(adapter);
 }
 
+static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb,
+				    struct net_device *netdev)
+{
+	/* placeholder: consume and drop frames until the real Tx path is
+	 * added later in this series
+	 */
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
 /**
  * txgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
  * netdev->dev_addr_list
@@ -1331,6 +1611,7 @@ static int txgbe_del_sanmac_netdev(struct net_device *dev)
 static const struct net_device_ops txgbe_netdev_ops = {
 	.ndo_open               = txgbe_open,
 	.ndo_stop               = txgbe_close,
+	.ndo_start_xmit         = txgbe_xmit_frame,
 };
 
 void txgbe_assign_netdev_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index be0185570b62..5c6161a14876 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -394,3 +394,25 @@ s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
 	return txgbe_read_i2c_byte_int(hw, byte_offset, dev_addr,
 				       data, true);
 }
+
+/**
+ *  txgbe_check_overtemp - Checks if an overtemp occurred.
+ *  @hw: pointer to hardware structure
+ *
+ *  Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 txgbe_check_overtemp(struct txgbe_hw *hw)
+{
+	s32 status = 0;
+	u32 ts_state;
+
+	/* Check that the LASI temp alarm status was triggered */
+	ts_state = rd32(hw, TXGBE_TS_ALARM_ST);
+
+	if (ts_state & TXGBE_TS_ALARM_ST_DALARM)
+		status = TXGBE_ERR_UNDERTEMP;
+	else if (ts_state & TXGBE_TS_ALARM_ST_ALARM)
+		status = TXGBE_ERR_OVERTEMP;
+
+	return status;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index bb34e2dce2f8..c041dc8133cb 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -43,6 +43,7 @@ s32 txgbe_check_reset_blocked(struct txgbe_hw *hw);
 
 s32 txgbe_identify_module(struct txgbe_hw *hw);
 s32 txgbe_identify_sfp_module(struct txgbe_hw *hw);
+s32 txgbe_check_overtemp(struct txgbe_hw *hw);
 s32 txgbe_init_i2c(struct txgbe_hw *hw);
 s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr);
 s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 690b644962f2..51d349f72591 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -1158,6 +1158,7 @@ struct txgbe_phy_operations {
 			     u8 dev_addr, u8 *data);
 	s32 (*read_i2c_eeprom)(struct txgbe_hw *hw, u8 byte_offset,
 			       u8 *eeprom_data);
+	s32 (*check_overtemp)(struct txgbe_hw *hw);
 };
 
 struct txgbe_eeprom_info {
@@ -1232,6 +1233,7 @@ struct txgbe_hw {
 	u8 revision_id;
 	bool adapter_stopped;
 	enum txgbe_reset_type reset_type;
+	bool force_full_reset;
 	enum txgbe_link_status link_status;
 	u16 oem_ssid;
 	u16 oem_svid;
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 10/16] net: txgbe: Configure Rx and Tx unit of the MAC
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (8 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 09/16] net: txgbe: Handle various event interrupts Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 11/16] net: txgbe: Allocate Rx and Tx resources Jiawen Wu
                   ` (5 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Configure receive and transmit unit of the MAC, setup Rx and Tx ring.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  64 ++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 292 +++++++-
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |   9 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 635 ++++++++++++++++++
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   | 117 +++-
 5 files changed, 1115 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index ac4a3ec16309..516b4f865e6d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -11,15 +11,46 @@
 
 #include "txgbe_type.h"
 
+/* TX/RX descriptor defines */
+#define TXGBE_MAX_TXD                   8192
+#define TXGBE_MIN_TXD                   128
+
+#define TXGBE_MAX_RXD                   8192
+#define TXGBE_MIN_RXD                   128
+
+/* Supported Rx Buffer Sizes */
+#define TXGBE_RXBUFFER_256       256  /* Used for skb receive header */
+#define TXGBE_RXBUFFER_2K       2048
+#define TXGBE_RXBUFFER_3K       3072
+#define TXGBE_RXBUFFER_4K       4096
+#define TXGBE_MAX_RXBUFFER      16384  /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
+ * this adds up to 448 bytes of extra data.
+ *
+ * Since netdev_alloc_skb now allocates a page fragment we can use a value
+ * of 256 and the resultant skb will have a truesize of 960 or less.
+ */
+#define TXGBE_RX_HDR_SIZE       TXGBE_RXBUFFER_256
+
+#define TXGBE_MAX_RX_DESC_POLL          10
+
 struct txgbe_ring {
 	struct txgbe_ring *next;        /* pointer to next ring in q_vector */
 	struct txgbe_q_vector *q_vector; /* backpointer to host q_vector */
 	struct net_device *netdev;      /* netdev ring belongs to */
 	struct device *dev;             /* device for DMA mapping */
+	u8 __iomem *tail;
+	dma_addr_t dma;                 /* phys. address of descriptor ring */
+
 	u16 count;                      /* amount of descriptors */
 
 	u8 queue_index; /* needed for multiqueue queue management */
 	u8 reg_idx;
+	u16 next_to_use;
+	u16 next_to_clean;
+	u16 next_to_alloc;
 } ____cacheline_internodealigned_in_smp;
 
 #define TXGBE_MAX_FDIR_INDICES          63
@@ -27,6 +58,17 @@ struct txgbe_ring {
 #define TXGBE_MAX_RX_QUEUES   (TXGBE_MAX_FDIR_INDICES + 1)
 #define TXGBE_MAX_TX_QUEUES   (TXGBE_MAX_FDIR_INDICES + 1)
 
+#define TXGBE_MAX_MACVLANS      32
+
+static inline unsigned int txgbe_rx_bufsz(struct txgbe_ring __maybe_unused *ring)
+{
+#if MAX_SKB_FRAGS < 8
+	return ALIGN(TXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024);
+#else
+	return TXGBE_RXBUFFER_2K;
+#endif
+}
+
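/* Note: with the common default of MAX_SKB_FRAGS >= 8 the #else branch
 * applies and every Rx buffer is TXGBE_RXBUFFER_2K; only kernels built
 * with MAX_SKB_FRAGS < 8 take the ALIGN(16384 / MAX_SKB_FRAGS, 1024)
 * fallback, e.g. MAX_SKB_FRAGS == 4 yields ALIGN(4096, 1024) = 4096-byte
 * buffers.
 */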
 struct txgbe_ring_container {
 	struct txgbe_ring *ring;        /* pointer to linked list of rings */
 	u16 work_limit;                 /* total work allowed per interrupt */
@@ -68,6 +110,8 @@ struct txgbe_q_vector {
 #define TXGBE_16K_ITR           248
 #define TXGBE_12K_ITR           336
 
+#define TXGBE_MAX_JUMBO_FRAME_SIZE      9432 /* max payload 9414 */
+
 #define TCP_TIMER_VECTOR        0
 #define OTHER_VECTOR    1
 #define NON_Q_VECTORS   (OTHER_VECTOR + TCP_TIMER_VECTOR)
@@ -173,6 +217,8 @@ struct txgbe_adapter {
 
 	struct txgbe_mac_addr *mac_table;
 
+	unsigned long fwd_bitmask; /* bitmask indicating in use pools */
+
 	/* misc interrupt status block */
 	dma_addr_t isb_dma;
 	u32 *isb_mem;
@@ -217,11 +263,27 @@ void txgbe_reinit_locked(struct txgbe_adapter *adapter);
 void txgbe_reset(struct txgbe_adapter *adapter);
 s32 txgbe_init_shared_code(struct txgbe_hw *hw);
 void txgbe_disable_device(struct txgbe_adapter *adapter);
+void txgbe_configure_rx_ring(struct txgbe_adapter *adapter,
+			     struct txgbe_ring *ring);
+void txgbe_configure_tx_ring(struct txgbe_adapter *adapter,
+			     struct txgbe_ring *ring);
 int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter);
 void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter);
 void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter);
 void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter);
+void txgbe_configure_port(struct txgbe_adapter *adapter);
+void txgbe_set_rx_mode(struct net_device *netdev);
+int txgbe_write_mc_addr_list(struct net_device *netdev);
 void txgbe_write_eitr(struct txgbe_q_vector *q_vector);
+void txgbe_disable_rx_queue(struct txgbe_adapter *adapter,
+			    struct txgbe_ring *ring);
+
+int txgbe_write_uc_addr_list(struct net_device *netdev, int pool);
+int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool);
+int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool);
+int txgbe_available_rars(struct txgbe_adapter *adapter);
+
+void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter);
 
 /**
  * Interrupt masking operations. Each bit in PX_ICn corresponds to an interrupt.
@@ -257,6 +319,8 @@ static inline void txgbe_intr_disable(struct txgbe_hw *hw, u64 qmask)
 		wr32(hw, TXGBE_PX_IMS(1), mask);
 }
 
+#define TXGBE_RING_SIZE(R) ((R)->count < TXGBE_MAX_TXD ? (R)->count / 128 : 0)
+
 extern char txgbe_driver_name[];
 
 struct txgbe_msg {
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 6a5398b2f80e..dd93b07cc87b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -9,6 +9,8 @@
 #define TXGBE_SP_MAX_TX_QUEUES  128
 #define TXGBE_SP_MAX_RX_QUEUES  128
 #define TXGBE_SP_RAR_ENTRIES    128
+#define TXGBE_SP_MC_TBL_SIZE    128
+#define TXGBE_SP_RX_PB_SIZE     512
 
 static s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw);
 static void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw);
@@ -609,6 +611,130 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw)
 	return 0;
 }
 
+/**
+ *  txgbe_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts 12 bits from a multicast address to determine which bit-vector
+ *  to set in the multicast table. The hardware uses 12 bits of the incoming
+ *  Rx multicast address to determine the bit-vector to check in the MTA.
+ *  Which of the 4 possible 12-bit combinations the hardware uses is selected
+ *  by the MO field of the MCSTCTRL register; the MO field is set during
+ *  initialization to mc_filter_type.
+ **/
+static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:   /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:   /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:   /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:   /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:  /* Invalid mc_filter_type */
+		txgbe_dbg(hw, "MC filter type param set incorrectly\n");
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+/**
+ *  txgbe_set_mta - Set bit-vector in multicast table
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: Multicast address
+ *
+ *  Sets the bit-vector in the multicast table.
+ **/
+static void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector;
+	u32 vector_bit;
+	u32 vector_reg;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector = txgbe_mta_vector(hw, mc_addr);
+	txgbe_dbg(hw, " bit-vector = 0x%03X\n", vector);
+
+	/* The MTA is a register array of 128 32-bit registers. It is treated
+	 * like an array of 4096 bits.  We want to set bit
+	 * BitArray[vector_value], so we figure out which register the bit is
+	 * in and OR the new bit into the shadow copy of that register, which
+	 * is written back to hardware later.  The register is determined by
+	 * the upper 7 bits of the vector value and the bit within that
+	 * register is determined by the lower 5 bits of the value.
+	 */
+	vector_reg = (vector >> 5) & 0x7F;
+	vector_bit = vector & 0x1F;
+	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
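/* Worked example for the lookup above, assuming mc_filter_type == 0
 * (bits [47:36]) and the IPv4 mDNS group 01:00:5e:00:00:fb:
 *   mc_addr[4] = 0x00, mc_addr[5] = 0xfb
 *   vector     = (0x00 >> 4) | (0xfb << 4) = 0xfb0
 *   vector_reg = (0xfb0 >> 5) & 0x7f = 125
 *   vector_bit = 0xfb0 & 0x1f = 16
 * so txgbe_set_mta() sets bit 16 of mta_shadow[125].
 */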
+
+/**
+ *  txgbe_update_mc_addr_list - Updates MAC list of multicast addresses
+ *  @hw: pointer to hardware structure
+ *  @mc_addr_list: the list of new multicast addresses
+ *  @mc_addr_count: number of addresses
+ *  @next: iterator function to walk the multicast address list
+ *  @clear: flag, when set clears the table beforehand
+ *
+ *  When the clear flag is set, the given list replaces any existing list.
+ *  Hashes the given addresses into the multicast table.
+ **/
+s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list,
+			      u32 mc_addr_count, txgbe_mc_addr_itr next,
+			      bool clear)
+{
+	u32 i;
+	u32 vmdq;
+	u32 psrctl;
+
+	/* Set the new number of MC addresses that we are being requested to
+	 * use.
+	 */
+	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+	hw->addr_ctrl.mta_in_use = 0;
+
+	/* Clear mta_shadow */
+	if (clear) {
+		txgbe_dbg(hw, " Clearing MTA\n");
+		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+	}
+
+	/* Update mta_shadow */
+	for (i = 0; i < mc_addr_count; i++) {
+		txgbe_dbg(hw, " Adding the multicast addresses:\n");
+		txgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+	}
+
+	/* Enable mta */
+	for (i = 0; i < hw->mac.mcft_size; i++)
+		wr32a(hw, TXGBE_PSR_MC_TBL(0), i,
+		      hw->mac.mta_shadow[i]);
+
+	if (hw->addr_ctrl.mta_in_use > 0) {
+		psrctl = rd32(hw, TXGBE_PSR_CTL);
+		psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE);
+		psrctl |= TXGBE_PSR_CTL_MFE |
+			(hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT);
+		wr32(hw, TXGBE_PSR_CTL, psrctl);
+	}
+
+	txgbe_dbg(hw, "txgbe update mc addr list Complete\n");
+	return 0;
+}
+
 /**
  *  txgbe_disable_pcie_master - Disable PCI-express master access
  *  @hw: pointer to hardware structure
@@ -711,6 +837,52 @@ s32 txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask)
 	return 0;
 }
 
+/**
+ *  txgbe_disable_sec_rx_path - Stops the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the receive data path and waits for the HW to internally empty
+ *  the Rx security block
+ **/
+s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw)
+{
+#define TXGBE_MAX_SECRX_POLL 40
+
+	int i;
+	int secrxreg;
+
+	wr32m(hw, TXGBE_RSC_CTL,
+	      TXGBE_RSC_CTL_RX_DIS, TXGBE_RSC_CTL_RX_DIS);
+	for (i = 0; i < TXGBE_MAX_SECRX_POLL; i++) {
+		secrxreg = rd32(hw, TXGBE_RSC_ST);
+		if (!(secrxreg & TXGBE_RSC_ST_RSEC_RDY))
+			/* Use interrupt-safe sleep just in case */
+			usleep_range(1000, 2000);
+		else
+			break;
+	}
+
+	/* For informational purposes only */
+	if (i >= TXGBE_MAX_SECRX_POLL)
+		txgbe_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
+
+	return 0;
+}
+
+/**
+ *  txgbe_enable_sec_rx_path - Enables the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the receive data path.
+ **/
+s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw)
+{
+	wr32m(hw, TXGBE_RSC_CTL, TXGBE_RSC_CTL_RX_DIS, 0);
+	TXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
 /**
  *  txgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
  *  @hw: pointer to hardware structure
@@ -1214,6 +1386,67 @@ u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr)
 	return rd32(hw, SPI_H_DAT_REG_ADDR);
 }
 
+/**
+ * txgbe_set_rxpba - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+s32 txgbe_set_rxpba(struct txgbe_hw *hw, int num_pb, u32 headroom,
+		    int strategy)
+{
+	u32 pbsize = hw->mac.rx_pb_size;
+	int i = 0;
+	u32 rxpktsize, txpktsize, txpbthresh;
+
+	/* Reserve headroom */
+	pbsize -= headroom;
+
+	if (!num_pb)
+		num_pb = 1;
+
+	/* Divide remaining packet buffer space amongst the number of packet
+	 * buffers requested using supplied strategy.
+	 */
+	switch (strategy) {
+	case PBA_STRATEGY_WEIGHTED:
+		/* The txgbe_dcb_pba_80_48 strategy weights the first half of
+		 * the packet buffers with 5/8 of the packet buffer space.
+		 */
+		rxpktsize = (pbsize * 5) / (num_pb * 4);
+		pbsize -= rxpktsize * (num_pb / 2);
+		rxpktsize <<= TXGBE_RDB_PB_SZ_SHIFT;
+		for (; i < (num_pb / 2); i++)
+			wr32(hw, TXGBE_RDB_PB_SZ(i), rxpktsize);
+		fallthrough;
+	case PBA_STRATEGY_EQUAL:
+		rxpktsize = (pbsize / (num_pb - i)) << TXGBE_RDB_PB_SZ_SHIFT;
+		for (; i < num_pb; i++)
+			wr32(hw, TXGBE_RDB_PB_SZ(i), rxpktsize);
+		break;
+	default:
+		break;
+	}
+
+	/* Only support an equally distributed Tx packet buffer strategy. */
+	txpktsize = TXGBE_TDB_PB_SZ_MAX / num_pb;
+	txpbthresh = (txpktsize / 1024) - TXGBE_TXPKT_SIZE_MAX;
+	for (i = 0; i < num_pb; i++) {
+		wr32(hw, TXGBE_TDB_PB_SZ(i), txpktsize);
+		wr32(hw, TXGBE_TDM_PB_THRE(i), txpbthresh);
+	}
+
+	/* Clear unused TCs, if any, to zero buffer size */
+	for (; i < TXGBE_MAX_PB; i++) {
+		wr32(hw, TXGBE_RDB_PB_SZ(i), 0);
+		wr32(hw, TXGBE_TDB_PB_SZ(i), 0);
+		wr32(hw, TXGBE_TDM_PB_THRE(i), 0);
+	}
+
+	return 0;
+}
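/* Worked example for PBA_STRATEGY_WEIGHTED, assuming rx_pb_size = 512
 * (TXGBE_SP_RX_PB_SIZE), headroom = 0 and num_pb = 8:
 *   rxpktsize = (512 * 5) / (8 * 4) = 80  -> pools 0-3 get 80 each,
 *                                            4 * 80 = 320 = 5/8 of 512
 *   pbsize   -= 80 * (8 / 2) = 192
 *   fallthrough to PBA_STRATEGY_EQUAL:
 *   rxpktsize = 192 / (8 - 4) = 48        -> pools 4-7 get 48 each
 * (register values are additionally shifted by TXGBE_RDB_PB_SZ_SHIFT)
 */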
+
 /**
  *  txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
  *  @hw: pointer to hardware structure
@@ -1275,6 +1508,27 @@ s32 txgbe_disable_rx(struct txgbe_hw *hw)
 	return 0;
 }
 
+s32 txgbe_enable_rx(struct txgbe_hw *hw)
+{
+	u32 pfdtxgswc;
+
+	/* enable mac receiver */
+	wr32m(hw, TXGBE_MAC_RX_CFG,
+	      TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE);
+
+	wr32m(hw, TXGBE_RDB_PB_CTL,
+	      TXGBE_RDB_PB_CTL_RXEN, TXGBE_RDB_PB_CTL_RXEN);
+
+	if (hw->mac.set_lben) {
+		pfdtxgswc = rd32(hw, TXGBE_PSR_CTL);
+		pfdtxgswc |= TXGBE_PSR_CTL_SW_EN;
+		wr32(hw, TXGBE_PSR_CTL, pfdtxgswc);
+		hw->mac.set_lben = false;
+	}
+
+	return 0;
+}
+
 /**
  * txgbe_mng_present - returns true when management capability is present
  * @hw: pointer to hardware structure
@@ -1537,15 +1791,21 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	mac->ops.release_swfw_sync = txgbe_release_swfw_sync;
 	mac->ops.reset_hw = txgbe_reset_hw;
 	mac->ops.get_media_type = txgbe_get_media_type;
+	mac->ops.disable_sec_rx_path = txgbe_disable_sec_rx_path;
+	mac->ops.enable_sec_rx_path = txgbe_enable_sec_rx_path;
+	mac->ops.enable_rx_dma = txgbe_enable_rx_dma;
 	mac->ops.start_hw = txgbe_start_hw;
 	mac->ops.get_san_mac_addr = txgbe_get_san_mac_addr;
 	mac->ops.get_wwn_prefix = txgbe_get_wwn_prefix;
+	mac->ops.setup_rxpba = txgbe_set_rxpba;
 
-	/* RAR */
+	/* RAR, Multicast */
 	mac->ops.set_rar = txgbe_set_rar;
 	mac->ops.clear_rar = txgbe_clear_rar;
 	mac->ops.init_rx_addrs = txgbe_init_rx_addrs;
 	mac->ops.clear_vmdq = txgbe_clear_vmdq;
+	mac->ops.update_mc_addr_list = txgbe_update_mc_addr_list;
+	mac->ops.enable_rx = txgbe_enable_rx;
 	mac->ops.disable_rx = txgbe_disable_rx;
 	mac->ops.set_vmdq_san_mac = txgbe_set_vmdq_san_mac;
 	mac->ops.init_uta_tables = txgbe_init_uta_tables;
@@ -1553,7 +1813,10 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	/* Link */
 	mac->ops.get_link_capabilities = txgbe_get_link_capabilities;
 	mac->ops.check_link = txgbe_check_mac_link;
+
+	mac->mcft_size          = TXGBE_SP_MC_TBL_SIZE;
 	mac->num_rar_entries    = TXGBE_SP_RAR_ENTRIES;
+	mac->rx_pb_size         = TXGBE_SP_RX_PB_SIZE;
 	mac->max_rx_queues      = TXGBE_SP_MAX_RX_QUEUES;
 	mac->max_tx_queues      = TXGBE_SP_MAX_TX_QUEUES;
 	mac->max_msix_vectors   = txgbe_get_pcie_msix_count(hw);
@@ -2952,6 +3215,33 @@ s32 txgbe_identify_phy(struct txgbe_hw *hw)
 	return status;
 }
 
+/**
+ *  txgbe_enable_rx_dma - Enable the Rx DMA unit on sapphire
+ *  @hw: pointer to hardware structure
+ *  @regval: register value to write to RXCTRL
+ *
+ *  Enables the Rx DMA unit for sapphire
+ **/
+s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval)
+{
+	/* Workaround for sapphire silicon errata when enabling the Rx datapath.
+	 * If traffic is incoming before we enable the Rx unit, it could hang
+	 * the Rx DMA unit.  Therefore, make sure the security engine is
+	 * completely disabled prior to enabling the Rx unit.
+	 */
+
+	TCALL(hw, mac.ops.disable_sec_rx_path);
+
+	if (regval & TXGBE_RDB_PB_CTL_RXEN)
+		TCALL(hw, mac.ops.enable_rx);
+	else
+		TCALL(hw, mac.ops.disable_rx);
+
+	TCALL(hw, mac.ops.enable_sec_rx_path);
+
+	return 0;
+}
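/* Caller-side sketch of the errata-safe sequence this helper wraps,
 * mirroring how txgbe_configure_rx() uses it later in this patch:
 */
u32 rxctrl = rd32(hw, TXGBE_RDB_PB_CTL);

rxctrl |= TXGBE_RDB_PB_CTL_RXEN;
TCALL(hw, mac.ops.enable_rx_dma, rxctrl);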
+
 /**
  *  txgbe_init_eeprom_params - Initialize EEPROM params
  *  @hw: pointer to hardware structure
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index d52c3b5775cc..7377788fdaa2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -31,6 +31,11 @@ s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
 		  u32 enable_addr);
 s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index);
 s32 txgbe_init_rx_addrs(struct txgbe_hw *hw);
+s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list,
+			      u32 mc_addr_count,
+			      txgbe_mc_addr_itr func, bool clear);
+s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw);
+s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw);
 
 s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask);
 s32 txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask);
@@ -45,6 +50,8 @@ s32 txgbe_init_uta_tables(struct txgbe_hw *hw);
 s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
 			 u16 *wwpn_prefix);
 
+s32 txgbe_set_rxpba(struct txgbe_hw *hw, int num_pb, u32 headroom,
+		    int strategy);
 s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min,
 			 u8 build, u8 ver);
 s32 txgbe_reset_hostif(struct txgbe_hw *hw);
@@ -56,6 +63,7 @@ bool txgbe_mng_present(struct txgbe_hw *hw);
 bool txgbe_check_mng_access(struct txgbe_hw *hw);
 
 s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw);
+s32 txgbe_enable_rx(struct txgbe_hw *hw);
 s32 txgbe_disable_rx(struct txgbe_hw *hw);
 s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
 					  u32 speed,
@@ -78,6 +86,7 @@ int txgbe_reset_misc(struct txgbe_hw *hw);
 s32 txgbe_reset_hw(struct txgbe_hw *hw);
 s32 txgbe_identify_phy(struct txgbe_hw *hw);
 s32 txgbe_init_phy_ops(struct txgbe_hw *hw);
+s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval);
 s32 txgbe_init_ops(struct txgbe_hw *hw);
 
 s32 txgbe_init_eeprom_params(struct txgbe_hw *hw);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index bb87cc7c4157..8f6946379c79 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -658,6 +658,380 @@ static void txgbe_configure_msi_and_legacy(struct txgbe_adapter *adapter)
 		   "Legacy interrupt IVAR setup done\n");
 }
 
+/**
+ * txgbe_configure_tx_ring - Configure Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+void txgbe_configure_tx_ring(struct txgbe_adapter *adapter,
+			     struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 txdctl = TXGBE_PX_TR_CFG_ENABLE;
+	u8 reg_idx = ring->reg_idx;
+	u64 tdba = ring->dma;
+	int wait_loop = 10;
+
+	/* disable queue to avoid issues while updating state */
+	wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH);
+	TXGBE_WRITE_FLUSH(hw);
+
+	wr32(hw, TXGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
+	wr32(hw, TXGBE_PX_TR_BAH(reg_idx), tdba >> 32);
+
+	/* reset head and tail pointers */
+	wr32(hw, TXGBE_PX_TR_RP(reg_idx), 0);
+	wr32(hw, TXGBE_PX_TR_WP(reg_idx), 0);
+	ring->tail = adapter->io_addr + TXGBE_PX_TR_WP(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	txdctl |= TXGBE_RING_SIZE(ring) << TXGBE_PX_TR_CFG_TR_SIZE_SHIFT;
+
+	/* set WTHRESH to encourage burst writeback, it should not be set
+	 * higher than 1 when:
+	 * - ITR is 0 as it could cause false TX hangs
+	 * - ITR is set to > 100k int/sec and BQL is enabled
+	 *
+	 * In order to avoid issues, WTHRESH + PTHRESH should always be equal
+	 * to or less than the number of on-chip descriptors, which is
+	 * currently 40.
+	 */
+
+	txdctl |= 0x20 << TXGBE_PX_TR_CFG_WTHRESH_SHIFT;
+
+	/* enable queue */
+	wr32(hw, TXGBE_PX_TR_CFG(reg_idx), txdctl);
+
+	/* poll to verify queue is enabled */
+	do {
+		msleep(20);
+		txdctl = rd32(hw, TXGBE_PX_TR_CFG(reg_idx));
+	} while (--wait_loop && !(txdctl & TXGBE_PX_TR_CFG_ENABLE));
+	if (!wait_loop)
+		netif_err(adapter, drv, adapter->netdev,
+			  "Could not enable Tx Queue %d\n", reg_idx);
+}
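/* The TR_SIZE field set above comes from TXGBE_RING_SIZE(), which encodes
 * ring->count in units of 128 descriptors:
 *   count = 512                     -> field = 512 / 128 = 4
 *   count = 8192 (TXGBE_MAX_TXD)    -> field = 0 (the maximum wraps to zero)
 */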
+
+/**
+ * txgbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void txgbe_configure_tx(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 i;
+
+	/* TDM_CTL.TE must be before Tx queues are enabled */
+	wr32m(hw, TXGBE_TDM_CTL,
+	      TXGBE_TDM_CTL_TE, TXGBE_TDM_CTL_TE);
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		txgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
+
+	wr32m(hw, TXGBE_TSC_BUF_AE, 0x3FF, 0x10);
+	/* enable mac transmitter */
+	wr32m(hw, TXGBE_MAC_TX_CFG,
+	      TXGBE_MAC_TX_CFG_TE, TXGBE_MAC_TX_CFG_TE);
+}
+
+static void txgbe_enable_rx_drop(struct txgbe_adapter *adapter,
+				 struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u16 reg_idx = ring->reg_idx;
+
+	u32 srrctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx));
+
+	srrctl |= TXGBE_PX_RR_CFG_DROP_EN;
+
+	wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl);
+}
+
+static void txgbe_disable_rx_drop(struct txgbe_adapter *adapter,
+				  struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u16 reg_idx = ring->reg_idx;
+
+	u32 srrctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx));
+
+	srrctl &= ~TXGBE_PX_RR_CFG_DROP_EN;
+
+	wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl);
+}
+
+void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter)
+{
+	int i;
+
+	/* We should set the drop enable bit if:
+	 *  Number of Rx queues > 1
+	 *
+	 *  This allows us to avoid head of line blocking for security
+	 *  and performance reasons.
+	 */
+	if (adapter->num_rx_queues > 1) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			txgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
+	} else {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			txgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
+	}
+}
+
+static void txgbe_configure_srrctl(struct txgbe_adapter *adapter,
+				   struct txgbe_ring *rx_ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 srrctl;
+	u16 reg_idx = rx_ring->reg_idx;
+
+	srrctl = rd32m(hw, TXGBE_PX_RR_CFG(reg_idx),
+		       ~(TXGBE_PX_RR_CFG_RR_HDR_SZ |
+		       TXGBE_PX_RR_CFG_RR_BUF_SZ |
+		       TXGBE_PX_RR_CFG_SPLIT_MODE));
+	/* configure header buffer length, needed for RSC */
+	srrctl |= TXGBE_RX_HDR_SIZE << TXGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT;
+
+	/* configure the packet buffer length */
+	srrctl |= txgbe_rx_bufsz(rx_ring) >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT;
+
+	wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl);
+}
+
+static void txgbe_rx_desc_queue_enable(struct txgbe_adapter *adapter,
+				       struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	int wait_loop = TXGBE_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	if (TXGBE_REMOVED(hw->hw_addr))
+		return;
+
+	do {
+		usleep_range(1000, 2000);
+		rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx));
+	} while (--wait_loop && !(rxdctl & TXGBE_PX_RR_CFG_RR_EN));
+
+	if (!wait_loop)
+		netif_err(adapter, drv, adapter->netdev,
+			  "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n",
+			  reg_idx);
+}
+
+/* disable the specified rx ring/queue */
+void txgbe_disable_rx_queue(struct txgbe_adapter *adapter,
+			    struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	int wait_loop = TXGBE_MAX_RX_DESC_POLL;
+	u8 reg_idx = ring->reg_idx;
+	u32 rxdctl;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	wr32m(hw, TXGBE_PX_RR_CFG(reg_idx),
+	      TXGBE_PX_RR_CFG_RR_EN, 0);
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		usleep_range(10, 20);
+		rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx));
+	} while (--wait_loop && (rxdctl & TXGBE_PX_RR_CFG_RR_EN));
+
+	if (!wait_loop) {
+		netif_err(adapter, drv, adapter->netdev,
+			  "RXDCTL.ENABLE on Rx queue %d not cleared within the polling period\n",
+			  reg_idx);
+	}
+}
+
+void txgbe_configure_rx_ring(struct txgbe_adapter *adapter,
+			     struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u16 reg_idx = ring->reg_idx;
+	u64 rdba = ring->dma;
+	u32 rxdctl;
+
+	/* disable queue to avoid issues while updating state */
+	rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx));
+	txgbe_disable_rx_queue(adapter, ring);
+
+	wr32(hw, TXGBE_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
+	wr32(hw, TXGBE_PX_RR_BAH(reg_idx), rdba >> 32);
+
+	if (ring->count == TXGBE_MAX_RXD)
+		rxdctl |= 0 << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT;
+	else
+		rxdctl |= (ring->count / 128) << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT;
+
+	rxdctl |= 0x1 << TXGBE_PX_RR_CFG_RR_THER_SHIFT;
+	wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rxdctl);
+
+	/* reset head and tail pointers */
+	wr32(hw, TXGBE_PX_RR_RP(reg_idx), 0);
+	wr32(hw, TXGBE_PX_RR_WP(reg_idx), 0);
+	ring->tail = adapter->io_addr + TXGBE_PX_RR_WP(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+	ring->next_to_alloc = 0;
+
+	txgbe_configure_srrctl(adapter, ring);
+
+	/* enable receive descriptor ring */
+	wr32m(hw, TXGBE_PX_RR_CFG(reg_idx),
+	      TXGBE_PX_RR_CFG_RR_EN, TXGBE_PX_RR_CFG_RR_EN);
+
+	txgbe_rx_desc_queue_enable(adapter, ring);
+}
+
+static void txgbe_setup_psrtype(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	int pool;
+
+	/* PSRTYPE must be initialized for each pool in use */
+	u32 psrtype = TXGBE_RDB_PL_CFG_L4HDR |
+		      TXGBE_RDB_PL_CFG_L3HDR |
+		      TXGBE_RDB_PL_CFG_L2HDR |
+		      TXGBE_RDB_PL_CFG_TUN_OUTER_L2HDR |
+		      TXGBE_RDB_PL_CFG_TUN_TUNHDR;
+
+	for_each_set_bit(pool, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS)
+		wr32(hw, TXGBE_RDB_PL_CFG(pool), psrtype);
+}
+
+static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 mhadd;
+
+	/* adjust max frame to be at least the size of a standard frame */
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+	mhadd = rd32(hw, TXGBE_PSR_MAX_SZ);
+	if (max_frame != mhadd)
+		wr32(hw, TXGBE_PSR_MAX_SZ, max_frame);
+}
+
+/**
+ * txgbe_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void txgbe_configure_rx(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 rxctrl, psrctl;
+	int i;
+
+	/* disable receives while setting up the descriptors */
+	TCALL(hw, mac.ops.disable_rx);
+
+	txgbe_setup_psrtype(adapter);
+
+	/* enable hw crc stripping */
+	wr32m(hw, TXGBE_RSC_CTL,
+	      TXGBE_RSC_CTL_CRC_STRIP, TXGBE_RSC_CTL_CRC_STRIP);
+
+	/* RSC Setup */
+	psrctl = rd32m(hw, TXGBE_PSR_CTL, ~TXGBE_PSR_CTL_RSC_DIS);
+	psrctl |= TXGBE_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
+	psrctl |= TXGBE_PSR_CTL_RSC_DIS;
+	wr32(hw, TXGBE_PSR_CTL, psrctl);
+
+	/* set_rx_buffer_len must be called before ring initialization */
+	txgbe_set_rx_buffer_len(adapter);
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		txgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
+
+	rxctrl = rd32(hw, TXGBE_RDB_PB_CTL);
+
+	/* enable all receives */
+	rxctrl |= TXGBE_RDB_PB_CTL_RXEN;
+	TCALL(hw, mac.ops.enable_rx_dma, rxctrl);
+}
+
+static u8 *txgbe_addr_list_itr(struct txgbe_hw __maybe_unused *hw,
+			       u8 **mc_addr_ptr, u32 *vmdq)
+{
+	struct netdev_hw_addr *mc_ptr;
+	u8 *addr = *mc_addr_ptr;
+
+	*vmdq = 0;
+
+	mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
+	if (mc_ptr->list.next) {
+		struct netdev_hw_addr *ha;
+
+		ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
+		*mc_addr_ptr = ha->addr;
+	} else {
+		*mc_addr_ptr = NULL;
+	}
+
+	return addr;
+}
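/* Note: the iterator recovers the struct netdev_hw_addr from the bare
 * address pointer with container_of(), returns the current address and
 * advances *mc_addr_ptr to the next node's address (or NULL at the end).
 * txgbe_update_mc_addr_list() calls it once per address in the list.
 */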
+
+/**
+ * txgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *          0 on no addresses written
+ *          X on writing X addresses to MTA
+ **/
+int txgbe_write_mc_addr_list(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+	struct netdev_hw_addr *ha;
+	u8  *addr_list = NULL;
+	int addr_count = 0;
+
+	if (!hw->mac.ops.update_mc_addr_list)
+		return -ENOMEM;
+
+	if (!netif_running(netdev))
+		return 0;
+
+	if (netdev_mc_empty(netdev)) {
+		TCALL(hw, mac.ops.update_mc_addr_list, NULL, 0,
+		      txgbe_addr_list_itr, true);
+	} else {
+		ha = list_first_entry(&netdev->mc.list,
+				      struct netdev_hw_addr, list);
+		addr_list = ha->addr;
+		addr_count = netdev_mc_count(netdev);
+
+		TCALL(hw, mac.ops.update_mc_addr_list, addr_list, addr_count,
+		      txgbe_addr_list_itr, true);
+	}
+
+	return addr_count;
+}
+
 static void txgbe_sync_mac_table(struct txgbe_adapter *adapter)
 {
 	struct txgbe_hw *hw = &adapter->hw;
@@ -680,6 +1054,18 @@ static void txgbe_sync_mac_table(struct txgbe_adapter *adapter)
 	}
 }
 
+int txgbe_available_rars(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 i, count = 0;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state == 0)
+			count++;
+	}
+	return count;
+}
+
 /* this function destroys the first RAR entry */
 static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter,
 					 u8 *addr)
@@ -695,6 +1081,38 @@ static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter,
 	      TXGBE_PSR_MAC_SWC_AD_H_AV);
 }
 
+int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 i;
+
+	if (is_zero_ether_addr(addr))
+		return -EINVAL;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) {
+			if (ether_addr_equal(addr, adapter->mac_table[i].addr)) {
+				if (adapter->mac_table[i].pools != (1ULL << pool)) {
+					memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
+					adapter->mac_table[i].pools |= (1ULL << pool);
+					txgbe_sync_mac_table(adapter);
+					return i;
+				}
+			}
+		}
+
+		if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE)
+			continue;
+		adapter->mac_table[i].state |= (TXGBE_MAC_STATE_MODIFIED |
+						TXGBE_MAC_STATE_IN_USE);
+		memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
+		adapter->mac_table[i].pools |= (1ULL << pool);
+		txgbe_sync_mac_table(adapter);
+		return i;
+	}
+	return -ENOMEM;
+}
+
 static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter)
 {
 	struct txgbe_hw *hw = &adapter->hw;
@@ -709,6 +1127,165 @@ static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter)
 	txgbe_sync_mac_table(adapter);
 }
 
+int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 i;
+
+	if (is_zero_ether_addr(addr))
+		return -EINVAL;
+
+	/* search table for addr, if found, set to 0 and sync */
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (ether_addr_equal(addr, adapter->mac_table[i].addr)) {
+			if (adapter->mac_table[i].pools & (1ULL << pool)) {
+				adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
+				adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
+				adapter->mac_table[i].pools &= ~(1ULL << pool);
+				txgbe_sync_mac_table(adapter);
+			}
+			return 0;
+		}
+
+		if (adapter->mac_table[i].pools != (1ULL << pool))
+			continue;
+		if (!ether_addr_equal(addr, adapter->mac_table[i].addr))
+			continue;
+
+		adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
+		adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
+		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+		adapter->mac_table[i].pools = 0;
+		txgbe_sync_mac_table(adapter);
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+/**
+ * txgbe_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ * @pool: index for mac table
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *          0 on no addresses written
+ *          X on writing X addresses to the RAR table
+ **/
+int txgbe_write_uc_addr_list(struct net_device *netdev, int pool)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	int count = 0;
+
+	/* return ENOMEM indicating insufficient memory for addresses */
+	if (netdev_uc_count(netdev) > txgbe_available_rars(adapter))
+		return -ENOMEM;
+
+	if (!netdev_uc_empty(netdev)) {
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, netdev) {
+			txgbe_del_mac_filter(adapter, ha->addr, pool);
+			txgbe_add_mac_filter(adapter, ha->addr, pool);
+			count++;
+		}
+	}
+	return count;
+}
+
+/**
+ * txgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_method entry point is called whenever the unicast/multicast
+ * address list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast and
+ * promiscuous mode.
+ **/
+void txgbe_set_rx_mode(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 fctrl, vmolr, vlnctrl;
+	int count;
+
+	/* Check for Promiscuous and All Multicast modes */
+	fctrl = rd32m(hw, TXGBE_PSR_CTL,
+		      ~(TXGBE_PSR_CTL_UPE | TXGBE_PSR_CTL_MPE));
+	vmolr = rd32m(hw, TXGBE_PSR_VM_L2CTL(0),
+		      ~(TXGBE_PSR_VM_L2CTL_UPE |
+			TXGBE_PSR_VM_L2CTL_MPE |
+			TXGBE_PSR_VM_L2CTL_ROPE |
+			TXGBE_PSR_VM_L2CTL_ROMPE));
+	vlnctrl = rd32m(hw, TXGBE_PSR_VLAN_CTL,
+			~(TXGBE_PSR_VLAN_CTL_VFE |
+			  TXGBE_PSR_VLAN_CTL_CFIEN));
+
+	/* set all bits that we expect to always be set */
+	fctrl |= TXGBE_PSR_CTL_BAM | TXGBE_PSR_CTL_MFE;
+	vmolr |= TXGBE_PSR_VM_L2CTL_BAM |
+		 TXGBE_PSR_VM_L2CTL_AUPE |
+		 TXGBE_PSR_VM_L2CTL_VACC;
+	vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE;
+
+	hw->addr_ctrl.user_set_promisc = false;
+	if (netdev->flags & IFF_PROMISC) {
+		hw->addr_ctrl.user_set_promisc = true;
+		fctrl |= (TXGBE_PSR_CTL_UPE | TXGBE_PSR_CTL_MPE);
+		/* the PF doesn't want packets routed to a VF, so clear UPE */
+		vmolr |= TXGBE_PSR_VM_L2CTL_MPE;
+		vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE;
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		fctrl |= TXGBE_PSR_CTL_MPE;
+		vmolr |= TXGBE_PSR_VM_L2CTL_MPE;
+	}
+
+	/* This is useful for sniffing bad packets. */
+	if (netdev->features & NETIF_F_RXALL) {
+		vmolr |= (TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_MPE);
+		vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE;
+		/* receive bad packets */
+		wr32m(hw, TXGBE_RSC_CTL,
+		      TXGBE_RSC_CTL_SAVE_MAC_ERR,
+		      TXGBE_RSC_CTL_SAVE_MAC_ERR);
+	} else {
+		vmolr |= TXGBE_PSR_VM_L2CTL_ROPE | TXGBE_PSR_VM_L2CTL_ROMPE;
+	}
+
+	/* Write addresses to available RAR registers, if there is not
+	 * sufficient space to store all the addresses then enable
+	 * unicast promiscuous mode
+	 */
+	count = txgbe_write_uc_addr_list(netdev, 0);
+	if (count < 0) {
+		vmolr &= ~TXGBE_PSR_VM_L2CTL_ROPE;
+		vmolr |= TXGBE_PSR_VM_L2CTL_UPE;
+	}
+
+	/* Write addresses to the MTA, if the attempt fails
+	 * then we should just turn on promiscuous mode so
+	 * that we can at least receive multicast traffic
+	 */
+	count = txgbe_write_mc_addr_list(netdev);
+	if (count < 0) {
+		vmolr &= ~TXGBE_PSR_VM_L2CTL_ROMPE;
+		vmolr |= TXGBE_PSR_VM_L2CTL_MPE;
+	}
+
+	wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl);
+	wr32(hw, TXGBE_PSR_CTL, fctrl);
+	wr32(hw, TXGBE_PSR_VM_L2CTL(0), vmolr);
+}
+
+static void txgbe_configure_pb(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+
+	TCALL(hw, mac.ops.setup_rxpba, 0, 0, PBA_STRATEGY_EQUAL);
+}
+
 static void txgbe_configure_isb(struct txgbe_adapter *adapter)
 {
 	/* set ISB Address */
@@ -719,8 +1296,44 @@ static void txgbe_configure_isb(struct txgbe_adapter *adapter)
 	wr32(hw, TXGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32);
 }
 
+void txgbe_configure_port(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 value, i;
+
+	value = TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ;
+	wr32m(hw, TXGBE_CFG_PORT_CTL,
+	      TXGBE_CFG_PORT_CTL_D_VLAN |
+	      TXGBE_CFG_PORT_CTL_QINQ,
+	      value);
+
+	wr32(hw, TXGBE_CFG_TAG_TPID(0),
+	     ETH_P_8021Q | (ETH_P_8021AD << 16));
+	adapter->hw.tpid[0] = ETH_P_8021Q;
+	adapter->hw.tpid[1] = ETH_P_8021AD;
+	for (i = 1; i < 4; i++)
+		wr32(hw, TXGBE_CFG_TAG_TPID(i),
+		     ETH_P_8021Q | (ETH_P_8021Q << 16));
+	for (i = 2; i < 8; i++)
+		adapter->hw.tpid[i] = ETH_P_8021Q;
+}
+
 static void txgbe_configure(struct txgbe_adapter *adapter)
 {
+	struct txgbe_hw *hw = &adapter->hw;
+
+	txgbe_configure_pb(adapter);
+
+	txgbe_configure_port(adapter);
+
+	txgbe_set_rx_mode(adapter->netdev);
+
+	TCALL(hw, mac.ops.disable_sec_rx_path);
+
+	TCALL(hw, mac.ops.enable_sec_rx_path);
+
+	txgbe_configure_tx(adapter);
+	txgbe_configure_rx(adapter);
 	txgbe_configure_isb(adapter);
 }
 
@@ -908,6 +1521,11 @@ void txgbe_disable_device(struct txgbe_adapter *adapter)
 	/* disable receives */
 	TCALL(hw, mac.ops.disable_rx);
 
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		/* this call also flushes the previous write */
+		txgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
@@ -1023,6 +1641,7 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
 
 	adapter->max_q_vectors = TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE;
 
+	set_bit(0, &adapter->fwd_bitmask);
 	set_bit(__TXGBE_DOWN, &adapter->state);
 
 	return 0;
@@ -1224,6 +1843,8 @@ static void txgbe_watchdog_update_link(struct txgbe_adapter *adapter)
 	adapter->link_speed = link_speed;
 
 	if (link_up) {
+		txgbe_set_rx_drop_en(adapter);
+
 		if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) {
 			wr32(hw, TXGBE_MAC_TX_CFG,
 			     (rd32(hw, TXGBE_MAC_TX_CFG) &
@@ -1612,6 +2233,8 @@ static const struct net_device_ops txgbe_netdev_ops = {
 	.ndo_open               = txgbe_open,
 	.ndo_stop               = txgbe_close,
 	.ndo_start_xmit         = txgbe_xmit_frame,
+	.ndo_set_rx_mode        = txgbe_set_rx_mode,
+	.ndo_validate_addr      = eth_validate_addr,
 };
 
 void txgbe_assign_netdev_ops(struct net_device *dev)
@@ -1727,8 +2350,20 @@ static int txgbe_probe(struct pci_dev *pdev,
 		goto err_free_mac_table;
 	}
 
+	netdev->features = NETIF_F_SG;
+
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= netdev->features |
+			       NETIF_F_RXALL;
+
 	netdev->features |= NETIF_F_HIGHDMA;
 
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+	netdev->min_mtu = ETH_MIN_MTU;
+	netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
 	/* make sure the EEPROM is good */
 	if (TCALL(hw, eeprom.ops.validate_checksum, NULL)) {
 		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 51d349f72591..fc51f82b6087 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -456,6 +456,7 @@ struct txgbe_thermal_sensor_data {
 /*********************** Transmit DMA registers **************************/
 /* transmit global control */
 #define TXGBE_TDM_CTL           0x18000
+#define TXGBE_TDM_PB_THRE(_i)   (0x18020 + ((_i) * 4)) /* 8 of these 0 - 7 */
 /* TDM CTL BIT */
 #define TXGBE_TDM_CTL_TE        0x1 /* Transmit Enable */
 #define TXGBE_TDM_CTL_PADDING   0x2 /* Padding byte number for ipsec ESP */
@@ -478,6 +479,9 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_RDB_UP2TC             0x19008
 #define TXGBE_RDB_PB_SZ_SHIFT       10
 #define TXGBE_RDB_PB_SZ_MASK        0x000FFC00U
+
+/* ring assignment */
+#define TXGBE_RDB_PL_CFG(_i)    (0x19300 + ((_i) * 4))
 /* statistic */
 #define TXGBE_RDB_MPCNT(_i)         (0x19040 + ((_i) * 4)) /* 8 of 3FA0-3FBC*/
 #define TXGBE_RDB_LXONTXC           0x1921C
@@ -489,6 +493,23 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_RDB_PFCMACDAH         0x19214
 #define TXGBE_RDB_TXSWERR           0x1906C
 #define TXGBE_RDB_TXSWERR_TB_FREE   0x3FF
+/* rdb_pl_cfg reg mask */
+#define TXGBE_RDB_PL_CFG_L4HDR          0x2
+#define TXGBE_RDB_PL_CFG_L3HDR          0x4
+#define TXGBE_RDB_PL_CFG_L2HDR          0x8
+#define TXGBE_RDB_PL_CFG_TUN_OUTER_L2HDR 0x20
+#define TXGBE_RDB_PL_CFG_TUN_TUNHDR     0x10
+#define TXGBE_RDB_PL_CFG_RSS_PL_MASK    0x7
+#define TXGBE_RDB_PL_CFG_RSS_PL_SHIFT   29
+
+/* Packet buffer allocation strategies */
+enum {
+	PBA_STRATEGY_EQUAL      = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL      PBA_STRATEGY_EQUAL
+	PBA_STRATEGY_WEIGHTED   = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED   PBA_STRATEGY_WEIGHTED
+};
+
 /* Receive Config masks */
 #define TXGBE_RDB_PB_CTL_RXEN           (0x80000000) /* Enable Receiver */
 #define TXGBE_RDB_PB_CTL_DISABLED       0x1
@@ -511,6 +532,32 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_PSR_CTL_MO                0x00000060U
 #define TXGBE_PSR_CTL_TPE               0x00000010U
 #define TXGBE_PSR_CTL_MO_SHIFT          5
+/* VT_CTL bitmasks */
+#define TXGBE_PSR_VM_CTL_DIS_DEFPL      0x20000000U /* disable default pool */
+#define TXGBE_PSR_VM_CTL_REPLEN         0x40000000U /* replication enabled */
+#define TXGBE_PSR_VM_CTL_POOL_SHIFT     7
+#define TXGBE_PSR_VM_CTL_POOL_MASK      (0x3F << TXGBE_PSR_VM_CTL_POOL_SHIFT)
+/* VLAN Control Bit Masks */
+#define TXGBE_PSR_VLAN_CTL_VET          0x0000FFFFU  /* bits 0-15 */
+#define TXGBE_PSR_VLAN_CTL_CFI          0x10000000U  /* bit 28 */
+#define TXGBE_PSR_VLAN_CTL_CFIEN        0x20000000U  /* bit 29 */
+#define TXGBE_PSR_VLAN_CTL_VFE          0x40000000U  /* bit 30 */
+
+/* VM L2 control */
+#define TXGBE_PSR_VM_L2CTL(_i)          (0x15600 + ((_i) * 4))
+/* VMOLR bitmasks */
+#define TXGBE_PSR_VM_L2CTL_LBDIS        0x00000002U /* disable loopback */
+#define TXGBE_PSR_VM_L2CTL_LLB          0x00000004U /* local pool loopback */
+#define TXGBE_PSR_VM_L2CTL_UPE          0x00000010U /* unicast promiscuous */
+#define TXGBE_PSR_VM_L2CTL_TPE          0x00000020U /* ETAG promiscuous */
+#define TXGBE_PSR_VM_L2CTL_VACC         0x00000040U /* accept nomatched vlan */
+#define TXGBE_PSR_VM_L2CTL_VPE          0x00000080U /* vlan promiscuous mode */
+#define TXGBE_PSR_VM_L2CTL_AUPE         0x00000100U /* accept untagged packets */
+#define TXGBE_PSR_VM_L2CTL_ROMPE        0x00000200U /* accept packets in MTA tbl */
+#define TXGBE_PSR_VM_L2CTL_ROPE         0x00000400U /* accept packets in UC tbl */
+#define TXGBE_PSR_VM_L2CTL_BAM          0x00000800U /* accept broadcast packets */
+#define TXGBE_PSR_VM_L2CTL_MPE          0x00001000U /* multicast promiscuous */
+
 /* mcast/ucast overflow tbl */
 #define TXGBE_PSR_MC_TBL(_i)    (0x15200  + ((_i) * 4))
 #define TXGBE_PSR_UC_TBL(_i)    (0x15400 + ((_i) * 4))
@@ -545,6 +592,55 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_PSR_LAN_FLEX_DW_H(_i)     (0x15C04 + ((_i) * 16))
 #define TXGBE_PSR_LAN_FLEX_MSK(_i)      (0x15C08 + ((_i) * 16))
 #define TXGBE_PSR_LAN_FLEX_CTL  0x15CFC
+
+#define TXGBE_PSR_MAX_SZ                0x15020
+
+/****************************** TDB ******************************************/
+#define TXGBE_TDB_RFCS                  0x1CE00
+#define TXGBE_TDB_PB_SZ(_i)             (0x1CC00 + ((_i) * 4)) /* 8 of these */
+#define TXGBE_TDB_MNG_TC                0x1CD10
+#define TXGBE_TDB_PRB_CTL               0x17010
+#define TXGBE_TDB_PBRARB_CTL            0x1CD00
+#define TXGBE_TDB_UP2TC                 0x1C800
+#define TXGBE_TDB_PBRARB_CFG(_i)        (0x1CD20 + ((_i) * 4)) /* 8 of (0-7) */
+
+#define TXGBE_TDB_PB_SZ_20KB    0x00005000U /* 20KB Packet Buffer */
+#define TXGBE_TDB_PB_SZ_40KB    0x0000A000U /* 40KB Packet Buffer */
+#define TXGBE_TDB_PB_SZ_MAX     0x00028000U /* 160KB Packet Buffer */
+#define TXGBE_TXPKT_SIZE_MAX    0xA /* Max Tx Packet size */
+#define TXGBE_MAX_PB            8
+
+/****************************** TSEC *****************************************/
+/* Security Control Registers */
+#define TXGBE_TSC_CTL                   0x1D000
+#define TXGBE_TSC_ST                    0x1D004
+#define TXGBE_TSC_BUF_AF                0x1D008
+#define TXGBE_TSC_BUF_AE                0x1D00C
+#define TXGBE_TSC_PRB_CTL               0x1D010
+#define TXGBE_TSC_MIN_IFG               0x1D020
+/* Security Bit Fields and Masks */
+#define TXGBE_TSC_CTL_SECTX_DIS         0x00000001U
+#define TXGBE_TSC_CTL_TX_DIS            0x00000002U
+#define TXGBE_TSC_CTL_STORE_FORWARD     0x00000004U
+#define TXGBE_TSC_CTL_IV_MSK_EN         0x00000008U
+#define TXGBE_TSC_ST_SECTX_RDY          0x00000001U
+#define TXGBE_TSC_ST_OFF_DIS            0x00000002U
+#define TXGBE_TSC_ST_ECC_TXERR          0x00000004U
+
+/********************************* RSEC **************************************/
+/* general rsec */
+#define TXGBE_RSC_CTL                   0x17000
+#define TXGBE_RSC_ST                    0x17004
+/* general rsec fields */
+#define TXGBE_RSC_CTL_SECRX_DIS         0x00000001U
+#define TXGBE_RSC_CTL_RX_DIS            0x00000002U
+#define TXGBE_RSC_CTL_CRC_STRIP         0x00000004U
+#define TXGBE_RSC_CTL_IV_MSK_EN         0x00000008U
+#define TXGBE_RSC_CTL_SAVE_MAC_ERR      0x00000040U
+#define TXGBE_RSC_ST_RSEC_RDY           0x00000001U
+#define TXGBE_RSC_ST_RSEC_OFLD_DIS      0x00000002U
+#define TXGBE_RSC_ST_ECC_RXERR          0x00000004U
+
 /************************************** ETH PHY ******************************/
 #define TXGBE_XPCS_IDA_ADDR    0x13000
 #define TXGBE_XPCS_IDA_DATA    0x13004
@@ -1095,6 +1191,10 @@ struct txgbe_bus_info {
 /* forward declaration */
 struct txgbe_hw;
 
+/* iterator type for walking multicast address lists */
+typedef u8* (*txgbe_mc_addr_itr) (struct txgbe_hw *hw, u8 **mc_addr_ptr,
+				  u32 *vmdq);
+
 /* Function pointer table */
 struct txgbe_eeprom_operations {
 	s32 (*init_params)(struct txgbe_hw *hw);
@@ -1117,6 +1217,9 @@ struct txgbe_mac_operations {
 	s32 (*stop_adapter)(struct txgbe_hw *hw);
 	s32 (*get_bus_info)(struct txgbe_hw *hw);
 	s32 (*set_lan_id)(struct txgbe_hw *hw);
+	s32 (*enable_rx_dma)(struct txgbe_hw *hw, u32 regval);
+	s32 (*disable_sec_rx_path)(struct txgbe_hw *hw);
+	s32 (*enable_sec_rx_path)(struct txgbe_hw *hw);
 	s32 (*acquire_swfw_sync)(struct txgbe_hw *hw, u32 mask);
 	s32 (*release_swfw_sync)(struct txgbe_hw *hw, u32 mask);
 
@@ -1134,14 +1237,22 @@ struct txgbe_mac_operations {
 				     bool *autoneg);
 	s32 (*set_rate_select_speed)(struct txgbe_hw *hw, u32 speed);
 
-	/* RAR */
+	/* Packet Buffer manipulation */
+	s32 (*setup_rxpba)(struct txgbe_hw *hw, int num_pb, u32 headroom,
+			   int strategy);
+
+	/* RAR, Multicast */
 	s32 (*set_rar)(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
 		       u32 enable_addr);
 	s32 (*clear_rar)(struct txgbe_hw *hw, u32 index);
 	s32 (*disable_rx)(struct txgbe_hw *hw);
+	s32 (*enable_rx)(struct txgbe_hw *hw);
 	s32 (*set_vmdq_san_mac)(struct txgbe_hw *hw, u32 vmdq);
 	s32 (*clear_vmdq)(struct txgbe_hw *hw, u32 rar, u32 vmdq);
 	s32 (*init_rx_addrs)(struct txgbe_hw *hw);
+	s32 (*update_mc_addr_list)(struct txgbe_hw *hw, u8 *mc_addr_list,
+				   u32 mc_addr_count,
+				   txgbe_mc_addr_itr func, bool clear);
 	s32 (*init_uta_tables)(struct txgbe_hw *hw);
 
 	/* Manageability interface */
@@ -1178,9 +1289,12 @@ struct txgbe_mac_info {
 	u16 wwnn_prefix;
 	/* prefix for World Wide Port Name (WWPN) */
 	u16 wwpn_prefix;
+#define TXGBE_MAX_MTA                   128
+	u32 mta_shadow[TXGBE_MAX_MTA];
 	s32 mc_filter_type;
 	u32 mcft_size;
 	u32 num_rar_entries;
+	u32 rx_pb_size;
 	u32 max_tx_queues;
 	u32 max_rx_queues;
 	u32 orig_sr_pcs_ctl2;
@@ -1235,6 +1349,7 @@ struct txgbe_hw {
 	enum txgbe_reset_type reset_type;
 	bool force_full_reset;
 	enum txgbe_link_status link_status;
+	u16 tpid[8];
 	u16 oem_ssid;
 	u16 oem_svid;
 };
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 11/16] net: txgbe: Allocate Rx and Tx resources
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (9 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 10/16] net: txgbe: Configure Rx and Tx unit of the MAC Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 12/16] net: txgbe: Add Rx and Tx cleanup routine Jiawen Wu
                   ` (4 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Allocate receive and transmit descriptors for all queues.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  64 +++
 .../net/ethernet/wangxun/txgbe/txgbe_lib.c    |   9 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 498 +++++++++++++++++-
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   |  45 ++
 4 files changed, 615 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 516b4f865e6d..d3db6f1aabc5 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -12,9 +12,19 @@
 #include "txgbe_type.h"
 
 /* TX/RX descriptor defines */
+#define TXGBE_DEFAULT_TXD               512
+#define TXGBE_DEFAULT_TX_WORK   256
 #define TXGBE_MAX_TXD                   8192
 #define TXGBE_MIN_TXD                   128
 
+#if (PAGE_SIZE < 8192)
+#define TXGBE_DEFAULT_RXD               512
+#define TXGBE_DEFAULT_RX_WORK   256
+#else
+#define TXGBE_DEFAULT_RXD               256
+#define TXGBE_DEFAULT_RX_WORK   128
+#endif
+
 #define TXGBE_MAX_RXD                   8192
 #define TXGBE_MIN_RXD                   128
 
@@ -36,13 +46,36 @@
 
 #define TXGBE_MAX_RX_DESC_POLL          10
 
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct txgbe_tx_buffer {
+	union txgbe_tx_desc *next_to_watch;
+	struct sk_buff *skb;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct txgbe_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	dma_addr_t page_dma;
+	struct page *page;
+};
+
 struct txgbe_ring {
 	struct txgbe_ring *next;        /* pointer to next ring in q_vector */
 	struct txgbe_q_vector *q_vector; /* backpointer to host q_vector */
 	struct net_device *netdev;      /* netdev ring belongs to */
 	struct device *dev;             /* device for DMA mapping */
+	void *desc;                     /* descriptor ring memory */
+	union {
+		struct txgbe_tx_buffer *tx_buffer_info;
+		struct txgbe_rx_buffer *rx_buffer_info;
+	};
 	u8 __iomem *tail;
 	dma_addr_t dma;                 /* phys. address of descriptor ring */
+	unsigned int size;              /* length in bytes */
 
 	u16 count;                      /* amount of descriptors */
 
@@ -50,6 +83,7 @@ struct txgbe_ring {
 	u8 reg_idx;
 	u16 next_to_use;
 	u16 next_to_clean;
+	u16 rx_buf_len;
 	u16 next_to_alloc;
 } ____cacheline_internodealigned_in_smp;
 
@@ -69,6 +103,13 @@ static inline unsigned int txgbe_rx_bufsz(struct txgbe_ring __maybe_unused *ring
 #endif
 }
 
+static inline unsigned int txgbe_rx_pg_order(struct txgbe_ring __maybe_unused *ring)
+{
+	return 0;
+}
+
+#define txgbe_rx_pg_size(_ring) (PAGE_SIZE << txgbe_rx_pg_order(_ring))
+
 struct txgbe_ring_container {
 	struct txgbe_ring *ring;        /* pointer to linked list of rings */
 	u16 work_limit;                 /* total work allowed per interrupt */
@@ -178,10 +219,12 @@ struct txgbe_adapter {
 	/* Tx fast path data */
 	int num_tx_queues;
 	u16 tx_itr_setting;
+	u16 tx_work_limit;
 
 	/* Rx fast path data */
 	int num_rx_queues;
 	u16 rx_itr_setting;
+	u16 rx_work_limit;
 
 	/* TX */
 	struct txgbe_ring *tx_ring[TXGBE_MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -249,6 +292,15 @@ enum txgbe_state_t {
 	__TXGBE_IN_SFP_INIT,
 };
 
+struct txgbe_cb {
+	dma_addr_t dma;
+	u16     append_cnt;      /* number of skb's appended */
+	bool    page_released;
+	bool    dma_released;
+};
+
+#define TXGBE_CB(skb) ((struct txgbe_cb *)(skb)->cb)
+
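/* Sketch of the skb->cb overlay pattern used via TXGBE_CB(); the helper
 * below and its BUILD_BUG_ON guard are illustrative assumptions, not part
 * of this patch.
 */
static inline void example_stash_rx_dma(struct sk_buff *skb, dma_addr_t dma)
{
	/* struct txgbe_cb must fit in the 48-byte skb->cb scratch area */
	BUILD_BUG_ON(sizeof(struct txgbe_cb) >
		     sizeof_field(struct sk_buff, cb));

	TXGBE_CB(skb)->dma = dma;
	TXGBE_CB(skb)->page_released = false;
}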
 /* needed by txgbe_main.c */
 void txgbe_service_event_schedule(struct txgbe_adapter *adapter);
 void txgbe_assign_netdev_ops(struct net_device *netdev);
@@ -263,6 +315,10 @@ void txgbe_reinit_locked(struct txgbe_adapter *adapter);
 void txgbe_reset(struct txgbe_adapter *adapter);
 s32 txgbe_init_shared_code(struct txgbe_hw *hw);
 void txgbe_disable_device(struct txgbe_adapter *adapter);
+int txgbe_setup_rx_resources(struct txgbe_ring *rx_ring);
+int txgbe_setup_tx_resources(struct txgbe_ring *tx_ring);
+void txgbe_free_rx_resources(struct txgbe_ring *rx_ring);
+void txgbe_free_tx_resources(struct txgbe_ring *tx_ring);
 void txgbe_configure_rx_ring(struct txgbe_adapter *adapter,
 			     struct txgbe_ring *ring);
 void txgbe_configure_tx_ring(struct txgbe_adapter *adapter,
@@ -271,13 +327,21 @@ int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter);
 void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter);
 void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter);
 void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter);
+void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring,
+				      struct txgbe_tx_buffer *tx_buffer);
 void txgbe_configure_port(struct txgbe_adapter *adapter);
 void txgbe_set_rx_mode(struct net_device *netdev);
 int txgbe_write_mc_addr_list(struct net_device *netdev);
 void txgbe_write_eitr(struct txgbe_q_vector *q_vector);
+int txgbe_poll(struct napi_struct *napi, int budget);
 void txgbe_disable_rx_queue(struct txgbe_adapter *adapter,
 			    struct txgbe_ring *ring);
 
+static inline struct netdev_queue *txring_txq(const struct txgbe_ring *ring)
+{
+	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+
 int txgbe_write_uc_addr_list(struct net_device *netdev, int pool);
 int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool);
 int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
index e7b6316e3b56..84b7e01cc27e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
@@ -166,11 +166,19 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter,
 	/* initialize CPU for DCA */
 	q_vector->cpu = -1;
 
+	/* initialize NAPI */
+	netif_napi_add(adapter->netdev, &q_vector->napi,
+		       txgbe_poll, 64);
+
 	/* tie q_vector and adapter together */
 	adapter->q_vector[v_idx] = q_vector;
 	q_vector->adapter = adapter;
 	q_vector->v_idx = v_idx;
 
+	/* initialize work limits */
+	q_vector->tx.work_limit = adapter->tx_work_limit;
+	q_vector->rx.work_limit = adapter->rx_work_limit;
+
 	/* initialize pointer to rings */
 	ring = q_vector->ring;
 
@@ -265,6 +273,7 @@ static void txgbe_free_q_vector(struct txgbe_adapter *adapter, int v_idx)
 		adapter->rx_ring[ring->queue_index] = NULL;
 
 	adapter->q_vector[v_idx] = NULL;
+	netif_napi_del(&q_vector->napi);
 	kfree_rcu(q_vector, rcu);
 }
 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 8f6946379c79..c66ad524750b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -41,6 +41,10 @@ static const struct pci_device_id txgbe_pci_tbl[] = {
 static struct workqueue_struct *txgbe_wq;
 
 static bool txgbe_is_sfp(struct txgbe_hw *hw);
+static void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring);
+static void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring);
+static void txgbe_napi_enable_all(struct txgbe_adapter *adapter);
+static void txgbe_napi_disable_all(struct txgbe_adapter *adapter);
 
 static void txgbe_check_minimum_link(struct txgbe_adapter *adapter)
 {
@@ -178,6 +182,28 @@ static void txgbe_set_ivar(struct txgbe_adapter *adapter, s8 direction,
 	}
 }
 
+void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring,
+				      struct txgbe_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_single(ring->dev,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
+					 DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev,
+			       dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len),
+			       DMA_TO_DEVICE);
+	}
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
+}
+
 /**
  * txgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -438,6 +464,18 @@ static irqreturn_t txgbe_msix_clean_rings(int __always_unused irq, void *data)
 	return IRQ_HANDLED;
 }
 
+/**
+ * txgbe_poll - NAPI polling RX/TX cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ **/
+int txgbe_poll(struct napi_struct *napi, int budget)
+{
+	return 0;
+}
+
 /**
  * txgbe_request_msix_irqs - Initialize MSI-X interrupts
  * @adapter: board private structure
@@ -1279,6 +1317,28 @@ void txgbe_set_rx_mode(struct net_device *netdev)
 	wr32(hw, TXGBE_PSR_VM_L2CTL(0), vmolr);
 }
 
+static void txgbe_napi_enable_all(struct txgbe_adapter *adapter)
+{
+	struct txgbe_q_vector *q_vector;
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+		q_vector = adapter->q_vector[q_idx];
+		napi_enable(&q_vector->napi);
+	}
+}
+
+static void txgbe_napi_disable_all(struct txgbe_adapter *adapter)
+{
+	struct txgbe_q_vector *q_vector;
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+		q_vector = adapter->q_vector[q_idx];
+		napi_disable(&q_vector->napi);
+	}
+}
+
 static void txgbe_configure_pb(struct txgbe_adapter *adapter)
 {
 	struct txgbe_hw *hw = &adapter->hw;
@@ -1403,6 +1463,7 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter)
 	/* make sure to complete pre-operations */
 	smp_mb__before_atomic();
 	clear_bit(__TXGBE_DOWN, &adapter->state);
+	txgbe_napi_enable_all(adapter);
 
 	if (txgbe_is_sfp(hw)) {
 		txgbe_sfp_link_config(adapter);
@@ -1434,6 +1495,9 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter)
 		wr32(hw, TXGBE_GPIO_EOI, TXGBE_GPIO_EOI_6);
 	txgbe_irq_enable(adapter, true, true);
 
+	/* enable transmits */
+	netif_tx_start_all_queues(adapter->netdev);
+
 	/* bring the link up in the watchdog, this could race with our first
 	 * link up interrupt but shouldn't be a problem
 	 */
@@ -1507,6 +1571,129 @@ void txgbe_reset(struct txgbe_adapter *adapter)
 	TCALL(hw, mac.ops.set_vmdq_san_mac, 0);
 }
 
+/**
+ * txgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	unsigned long size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!rx_ring->rx_buffer_info)
+		return;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		struct txgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+
+		if (rx_buffer->dma) {
+			dma_unmap_single(dev,
+					 rx_buffer->dma,
+					 rx_ring->rx_buf_len,
+					 DMA_FROM_DEVICE);
+			rx_buffer->dma = 0;
+		}
+
+		if (rx_buffer->skb) {
+			struct sk_buff *skb = rx_buffer->skb;
+
+			if (TXGBE_CB(skb)->dma_released) {
+				dma_unmap_single(dev,
+						 TXGBE_CB(skb)->dma,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
+				TXGBE_CB(skb)->dma = 0;
+				TXGBE_CB(skb)->dma_released = false;
+			}
+
+			if (TXGBE_CB(skb)->page_released)
+				dma_unmap_page(dev,
+					       TXGBE_CB(skb)->dma,
+					       txgbe_rx_bufsz(rx_ring),
+					       DMA_FROM_DEVICE);
+			dev_kfree_skb(skb);
+			rx_buffer->skb = NULL;
+		}
+
+		if (!rx_buffer->page)
+			continue;
+
+		dma_unmap_page(dev, rx_buffer->page_dma,
+			       txgbe_rx_pg_size(rx_ring),
+			       DMA_FROM_DEVICE);
+
+		__free_pages(rx_buffer->page,
+			     txgbe_rx_pg_order(rx_ring));
+		rx_buffer->page = NULL;
+	}
+
+	size = sizeof(struct txgbe_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+/**
+ * txgbe_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring)
+{
+	struct txgbe_tx_buffer *tx_buffer_info;
+	unsigned long size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!tx_ring->tx_buffer_info)
+		return;
+
+	/* Free all the Tx ring sk_buffs */
+	for (i = 0; i < tx_ring->count; i++) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		txgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+	}
+
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
+	size = sizeof(struct txgbe_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+}
+
+/**
+ * txgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void txgbe_clean_all_rx_rings(struct txgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		txgbe_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * txgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void txgbe_clean_all_tx_rings(struct txgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		txgbe_clean_tx_ring(adapter->tx_ring[i]);
+}
+
 void txgbe_disable_device(struct txgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -1526,11 +1713,15 @@ void txgbe_disable_device(struct txgbe_adapter *adapter)
 		/* this call also flushes the previous write */
 		txgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
+	netif_tx_stop_all_queues(netdev);
+
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
 	txgbe_irq_disable(adapter);
 
+	txgbe_napi_disable_all(adapter);
+
 	adapter->flags2 &= ~(TXGBE_FLAG2_PF_RESET_REQUESTED |
 			     TXGBE_FLAG2_GLOBAL_RESET_REQUESTED);
 	adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
@@ -1572,6 +1763,9 @@ void txgbe_down(struct txgbe_adapter *adapter)
 	if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)))
 		/* power down the optics for SFP+ fiber */
 		TCALL(&adapter->hw, mac.ops.disable_tx_laser);
+
+	txgbe_clean_all_tx_rings(adapter);
+	txgbe_clean_all_rx_rings(adapter);
 }
 
 /**
@@ -1641,12 +1835,181 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
 
 	adapter->max_q_vectors = TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE;
 
+	/* set default ring sizes */
+	adapter->tx_ring_count = TXGBE_DEFAULT_TXD;
+	adapter->rx_ring_count = TXGBE_DEFAULT_RXD;
+
+	/* set default work limits */
+	adapter->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
+	adapter->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+
 	set_bit(0, &adapter->fwd_bitmask);
 	set_bit(__TXGBE_DOWN, &adapter->state);
 
 	return 0;
 }
 
+/**
+ * txgbe_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int txgbe_setup_tx_resources(struct txgbe_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = -1;
+	int size;
+
+	size = sizeof(struct txgbe_tx_buffer) * tx_ring->count;
+
+	if (tx_ring->q_vector)
+		numa_node = tx_ring->q_vector->numa_node;
+
+	tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
+	if (!tx_ring->tx_buffer_info)
+		tx_ring->tx_buffer_info = vzalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(union txgbe_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	tx_ring->desc = dma_alloc_coherent(dev,
+					   tx_ring->size,
+					   &tx_ring->dma,
+					   GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!tx_ring->desc)
+		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+						   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc)
+		goto err;
+
+	return 0;
+
+err:
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * txgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int txgbe_setup_all_tx_resources(struct txgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = txgbe_setup_tx_resources(adapter->tx_ring[i]);
+		if (!err)
+			continue;
+
+		netif_err(adapter, probe, adapter->netdev,
+			  "Allocation for Tx Queue %u failed\n", i);
+		goto err_setup_tx;
+	}
+
+	return 0;
+err_setup_tx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		txgbe_free_tx_resources(adapter->tx_ring[i]);
+	return err;
+}
+
+/**
+ * txgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int txgbe_setup_rx_resources(struct txgbe_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = -1;
+	int size;
+
+	size = sizeof(struct txgbe_rx_buffer) * rx_ring->count;
+
+	if (rx_ring->q_vector)
+		numa_node = rx_ring->q_vector->numa_node;
+
+	rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
+	if (!rx_ring->rx_buffer_info)
+		rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
+		goto err;
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union txgbe_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	rx_ring->desc = dma_alloc_coherent(dev,
+					   rx_ring->size,
+					   &rx_ring->dma,
+					   GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!rx_ring->desc)
+		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+						   &rx_ring->dma, GFP_KERNEL);
+	if (!rx_ring->desc)
+		goto err;
+
+	return 0;
+err:
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * txgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int txgbe_setup_all_rx_resources(struct txgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = txgbe_setup_rx_resources(adapter->rx_ring[i]);
+		if (!err)
+			continue;
+
+		netif_err(adapter, probe, adapter->netdev,
+			  "Allocation for Rx Queue %u failed\n", i);
+		goto err_setup_rx;
+	}
+
+	return 0;
+err_setup_rx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		txgbe_free_rx_resources(adapter->rx_ring[i]);
+	return err;
+}
+
 /**
  * txgbe_setup_isb_resources - allocate interrupt status resources
  * @adapter: board private structure
@@ -1682,6 +2045,79 @@ static void txgbe_free_isb_resources(struct txgbe_adapter *adapter)
 	adapter->isb_mem = NULL;
 }
 
+/**
+ * txgbe_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void txgbe_free_tx_resources(struct txgbe_ring *tx_ring)
+{
+	txgbe_clean_tx_ring(tx_ring);
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size,
+			  tx_ring->desc, tx_ring->dma);
+	tx_ring->desc = NULL;
+}
+
+/**
+ * txgbe_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void txgbe_free_all_tx_resources(struct txgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		txgbe_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
+ * txgbe_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void txgbe_free_rx_resources(struct txgbe_ring *rx_ring)
+{
+	txgbe_clean_rx_ring(rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(rx_ring->dev, rx_ring->size,
+			  rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * txgbe_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void txgbe_free_all_rx_resources(struct txgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		txgbe_free_rx_resources(adapter->rx_ring[i]);
+}
+
 /**
  * txgbe_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -1701,10 +2137,20 @@ int txgbe_open(struct net_device *netdev)
 
 	netif_carrier_off(netdev);
 
-	err = txgbe_setup_isb_resources(adapter);
+	/* allocate transmit descriptors */
+	err = txgbe_setup_all_tx_resources(adapter);
 	if (err)
 		goto err_reset;
 
+	/* allocate receive descriptors */
+	err = txgbe_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_free_tx;
+
+	err = txgbe_setup_isb_resources(adapter);
+	if (err)
+		goto err_free_rx;
+
 	txgbe_configure(adapter);
 
 	err = txgbe_request_irq(adapter);
@@ -1728,6 +2174,10 @@ int txgbe_open(struct net_device *netdev)
 	txgbe_free_irq(adapter);
 err_free_isb:
 	txgbe_free_isb_resources(adapter);
+err_free_rx:
+	txgbe_free_all_rx_resources(adapter);
+err_free_tx:
+	txgbe_free_all_tx_resources(adapter);
 err_reset:
 	txgbe_reset(adapter);
 
@@ -1748,9 +2198,14 @@ static void txgbe_close_suspend(struct txgbe_adapter *adapter)
 	txgbe_disable_device(adapter);
 	if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))
 		TCALL(hw, mac.ops.disable_tx_laser);
+	txgbe_clean_all_tx_rings(adapter);
+	txgbe_clean_all_rx_rings(adapter);
+
 	txgbe_free_irq(adapter);
 
 	txgbe_free_isb_resources(adapter);
+	txgbe_free_all_rx_resources(adapter);
+	txgbe_free_all_tx_resources(adapter);
 }
 
 /**
@@ -1772,6 +2227,8 @@ int txgbe_close(struct net_device *netdev)
 	txgbe_free_irq(adapter);
 
 	txgbe_free_isb_resources(adapter);
+	txgbe_free_all_rx_resources(adapter);
+	txgbe_free_all_tx_resources(adapter);
 
 	txgbe_release_hw_control(adapter);
 
@@ -1904,6 +2361,7 @@ static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter)
 		   "NIC Link is Up %s\n", speed_str);
 
 	netif_carrier_on(netdev);
+	netif_tx_wake_all_queues(netdev);
 }
 
 /**
@@ -1924,6 +2382,41 @@ static void txgbe_watchdog_link_is_down(struct txgbe_adapter *adapter)
 
 	netif_info(adapter, drv, netdev, "NIC Link is Down\n");
 	netif_carrier_off(netdev);
+	netif_tx_stop_all_queues(netdev);
+}
+
+static bool txgbe_ring_tx_pending(struct txgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct txgbe_ring *tx_ring = adapter->tx_ring[i];
+
+		if (tx_ring->next_to_use != tx_ring->next_to_clean)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * txgbe_watchdog_flush_tx - flush queues on link down
+ * @adapter: pointer to the device adapter structure
+ **/
+static void txgbe_watchdog_flush_tx(struct txgbe_adapter *adapter)
+{
+	if (!netif_carrier_ok(adapter->netdev)) {
+		if (txgbe_ring_tx_pending(adapter)) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context).
+			 */
+			netif_warn(adapter, drv, adapter->netdev,
+				   "initiating reset due to lost link with pending Tx work\n");
+			adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED;
+		}
+	}
 }
 
 /**
@@ -1944,6 +2437,8 @@ static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter)
 		txgbe_watchdog_link_is_up(adapter);
 	else
 		txgbe_watchdog_link_is_down(adapter);
+
+	txgbe_watchdog_flush_tx(adapter);
 }
 
 /**
@@ -2453,6 +2948,7 @@ static int txgbe_probe(struct pci_dev *pdev,
 
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
+	netif_tx_stop_all_queues(netdev);
 
 	/* calculate the expected PCIe bandwidth required for optimal
 	 * performance. Note that some older parts will never have enough
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index fc51f82b6087..a2a38fc842e8 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -955,6 +955,51 @@ enum {
 #define TXGBE_PCIDEVCTRL2_4_8s          0xd
 #define TXGBE_PCIDEVCTRL2_17_34s        0xe
 
+/* Transmit Descriptor */
+union txgbe_tx_desc {
+	struct {
+		__le64 buffer_addr; /* Address of descriptor's data buf */
+		__le32 cmd_type_len;
+		__le32 olinfo_status;
+	} read;
+	struct {
+		__le64 rsvd; /* Reserved */
+		__le32 nxtseq_seed;
+		__le32 status;
+	} wb;
+};
+
+/* Receive Descriptor */
+union txgbe_rx_desc {
+	struct {
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			union {
+				__le32 data;
+				struct {
+					__le16 pkt_info; /* RSS, Pkt type */
+					__le16 hdr_info; /* Splithdr, hdrlen */
+				} hs_rss;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				struct {
+					__le16 ip_id; /* IP id */
+					__le16 csum; /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error; /* ext status/error */
+			__le16 length; /* Packet length */
+			__le16 vlan; /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
 /****************** Manageablility Host Interface defines ********************/
 #define TXGBE_HI_MAX_BLOCK_BYTE_LENGTH  256 /* Num of bytes in range */
 #define TXGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 12/16] net: txgbe: Add Rx and Tx cleanup routine
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (10 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 11/16] net: txgbe: Allocate Rx and Tx resources Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 13/16] net: txgbe: Add device Rx features Jiawen Wu
                   ` (3 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Support cleaning all queues associated with a q_vector, in NAPI polling.
Add a simple receive path to process packets, without hardware offload
features.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  63 ++
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 823 ++++++++++++++++++
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   |  13 +
 3 files changed, 899 insertions(+)
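
For reference, txgbe_poll() below follows the usual NAPI contract; a
minimal sketch of that pattern (the example_* names are illustrative
only, not part of this driver) looks like:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		/* reclaim Tx then Rx work, at most "budget" Rx packets */
		int work_done = example_clean_rings(napi, budget);

		/* budget exhausted: stay in polling mode, IRQ stays masked */
		if (work_done >= budget)
			return budget;

		/* all work done: leave polling mode, re-enable the interrupt */
		napi_complete(napi);
		example_irq_enable(napi);
		return work_done;
	}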

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index d3db6f1aabc5..4d998ed33998 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -44,14 +44,28 @@
  */
 #define TXGBE_RX_HDR_SIZE       TXGBE_RXBUFFER_256
 
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define TXGBE_RX_BUFFER_WRITE   16      /* Must be power of 2 */
+#define TXGBE_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
 #define TXGBE_MAX_RX_DESC_POLL          10
 
+#ifndef MAX_SKB_FRAGS
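+/* DESC_NEEDED is the worst-case number of descriptors one transmit can
+ * consume: roughly one per fragment plus slack for header and context
+ * descriptors; it feeds the Tx queue wake threshold
+ */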
+#define DESC_NEEDED     4
+#elif (MAX_SKB_FRAGS < 16)
+#define DESC_NEEDED     ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#else
+#define DESC_NEEDED     (MAX_SKB_FRAGS + 4)
+#endif
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer
  */
 struct txgbe_tx_buffer {
 	union txgbe_tx_desc *next_to_watch;
 	struct sk_buff *skb;
+	unsigned int bytecount;
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 };
@@ -61,6 +75,22 @@ struct txgbe_rx_buffer {
 	dma_addr_t dma;
 	dma_addr_t page_dma;
 	struct page *page;
+	unsigned int page_offset;
+};
+
+struct txgbe_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+struct txgbe_tx_queue_stats {
+	u64 restart_queue;
+};
+
+struct txgbe_rx_queue_stats {
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
 };
 
 struct txgbe_ring {
@@ -85,6 +115,13 @@ struct txgbe_ring {
 	u16 next_to_clean;
 	u16 rx_buf_len;
 	u16 next_to_alloc;
+	struct txgbe_queue_stats stats;
+	struct u64_stats_sync syncp;
+
+	union {
+		struct txgbe_tx_queue_stats tx_stats;
+		struct txgbe_rx_queue_stats rx_stats;
+	};
 } ____cacheline_internodealigned_in_smp;
 
 #define TXGBE_MAX_FDIR_INDICES          63
@@ -112,8 +149,11 @@ static inline unsigned int txgbe_rx_pg_order(struct txgbe_ring __maybe_unused *r
 
 struct txgbe_ring_container {
 	struct txgbe_ring *ring;        /* pointer to linked list of rings */
+	unsigned int total_bytes;       /* total bytes processed this int */
+	unsigned int total_packets;     /* total packets processed this int */
 	u16 work_limit;                 /* total work allowed per interrupt */
 	u8 count;                       /* total number of rings in vector */
+	u8 itr;                         /* current ITR setting for ring */
 };
 
 /* iterator for handling rings in ring container */
@@ -151,6 +191,27 @@ struct txgbe_q_vector {
 #define TXGBE_16K_ITR           248
 #define TXGBE_12K_ITR           336
 
+/* txgbe_test_staterr - tests bits in Rx descriptor status and error fields */
+static inline __le32 txgbe_test_staterr(union txgbe_rx_desc *rx_desc,
+					const u32 stat_err_bits)
+{
+	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
+/* txgbe_desc_unused - calculate if we have unused descriptors */
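+/* e.g. with count = 512, next_to_clean = 10 and next_to_use = 200 there
+ * are 512 + 10 - 200 - 1 = 321 unused slots; one slot is always kept
+ * empty so that a full ring can be told apart from an empty one
+ */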
+static inline u16 txgbe_desc_unused(struct txgbe_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+#define TXGBE_RX_DESC(R, i)     \
+	(&(((union txgbe_rx_desc *)((R)->desc))[i]))
+#define TXGBE_TX_DESC(R, i)     \
+	(&(((union txgbe_tx_desc *)((R)->desc))[i]))
+
 #define TXGBE_MAX_JUMBO_FRAME_SIZE      9432 /* max payload 9414 */
 
 #define TCP_TIMER_VECTOR        0
@@ -233,6 +294,7 @@ struct txgbe_adapter {
 
 	/* RX */
 	struct txgbe_ring *rx_ring[TXGBE_MAX_RX_QUEUES];
+
 	struct txgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
 
 	int num_q_vectors;      /* current number of q_vectors for device */
@@ -329,6 +391,7 @@ void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter);
 void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter);
 void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring,
 				      struct txgbe_tx_buffer *tx_buffer);
+void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count);
 void txgbe_configure_port(struct txgbe_adapter *adapter);
 void txgbe_set_rx_mode(struct net_device *netdev);
 int txgbe_write_mc_addr_list(struct net_device *netdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index c66ad524750b..e7ab9c2c093d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -204,6 +204,653 @@ void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring,
 	/* tx_buffer must be completely set up in the transmit path */
 }
 
+/**
+ * txgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ **/
+static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector,
+			       struct txgbe_ring *tx_ring)
+{
+	struct txgbe_adapter *adapter = q_vector->adapter;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = q_vector->tx.work_limit;
+	unsigned int i = tx_ring->next_to_clean;
+	struct txgbe_tx_buffer *tx_buffer;
+	union txgbe_tx_desc *tx_desc;
+
+	if (test_bit(__TXGBE_DOWN, &adapter->state))
+		return true;
+
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = TXGBE_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
+
+	do {
+		union txgbe_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		smp_rmb();
+
+		/* if DD is not set pending work has not been completed */
+		if (!(eop_desc->wb.status & cpu_to_le32(TXGBE_TXD_STAT_DD)))
+			break;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets++;
+
+		/* free the skb */
+		dev_consume_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
+		/* clear tx_buffer data */
+		tx_buffer->skb = NULL;
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = TXGBE_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buffer, len, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = TXGBE_TX_DESC(tx_ring, 0);
+		}
+
+		/* issue prefetch for next Tx descriptor */
+		prefetch(tx_desc);
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->syncp);
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
+
+	netdev_tx_completed_queue(txring_txq(tx_ring),
+				  total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+		     (txgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index) &&
+		    !test_bit(__TXGBE_DOWN, &adapter->state)) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+			++tx_ring->tx_stats.restart_queue;
+		}
+	}
+
+	return !!budget;
+}
+
+static bool txgbe_alloc_mapped_page(struct txgbe_ring *rx_ring,
+				    struct txgbe_rx_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	/* alloc new page for storage */
+	page = dev_alloc_pages(txgbe_rx_pg_order(rx_ring));
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	/* map page for use */
+	dma = dma_map_page(rx_ring->dev, page, 0,
+			   txgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+
+	/* if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, txgbe_rx_pg_order(rx_ring));
+
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	bi->page_dma = dma;
+	bi->page = page;
+	bi->page_offset = 0;
+
+	return true;
+}
+
+/**
+ * txgbe_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count)
+{
+	union txgbe_rx_desc *rx_desc;
+	struct txgbe_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = TXGBE_RX_DESC(rx_ring, i);
+	bi = &rx_ring->rx_buffer_info[i];
+	i -= rx_ring->count;
+
+	do {
+		if (!txgbe_alloc_mapped_page(rx_ring, bi))
+			break;
+		rx_desc->read.pkt_addr =
+			cpu_to_le64(bi->page_dma + bi->page_offset);
+
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = TXGBE_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.upper.status_error = 0;
+
+		cleaned_count--;
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+		/* update next to alloc since we have filled the ring */
+		rx_ring->next_to_alloc = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, rx_ring->tail);
+	}
+}
+
+/**
+ * txgbe_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate fields within the skb.
+ **/
+static void txgbe_process_skb_fields(struct txgbe_ring *rx_ring,
+				     union txgbe_rx_desc *rx_desc,
+				     struct sk_buff *skb)
+{
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+static void txgbe_rx_skb(struct txgbe_q_vector *q_vector,
+			 struct sk_buff *skb)
+{
+	napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * txgbe_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool txgbe_is_non_eop(struct txgbe_ring *rx_ring,
+			     union txgbe_rx_desc *rx_desc,
+			     struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(TXGBE_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)))
+		return false;
+
+	rx_ring->rx_buffer_info[ntc].skb = skb;
+	rx_ring->rx_stats.non_eop_descs++;
+
+	return true;
+}
+
+static void txgbe_pull_tail(struct sk_buff *skb)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int pull_len;
+	unsigned char *va;
+
+	/* it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lowmem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(skb->dev, va, TXGBE_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	skb_frag_off_add(frag, pull_len);
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * txgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb.  The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void txgbe_dma_sync_frag(struct txgbe_ring *rx_ring,
+				struct sk_buff *skb)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      TXGBE_CB(skb)->dma,
+				      skb_frag_off(frag),
+				      skb_frag_size(frag),
+				      DMA_FROM_DEVICE);
+
+	/* If the page was released, just unmap it. */
+	if (unlikely(TXGBE_CB(skb)->page_released)) {
+		dma_unmap_page_attrs(rx_ring->dev, TXGBE_CB(skb)->dma,
+				     txgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     TXGBE_RX_DMA_ATTR);
+	}
+}
+
+/**
+ * txgbe_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right.  These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool txgbe_cleanup_headers(struct txgbe_ring *rx_ring,
+				  union txgbe_rx_desc *rx_desc,
+				  struct sk_buff *skb)
+{
+	struct net_device *netdev = rx_ring->netdev;
+
+	/* verify that the packet does not have any known errors */
+	if (unlikely(txgbe_test_staterr(rx_desc,
+					TXGBE_RXD_ERR_FRAME_ERR_MASK) &&
+		     !(netdev->features & NETIF_F_RXALL))) {
+		dev_kfree_skb_any(skb);
+		return true;
+	}
+
+	/* place header in linear portion of buffer */
+	if (skb_is_nonlinear(skb) && !skb_headlen(skb))
+		txgbe_pull_tail(skb);
+
+	/* if eth_skb_pad returns an error the skb was freed */
+	if (eth_skb_pad(skb))
+		return true;
+
+	return false;
+}
+
+/**
+ * txgbe_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void txgbe_reuse_rx_page(struct txgbe_ring *rx_ring,
+				struct txgbe_rx_buffer *old_buff)
+{
+	struct txgbe_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_buffer_info[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer page from old buffer to new buffer */
+	new_buff->page_dma = old_buff->page_dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+
+	/* sync the buffer for use by the device */
+	dma_sync_single_range_for_device(rx_ring->dev, new_buff->page_dma,
+					 new_buff->page_offset,
+					 txgbe_rx_bufsz(rx_ring),
+					 DMA_FROM_DEVICE);
+}
+
+static inline bool txgbe_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * txgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool txgbe_add_rx_frag(struct txgbe_ring *rx_ring,
+			      struct txgbe_rx_buffer *rx_buffer,
+			      union txgbe_rx_desc *rx_desc,
+			      struct sk_buff *skb)
+{
+	struct page *page = rx_buffer->page;
+	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = txgbe_rx_bufsz(rx_ring);
+#else
+	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int last_offset = txgbe_rx_pg_size(rx_ring) -
+				   txgbe_rx_bufsz(rx_ring);
+#endif
+
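+	/* frames that fit in the header buffer are copied straight into
+	 * the skb linear area so the page can be recycled immediately;
+	 * larger frames are attached below as page fragments
+	 */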
+	if (size <= TXGBE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+		/* page is not reserved, we can reuse buffer as-is */
+		if (likely(!txgbe_page_is_reserved(page)))
+			return true;
+
+		/* this page cannot be reused so discard it */
+		__free_pages(page, txgbe_rx_pg_order(rx_ring));
+		return false;
+	}
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+			rx_buffer->page_offset, size, truesize);
+
+	/* avoid re-using remote pages */
+	if (unlikely(txgbe_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely(page_count(page) != 1))
+		return false;
+
+	/* flip page offset to other buffer */
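+	/* e.g. with 4K pages and 2K buffers the offset toggles between
+	 * 0 and 2048, so the two halves of the page are used in turn
+	 */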
+	rx_buffer->page_offset ^= truesize;
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += truesize;
+
+	if (rx_buffer->page_offset > last_offset)
+		return false;
+#endif
+
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
+	 */
+	page_ref_inc(page);
+
+	return true;
+}
+
+static struct sk_buff *txgbe_fetch_rx_buffer(struct txgbe_ring *rx_ring,
+					     union txgbe_rx_desc *rx_desc)
+{
+	struct txgbe_rx_buffer *rx_buffer;
+	struct sk_buff *skb;
+	struct page *page;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	page = rx_buffer->page;
+	prefetchw(page);
+
+	skb = rx_buffer->skb;
+
+	if (likely(!skb)) {
+		void *page_addr = page_address(page) +
+				  rx_buffer->page_offset;
+
+		/* prefetch first cache line of first page */
+		prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+		prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+		/* allocate a skb to store the frags */
+		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+						TXGBE_RX_HDR_SIZE);
+		if (unlikely(!skb)) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			return NULL;
+		}
+
+		/* we will be copying header into skb->data in
+		 * pskb_may_pull so it is in our interest to prefetch
+		 * it now to avoid a possible cache miss
+		 */
+		prefetchw(skb->data);
+
+		/* Delay unmapping of the first packet. It carries the
+		 * header information, HW may still access the header
+		 * after the writeback.  Only unmap it when EOP is
+		 * reached
+		 */
+		if (likely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)))
+			goto dma_sync;
+
+		TXGBE_CB(skb)->dma = rx_buffer->page_dma;
+	} else {
+		if (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP))
+			txgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
+		/* we are reusing so sync this buffer for CPU use */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->page_dma,
+					      rx_buffer->page_offset,
+					      txgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		rx_buffer->skb = NULL;
+	}
+
+	/* pull page into skb */
+	if (txgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+		/* hand second half of page back to the ring */
+		txgbe_reuse_rx_page(rx_ring, rx_buffer);
+	} else if (TXGBE_CB(skb)->dma == rx_buffer->page_dma) {
+		/* the page has been released from the ring */
+		TXGBE_CB(skb)->page_released = true;
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page(rx_ring->dev, rx_buffer->page_dma,
+			       txgbe_rx_pg_size(rx_ring),
+			       DMA_FROM_DEVICE);
+	}
+
+	/* clear contents of buffer_info */
+	rx_buffer->page = NULL;
+
+	return skb;
+}
+
+/**
+ * txgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector,
+			      struct txgbe_ring *rx_ring,
+			      int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	u16 cleaned_count = txgbe_desc_unused(rx_ring);
+
+	do {
+		union txgbe_rx_desc *rx_desc;
+		struct sk_buff *skb;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= TXGBE_RX_BUFFER_WRITE) {
+			txgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = TXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_DD))
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		/* retrieve a buffer from the ring */
+		skb = txgbe_fetch_rx_buffer(rx_ring, rx_desc);
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb)
+			break;
+
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (txgbe_is_non_eop(rx_ring, rx_desc, skb))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (txgbe_cleanup_headers(rx_ring, rx_desc, skb))
+			continue;
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		txgbe_process_skb_fields(rx_ring, rx_desc, skb);
+
+		txgbe_rx_skb(q_vector, skb);
+
+		/* update budget accounting */
+		total_rx_packets++;
+	} while (likely(total_rx_packets < budget));
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	return total_rx_packets;
+}
+
 /**
  * txgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -239,6 +886,74 @@ static void txgbe_configure_msix(struct txgbe_adapter *adapter)
 	wr32(&adapter->hw, TXGBE_PX_ITR(v_idx), 1950);
 }
 
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * txgbe_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt.  The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern.  Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void txgbe_update_itr(struct txgbe_q_vector *q_vector,
+			     struct txgbe_ring_container *ring_container)
+{
+	int packets = ring_container->total_packets;
+	int bytes = ring_container->total_bytes;
+	u8 itr_setting = ring_container->itr;
+	u32 timepassed_us;
+	u64 bytes_perint;
+
+	if (packets == 0)
+		return;
+
+	/* simple throttlerate management
+	 *   0-10MB/s   lowest (100000 ints/s)
+	 *  10-20MB/s   low    (20000 ints/s)
+	 *  20-1249MB/s bulk   (12000 ints/s)
+	 */
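+	/* e.g. at TXGBE_20K_ITR the timeslice is 200 >> 2 = 50 us, so
+	 * 3000 bytes in one interrupt gives 60 bytes/us and selects the
+	 * bulk_latency range
+	 */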
+	/* what was last interrupt timeslice? */
+	timepassed_us = q_vector->itr >> 2;
+	if (timepassed_us == 0)
+		return;
+	bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+	switch (itr_setting) {
+	case lowest_latency:
+		if (bytes_perint > 10)
+			itr_setting = low_latency;
+		break;
+	case low_latency:
+		if (bytes_perint > 20)
+			itr_setting = bulk_latency;
+		else if (bytes_perint <= 10)
+			itr_setting = lowest_latency;
+		break;
+	case bulk_latency:
+		if (bytes_perint <= 20)
+			itr_setting = low_latency;
+		break;
+	}
+
+	/* clear work counters since we have the values we need */
+	ring_container->total_bytes = 0;
+	ring_container->total_packets = 0;
+
+	/* write updated itr to ring container */
+	ring_container->itr = itr_setting;
+}
+
 /**
  * txgbe_write_eitr - write EITR register in hardware specific way
  * @q_vector: structure containing interrupt and ring information
@@ -259,6 +974,43 @@ void txgbe_write_eitr(struct txgbe_q_vector *q_vector)
 	wr32(hw, TXGBE_PX_ITR(v_idx), itr_reg);
 }
 
+static void txgbe_set_itr(struct txgbe_q_vector *q_vector)
+{
+	u16 new_itr = q_vector->itr;
+	u8 current_itr;
+
+	txgbe_update_itr(q_vector, &q_vector->tx);
+	txgbe_update_itr(q_vector, &q_vector->rx);
+
+	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = TXGBE_100K_ITR;
+		break;
+	case low_latency:
+		new_itr = TXGBE_20K_ITR;
+		break;
+	case bulk_latency:
+		new_itr = TXGBE_12K_ITR;
+		break;
+	default:
+		break;
+	}
+
+	if (new_itr != q_vector->itr) {
+		/* do an exponential smoothing */
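+		/* e.g. moving from 200 (20K ints/s) towards 40
+		 * (100K ints/s) gives (10*40*200)/((9*40)+200) = 142,
+		 * so the ITR converges over several passes rather
+		 * than jumping straight to the new value
+		 */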
+		new_itr = (10 * new_itr * q_vector->itr) /
+			  ((9 * new_itr) + q_vector->itr);
+
+		/* save the algorithm value here */
+		q_vector->itr = new_itr;
+
+		txgbe_write_eitr(q_vector);
+	}
+}
+
 /**
  * txgbe_check_overtemp_subtask - check for over temperature
  * @adapter: pointer to adapter
@@ -473,6 +1225,50 @@ static irqreturn_t txgbe_msix_clean_rings(int __always_unused irq, void *data)
  **/
 int txgbe_poll(struct napi_struct *napi, int budget)
 {
+	struct txgbe_q_vector *q_vector =
+			       container_of(napi, struct txgbe_q_vector, napi);
+	struct txgbe_adapter *adapter = q_vector->adapter;
+	struct txgbe_ring *ring;
+	int per_ring_budget;
+	bool clean_complete = true;
+
+	txgbe_for_each_ring(ring, q_vector->tx) {
+		if (!txgbe_clean_tx_irq(q_vector, ring))
+			clean_complete = false;
+	}
+
+	/* Exit if we are called by netpoll */
+	if (budget <= 0)
+		return budget;
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling
+	 */
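+	/* e.g. the default NAPI budget of 64 split across 3 Rx rings
+	 * leaves each ring a budget of 21
+	 */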
+	if (q_vector->rx.count > 1)
+		per_ring_budget = max(budget / q_vector->rx.count, 1);
+	else
+		per_ring_budget = budget;
+
+	txgbe_for_each_ring(ring, q_vector->rx) {
+		int cleaned = txgbe_clean_rx_irq(q_vector, ring,
+						 per_ring_budget);
+
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
+	}
+
+	/* If all work not completed, return budget and keep polling */
+	if (!clean_complete)
+		return budget;
+
+	/* all work done, exit the polling mode */
+	napi_complete(napi);
+	if (adapter->rx_itr_setting == 1)
+		txgbe_set_itr(q_vector);
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		txgbe_intr_enable(&adapter->hw,
+				  TXGBE_INTR_Q(q_vector->v_idx));
+
 	return 0;
 }
 
@@ -933,6 +1729,7 @@ void txgbe_configure_rx_ring(struct txgbe_adapter *adapter,
 	      TXGBE_PX_RR_CFG_RR_EN, TXGBE_PX_RR_CFG_RR_EN);
 
 	txgbe_rx_desc_queue_enable(adapter, ring);
+	txgbe_alloc_rx_buffers(ring, txgbe_desc_unused(ring));
 }
 
 static void txgbe_setup_psrtype(struct txgbe_adapter *adapter)
@@ -2760,6 +3557,8 @@ static int txgbe_probe(struct pci_dev *pdev,
 	u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
 	u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
 	u16 build = 0, major = 0, patch = 0;
+	u16 ctl = 0;
+	char *info_string, *i_s_var;
 	u8 part_str[TXGBE_PBANUM_LENGTH];
 	u32 etrack_id = 0;
 
@@ -2786,6 +3585,13 @@ static int txgbe_probe(struct pci_dev *pdev,
 	pci_enable_pcie_error_reporting(pdev);
 	pci_set_master(pdev);
 
+	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);
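+	/* clamp the PCIe Max Read Request Size to 256 bytes unless it
+	 * is already configured to 128 or 256 bytes
+	 */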
+	if (((ctl & PCI_EXP_DEVCTL_READRQ) != PCI_EXP_DEVCTL_READRQ_128B) &&
+	    ((ctl & PCI_EXP_DEVCTL_READRQ) != PCI_EXP_DEVCTL_READRQ_256B))
+		pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+						   PCI_EXP_DEVCTL_READRQ,
+						   PCI_EXP_DEVCTL_READRQ_256B);
+
 	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
 					 sizeof(struct txgbe_adapter),
 					 TXGBE_MAX_TX_QUEUES,
@@ -2985,6 +3791,23 @@ static int txgbe_probe(struct pci_dev *pdev,
 		   netdev->dev_addr[2], netdev->dev_addr[3],
 		   netdev->dev_addr[4], netdev->dev_addr[5]);
 
+#define INFO_STRING_LEN 255
+	info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
+	if (!info_string) {
+		netif_err(adapter, probe, netdev,
+			  "allocation for info string failed\n");
+		goto no_info_string;
+	}
+	i_s_var = info_string;
+	i_s_var += sprintf(info_string, "Enabled Features: ");
+	i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ",
+			   adapter->num_rx_queues, adapter->num_tx_queues);
+
+	WARN_ON(i_s_var > (info_string + INFO_STRING_LEN));
+	/* end features printing */
+	netif_info(adapter, probe, netdev, "%s\n", info_string);
+	kfree(info_string);
+no_info_string:
 	/* firmware requires blank driver version */
 	TCALL(hw, mac.ops.set_fw_drv_ver, 0xFF, 0xFF, 0xFF, 0xFF);
 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index a2a38fc842e8..5b823cd988ca 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -955,6 +955,19 @@ enum {
 #define TXGBE_PCIDEVCTRL2_4_8s          0xd
 #define TXGBE_PCIDEVCTRL2_17_34s        0xe
 
+/******************* Receive Descriptor bit definitions **********************/
+#define TXGBE_RXD_STAT_DD               0x00000001U /* Done */
+#define TXGBE_RXD_STAT_EOP              0x00000002U /* End of Packet */
+
+#define TXGBE_RXD_ERR_MASK              0xfff00000U /* RDESC.ERRORS mask */
+#define TXGBE_RXD_ERR_RXE               0x20000000U /* Any MAC Error */
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define TXGBE_RXD_ERR_FRAME_ERR_MASK    TXGBE_RXD_ERR_RXE
+
+/*********************** Transmit Descriptor Config Masks ****************/
+#define TXGBE_TXD_STAT_DD               0x00000001U /* Descriptor Done */
+
 /* Transmit Descriptor */
 union txgbe_tx_desc {
 	struct {
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 13/16] net: txgbe: Add device Rx features
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (11 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 12/16] net: txgbe: Add Rx and Tx cleanup routine Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 14/16] net: txgbe: Add transmit path to process packets Jiawen Wu
                   ` (2 subsequent siblings)
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Support RSC/LRO, Rx checksum offload, VLAN, jumbo frames, VXLAN, etc.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 .../device_drivers/ethernet/wangxun/txgbe.rst |  40 ++
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  36 ++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 391 +++++++++++++-
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |  64 +++
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 484 +++++++++++++++++-
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   |  54 +-
 6 files changed, 1064 insertions(+), 5 deletions(-)

diff --git a/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst b/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
index 3c7656057c69..b331c73c8393 100644
--- a/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
+++ b/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
@@ -12,6 +12,7 @@ Contents
 ========
 
 - Identifying Your Adapter
+- Additional Features and Configurations
 - Support
 
 
@@ -57,6 +58,45 @@ Laser turns off for SFP+ when ifconfig ethX down
 "ifconfig ethX up" turns on the laser.
 
 
+Additional Features and Configurations
+======================================
+
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit
+(MTU) to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number::
+
+  ifconfig eth<x> mtu 9000 up
+
+NOTES:
+- The maximum MTU setting for Jumbo Frames is 9710. This value coincides
+  with the maximum Jumbo Frames size of 9728 bytes.
+- This driver will attempt to use multiple page-sized buffers to receive
+  each jumbo packet. This should help to avoid buffer starvation issues
+  when allocating receive packets.
+
+Hardware Receive Side Coalescing (HW RSC)
+-----------------------------------------
+Sapphire adapters support HW RSC, which can merge multiple
+frames from the same IPv4 TCP/IP flow into a single structure that can span
+one or more descriptors. It works similarly to the software Large Receive
+Offload (LRO) technique.
+
+VXLAN Overlay HW Offloading
+---------------------------
+Virtual Extensible LAN (VXLAN) allows you to extend an L2 network over an L3
+network, which may be useful in a virtualized or cloud environment. WangXun(R)
+10Gb Ethernet Network devices perform VXLAN processing, offloading it from the
+operating system. This reduces CPU utilization.
+
+VXLAN offloading is controlled by the tx and rx checksum offload options
+provided by ethtool. That is, if tx checksum offload is enabled, and the adapter
+has the capability, VXLAN offloading is also enabled. If rx checksum offload is
+enabled, then the rx checksum of VXLAN packets will be offloaded.
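+
+For example, assuming the interface is named eth0, both offloads can be
+enabled with::
+
+  ethtool -K eth0 tx on rx on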
+
 Support
 =======
 If you got any problem, contact Wangxun support team via support@trustnetic.com
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 4d998ed33998..4e3dc9f20f74 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -6,6 +6,7 @@
 
 #include <net/ip.h>
 #include <linux/pci.h>
+#include <linux/if_vlan.h>
 #include <linux/etherdevice.h>
 #include <linux/timecounter.h>
 
@@ -88,21 +89,43 @@ struct txgbe_tx_queue_stats {
 };
 
 struct txgbe_rx_queue_stats {
+	u64 rsc_count;
+	u64 rsc_flush;
 	u64 non_eop_descs;
 	u64 alloc_rx_page_failed;
 	u64 alloc_rx_buff_failed;
+	u64 csum_good_cnt;
+	u64 csum_err;
 };
 
+enum txgbe_ring_state_t {
+	__TXGBE_RX_RSC_ENABLED,
+};
+
+struct txgbe_fwd_adapter {
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	struct txgbe_adapter *adapter;
+};
+
+#define ring_is_rsc_enabled(ring) \
+	test_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+	set_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+	clear_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state)
+
 struct txgbe_ring {
 	struct txgbe_ring *next;        /* pointer to next ring in q_vector */
 	struct txgbe_q_vector *q_vector; /* backpointer to host q_vector */
 	struct net_device *netdev;      /* netdev ring belongs to */
 	struct device *dev;             /* device for DMA mapping */
+	struct txgbe_fwd_adapter *accel;
 	void *desc;                     /* descriptor ring memory */
 	union {
 		struct txgbe_tx_buffer *tx_buffer_info;
 		struct txgbe_rx_buffer *rx_buffer_info;
 	};
+	unsigned long state;
 	u8 __iomem *tail;
 	dma_addr_t dma;                 /* phys. address of descriptor ring */
 	unsigned int size;              /* length in bytes */
@@ -186,6 +209,7 @@ struct txgbe_q_vector {
 /* microsecond values for various ITR rates shifted by 2 to fit itr register
  * with the first 3 bits reserved 0
  */
+#define TXGBE_MIN_RSC_ITR       24
 #define TXGBE_100K_ITR          40
 #define TXGBE_20K_ITR           200
 #define TXGBE_16K_ITR           248
@@ -247,6 +271,8 @@ struct txgbe_mac_addr {
 #define TXGBE_FLAG_NEED_LINK_CONFIG             BIT(1)
 #define TXGBE_FLAG_MSI_ENABLED                  BIT(2)
 #define TXGBE_FLAG_MSIX_ENABLED                 BIT(3)
+#define TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE        BIT(4)
+#define TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE         BIT(5)
 
 /**
  * txgbe_adapter.flag2
@@ -257,6 +283,8 @@ struct txgbe_mac_addr {
 #define TXGBE_FLAG2_PF_RESET_REQUESTED          BIT(3)
 #define TXGBE_FLAG2_RESET_INTR_RECEIVED         BIT(4)
 #define TXGBE_FLAG2_GLOBAL_RESET_REQUESTED      BIT(5)
+#define TXGBE_FLAG2_RSC_CAPABLE                 BIT(6)
+#define TXGBE_FLAG2_RSC_ENABLED                 BIT(7)
 
 enum txgbe_isb_idx {
 	TXGBE_ISB_HEADER,
@@ -269,6 +297,7 @@ enum txgbe_isb_idx {
 /* board specific private data structure */
 struct txgbe_adapter {
 	u8 __iomem *io_addr;    /* Mainly for iounmap use */
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
@@ -392,13 +421,19 @@ void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter);
 void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring,
 				      struct txgbe_tx_buffer *tx_buffer);
 void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count);
+void txgbe_configure_rscctl(struct txgbe_adapter *adapter,
+			    struct txgbe_ring *ring);
 void txgbe_configure_port(struct txgbe_adapter *adapter);
+void txgbe_clear_vxlan_port(struct txgbe_adapter *adapter);
 void txgbe_set_rx_mode(struct net_device *netdev);
 int txgbe_write_mc_addr_list(struct net_device *netdev);
+void txgbe_do_reset(struct net_device *netdev);
 void txgbe_write_eitr(struct txgbe_q_vector *q_vector);
 int txgbe_poll(struct napi_struct *napi, int budget);
 void txgbe_disable_rx_queue(struct txgbe_adapter *adapter,
 			    struct txgbe_ring *ring);
+void txgbe_vlan_strip_enable(struct txgbe_adapter *adapter);
+void txgbe_vlan_strip_disable(struct txgbe_adapter *adapter);
 
 static inline struct netdev_queue *txring_txq(const struct txgbe_ring *ring)
 {
@@ -409,6 +444,7 @@ int txgbe_write_uc_addr_list(struct net_device *netdev, int pool);
 int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool);
 int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool);
 int txgbe_available_rars(struct txgbe_adapter *adapter);
+void txgbe_vlan_mode(struct net_device *netdev, u32 features);
 
 void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter);
 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index dd93b07cc87b..6cd7e1a1c751 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -10,6 +10,7 @@
 #define TXGBE_SP_MAX_RX_QUEUES  128
 #define TXGBE_SP_RAR_ENTRIES    128
 #define TXGBE_SP_MC_TBL_SIZE    128
+#define TXGBE_SP_VFT_TBL_SIZE   128
 #define TXGBE_SP_RX_PB_SIZE     512
 
 static s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw);
@@ -1031,6 +1032,82 @@ s32 txgbe_init_uta_tables(struct txgbe_hw *hw)
 	return 0;
 }
 
+/**
+ *  txgbe_set_vfta - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in the pool filter
+ *  @vlan_on: boolean flag to turn on/off the VLAN in the filter table
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind,
+		   bool vlan_on)
+{
+	s32 regindex;
+	u32 bitindex;
+	u32 vfta;
+	u32 targetbit;
+	bool vfta_changed = false;
+
+	if (vlan > 4095)
+		return TXGBE_ERR_PARAM;
+
+	/* The VFTA is a bitstring made up of 128 32-bit registers
+	 * that enable the particular VLAN id, much like the MTA:
+	 *    bits[11-5]: which register
+	 *    bits[4-0]:  which bit in the register
+	 */
+	regindex = (vlan >> 5) & 0x7F;
+	bitindex = vlan & 0x1F;
+	targetbit = (1 << bitindex);
+	/* errata 5 */
+	vfta = hw->mac.vft_shadow[regindex];
+	if (vlan_on) {
+		if (!(vfta & targetbit)) {
+			vfta |= targetbit;
+			vfta_changed = true;
+		}
+	} else {
+		if ((vfta & targetbit)) {
+			vfta &= ~targetbit;
+			vfta_changed = true;
+		}
+	}
+
+	if (vfta_changed)
+		wr32(hw, TXGBE_PSR_VLAN_TBL(regindex), vfta);
+	/* errata 5 */
+	hw->mac.vft_shadow[regindex] = vfta;
+	return 0;
+}
+
+/**
+ *  txgbe_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+s32 txgbe_clear_vfta(struct txgbe_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++) {
+		wr32(hw, TXGBE_PSR_VLAN_TBL(offset), 0);
+		/* errata 5 */
+		hw->mac.vft_shadow[offset] = 0;
+	}
+
+	for (offset = 0; offset < TXGBE_PSR_VLAN_SWC_ENTRIES; offset++) {
+		wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, offset);
+		wr32(hw, TXGBE_PSR_VLAN_SWC, 0);
+		wr32(hw, TXGBE_PSR_VLAN_SWC_VM_L, 0);
+		wr32(hw, TXGBE_PSR_VLAN_SWC_VM_H, 0);
+	}
+
+	return 0;
+}
+
 /**
  *  txgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
  *  @hw: pointer to hardware structure
@@ -1707,6 +1784,310 @@ int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
 	return err;
 }
 
+/* The txgbe_ptype_lookup is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE itself in the first field of the bit field - this is
+ * just so that we can check that the table doesn't have a row missing, as
+ * the index into the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT txgbe_ptype_lookup[ptype].known
+ * THEN
+ *      Packet is unknown
+ * ELSE IF txgbe_ptype_lookup[ptype].mac == TXGBE_DEC_PTYPE_MAC_IP
+ *      Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ *      Use the enum txgbe_l2_ptypes to decode the packet type
+ * ENDIF
+ */
+
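+/* For illustration only (not part of the driver): decoding a hardware
+ * ptype with the lookup table below, following the flow above:
+ *
+ *	struct txgbe_dptype dp = txgbe_ptype_lookup[ptype];
+ *
+ *	if (!dp.known) {
+ *		// packet type unknown to the driver
+ *	} else if (dp.mac == TXGBE_DEC_PTYPE_MAC_IP) {
+ *		// IP packet: dp.ip, dp.etype, dp.eip, dp.prot, dp.layer apply
+ *	} else {
+ *		// pure L2 packet: only dp.layer is meaningful
+ *	}
+ */
+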
+/* macro to make the table lines short */
+#define TXGBE_PTT(ptype, mac, ip, etype, eip, proto, layer)\
+	{       ptype, \
+		1, \
+		/* mac     */ TXGBE_DEC_PTYPE_MAC_##mac, \
+		/* ip      */ TXGBE_DEC_PTYPE_IP_##ip, \
+		/* etype   */ TXGBE_DEC_PTYPE_ETYPE_##etype, \
+		/* eip     */ TXGBE_DEC_PTYPE_IP_##eip, \
+		/* proto   */ TXGBE_DEC_PTYPE_PROT_##proto, \
+		/* layer   */ TXGBE_DEC_PTYPE_LAYER_##layer }
+
+#define TXGBE_UKN(ptype) \
+		{ ptype, 0, 0, 0, 0, 0, 0, 0 }
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct txgbe_dptype txgbe_ptype_lookup[256] = {
+	TXGBE_UKN(0x00),
+	TXGBE_UKN(0x01),
+	TXGBE_UKN(0x02),
+	TXGBE_UKN(0x03),
+	TXGBE_UKN(0x04),
+	TXGBE_UKN(0x05),
+	TXGBE_UKN(0x06),
+	TXGBE_UKN(0x07),
+	TXGBE_UKN(0x08),
+	TXGBE_UKN(0x09),
+	TXGBE_UKN(0x0A),
+	TXGBE_UKN(0x0B),
+	TXGBE_UKN(0x0C),
+	TXGBE_UKN(0x0D),
+	TXGBE_UKN(0x0E),
+	TXGBE_UKN(0x0F),
+
+	/* L2: mac */
+	TXGBE_UKN(0x10),
+	TXGBE_PTT(0x11, L2, NONE, NONE, NONE, NONE, PAY2),
+	TXGBE_PTT(0x12, L2, NONE, NONE, NONE, TS,   PAY2),
+	TXGBE_PTT(0x13, L2, NONE, NONE, NONE, NONE, PAY2),
+	TXGBE_PTT(0x14, L2, NONE, NONE, NONE, NONE, PAY2),
+	TXGBE_PTT(0x15, L2, NONE, NONE, NONE, NONE, NONE),
+	TXGBE_PTT(0x16, L2, NONE, NONE, NONE, NONE, PAY2),
+	TXGBE_PTT(0x17, L2, NONE, NONE, NONE, NONE, NONE),
+
+	/* L2: ethertype filter */
+	TXGBE_PTT(0x18, L2, NONE, NONE, NONE, NONE, NONE),
+	TXGBE_PTT(0x19, L2, NONE, NONE, NONE, NONE, NONE),
+	TXGBE_PTT(0x1A, L2, NONE, NONE, NONE, NONE, NONE),
+	TXGBE_PTT(0x1B, L2, NONE, NONE, NONE, NONE, NONE),
+	TXGBE_PTT(0x1C, L2, NONE, NONE, NONE, NONE, NONE),
+	TXGBE_PTT(0x1D, L2, NONE, NONE, NONE, NONE, NONE),
+	TXGBE_PTT(0x1E, L2, NONE, NONE, NONE, NONE, NONE),
+	TXGBE_PTT(0x1F, L2, NONE, NONE, NONE, NONE, NONE),
+
+	/* L3: ip non-tunnel */
+	TXGBE_UKN(0x20),
+	TXGBE_PTT(0x21, IP, FGV4, NONE, NONE, NONE, PAY3),
+	TXGBE_PTT(0x22, IP, IPV4, NONE, NONE, NONE, PAY3),
+	TXGBE_PTT(0x23, IP, IPV4, NONE, NONE, UDP,  PAY4),
+	TXGBE_PTT(0x24, IP, IPV4, NONE, NONE, TCP,  PAY4),
+	TXGBE_PTT(0x25, IP, IPV4, NONE, NONE, SCTP, PAY4),
+	TXGBE_UKN(0x26),
+	TXGBE_UKN(0x27),
+	TXGBE_UKN(0x28),
+	TXGBE_PTT(0x29, IP, FGV6, NONE, NONE, NONE, PAY3),
+	TXGBE_PTT(0x2A, IP, IPV6, NONE, NONE, NONE, PAY3),
+	TXGBE_PTT(0x2B, IP, IPV6, NONE, NONE, UDP,  PAY3),
+	TXGBE_PTT(0x2C, IP, IPV6, NONE, NONE, TCP,  PAY4),
+	TXGBE_PTT(0x2D, IP, IPV6, NONE, NONE, SCTP, PAY4),
+	TXGBE_UKN(0x2E),
+	TXGBE_UKN(0x2F),
+
+	TXGBE_UKN(0x40),
+	TXGBE_UKN(0x41),
+	TXGBE_UKN(0x42),
+	TXGBE_UKN(0x43),
+	TXGBE_UKN(0x44),
+	TXGBE_UKN(0x45),
+	TXGBE_UKN(0x46),
+	TXGBE_UKN(0x47),
+	TXGBE_UKN(0x48),
+	TXGBE_UKN(0x49),
+	TXGBE_UKN(0x4A),
+	TXGBE_UKN(0x4B),
+	TXGBE_UKN(0x4C),
+	TXGBE_UKN(0x4D),
+	TXGBE_UKN(0x4E),
+	TXGBE_UKN(0x4F),
+	TXGBE_UKN(0x50),
+	TXGBE_UKN(0x51),
+	TXGBE_UKN(0x52),
+	TXGBE_UKN(0x53),
+	TXGBE_UKN(0x54),
+	TXGBE_UKN(0x55),
+	TXGBE_UKN(0x56),
+	TXGBE_UKN(0x57),
+	TXGBE_UKN(0x58),
+	TXGBE_UKN(0x59),
+	TXGBE_UKN(0x5A),
+	TXGBE_UKN(0x5B),
+	TXGBE_UKN(0x5C),
+	TXGBE_UKN(0x5D),
+	TXGBE_UKN(0x5E),
+	TXGBE_UKN(0x5F),
+	TXGBE_UKN(0x60),
+	TXGBE_UKN(0x61),
+	TXGBE_UKN(0x62),
+	TXGBE_UKN(0x63),
+	TXGBE_UKN(0x64),
+	TXGBE_UKN(0x65),
+	TXGBE_UKN(0x66),
+	TXGBE_UKN(0x67),
+	TXGBE_UKN(0x68),
+	TXGBE_UKN(0x69),
+	TXGBE_UKN(0x6A),
+	TXGBE_UKN(0x6B),
+	TXGBE_UKN(0x6C),
+	TXGBE_UKN(0x6D),
+	TXGBE_UKN(0x6E),
+	TXGBE_UKN(0x6F),
+	TXGBE_UKN(0x70),
+	TXGBE_UKN(0x71),
+	TXGBE_UKN(0x72),
+	TXGBE_UKN(0x73),
+	TXGBE_UKN(0x74),
+	TXGBE_UKN(0x75),
+	TXGBE_UKN(0x76),
+	TXGBE_UKN(0x77),
+	TXGBE_UKN(0x78),
+	TXGBE_UKN(0x79),
+	TXGBE_UKN(0x7A),
+	TXGBE_UKN(0x7B),
+	TXGBE_UKN(0x7C),
+	TXGBE_UKN(0x7D),
+	TXGBE_UKN(0x7E),
+	TXGBE_UKN(0x7F),
+
+	/* IPv4 --> IPv4/IPv6 */
+	TXGBE_UKN(0x80),
+	TXGBE_PTT(0x81, IP, IPV4, IPIP, FGV4, NONE, PAY3),
+	TXGBE_PTT(0x82, IP, IPV4, IPIP, IPV4, NONE, PAY3),
+	TXGBE_PTT(0x83, IP, IPV4, IPIP, IPV4, UDP,  PAY4),
+	TXGBE_PTT(0x84, IP, IPV4, IPIP, IPV4, TCP,  PAY4),
+	TXGBE_PTT(0x85, IP, IPV4, IPIP, IPV4, SCTP, PAY4),
+	TXGBE_UKN(0x86),
+	TXGBE_UKN(0x87),
+	TXGBE_UKN(0x88),
+	TXGBE_PTT(0x89, IP, IPV4, IPIP, FGV6, NONE, PAY3),
+	TXGBE_PTT(0x8A, IP, IPV4, IPIP, IPV6, NONE, PAY3),
+	TXGBE_PTT(0x8B, IP, IPV4, IPIP, IPV6, UDP,  PAY4),
+	TXGBE_PTT(0x8C, IP, IPV4, IPIP, IPV6, TCP,  PAY4),
+	TXGBE_PTT(0x8D, IP, IPV4, IPIP, IPV6, SCTP, PAY4),
+	TXGBE_UKN(0x8E),
+	TXGBE_UKN(0x8F),
+
+	/* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */
+	TXGBE_PTT(0x90, IP, IPV4, IG, NONE, NONE, PAY3),
+	TXGBE_PTT(0x91, IP, IPV4, IG, FGV4, NONE, PAY3),
+	TXGBE_PTT(0x92, IP, IPV4, IG, IPV4, NONE, PAY3),
+	TXGBE_PTT(0x93, IP, IPV4, IG, IPV4, UDP,  PAY4),
+	TXGBE_PTT(0x94, IP, IPV4, IG, IPV4, TCP,  PAY4),
+	TXGBE_PTT(0x95, IP, IPV4, IG, IPV4, SCTP, PAY4),
+	TXGBE_UKN(0x96),
+	TXGBE_UKN(0x97),
+	TXGBE_UKN(0x98),
+	TXGBE_PTT(0x99, IP, IPV4, IG, FGV6, NONE, PAY3),
+	TXGBE_PTT(0x9A, IP, IPV4, IG, IPV6, NONE, PAY3),
+	TXGBE_PTT(0x9B, IP, IPV4, IG, IPV6, UDP,  PAY4),
+	TXGBE_PTT(0x9C, IP, IPV4, IG, IPV6, TCP,  PAY4),
+	TXGBE_PTT(0x9D, IP, IPV4, IG, IPV6, SCTP, PAY4),
+	TXGBE_UKN(0x9E),
+	TXGBE_UKN(0x9F),
+
+	/* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */
+	TXGBE_PTT(0xA0, IP, IPV4, IGM, NONE, NONE, PAY3),
+	TXGBE_PTT(0xA1, IP, IPV4, IGM, FGV4, NONE, PAY3),
+	TXGBE_PTT(0xA2, IP, IPV4, IGM, IPV4, NONE, PAY3),
+	TXGBE_PTT(0xA3, IP, IPV4, IGM, IPV4, UDP,  PAY4),
+	TXGBE_PTT(0xA4, IP, IPV4, IGM, IPV4, TCP,  PAY4),
+	TXGBE_PTT(0xA5, IP, IPV4, IGM, IPV4, SCTP, PAY4),
+	TXGBE_UKN(0xA6),
+	TXGBE_UKN(0xA7),
+	TXGBE_UKN(0xA8),
+	TXGBE_PTT(0xA9, IP, IPV4, IGM, FGV6, NONE, PAY3),
+	TXGBE_PTT(0xAA, IP, IPV4, IGM, IPV6, NONE, PAY3),
+	TXGBE_PTT(0xAB, IP, IPV4, IGM, IPV6, UDP,  PAY4),
+	TXGBE_PTT(0xAC, IP, IPV4, IGM, IPV6, TCP,  PAY4),
+	TXGBE_PTT(0xAD, IP, IPV4, IGM, IPV6, SCTP, PAY4),
+	TXGBE_UKN(0xAE),
+	TXGBE_UKN(0xAF),
+
+	/* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */
+	TXGBE_PTT(0xB0, IP, IPV4, IGMV, NONE, NONE, PAY3),
+	TXGBE_PTT(0xB1, IP, IPV4, IGMV, FGV4, NONE, PAY3),
+	TXGBE_PTT(0xB2, IP, IPV4, IGMV, IPV4, NONE, PAY3),
+	TXGBE_PTT(0xB3, IP, IPV4, IGMV, IPV4, UDP,  PAY4),
+	TXGBE_PTT(0xB4, IP, IPV4, IGMV, IPV4, TCP,  PAY4),
+	TXGBE_PTT(0xB5, IP, IPV4, IGMV, IPV4, SCTP, PAY4),
+	TXGBE_UKN(0xB6),
+	TXGBE_UKN(0xB7),
+	TXGBE_UKN(0xB8),
+	TXGBE_PTT(0xB9, IP, IPV4, IGMV, FGV6, NONE, PAY3),
+	TXGBE_PTT(0xBA, IP, IPV4, IGMV, IPV6, NONE, PAY3),
+	TXGBE_PTT(0xBB, IP, IPV4, IGMV, IPV6, UDP,  PAY4),
+	TXGBE_PTT(0xBC, IP, IPV4, IGMV, IPV6, TCP,  PAY4),
+	TXGBE_PTT(0xBD, IP, IPV4, IGMV, IPV6, SCTP, PAY4),
+	TXGBE_UKN(0xBE),
+	TXGBE_UKN(0xBF),
+
+	/* IPv6 --> IPv4/IPv6 */
+	TXGBE_UKN(0xC0),
+	TXGBE_PTT(0xC1, IP, IPV6, IPIP, FGV4, NONE, PAY3),
+	TXGBE_PTT(0xC2, IP, IPV6, IPIP, IPV4, NONE, PAY3),
+	TXGBE_PTT(0xC3, IP, IPV6, IPIP, IPV4, UDP,  PAY4),
+	TXGBE_PTT(0xC4, IP, IPV6, IPIP, IPV4, TCP,  PAY4),
+	TXGBE_PTT(0xC5, IP, IPV6, IPIP, IPV4, SCTP, PAY4),
+	TXGBE_UKN(0xC6),
+	TXGBE_UKN(0xC7),
+	TXGBE_UKN(0xC8),
+	TXGBE_PTT(0xC9, IP, IPV6, IPIP, FGV6, NONE, PAY3),
+	TXGBE_PTT(0xCA, IP, IPV6, IPIP, IPV6, NONE, PAY3),
+	TXGBE_PTT(0xCB, IP, IPV6, IPIP, IPV6, UDP,  PAY4),
+	TXGBE_PTT(0xCC, IP, IPV6, IPIP, IPV6, TCP,  PAY4),
+	TXGBE_PTT(0xCD, IP, IPV6, IPIP, IPV6, SCTP, PAY4),
+	TXGBE_UKN(0xCE),
+	TXGBE_UKN(0xCF),
+
+	/* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */
+	TXGBE_PTT(0xD0, IP, IPV6, IG,   NONE, NONE, PAY3),
+	TXGBE_PTT(0xD1, IP, IPV6, IG,   FGV4, NONE, PAY3),
+	TXGBE_PTT(0xD2, IP, IPV6, IG,   IPV4, NONE, PAY3),
+	TXGBE_PTT(0xD3, IP, IPV6, IG,   IPV4, UDP,  PAY4),
+	TXGBE_PTT(0xD4, IP, IPV6, IG,   IPV4, TCP,  PAY4),
+	TXGBE_PTT(0xD5, IP, IPV6, IG,   IPV4, SCTP, PAY4),
+	TXGBE_UKN(0xD6),
+	TXGBE_UKN(0xD7),
+	TXGBE_UKN(0xD8),
+	TXGBE_PTT(0xD9, IP, IPV6, IG,   FGV6, NONE, PAY3),
+	TXGBE_PTT(0xDA, IP, IPV6, IG,   IPV6, NONE, PAY3),
+	TXGBE_PTT(0xDB, IP, IPV6, IG,   IPV6, UDP,  PAY4),
+	TXGBE_PTT(0xDC, IP, IPV6, IG,   IPV6, TCP,  PAY4),
+	TXGBE_PTT(0xDD, IP, IPV6, IG,   IPV6, SCTP, PAY4),
+	TXGBE_UKN(0xDE),
+	TXGBE_UKN(0xDF),
+
+	/* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */
+	TXGBE_PTT(0xE0, IP, IPV6, IGM,  NONE, NONE, PAY3),
+	TXGBE_PTT(0xE1, IP, IPV6, IGM,  FGV4, NONE, PAY3),
+	TXGBE_PTT(0xE2, IP, IPV6, IGM,  IPV4, NONE, PAY3),
+	TXGBE_PTT(0xE3, IP, IPV6, IGM,  IPV4, UDP,  PAY4),
+	TXGBE_PTT(0xE4, IP, IPV6, IGM,  IPV4, TCP,  PAY4),
+	TXGBE_PTT(0xE5, IP, IPV6, IGM,  IPV4, SCTP, PAY4),
+	TXGBE_UKN(0xE6),
+	TXGBE_UKN(0xE7),
+	TXGBE_UKN(0xE8),
+	TXGBE_PTT(0xE9, IP, IPV6, IGM,  FGV6, NONE, PAY3),
+	TXGBE_PTT(0xEA, IP, IPV6, IGM,  IPV6, NONE, PAY3),
+	TXGBE_PTT(0xEB, IP, IPV6, IGM,  IPV6, UDP,  PAY4),
+	TXGBE_PTT(0xEC, IP, IPV6, IGM,  IPV6, TCP,  PAY4),
+	TXGBE_PTT(0xED, IP, IPV6, IGM,  IPV6, SCTP, PAY4),
+	TXGBE_UKN(0xEE),
+	TXGBE_UKN(0xEF),
+
+	/* IPv6 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */
+	TXGBE_PTT(0xF0, IP, IPV6, IGMV, NONE, NONE, PAY3),
+	TXGBE_PTT(0xF1, IP, IPV6, IGMV, FGV4, NONE, PAY3),
+	TXGBE_PTT(0xF2, IP, IPV6, IGMV, IPV4, NONE, PAY3),
+	TXGBE_PTT(0xF3, IP, IPV6, IGMV, IPV4, UDP,  PAY4),
+	TXGBE_PTT(0xF4, IP, IPV6, IGMV, IPV4, TCP,  PAY4),
+	TXGBE_PTT(0xF5, IP, IPV6, IGMV, IPV4, SCTP, PAY4),
+	TXGBE_UKN(0xF6),
+	TXGBE_UKN(0xF7),
+	TXGBE_UKN(0xF8),
+	TXGBE_PTT(0xF9, IP, IPV6, IGMV, FGV6, NONE, PAY3),
+	TXGBE_PTT(0xFA, IP, IPV6, IGMV, IPV6, NONE, PAY3),
+	TXGBE_PTT(0xFB, IP, IPV6, IGMV, IPV6, UDP,  PAY4),
+	TXGBE_PTT(0xFC, IP, IPV6, IGMV, IPV6, TCP,  PAY4),
+	TXGBE_PTT(0xFD, IP, IPV6, IGMV, IPV6, SCTP, PAY4),
+	TXGBE_UKN(0xFE),
+	TXGBE_UKN(0xFF),
+};
+
 void txgbe_init_mac_link_ops(struct txgbe_hw *hw)
 {
 	struct txgbe_mac_info *mac = &hw->mac;
@@ -1799,7 +2180,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	mac->ops.get_wwn_prefix = txgbe_get_wwn_prefix;
 	mac->ops.setup_rxpba = txgbe_set_rxpba;
 
-	/* RAR, Multicast */
+	/* RAR, Multicast, VLAN */
 	mac->ops.set_rar = txgbe_set_rar;
 	mac->ops.clear_rar = txgbe_clear_rar;
 	mac->ops.init_rx_addrs = txgbe_init_rx_addrs;
@@ -1808,6 +2189,8 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	mac->ops.enable_rx = txgbe_enable_rx;
 	mac->ops.disable_rx = txgbe_disable_rx;
 	mac->ops.set_vmdq_san_mac = txgbe_set_vmdq_san_mac;
+	mac->ops.set_vfta = txgbe_set_vfta;
+	mac->ops.clear_vfta = txgbe_clear_vfta;
 	mac->ops.init_uta_tables = txgbe_init_uta_tables;
 
 	/* Link */
@@ -1815,6 +2198,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 	mac->ops.check_link = txgbe_check_mac_link;
 
 	mac->mcft_size          = TXGBE_SP_MC_TBL_SIZE;
+	mac->vft_size           = TXGBE_SP_VFT_TBL_SIZE;
 	mac->num_rar_entries    = TXGBE_SP_RAR_ENTRIES;
 	mac->rx_pb_size         = TXGBE_SP_RX_PB_SIZE;
 	mac->max_rx_queues      = TXGBE_SP_MAX_RX_QUEUES;
@@ -3168,6 +3552,11 @@ s32 txgbe_start_hw(struct txgbe_hw *hw)
 	/* Set the media type */
 	hw->phy.media_type = TCALL(hw, mac.ops.get_media_type);
 
+	/* Clear the VLAN filter table */
+	TCALL(hw, mac.ops.clear_vfta);
+
+	TXGBE_WRITE_FLUSH(hw);
+
 	/* Clear the rate limiters */
 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
 		wr32(hw, TXGBE_TDM_RP_IDX, i);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index 7377788fdaa2..9c3ab32ae608 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -16,6 +16,67 @@
 #define SPI_H_DAT_REG_ADDR           0x10108  /* SPI Data register address */
 #define SPI_H_STA_REG_ADDR           0x1010c  /* SPI Status register address */
 
+/**
+ * Packet Type decoding
+ **/
+/* txgbe_dptype.mac: outer mac */
+enum txgbe_dec_ptype_mac {
+	TXGBE_DEC_PTYPE_MAC_IP = 0,
+	TXGBE_DEC_PTYPE_MAC_L2 = 2,
+};
+
+/* txgbe_dptype.[e]ip: outer & encapsulated ip */
+#define TXGBE_DEC_PTYPE_IP_FRAG (0x4)
+enum txgbe_dec_ptype_ip {
+	TXGBE_DEC_PTYPE_IP_NONE = 0,
+	TXGBE_DEC_PTYPE_IP_IPV4 = 1,
+	TXGBE_DEC_PTYPE_IP_IPV6 = 2,
+	TXGBE_DEC_PTYPE_IP_FGV4 =
+		(TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV4),
+	TXGBE_DEC_PTYPE_IP_FGV6 =
+		(TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV6),
+};
+
+/* txgbe_dptype.etype: encapsulated type */
+enum txgbe_dec_ptype_etype {
+	TXGBE_DEC_PTYPE_ETYPE_NONE = 0,
+	TXGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */
+	TXGBE_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */
+	TXGBE_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */
+	TXGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */
+};
+
+/* txgbe_dptype.proto: payload proto */
+enum txgbe_dec_ptype_prot {
+	TXGBE_DEC_PTYPE_PROT_NONE = 0,
+	TXGBE_DEC_PTYPE_PROT_UDP = 1,
+	TXGBE_DEC_PTYPE_PROT_TCP = 2,
+	TXGBE_DEC_PTYPE_PROT_SCTP = 3,
+	TXGBE_DEC_PTYPE_PROT_ICMP = 4,
+	TXGBE_DEC_PTYPE_PROT_TS = 5, /* time sync */
+};
+
+/* txgbe_dptype.layer: payload layer */
+enum txgbe_dec_ptype_layer {
+	TXGBE_DEC_PTYPE_LAYER_NONE = 0,
+	TXGBE_DEC_PTYPE_LAYER_PAY2 = 1,
+	TXGBE_DEC_PTYPE_LAYER_PAY3 = 2,
+	TXGBE_DEC_PTYPE_LAYER_PAY4 = 3,
+};
+
+struct txgbe_dptype {
+	u32 ptype:8;
+	u32 known:1;
+	u32 mac:2; /* outer mac */
+	u32 ip:3; /* outer ip */
+	u32 etype:3; /* encapsulated type */
+	u32 eip:3; /* encapsulated ip */
+	u32 prot:4; /* payload proto */
+	u32 layer:3; /* payload layer */
+};
+
+extern struct txgbe_dptype txgbe_ptype_lookup[256];
+
 u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw);
 s32 txgbe_init_hw(struct txgbe_hw *hw);
 s32 txgbe_start_hw(struct txgbe_hw *hw);
@@ -46,6 +107,9 @@ s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
 s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq);
 s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq);
 s32 txgbe_init_uta_tables(struct txgbe_hw *hw);
+s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan,
+		   u32 vind, bool vlan_on);
+s32 txgbe_clear_vfta(struct txgbe_hw *hw);
 
 s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
 			 u16 *wwpn_prefix);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index e7ab9c2c093d..ca7e99e7da97 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -7,6 +7,9 @@
 #include <linux/netdevice.h>
 #include <linux/string.h>
 #include <linux/aer.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <net/vxlan.h>
 #include <linux/etherdevice.h>
 
 #include "txgbe.h"
@@ -46,6 +49,17 @@ static void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring);
 static void txgbe_napi_enable_all(struct txgbe_adapter *adapter);
 static void txgbe_napi_disable_all(struct txgbe_adapter *adapter);
 
+static inline struct txgbe_dptype txgbe_decode_ptype(const u8 ptype)
+{
+	return txgbe_ptype_lookup[ptype];
+}
+
+static inline struct txgbe_dptype
+decode_rx_desc_ptype(const union txgbe_rx_desc *rx_desc)
+{
+	return txgbe_decode_ptype(TXGBE_RXD_PKTTYPE(rx_desc));
+}
+
 static void txgbe_check_minimum_link(struct txgbe_adapter *adapter)
 {
 	struct txgbe_hw *hw = &adapter->hw;
@@ -329,6 +343,63 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector,
 	return !!budget;
 }
 
+/**
+ * txgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
+ * @skb: skb currently being received and modified
+ **/
+static inline void txgbe_rx_checksum(struct txgbe_ring *ring,
+				     union txgbe_rx_desc *rx_desc,
+				     struct sk_buff *skb)
+{
+	struct txgbe_dptype dptype = decode_rx_desc_ptype(rx_desc);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	skb_checksum_none_assert(skb);
+
+	/* Rx csum disabled */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* if IPv4 header checksum error */
+	if ((txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_IPCS) &&
+	     txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_IPE)) ||
+	    (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_OUTERIPCS) &&
+	     txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_OUTERIPER))) {
+		ring->rx_stats.csum_err++;
+		return;
+	}
+
+	/* L4 checksum offload flag must be set for the code below to work */
+	if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_L4CS))
+		return;
+
+	/* likely incorrect csum if an IPv6 Destination Header is found */
+	if (dptype.prot != TXGBE_DEC_PTYPE_PROT_SCTP && TXGBE_RXD_IPV6EX(rx_desc))
+		return;
+
+	/* if L4 checksum error */
+	if (txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_TCPE)) {
+		ring->rx_stats.csum_err++;
+		return;
+	}
+	/* If there is an outer header present that might contain a checksum
+	 * we need to bump the checksum level by 1 to reflect the fact that
+	 * we are indicating we validated the inner checksum.
+	 */
+	if (dptype.etype >= TXGBE_DEC_PTYPE_ETYPE_IG) {
+		skb->csum_level = 1;
+		/* FIXME: can skb->csum_level and skb->encapsulation both be set? */
+		skb->encapsulation = 1;
+	}
+
+	/* It must be a TCP or UDP or SCTP packet with a valid checksum */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	ring->rx_stats.csum_good_cnt++;
+}
+
 static bool txgbe_alloc_mapped_page(struct txgbe_ring *rx_ring,
 				    struct txgbe_rx_buffer *bi)
 {
@@ -424,6 +495,51 @@ void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count)
 	}
 }
 
+static void txgbe_set_rsc_gso_size(struct txgbe_ring __maybe_unused *ring,
+				   struct sk_buff *skb)
+{
+	u16 hdr_len = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
+
+	/* set gso_size to avoid messing up TCP MSS */
+	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
+						 TXGBE_CB(skb)->append_cnt);
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+}
+
+static void txgbe_update_rsc_stats(struct txgbe_ring *rx_ring,
+				   struct sk_buff *skb)
+{
+	/* if append_cnt is 0 then frame is not RSC */
+	if (!TXGBE_CB(skb)->append_cnt)
+		return;
+
+	rx_ring->rx_stats.rsc_count += TXGBE_CB(skb)->append_cnt;
+	rx_ring->rx_stats.rsc_flush++;
+
+	txgbe_set_rsc_gso_size(rx_ring, skb);
+
+	/* gso_size is computed using append_cnt so always clear it last */
+	TXGBE_CB(skb)->append_cnt = 0;
+}
+
+static void txgbe_rx_vlan(struct txgbe_ring *ring,
+			  union txgbe_rx_desc *rx_desc,
+			  struct sk_buff *skb)
+{
+	u16 ethertype;
+	u8 idx = 0;
+
+	if ((ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_VP)) {
+		idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+		       TXGBE_RXD_TPID_MASK) >> TXGBE_RXD_TPID_SHIFT;
+		ethertype = ring->q_vector->adapter->hw.tpid[idx];
+		__vlan_hwaccel_put_tag(skb,
+				       htons(ethertype),
+				       le16_to_cpu(rx_desc->wb.upper.vlan));
+	}
+}
+
 /**
  * txgbe_process_skb_fields - Populate skb header fields from Rx descriptor
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -431,12 +547,18 @@ void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count)
  * @skb: pointer to current skb being populated
  *
  * This function checks the ring, descriptor, and packet information in
- * order to populate fields within the skb.
+ * order to populate the checksum, VLAN, protocol, and
+ * other fields within the skb.
  **/
 static void txgbe_process_skb_fields(struct txgbe_ring *rx_ring,
 				     union txgbe_rx_desc *rx_desc,
 				     struct sk_buff *skb)
 {
+	txgbe_update_rsc_stats(rx_ring, skb);
+	txgbe_rx_checksum(rx_ring, rx_desc, skb);
+
+	txgbe_rx_vlan(rx_ring, rx_desc, skb);
+
 	skb_record_rx_queue(skb, rx_ring->queue_index);
 
 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -471,6 +593,24 @@ static bool txgbe_is_non_eop(struct txgbe_ring *rx_ring,
 
 	prefetch(TXGBE_RX_DESC(rx_ring, ntc));
 
+	/* update RSC append count if present */
+	if (ring_is_rsc_enabled(rx_ring)) {
+		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+				     cpu_to_le32(TXGBE_RXD_RSCCNT_MASK);
+
+		if (unlikely(rsc_enabled)) {
+			u32 rsc_cnt = le32_to_cpu(rsc_enabled);
+
+			rsc_cnt >>= TXGBE_RXD_RSCCNT_SHIFT;
+			TXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
+
+			/* update ntc based on RSC value */
+			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+			ntc &= TXGBE_RXD_NEXTP_MASK;
+			ntc >>= TXGBE_RXD_NEXTP_SHIFT;
+		}
+	}
+
 	/* if we are the last buffer then there is nothing else to do */
 	if (likely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)))
 		return false;
@@ -833,6 +973,7 @@ static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector,
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
 
+		/* populate checksum, VLAN, and protocol */
 		txgbe_process_skb_fields(rx_ring, rx_desc, skb);
 
 		txgbe_rx_skb(q_vector, skb);
@@ -1641,6 +1782,39 @@ static void txgbe_configure_srrctl(struct txgbe_adapter *adapter,
 	wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl);
 }
 
+/**
+ * txgbe_configure_rscctl - enable RSC for the indicated ring
+ * @adapter:    address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+void txgbe_configure_rscctl(struct txgbe_adapter *adapter,
+			    struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u8 reg_idx = ring->reg_idx;
+	u32 rscctrl;
+
+	if (!ring_is_rsc_enabled(ring))
+		return;
+
+	rscctrl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx));
+	rscctrl |= TXGBE_PX_RR_CFG_RSC;
+	/* we must limit the number of descriptors so that the
+	 * total size of max desc * buf_len is not greater
+	 * than 65536
+	 */
+#if (MAX_SKB_FRAGS >= 16)
+	rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_16;
+#elif (MAX_SKB_FRAGS >= 8)
+	rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_8;
+#elif (MAX_SKB_FRAGS >= 4)
+	rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_4;
+#else
+	rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_1;
+#endif
+	wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rscctrl);
+}
+
 static void txgbe_rx_desc_queue_enable(struct txgbe_adapter *adapter,
 				       struct txgbe_ring *ring)
 {
@@ -1723,6 +1897,8 @@ void txgbe_configure_rx_ring(struct txgbe_adapter *adapter,
 	ring->next_to_alloc = 0;
 
 	txgbe_configure_srrctl(adapter, ring);
+	/* In ESX, RSCCTL configuration is done on demand */
+	txgbe_configure_rscctl(adapter, ring);
 
 	/* enable receive descriptor ring */
 	wr32m(hw, TXGBE_PX_RR_CFG(reg_idx),
@@ -1753,7 +1929,9 @@ static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	u32 max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	struct txgbe_hw *hw = &adapter->hw;
+	struct txgbe_ring *rx_ring;
 	u32 mhadd;
+	int i;
 
 	/* adjust max frame to be at least the size of a standard frame */
 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
@@ -1762,6 +1940,15 @@ static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter)
 	mhadd = rd32(hw, TXGBE_PSR_MAX_SZ);
 	if (max_frame != mhadd)
 		wr32(hw, TXGBE_PSR_MAX_SZ, max_frame);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+
+		if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)
+			set_ring_rsc_enabled(rx_ring);
+		else
+			clear_ring_rsc_enabled(rx_ring);
+	}
 }
 
 /**
@@ -1788,7 +1975,8 @@ static void txgbe_configure_rx(struct txgbe_adapter *adapter)
 	/* RSC Setup */
 	psrctl = rd32m(hw, TXGBE_PSR_CTL, ~TXGBE_PSR_CTL_RSC_DIS);
 	psrctl |= TXGBE_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
-	psrctl |= TXGBE_PSR_CTL_RSC_DIS;
+	if (!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED))
+		psrctl |= TXGBE_PSR_CTL_RSC_DIS;
 	wr32(hw, TXGBE_PSR_CTL, psrctl);
 
 	/* set_rx_buffer_len must be called before ring initialization */
@@ -1807,6 +1995,103 @@ static void txgbe_configure_rx(struct txgbe_adapter *adapter)
 	TCALL(hw, mac.ops.enable_rx_dma, rxctrl);
 }
 
+static int txgbe_vlan_rx_add_vid(struct net_device *netdev,
+				 __be16 proto, u16 vid)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+
+	/* add VID to filter table */
+	if (hw->mac.ops.set_vfta)
+		TCALL(hw, mac.ops.set_vfta, vid, 0, true);
+
+	set_bit(vid, adapter->active_vlans);
+
+	return 0;
+}
+
+static int txgbe_vlan_rx_kill_vid(struct net_device *netdev,
+				  __be16 proto, u16 vid)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+
+	/* remove VID from filter table */
+	if (hw->mac.ops.set_vfta)
+		TCALL(hw, mac.ops.set_vfta, vid, 0, false);
+
+	clear_bit(vid, adapter->active_vlans);
+
+	return 0;
+}
+
+/**
+ * txgbe_vlan_strip_disable - helper to disable vlan tag stripping
+ * @adapter: driver data
+ */
+void txgbe_vlan_strip_disable(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	int i, j;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct txgbe_ring *ring = adapter->rx_ring[i];
+
+		if (ring->accel)
+			continue;
+		j = ring->reg_idx;
+		wr32m(hw, TXGBE_PX_RR_CFG(j),
+		      TXGBE_PX_RR_CFG_VLAN, 0);
+	}
+}
+
+/**
+ * txgbe_vlan_strip_enable - helper to enable vlan tag stripping
+ * @adapter: driver data
+ */
+void txgbe_vlan_strip_enable(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	int i, j;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct txgbe_ring *ring = adapter->rx_ring[i];
+
+		if (ring->accel)
+			continue;
+		j = ring->reg_idx;
+		wr32m(hw, TXGBE_PX_RR_CFG(j),
+		      TXGBE_PX_RR_CFG_VLAN, TXGBE_PX_RR_CFG_VLAN);
+	}
+}
+
+void txgbe_vlan_mode(struct net_device *netdev, u32 features)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	bool enable;
+
+	enable = !!(features & (NETIF_F_HW_VLAN_CTAG_RX));
+
+	if (enable)
+		/* enable VLAN tag strip */
+		txgbe_vlan_strip_enable(adapter);
+	else
+		/* disable VLAN tag strip */
+		txgbe_vlan_strip_disable(adapter);
+}
+
+static void txgbe_restore_vlan(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u16 vid;
+
+	txgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
+	txgbe_vlan_mode(netdev, netdev->features);
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+		txgbe_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
+}
+
 static u8 *txgbe_addr_list_itr(struct txgbe_hw __maybe_unused *hw,
 			       u8 **mc_addr_ptr, u32 *vmdq)
 {
@@ -2112,6 +2397,11 @@ void txgbe_set_rx_mode(struct net_device *netdev)
 	wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl);
 	wr32(hw, TXGBE_PSR_CTL, fctrl);
 	wr32(hw, TXGBE_PSR_VM_L2CTL(0), vmolr);
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		txgbe_vlan_strip_enable(adapter);
+	else
+		txgbe_vlan_strip_disable(adapter);
 }
 
 static void txgbe_napi_enable_all(struct txgbe_adapter *adapter)
@@ -2136,6 +2426,20 @@ static void txgbe_napi_disable_all(struct txgbe_adapter *adapter)
 	}
 }
 
+void txgbe_clear_vxlan_port(struct txgbe_adapter *adapter)
+{
+	if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+		return;
+	wr32(&adapter->hw, TXGBE_CFG_VXLAN, 0);
+}
+
+#define TXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				    NETIF_F_GSO_GRE_CSUM | \
+				    NETIF_F_GSO_IPXIP4 | \
+				    NETIF_F_GSO_IPXIP6 | \
+				    NETIF_F_GSO_UDP_TUNNEL | \
+				    NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
 static void txgbe_configure_pb(struct txgbe_adapter *adapter)
 {
 	struct txgbe_hw *hw = &adapter->hw;
@@ -2184,6 +2488,7 @@ static void txgbe_configure(struct txgbe_adapter *adapter)
 	txgbe_configure_port(adapter);
 
 	txgbe_set_rx_mode(adapter->netdev);
+	txgbe_restore_vlan(adapter);
 
 	TCALL(hw, mac.ops.disable_sec_rx_path);
 
@@ -2630,6 +2935,9 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
 	adapter->rx_itr_setting = 1;
 	adapter->tx_itr_setting = 1;
 
+	adapter->flags |= TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
+	adapter->flags2 |= TXGBE_FLAG2_RSC_CAPABLE;
+
 	adapter->max_q_vectors = TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE;
 
 	/* set default ring sizes */
@@ -2915,6 +3223,32 @@ static void txgbe_free_all_rx_resources(struct txgbe_adapter *adapter)
 		txgbe_free_rx_resources(adapter->rx_ring[i]);
 }
 
+/**
+ * txgbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int txgbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
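+	/* 68 is the minimum IPv4 MTU; 9414 is the maximum payload, i.e.
+	 * TXGBE_MAX_JUMBO_FRAME_SIZE (9432) minus ETH_HLEN and ETH_FCS_LEN
+	 */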
+	if (new_mtu < 68 || new_mtu > 9414)
+		return -EINVAL;
+
+	netif_info(adapter, probe, netdev,
+		   "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+
+	/* must set new MTU before calling down or up */
+	netdev->mtu = new_mtu;
+
+	if (netif_running(netdev))
+		txgbe_reinit_locked(adapter);
+
+	return 0;
+}
+
 /**
  * txgbe_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -2965,6 +3299,9 @@ int txgbe_open(struct net_device *netdev)
 
 	txgbe_up_complete(adapter);
 
+	txgbe_clear_vxlan_port(adapter);
+	udp_tunnel_get_rx_info(netdev);
+
 	return 0;
 
 err_free_irq:
@@ -3521,12 +3858,120 @@ static int txgbe_del_sanmac_netdev(struct net_device *dev)
 	return err;
 }
 
+void txgbe_do_reset(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		txgbe_reinit_locked(adapter);
+	else
+		txgbe_reset(adapter);
+}
+
+static netdev_features_t txgbe_fix_features(struct net_device *netdev,
+					    netdev_features_t features)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_LRO;
+
+	/* Turn off LRO if not RSC capable */
+	if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE))
+		features &= ~NETIF_F_LRO;
+
+	return features;
+}
+
+static int txgbe_set_features(struct net_device *netdev,
+			      netdev_features_t features)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	bool need_reset = false;
+
+	/* Make sure RSC matches LRO, reset if change */
+	if (!(features & NETIF_F_LRO)) {
+		if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)
+			need_reset = true;
+		adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED;
+	} else if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) &&
+		   !(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) {
+		if (adapter->rx_itr_setting == 1 ||
+		    adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) {
+			adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED;
+			need_reset = true;
+		} else if ((netdev->features ^ features) & NETIF_F_LRO) {
+			netif_info(adapter, probe, netdev,
+				   "rx-usecs set too low, disabling RSC\n");
+		}
+	}
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		txgbe_vlan_strip_enable(adapter);
+	else
+		txgbe_vlan_strip_disable(adapter);
+
+	if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE &&
+	      features & NETIF_F_RXCSUM))
+		txgbe_clear_vxlan_port(adapter);
+
+	if (need_reset)
+		txgbe_do_reset(netdev);
+
+	return 0;
+}
+
+#define TXGBE_MAX_TUNNEL_HDR_LEN 80
+static netdev_features_t
+txgbe_features_check(struct sk_buff *skb, struct net_device *dev,
+		     netdev_features_t features)
+{
+	u16 vlan_depth = skb->mac_len;
+	__be16 type = skb->protocol;
+	struct vlan_hdr *vh;
+	u32 vlan_num = 0;
+
+	if (skb_vlan_tag_present(skb))
+		vlan_num++;
+
+	if (vlan_depth)
+		vlan_depth -= VLAN_HLEN;
+	else
+		vlan_depth = ETH_HLEN;
+
+	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+		vlan_num++;
+		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+		type = vh->h_vlan_encapsulated_proto;
+		vlan_depth += VLAN_HLEN;
+	}
+
+	if (vlan_num > 2)
+		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_STAG_TX);
+
+	if (skb->encapsulation) {
+		if (unlikely(skb_inner_mac_header(skb) -
+			     skb_transport_header(skb) >
+			     TXGBE_MAX_TUNNEL_HDR_LEN))
+			return features & ~NETIF_F_CSUM_MASK;
+	}
+	return features;
+}
+
 static const struct net_device_ops txgbe_netdev_ops = {
 	.ndo_open               = txgbe_open,
 	.ndo_stop               = txgbe_close,
 	.ndo_start_xmit         = txgbe_xmit_frame,
 	.ndo_set_rx_mode        = txgbe_set_rx_mode,
 	.ndo_validate_addr      = eth_validate_addr,
+	.ndo_change_mtu		= txgbe_change_mtu,
+	.ndo_vlan_rx_add_vid    = txgbe_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = txgbe_vlan_rx_kill_vid,
+	.ndo_features_check     = txgbe_features_check,
+	.ndo_set_features       = txgbe_set_features,
+	.ndo_fix_features       = txgbe_fix_features,
 };
 
 void txgbe_assign_netdev_ops(struct net_device *dev)
@@ -3651,17 +4096,46 @@ static int txgbe_probe(struct pci_dev *pdev,
 		goto err_free_mac_table;
 	}
 
-	netdev->features = NETIF_F_SG;
+	netdev->features = NETIF_F_SG |
+			   NETIF_F_LRO |
+			   NETIF_F_RXCSUM |
+			   NETIF_F_HW_CSUM |
+			   NETIF_F_SCTP_CRC;
+
+	netdev->gso_partial_features = TXGBE_GSO_PARTIAL_FEATURES;
+	netdev->features |= NETIF_F_GSO_PARTIAL |
+			    TXGBE_GSO_PARTIAL_FEATURES;
 
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features |
+			       NETIF_F_HW_VLAN_CTAG_FILTER |
+			       NETIF_F_HW_VLAN_CTAG_RX |
+			       NETIF_F_HW_VLAN_CTAG_TX |
 			       NETIF_F_RXALL;
 
+	netdev->hw_features |= NETIF_F_NTUPLE;
+
 	netdev->features |= NETIF_F_HIGHDMA;
 
+	netdev->vlan_features |= netdev->features;
+	netdev->hw_enc_features |= netdev->vlan_features;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+
+	/* set this bit last since it cannot be part of vlan_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
 
+	/* give us the option of enabling RSC/LRO later */
+	if (adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) {
+		netdev->hw_features |= NETIF_F_LRO;
+		netdev->features |= NETIF_F_LRO;
+		adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED;
+	}
+
 	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
 
@@ -3802,6 +4276,10 @@ static int txgbe_probe(struct pci_dev *pdev,
 	i_s_var += sprintf(info_string, "Enabled Features: ");
 	i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ",
 			   adapter->num_rx_queues, adapter->num_tx_queues);
+	if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)
+		i_s_var += sprintf(i_s_var, "RSC ");
+	if (adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)
+		i_s_var += sprintf(i_s_var, "vxlan_rx ");
 
 	WARN_ON(i_s_var > (info_string + INFO_STRING_LEN));
 	/* end features printing */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 5b823cd988ca..19446bf54c2f 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -562,6 +562,9 @@ enum {
 #define TXGBE_PSR_MC_TBL(_i)    (0x15200  + ((_i) * 4))
 #define TXGBE_PSR_UC_TBL(_i)    (0x15400 + ((_i) * 4))
 
+/* vlan tbl */
+#define TXGBE_PSR_VLAN_TBL(_i)  (0x16000 + ((_i) * 4))
+
 /* mac switcher */
 #define TXGBE_PSR_MAC_SWC_AD_L  0x16200
 #define TXGBE_PSR_MAC_SWC_AD_H  0x16204
@@ -574,6 +577,17 @@ enum {
 #define TXGBE_PSR_MAC_SWC_AD_H_AV       0x80000000U
 #define TXGBE_CLEAR_VMDQ_ALL            0xFFFFFFFFU
 
+/* vlan switch */
+#define TXGBE_PSR_VLAN_SWC      0x16220
+#define TXGBE_PSR_VLAN_SWC_VM_L 0x16224
+#define TXGBE_PSR_VLAN_SWC_VM_H 0x16228
+#define TXGBE_PSR_VLAN_SWC_IDX  0x16230         /* 64 vlan entries */
+/* VLAN pool filtering masks */
+#define TXGBE_PSR_VLAN_SWC_VIEN         0x80000000U  /* filter is valid */
+#define TXGBE_PSR_VLAN_SWC_ENTRIES      64
+#define TXGBE_PSR_VLAN_SWC_VLANID_MASK  0x00000FFFU
+#define TXGBE_ETHERNET_IEEE_VLAN_TYPE   0x8100  /* 802.1q protocol */
+
 /* Management */
 #define TXGBE_PSR_MNG_FIT_CTL           0x15820
 /* Management Bit Fields and Masks */
@@ -956,11 +970,44 @@ enum {
 #define TXGBE_PCIDEVCTRL2_17_34s        0xe
 
 /******************* Receive Descriptor bit definitions **********************/
+#define TXGBE_RXD_NEXTP_MASK            0x000FFFF0U /* Next Descriptor Index */
+#define TXGBE_RXD_NEXTP_SHIFT           0x00000004U
+#define TXGBE_RXD_STAT_MASK             0x000fffffU /* Stat/NEXTP: bit 0-19 */
 #define TXGBE_RXD_STAT_DD               0x00000001U /* Done */
 #define TXGBE_RXD_STAT_EOP              0x00000002U /* End of Packet */
+#define TXGBE_RXD_STAT_VP               0x00000020U /* IEEE VLAN Pkt */
+#define TXGBE_RXD_STAT_UDPCS            0x00000040U /* UDP xsum calculated */
+#define TXGBE_RXD_STAT_L4CS             0x00000080U /* L4 xsum calculated */
+#define TXGBE_RXD_STAT_IPCS             0x00000100U /* IP xsum calculated */
+#define TXGBE_RXD_STAT_PIF              0x00000200U /* passed in-exact filter */
+#define TXGBE_RXD_STAT_OUTERIPCS        0x00000400U /* Cloud IP xsum calculated */
+#define TXGBE_RXD_STAT_VEXT             0x00000800U /* 1st VLAN found */
+#define TXGBE_RXD_STAT_LLINT            0x00002000U /* Pkt caused Low Latency Int */
+#define TXGBE_RXD_STAT_SECP             0x00008000U /* Security Processing */
+#define TXGBE_RXD_STAT_LB               0x00010000U /* Loopback Status */
 
 #define TXGBE_RXD_ERR_MASK              0xfff00000U /* RDESC.ERRORS mask */
+#define TXGBE_RXD_ERR_OUTERIPER         0x04000000U /* CRC IP Header error */
+#define TXGBE_RXD_ERR_SECERR_MASK       0x18000000U
 #define TXGBE_RXD_ERR_RXE               0x20000000U /* Any MAC Error */
+#define TXGBE_RXD_ERR_TCPE              0x40000000U /* TCP/UDP Checksum Error */
+#define TXGBE_RXD_ERR_IPE               0x80000000U /* IP Checksum Error */
+
+#define TXGBE_RXD_RSSTYPE_MASK          0x0000000FU
+#define TXGBE_RXD_TPID_MASK             0x000001C0U
+#define TXGBE_RXD_TPID_SHIFT            6
+#define TXGBE_RXD_HDRBUFLEN_MASK        0x00007FE0U
+#define TXGBE_RXD_RSCCNT_MASK           0x001E0000U
+#define TXGBE_RXD_RSCCNT_SHIFT          17
+#define TXGBE_RXD_HDRBUFLEN_SHIFT       5
+#define TXGBE_RXD_SPLITHEADER_EN        0x00001000U
+#define TXGBE_RXD_SPH                   0x8000
+
+#define TXGBE_RXD_PKTTYPE(_rxd) \
+	((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
+
+#define TXGBE_RXD_IPV6EX(_rxd) \
+	((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1)
 
 /* Masks to determine if packets should be dropped due to frame errors */
 #define TXGBE_RXD_ERR_FRAME_ERR_MASK    TXGBE_RXD_ERR_RXE
@@ -1299,7 +1346,7 @@ struct txgbe_mac_operations {
 	s32 (*setup_rxpba)(struct txgbe_hw *hw, int num_pb, u32 headroom,
 			   int strategy);
 
-	/* RAR, Multicast */
+	/* RAR, Multicast, VLAN */
 	s32 (*set_rar)(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
 		       u32 enable_addr);
 	s32 (*clear_rar)(struct txgbe_hw *hw, u32 index);
@@ -1311,6 +1358,8 @@ struct txgbe_mac_operations {
 	s32 (*update_mc_addr_list)(struct txgbe_hw *hw, u8 *mc_addr_list,
 				   u32 mc_addr_count,
 				   txgbe_mc_addr_itr func, bool clear);
+	s32 (*clear_vfta)(struct txgbe_hw *hw);
+	s32 (*set_vfta)(struct txgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
 	s32 (*init_uta_tables)(struct txgbe_hw *hw);
 
 	/* Manageability interface */
@@ -1348,9 +1397,12 @@ struct txgbe_mac_info {
 	/* prefix for World Wide Port Name (WWPN) */
 	u16 wwpn_prefix;
 #define TXGBE_MAX_MTA                   128
+#define TXGBE_MAX_VFTA_ENTRIES          128
 	u32 mta_shadow[TXGBE_MAX_MTA];
 	s32 mc_filter_type;
 	u32 mcft_size;
+	u32 vft_shadow[TXGBE_MAX_VFTA_ENTRIES];
+	u32 vft_size;
 	u32 num_rar_entries;
 	u32 rx_pb_size;
 	u32 max_tx_queues;
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 14/16] net: txgbe: Add transmit path to process packets
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (12 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 13/16] net: txgbe: Add device Rx features Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 15/16] net: txgbe: Support to get system network statistics Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 16/16] net: txgbe: support to respond Tx hang Jiawen Wu
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Add the full transmit path, which supports TSO, checksum offload, etc.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
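A minimal sketch (not the exact code in this patch) of the worst-case
descriptor accounting that TXD_USE_COUNT() and DESC_NEEDED below support,
with tx_ring and skb named as in the driver:

	unsigned int f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));

	/* one descriptor per 16 KiB (TXGBE_MAX_DATA_PER_TXD) of each frag */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));

	if (txgbe_desc_unused(tx_ring) < count + DESC_NEEDED) {
		/* not enough free descriptors: stop the queue (tx_busy) */
		return NETDEV_TX_BUSY;
	}
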
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  48 +
 .../net/ethernet/wangxun/txgbe/txgbe_lib.c    |  19 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 820 +++++++++++++++++-
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   | 115 +++
 4 files changed, 1000 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 4e3dc9f20f74..1265dd24e90b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -12,6 +12,9 @@
 
 #include "txgbe_type.h"
 
+/* Ether Types */
+#define TXGBE_ETH_P_CNM                         0x22E7
+
 /* TX/RX descriptor defines */
 #define TXGBE_DEFAULT_TXD               512
 #define TXGBE_DEFAULT_TX_WORK   256
@@ -50,8 +53,37 @@
 #define TXGBE_RX_DMA_ATTR \
 	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
 
+enum txgbe_tx_flags {
+	/* cmd_type flags */
+	TXGBE_TX_FLAGS_HW_VLAN  = 0x01,
+	TXGBE_TX_FLAGS_TSO      = 0x02,
+	TXGBE_TX_FLAGS_TSTAMP   = 0x04,
+
+	/* olinfo flags */
+	TXGBE_TX_FLAGS_CC       = 0x08,
+	TXGBE_TX_FLAGS_IPV4     = 0x10,
+	TXGBE_TX_FLAGS_CSUM     = 0x20,
+	TXGBE_TX_FLAGS_OUTER_IPV4 = 0x100,
+	TXGBE_TX_FLAGS_LINKSEC	= 0x200,
+	TXGBE_TX_FLAGS_IPSEC    = 0x400,
+
+	/* software defined flags */
+	TXGBE_TX_FLAGS_SW_VLAN  = 0x40,
+};
+
+/* VLAN info */
+#define TXGBE_TX_FLAGS_VLAN_MASK        0xffff0000
+#define TXGBE_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
+#define TXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
+#define TXGBE_TX_FLAGS_VLAN_SHIFT       16
+
 #define TXGBE_MAX_RX_DESC_POLL          10
 
+#define TXGBE_MAX_TXD_PWR       14
+#define TXGBE_MAX_DATA_PER_TXD  BIT(TXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S)        DIV_ROUND_UP((S), TXGBE_MAX_DATA_PER_TXD)
 #ifndef MAX_SKB_FRAGS
 #define DESC_NEEDED     4
 #elif (MAX_SKB_FRAGS < 16)
@@ -67,8 +99,11 @@ struct txgbe_tx_buffer {
 	union txgbe_tx_desc *next_to_watch;
 	struct sk_buff *skb;
 	unsigned int bytecount;
+	unsigned short gso_segs;
+	__be16 protocol;
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
+	u32 tx_flags;
 };
 
 struct txgbe_rx_buffer {
@@ -86,6 +121,7 @@ struct txgbe_queue_stats {
 
 struct txgbe_tx_queue_stats {
 	u64 restart_queue;
+	u64 tx_busy;
 };
 
 struct txgbe_rx_queue_stats {
@@ -235,6 +271,8 @@ static inline u16 txgbe_desc_unused(struct txgbe_ring *ring)
 	(&(((union txgbe_rx_desc *)((R)->desc))[i]))
 #define TXGBE_TX_DESC(R, i)     \
 	(&(((union txgbe_tx_desc *)((R)->desc))[i]))
+#define TXGBE_TX_CTXTDESC(R, i) \
+	(&(((struct txgbe_tx_context_desc *)((R)->desc))[i]))
 
 #define TXGBE_MAX_JUMBO_FRAME_SIZE      9432 /* max payload 9414 */
 
@@ -286,6 +324,11 @@ struct txgbe_mac_addr {
 #define TXGBE_FLAG2_RSC_CAPABLE                 BIT(6)
 #define TXGBE_FLAG2_RSC_ENABLED                 BIT(7)
 
+#define TXGBE_SET_FLAG(_input, _flag, _result) \
+	((_flag <= _result) ? \
+	 ((u32)(_input & _flag) * (_result / _flag)) : \
+	 ((u32)(_input & _flag) / (_flag / _result)))
+
 enum txgbe_isb_idx {
 	TXGBE_ISB_HEADER,
 	TXGBE_ISB_MISC,
@@ -418,6 +461,9 @@ int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter);
 void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter);
 void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter);
 void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter);
+netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *skb,
+				  struct txgbe_adapter *adapter,
+				  struct txgbe_ring *tx_ring);
 void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring,
 				      struct txgbe_tx_buffer *tx_buffer);
 void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count);
@@ -427,6 +473,8 @@ void txgbe_configure_port(struct txgbe_adapter *adapter);
 void txgbe_clear_vxlan_port(struct txgbe_adapter *adapter);
 void txgbe_set_rx_mode(struct net_device *netdev);
 int txgbe_write_mc_addr_list(struct net_device *netdev);
+void txgbe_tx_ctxtdesc(struct txgbe_ring *tx_ring, u32 vlan_macip_lens,
+		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx);
 void txgbe_do_reset(struct net_device *netdev);
 void txgbe_write_eitr(struct txgbe_q_vector *q_vector);
 int txgbe_poll(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
index 84b7e01cc27e..46d54dad98bf 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
@@ -442,3 +442,22 @@ void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter)
 	txgbe_free_q_vectors(adapter);
 	txgbe_reset_interrupt_capability(adapter);
 }
+
+void txgbe_tx_ctxtdesc(struct txgbe_ring *tx_ring, u32 vlan_macip_lens,
+		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+{
+	struct txgbe_tx_context_desc *context_desc;
+	u16 i = tx_ring->next_to_use;
+
+	context_desc = TXGBE_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= TXGBE_TXD_DTYP_CTXT;
+	context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
+	context_desc->seqnum_seed       = cpu_to_le32(fcoe_sof_eof);
+	context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
+	context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index ca7e99e7da97..3adbe3bbddac 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -6,6 +6,7 @@
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/string.h>
+#include <linux/sctp.h>
 #include <linux/aer.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
@@ -259,6 +260,7 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector,
 
 		/* update the statistics for this packet */
 		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
 		dev_consume_skb_any(tx_buffer->skb);
@@ -3806,10 +3808,822 @@ static void txgbe_service_task(struct work_struct *work)
 	txgbe_service_event_complete(adapter);
 }
 
+static u8 get_ipv6_proto(struct sk_buff *skb, int offset)
+{
+	struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset);
+	u8 nexthdr = hdr->nexthdr;
+
+	offset += sizeof(struct ipv6hdr);
+
+	while (ipv6_ext_hdr(nexthdr)) {
+		struct ipv6_opt_hdr _hdr, *hp;
+
+		if (nexthdr == NEXTHDR_NONE)
+			break;
+
+		hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
+		if (!hp)
+			break;
+
+		if (nexthdr == NEXTHDR_FRAGMENT)
+			break;
+		else if (nexthdr == NEXTHDR_AUTH)
+			offset += ipv6_authlen(hp);
+		else
+			offset += ipv6_optlen(hp);
+
+		nexthdr = hp->nexthdr;
+	}
+
+	return nexthdr;
+}
+
+union network_header {
+	struct iphdr *ipv4;
+	struct ipv6hdr *ipv6;
+	void *raw;
+};
+
+static struct txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *first)
+{
+	struct sk_buff *skb = first->skb;
+	u8 tun_prot = 0;
+	u8 l4_prot = 0;
+	u8 ptype = 0;
+
+	if (skb->encapsulation) {
+		union network_header hdr;
+
+		switch (first->protocol) {
+		case htons(ETH_P_IP):
+			tun_prot = ip_hdr(skb)->protocol;
+			if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))
+				goto encap_frag;
+			ptype = TXGBE_PTYPE_TUN_IPV4;
+			break;
+		case htons(ETH_P_IPV6):
+			tun_prot = get_ipv6_proto(skb, skb_network_offset(skb));
+			if (tun_prot == NEXTHDR_FRAGMENT)
+				goto encap_frag;
+			ptype = TXGBE_PTYPE_TUN_IPV6;
+			break;
+		default:
+			goto exit;
+		}
+
+		if (tun_prot == IPPROTO_IPIP) {
+			hdr.raw = (void *)inner_ip_hdr(skb);
+			ptype |= TXGBE_PTYPE_PKT_IPIP;
+		} else if (tun_prot == IPPROTO_UDP) {
+			hdr.raw = (void *)inner_ip_hdr(skb);
+			if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+			    skb->inner_protocol != htons(ETH_P_TEB)) {
+				ptype |= TXGBE_PTYPE_PKT_IG;
+			} else {
+				if (((struct ethhdr *)
+					skb_inner_mac_header(skb))->h_proto
+					== htons(ETH_P_8021Q)) {
+					ptype |= TXGBE_PTYPE_PKT_IGMV;
+				} else {
+					ptype |= TXGBE_PTYPE_PKT_IGM;
+				}
+			}
+		} else if (tun_prot == IPPROTO_GRE) {
+			hdr.raw = (void *)inner_ip_hdr(skb);
+			if (skb->inner_protocol == htons(ETH_P_IP) ||
+			    skb->inner_protocol == htons(ETH_P_IPV6)) {
+				ptype |= TXGBE_PTYPE_PKT_IG;
+			} else {
+				if (((struct ethhdr *)
+					skb_inner_mac_header(skb))->h_proto
+					== htons(ETH_P_8021Q)) {
+					ptype |= TXGBE_PTYPE_PKT_IGMV;
+				} else {
+					ptype |= TXGBE_PTYPE_PKT_IGM;
+				}
+			}
+		} else {
+			goto exit;
+		}
+
+		switch (hdr.ipv4->version) {
+		case IPVERSION:
+			l4_prot = hdr.ipv4->protocol;
+			if (hdr.ipv4->frag_off & htons(IP_MF | IP_OFFSET)) {
+				ptype |= TXGBE_PTYPE_TYP_IPFRAG;
+				goto exit;
+			}
+			break;
+		case 6:
+			l4_prot = get_ipv6_proto(skb,
+						 skb_inner_network_offset(skb));
+			ptype |= TXGBE_PTYPE_PKT_IPV6;
+			if (l4_prot == NEXTHDR_FRAGMENT) {
+				ptype |= TXGBE_PTYPE_TYP_IPFRAG;
+				goto exit;
+			}
+			break;
+		default:
+			goto exit;
+		}
+	} else {
+encap_frag:
+		switch (first->protocol) {
+		case htons(ETH_P_IP):
+			l4_prot = ip_hdr(skb)->protocol;
+			ptype = TXGBE_PTYPE_PKT_IP;
+			if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+				ptype |= TXGBE_PTYPE_TYP_IPFRAG;
+				goto exit;
+			}
+			break;
+		case htons(ETH_P_IPV6):
+			l4_prot = get_ipv6_proto(skb, skb_network_offset(skb));
+			ptype = TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6;
+			if (l4_prot == NEXTHDR_FRAGMENT) {
+				ptype |= TXGBE_PTYPE_TYP_IPFRAG;
+				goto exit;
+			}
+			break;
+		case htons(ETH_P_1588):
+			ptype = TXGBE_PTYPE_L2_TS;
+			goto exit;
+		case htons(ETH_P_FIP):
+			ptype = TXGBE_PTYPE_L2_FIP;
+			goto exit;
+		case htons(ETH_P_LLDP):
+			ptype = TXGBE_PTYPE_L2_LLDP;
+			goto exit;
+		case htons(TXGBE_ETH_P_CNM):
+			ptype = TXGBE_PTYPE_L2_CNM;
+			goto exit;
+		case htons(ETH_P_PAE):
+			ptype = TXGBE_PTYPE_L2_EAPOL;
+			goto exit;
+		case htons(ETH_P_ARP):
+			ptype = TXGBE_PTYPE_L2_ARP;
+			goto exit;
+		default:
+			ptype = TXGBE_PTYPE_L2_MAC;
+			goto exit;
+		}
+	}
+
+	switch (l4_prot) {
+	case IPPROTO_TCP:
+		ptype |= TXGBE_PTYPE_TYP_TCP;
+		break;
+	case IPPROTO_UDP:
+		ptype |= TXGBE_PTYPE_TYP_UDP;
+		break;
+	case IPPROTO_SCTP:
+		ptype |= TXGBE_PTYPE_TYP_SCTP;
+		break;
+	default:
+		ptype |= TXGBE_PTYPE_TYP_IP;
+		break;
+	}
+
+exit:
+	return txgbe_decode_ptype(ptype);
+}
+
+static int txgbe_tso(struct txgbe_ring *tx_ring,
+		     struct txgbe_tx_buffer *first,
+		     u8 *hdr_len, struct txgbe_dptype dptype)
+{
+	struct sk_buff *skb = first->skb;
+	u32 vlan_macip_lens, type_tucmd;
+	bool enc = skb->encapsulation;
+	u32 tunhdr_eiplen_tunlen = 0;
+	u32 mss_l4len_idx, l4len;
+	struct ipv6hdr *ipv6h;
+	struct tcphdr *tcph;
+	struct iphdr *iph;
+	u8 tun_prot = 0;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	if (skb_header_cloned(skb)) {
+		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
+		if (err)
+			return err;
+	}
+
+	iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb);
+
+	if (iph->version == 4) {
+		tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcph->check = ~csum_tcpudp_magic(iph->saddr,
+						 iph->daddr, 0,
+						 IPPROTO_TCP,
+						 0);
+		first->tx_flags |= TXGBE_TX_FLAGS_TSO |
+				   TXGBE_TX_FLAGS_CSUM |
+				   TXGBE_TX_FLAGS_IPV4 |
+				   TXGBE_TX_FLAGS_CC;
+	} else if (iph->version == 6 && skb_is_gso_v6(skb)) {
+		ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+		tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+		ipv6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+					       &ipv6h->daddr,
+					       0, IPPROTO_TCP, 0);
+		first->tx_flags |= TXGBE_TX_FLAGS_TSO |
+				   TXGBE_TX_FLAGS_CSUM |
+				   TXGBE_TX_FLAGS_CC;
+	}
+
+	/* compute header lengths */
+	l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+	*hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data)
+		       : skb_transport_offset(skb);
+	*hdr_len += l4len;
+
+	/* update gso size and bytecount with header size */
+	first->gso_segs = skb_shinfo(skb)->gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
+	/* mss_l4len_id: use 0 as index for TSO */
+	mss_l4len_idx = l4len << TXGBE_TXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << TXGBE_TXD_MSS_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+
+	if (enc) {
+		switch (first->protocol) {
+		case htons(ETH_P_IP):
+			tun_prot = ip_hdr(skb)->protocol;
+			first->tx_flags |= TXGBE_TX_FLAGS_OUTER_IPV4;
+			break;
+		case htons(ETH_P_IPV6):
+			tun_prot = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			break;
+		}
+		switch (tun_prot) {
+		case IPPROTO_UDP:
+			tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP;
+			tunhdr_eiplen_tunlen |=
+					((skb_network_header_len(skb) >> 2) <<
+					 TXGBE_TXD_OUTER_IPLEN_SHIFT) |
+					(((skb_inner_mac_header(skb) -
+					   skb_transport_header(skb)) >> 1) <<
+					 TXGBE_TXD_TUNNEL_LEN_SHIFT);
+			break;
+		case IPPROTO_GRE:
+			tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE;
+			tunhdr_eiplen_tunlen |=
+					((skb_network_header_len(skb) >> 2) <<
+					 TXGBE_TXD_OUTER_IPLEN_SHIFT) |
+					(((skb_inner_mac_header(skb) -
+					   skb_transport_header(skb)) >> 1) <<
+					 TXGBE_TXD_TUNNEL_LEN_SHIFT);
+			break;
+		case IPPROTO_IPIP:
+			tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
+						 (char *)ip_hdr(skb)) >> 2) <<
+						TXGBE_TXD_OUTER_IPLEN_SHIFT;
+			break;
+		default:
+			break;
+		}
+
+		vlan_macip_lens = skb_inner_network_header_len(skb) >> 1;
+	} else {
+		vlan_macip_lens = skb_network_header_len(skb) >> 1;
+	}
+
+	vlan_macip_lens |= skb_network_offset(skb) << TXGBE_TXD_MACLEN_SHIFT;
+	vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK;
+
+	type_tucmd = dptype.ptype << 24;
+	txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
+			  type_tucmd, mss_l4len_idx);
+
+	return 1;
+}
+
+static void txgbe_tx_csum(struct txgbe_ring *tx_ring,
+			  struct txgbe_tx_buffer *first,
+			  struct txgbe_dptype dptype)
+{
+	struct sk_buff *skb = first->skb;
+	u32 tunhdr_eiplen_tunlen = 0;
+	u32 vlan_macip_lens = 0;
+	u32 mss_l4len_idx = 0;
+	u8 tun_prot = 0;
+	u32 type_tucmd;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		if (!(first->tx_flags & TXGBE_TX_FLAGS_HW_VLAN) &&
+		    !(first->tx_flags & TXGBE_TX_FLAGS_CC))
+			return;
+		vlan_macip_lens = skb_network_offset(skb) <<
+				  TXGBE_TXD_MACLEN_SHIFT;
+	} else {
+		u8 l4_prot = 0;
+
+		union {
+			struct iphdr *ipv4;
+			struct ipv6hdr *ipv6;
+			u8 *raw;
+		} network_hdr;
+		union {
+			struct tcphdr *tcphdr;
+			u8 *raw;
+		} transport_hdr;
+
+		if (skb->encapsulation) {
+			network_hdr.raw = skb_inner_network_header(skb);
+			transport_hdr.raw = skb_inner_transport_header(skb);
+			vlan_macip_lens = skb_network_offset(skb) <<
+					  TXGBE_TXD_MACLEN_SHIFT;
+			switch (first->protocol) {
+			case htons(ETH_P_IP):
+				tun_prot = ip_hdr(skb)->protocol;
+				break;
+			case htons(ETH_P_IPV6):
+				tun_prot = ipv6_hdr(skb)->nexthdr;
+				break;
+			default:
+				if (unlikely(net_ratelimit())) {
+					dev_warn(tx_ring->dev,
+						 "partial checksum but version=%d\n",
+						 network_hdr.ipv4->version);
+				}
+				return;
+			}
+			switch (tun_prot) {
+			case IPPROTO_UDP:
+				tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP;
+				tunhdr_eiplen_tunlen |=
+					((skb_network_header_len(skb) >> 2) <<
+					TXGBE_TXD_OUTER_IPLEN_SHIFT) |
+					(((skb_inner_mac_header(skb) -
+					skb_transport_header(skb)) >> 1) <<
+					TXGBE_TXD_TUNNEL_LEN_SHIFT);
+				break;
+			case IPPROTO_GRE:
+				tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE;
+				tunhdr_eiplen_tunlen |=
+					((skb_network_header_len(skb) >> 2) <<
+					TXGBE_TXD_OUTER_IPLEN_SHIFT) |
+					(((skb_inner_mac_header(skb) -
+					skb_transport_header(skb)) >> 1) <<
+					TXGBE_TXD_TUNNEL_LEN_SHIFT);
+				break;
+			case IPPROTO_IPIP:
+				tunhdr_eiplen_tunlen =
+					(((char *)inner_ip_hdr(skb) -
+					(char *)ip_hdr(skb)) >> 2) <<
+					TXGBE_TXD_OUTER_IPLEN_SHIFT;
+				break;
+			default:
+				break;
+			}
+
+		} else {
+			network_hdr.raw = skb_network_header(skb);
+			transport_hdr.raw = skb_transport_header(skb);
+			vlan_macip_lens = skb_network_offset(skb) <<
+					  TXGBE_TXD_MACLEN_SHIFT;
+		}
+
+		switch (network_hdr.ipv4->version) {
+		case IPVERSION:
+			vlan_macip_lens |=
+				(transport_hdr.raw - network_hdr.raw) >> 1;
+			l4_prot = network_hdr.ipv4->protocol;
+			break;
+		case 6:
+			vlan_macip_lens |=
+				(transport_hdr.raw - network_hdr.raw) >> 1;
+			l4_prot = network_hdr.ipv6->nexthdr;
+			break;
+		default:
+			break;
+		}
+
+		switch (l4_prot) {
+		case IPPROTO_TCP:
+			mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
+					TXGBE_TXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_SCTP:
+			mss_l4len_idx = sizeof(struct sctphdr) <<
+					TXGBE_TXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_UDP:
+			mss_l4len_idx = sizeof(struct udphdr) <<
+					TXGBE_TXD_L4LEN_SHIFT;
+			break;
+		default:
+			break;
+		}
+
+		/* update TX checksum flag */
+		first->tx_flags |= TXGBE_TX_FLAGS_CSUM;
+	}
+	first->tx_flags |= TXGBE_TX_FLAGS_CC;
+	/* vlan_macip_lens: MACLEN, VLAN tag */
+	vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK;
+
+	type_tucmd = dptype.ptype << 24;
+	txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
+			  type_tucmd, mss_l4len_idx);
+}
+
+static u32 txgbe_tx_cmd_type(u32 tx_flags)
+{
+	/* set type for advanced descriptor with frame checksum insertion */
+	u32 cmd_type = TXGBE_TXD_DTYP_DATA |
+		       TXGBE_TXD_IFCS;
+
+	/* set HW vlan bit if vlan is present */
+	cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_HW_VLAN,
+				   TXGBE_TXD_VLE);
+
+	/* set segmentation enable bits for TSO/FSO */
+	cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_TSO,
+				   TXGBE_TXD_TSE);
+
+	cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_LINKSEC,
+				   TXGBE_TXD_LINKSEC);
+
+	return cmd_type;
+}
+
+static void txgbe_tx_olinfo_status(union txgbe_tx_desc *tx_desc,
+				   u32 tx_flags, unsigned int paylen)
+{
+	u32 olinfo_status = paylen << TXGBE_TXD_PAYLEN_SHIFT;
+
+	/* enable L4 checksum for TSO and TX checksum offload */
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_CSUM,
+					TXGBE_TXD_L4CS);
+
+	/* enable IPv4 checksum for TSO */
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_IPV4,
+					TXGBE_TXD_IIPCS);
+	/* enable outer IPv4 checksum for TSO */
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_OUTER_IPV4,
+					TXGBE_TXD_EIPCS);
+	/* Check Context must be set if Tx switch is enabled, which it
+	 * always is for the case where virtual functions are running.
+	 */
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_CC,
+					TXGBE_TXD_CC);
+
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_IPSEC,
+					TXGBE_TXD_IPSEC);
+
+	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+}
+
+static int __txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, u16 size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+	/* For the next check */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (likely(txgbe_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+static inline int txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, u16 size)
+{
+	if (likely(txgbe_desc_unused(tx_ring) >= size))
+		return 0;
+
+	return __txgbe_maybe_stop_tx(tx_ring, size);
+}
+
+#define TXGBE_TXD_CMD (TXGBE_TXD_EOP | \
+		       TXGBE_TXD_RS)
+
+static int txgbe_tx_map(struct txgbe_ring *tx_ring,
+			struct txgbe_tx_buffer *first,
+			const u8 hdr_len)
+{
+	struct txgbe_tx_buffer *tx_buffer;
+	struct sk_buff *skb = first->skb;
+	u32 tx_flags = first->tx_flags;
+	union txgbe_tx_desc *tx_desc;
+	u16 i = tx_ring->next_to_use;
+	unsigned int data_len, size;
+	skb_frag_t *frag;
+	dma_addr_t dma;
+	u32 cmd_type;
+
+	cmd_type = txgbe_tx_cmd_type(tx_flags);
+	tx_desc = TXGBE_TX_DESC(tx_ring, i);
+
+	txgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+	tx_buffer = first;
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+		while (unlikely(size > TXGBE_MAX_DATA_PER_TXD)) {
+			tx_desc->read.cmd_type_len =
+				cpu_to_le32(cmd_type ^ TXGBE_MAX_DATA_PER_TXD);
+
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = TXGBE_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+			tx_desc->read.olinfo_status = 0;
+
+			dma += TXGBE_MAX_DATA_PER_TXD;
+			size -= TXGBE_MAX_DATA_PER_TXD;
+
+			tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		}
+
+		if (likely(!data_len))
+			break;
+
+		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = TXGBE_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+		tx_desc->read.olinfo_status = 0;
+
+		size = skb_frag_size(frag);
+
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+	}
+
+	/* write last descriptor with RS and EOP bits */
+	cmd_type |= size | TXGBE_TXD_CMD;
+	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	txgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+		writel(i, tx_ring->tail);
+
+	return 0;
+dma_error:
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_page(tx_ring->dev,
+				       dma_unmap_addr(tx_buffer, dma),
+				       dma_unmap_len(tx_buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i += tx_ring->count;
+		i--;
+	}
+
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+
+	tx_ring->next_to_use = i;
+
+	return -1;
+}
+
+/**
+ * txgbe_skb_pad_nonzero - pad the tail of an skb with non-zero bytes
+ * @skb: buffer to pad
+ * @pad: space to pad
+ *
+ * Ensure that a buffer is followed by a padding area filled with the
+ * non-zero byte 0x1. Used by network drivers which may DMA or transfer
+ * data beyond the buffer end onto the wire.
+ *
+ * May return error in out of memory cases. The skb is freed on error.
+ */
+static int txgbe_skb_pad_nonzero(struct sk_buff *skb, int pad)
+{
+	int err;
+	int ntail;
+
+	/* If the skbuff is non linear tailroom is always zero. */
+	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+		memset(skb->data + skb->len, 0x1, pad);
+		return 0;
+	}
+
+	ntail = skb->data_len + pad - (skb->end - skb->tail);
+	if (likely(skb_cloned(skb) || ntail > 0)) {
+		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
+		if (unlikely(err))
+			goto free_skb;
+	}
+
+	/* The use of this function with non-linear skb's really needs
+	 * to be audited.
+	 */
+	err = skb_linearize(skb);
+	if (unlikely(err))
+		goto free_skb;
+
+	memset(skb->data + skb->len, 0x1, pad);
+	return 0;
+
+free_skb:
+	kfree_skb(skb);
+	return err;
+}
+
+netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *skb,
+				  struct txgbe_adapter __maybe_unused *adapter,
+				  struct txgbe_ring *tx_ring)
+{
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+	__be16 protocol = skb->protocol;
+	struct txgbe_tx_buffer *first;
+	struct txgbe_dptype dptype;
+	u8 vlan_addlen = 0;
+	u32 tx_flags = 0;
+	unsigned short f;
+	u8 hdr_len = 0;
+	int tso;
+
+	/* work around hw errata 3 */
+	u16 _llclen, *llclen;
+
+	llclen = skb_header_pointer(skb, ETH_HLEN - 2, sizeof(u16), &_llclen);
+	if (llclen && (*llclen == 0x3 || *llclen == 0x4 || *llclen == 0x5)) {
+		/* txgbe_skb_pad_nonzero() frees the skb on failure */
+		if (txgbe_skb_pad_nonzero(skb, ETH_ZLEN - skb->len))
+			return NETDEV_TX_OK;
+		__skb_put(skb, ETH_ZLEN - skb->len);
+	}
+
+	/* need: 1 descriptor per page * PAGE_SIZE/TXGBE_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/TXGBE_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
+						     frags[f]));
+
+	if (txgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	first->bytecount = skb->len;
+	first->gso_segs = 1;
+
+	/* if we have a HW VLAN tag being added default to the HW one */
+	if (skb_vlan_tag_present(skb)) {
+		tx_flags |= skb_vlan_tag_get(skb) << TXGBE_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= TXGBE_TX_FLAGS_HW_VLAN;
+	/* else if it is a SW VLAN check the next protocol and store the tag */
+	} else if (protocol == htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vhdr, _vhdr;
+
+		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+		if (!vhdr)
+			goto out_drop;
+
+		protocol = vhdr->h_vlan_encapsulated_proto;
+		tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
+				  TXGBE_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= TXGBE_TX_FLAGS_SW_VLAN;
+	}
+
+	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
+		struct vlan_hdr *vhdr, _vhdr;
+
+		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+		if (!vhdr)
+			goto out_drop;
+
+		protocol = vhdr->h_vlan_encapsulated_proto;
+		tx_flags |= TXGBE_TX_FLAGS_SW_VLAN;
+		vlan_addlen += VLAN_HLEN;
+	}
+
+	/* record initial flags and protocol */
+	first->tx_flags = tx_flags;
+	first->protocol = protocol;
+
+	dptype = encode_tx_desc_ptype(first);
+
+	tso = txgbe_tso(tx_ring, first, &hdr_len, dptype);
+	if (tso < 0)
+		goto out_drop;
+	else if (!tso)
+		txgbe_tx_csum(tx_ring, first, dptype);
+
+	txgbe_tx_map(tx_ring, first, hdr_len);
+
+	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+
+	return NETDEV_TX_OK;
+}
+
 static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb,
 				    struct net_device *netdev)
 {
-	return NETDEV_TX_OK;
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	unsigned int r_idx = skb->queue_mapping;
+	struct txgbe_ring *tx_ring;
+
+	if (!netif_carrier_ok(netdev)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* The minimum packet size for olinfo paylen is 17 so pad the skb
+	 * in order to meet this minimum size requirement.
+	 */
+	if (skb_put_padto(skb, 17))
+		return NETDEV_TX_OK;
+
+	if (r_idx >= adapter->num_tx_queues)
+		r_idx = r_idx % adapter->num_tx_queues;
+	tx_ring = adapter->tx_ring[r_idx];
+
+	return txgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
 
 /**
@@ -4098,6 +4912,8 @@ static int txgbe_probe(struct pci_dev *pdev,
 
 	netdev->features = NETIF_F_SG |
 			   NETIF_F_LRO |
+			   NETIF_F_TSO |
+			   NETIF_F_TSO6 |
 			   NETIF_F_RXCSUM |
 			   NETIF_F_HW_CSUM |
 			   NETIF_F_SCTP_CRC;
@@ -4117,7 +4933,7 @@ static int txgbe_probe(struct pci_dev *pdev,
 
 	netdev->features |= NETIF_F_HIGHDMA;
 
-	netdev->vlan_features |= netdev->features;
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
 	netdev->hw_enc_features |= netdev->vlan_features;
 	netdev->mpls_features |= NETIF_F_HW_CSUM;
 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 19446bf54c2f..58fa3b19b3a7 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -1003,8 +1003,77 @@ enum {
 #define TXGBE_RXD_SPLITHEADER_EN        0x00001000U
 #define TXGBE_RXD_SPH                   0x8000
 
+/*
+ * receive packet type
+ * PTYPE:8 = TUN:2 + PKT:2 + TYP:4
+ */
+/* TUN */
+#define TXGBE_PTYPE_TUN_IPV4            (0x80)
+#define TXGBE_PTYPE_TUN_IPV6            (0xC0)
+
+/* PKT for TUN */
+#define TXGBE_PTYPE_PKT_IPIP            (0x00) /* IP+IP */
+#define TXGBE_PTYPE_PKT_IG              (0x10) /* IP+GRE */
+#define TXGBE_PTYPE_PKT_IGM             (0x20) /* IP+GRE+MAC */
+#define TXGBE_PTYPE_PKT_IGMV            (0x30) /* IP+GRE+MAC+VLAN */
+/* PKT for !TUN */
+#define TXGBE_PTYPE_PKT_MAC             (0x10)
+#define TXGBE_PTYPE_PKT_IP              (0x20)
+
+/* TYP for PKT=mac */
+#define TXGBE_PTYPE_TYP_MAC             (0x01)
+#define TXGBE_PTYPE_TYP_TS              (0x02) /* time sync */
+#define TXGBE_PTYPE_TYP_FIP             (0x03)
+#define TXGBE_PTYPE_TYP_LLDP            (0x04)
+#define TXGBE_PTYPE_TYP_CNM             (0x05)
+#define TXGBE_PTYPE_TYP_EAPOL           (0x06)
+#define TXGBE_PTYPE_TYP_ARP             (0x07)
+/* TYP for PKT=ip */
+#define TXGBE_PTYPE_PKT_IPV6            (0x08)
+#define TXGBE_PTYPE_TYP_IPFRAG          (0x01)
+#define TXGBE_PTYPE_TYP_IP              (0x02)
+#define TXGBE_PTYPE_TYP_UDP             (0x03)
+#define TXGBE_PTYPE_TYP_TCP             (0x04)
+#define TXGBE_PTYPE_TYP_SCTP            (0x05)
+
+/* Packet type non-ip values */
+enum txgbe_l2_ptypes {
+	TXGBE_PTYPE_L2_ABORTED = (TXGBE_PTYPE_PKT_MAC),
+	TXGBE_PTYPE_L2_MAC = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_MAC),
+	TXGBE_PTYPE_L2_TS = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_TS),
+	TXGBE_PTYPE_L2_FIP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_FIP),
+	TXGBE_PTYPE_L2_LLDP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_LLDP),
+	TXGBE_PTYPE_L2_CNM = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_CNM),
+	TXGBE_PTYPE_L2_EAPOL = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_EAPOL),
+	TXGBE_PTYPE_L2_ARP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_ARP),
+
+	TXGBE_PTYPE_L2_IPV4_FRAG = (TXGBE_PTYPE_PKT_IP |
+				    TXGBE_PTYPE_TYP_IPFRAG),
+	TXGBE_PTYPE_L2_IPV4 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_IP),
+	TXGBE_PTYPE_L2_IPV4_UDP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_UDP),
+	TXGBE_PTYPE_L2_IPV4_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_TCP),
+	TXGBE_PTYPE_L2_IPV4_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_SCTP),
+	TXGBE_PTYPE_L2_IPV6_FRAG = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 |
+				    TXGBE_PTYPE_TYP_IPFRAG),
+	TXGBE_PTYPE_L2_IPV6 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 |
+			       TXGBE_PTYPE_TYP_IP),
+	TXGBE_PTYPE_L2_IPV6_UDP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 |
+				   TXGBE_PTYPE_TYP_UDP),
+	TXGBE_PTYPE_L2_IPV6_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 |
+				   TXGBE_PTYPE_TYP_TCP),
+	TXGBE_PTYPE_L2_IPV6_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 |
+				    TXGBE_PTYPE_TYP_SCTP),
+
+	TXGBE_PTYPE_L2_TUN4_MAC = (TXGBE_PTYPE_TUN_IPV4 | TXGBE_PTYPE_PKT_IGM),
+	TXGBE_PTYPE_L2_TUN6_MAC = (TXGBE_PTYPE_TUN_IPV6 | TXGBE_PTYPE_PKT_IGM),
+};
+
 #define TXGBE_RXD_PKTTYPE(_rxd) \
 	((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
+#define TXGBE_PTYPE_TUN(_pt) ((_pt) & 0xC0)
+#define TXGBE_PTYPE_PKT(_pt) ((_pt) & 0x30)
+#define TXGBE_PTYPE_TYP(_pt) ((_pt) & 0x0F)
+#define TXGBE_PTYPE_TYPL4(_pt) ((_pt) & 0x07)
 
 #define TXGBE_RXD_IPV6EX(_rxd) \
 	((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1)
@@ -1013,7 +1082,45 @@ enum {
 #define TXGBE_RXD_ERR_FRAME_ERR_MASK    TXGBE_RXD_ERR_RXE
 
 /*********************** Transmit Descriptor Config Masks ****************/
+#define TXGBE_TXD_DTALEN_MASK           0x0000FFFFU /* Data buf length(bytes) */
+#define TXGBE_TXD_MAC_LINKSEC           0x00040000U /* Insert LinkSec */
+#define TXGBE_TXD_MAC_TSTAMP            0x00080000U /* IEEE1588 time stamp */
+#define TXGBE_TXD_IPSEC_SA_INDEX_MASK   0x000003FFU /* IPSec SA index */
+#define TXGBE_TXD_IPSEC_ESP_LEN_MASK    0x000001FFU /* IPSec ESP length */
+#define TXGBE_TXD_DTYP_MASK             0x00F00000U /* DTYP mask */
+#define TXGBE_TXD_DTYP_CTXT             0x00100000U /* Adv Context Desc */
+#define TXGBE_TXD_DTYP_DATA             0x00000000U /* Adv Data Descriptor */
+#define TXGBE_TXD_EOP                   0x01000000U  /* End of Packet */
+#define TXGBE_TXD_IFCS                  0x02000000U /* Insert FCS */
+#define TXGBE_TXD_LINKSEC               0x04000000U /* Enable linksec */
+#define TXGBE_TXD_RS                    0x08000000U /* Report Status */
+#define TXGBE_TXD_ECU                   0x10000000U /* DDP hdr type or iSCSI */
+#define TXGBE_TXD_QCN                   0x20000000U /* cntag insertion enable */
+#define TXGBE_TXD_VLE                   0x40000000U /* VLAN pkt enable */
+#define TXGBE_TXD_TSE                   0x80000000U /* TCP Seg enable */
 #define TXGBE_TXD_STAT_DD               0x00000001U /* Descriptor Done */
+#define TXGBE_TXD_IDX_SHIFT             4 /* Desc Index shift */
+#define TXGBE_TXD_CC                    0x00000080U /* Check Context */
+#define TXGBE_TXD_IPSEC                 0x00000100U /* Enable ipsec esp */
+#define TXGBE_TXD_IIPCS                 0x00000400U
+#define TXGBE_TXD_EIPCS                 0x00000800U
+#define TXGBE_TXD_L4CS                  0x00000200U
+#define TXGBE_TXD_PAYLEN_SHIFT          13 /* Desc PAYLEN shift */
+#define TXGBE_TXD_MACLEN_SHIFT          9  /* ctxt desc mac len shift */
+#define TXGBE_TXD_VLAN_SHIFT            16 /* ctxt vlan tag shift */
+#define TXGBE_TXD_TAG_TPID_SEL_SHIFT    11
+#define TXGBE_TXD_IPSEC_TYPE_SHIFT      14
+#define TXGBE_TXD_ENC_SHIFT             15
+
+#define TXGBE_TXD_L4LEN_SHIFT           8  /* ctxt L4LEN shift */
+#define TXGBE_TXD_MSS_SHIFT             16  /* ctxt MSS shift */
+
+#define TXGBE_TXD_OUTER_IPLEN_SHIFT     12 /* ctxt OUTERIPLEN shift */
+#define TXGBE_TXD_TUNNEL_LEN_SHIFT      21 /* ctxt TUNNELLEN shift */
+#define TXGBE_TXD_TUNNEL_TYPE_SHIFT     11 /* Tx Desc Tunnel Type shift */
+#define TXGBE_TXD_TUNNEL_DECTTL_SHIFT   27 /* ctxt DECTTL shift */
+#define TXGBE_TXD_TUNNEL_UDP            (0x0ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT)
+#define TXGBE_TXD_TUNNEL_GRE            (0x1ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT)
 
 /* Transmit Descriptor */
 union txgbe_tx_desc {
@@ -1060,6 +1167,14 @@ union txgbe_rx_desc {
 	} wb;  /* writeback */
 };
 
+/* Context descriptors */
+struct txgbe_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
 /****************** Manageability Host Interface defines *********************/
 #define TXGBE_HI_MAX_BLOCK_BYTE_LENGTH  256 /* Num of bytes in range */
 #define TXGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 15/16] net: txgbe: Support to get system network statistics
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (13 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 14/16] net: txgbe: Add transmit path to process packets Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  2022-08-10  8:55 ` [RFC PATCH net-next 16/16] net: txgbe: support to respond Tx hang Jiawen Wu
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Add support for reading system network statistics, gathered from the
MAC hardware counters and the per-ring software counters.
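
The counters are exposed through the standard ndo_get_stats64 path, so
they can be read with the usual tooling, e.g. 'ip -s link show' or
/sys/class/net/<dev>/statistics; no driver-specific interface is
needed.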

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  12 ++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c |  54 +++++
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |   1 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 190 ++++++++++++++++++
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   |  90 +++++++++
 5 files changed, 347 insertions(+)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 1265dd24e90b..584b9542f768 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -362,10 +362,19 @@ struct txgbe_adapter {
 	/* TX */
 	struct txgbe_ring *tx_ring[TXGBE_MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 
+	u64 restart_queue;
 	u64 lsc_int;
 
 	/* RX */
 	struct txgbe_ring *rx_ring[TXGBE_MAX_RX_QUEUES];
+	u64 hw_csum_rx_error;
+	u64 hw_csum_rx_good;
+	u64 hw_rx_no_dma_resources;
+	u64 rsc_total_count;
+	u64 rsc_total_flush;
+	u64 non_eop_descs;
+	u32 alloc_rx_page_failed;
+	u32 alloc_rx_buff_failed;
 
 	struct txgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
 
@@ -376,7 +385,9 @@ struct txgbe_adapter {
 	/* structs defined in txgbe_type.h */
 	struct txgbe_hw hw;
 	u16 msg_enable;
+	struct txgbe_hw_stats stats;
 
+	u64 tx_busy;
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
 
@@ -457,6 +468,7 @@ void txgbe_configure_rx_ring(struct txgbe_adapter *adapter,
 			     struct txgbe_ring *ring);
 void txgbe_configure_tx_ring(struct txgbe_adapter *adapter,
 			     struct txgbe_ring *ring);
+void txgbe_update_stats(struct txgbe_adapter *adapter);
 int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter);
 void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter);
 void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 6cd7e1a1c751..bf9720b505fc 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -109,6 +109,56 @@ s32 txgbe_init_hw(struct txgbe_hw *hw)
 	return status;
 }
 
+/**
+ *  txgbe_clear_hw_cntrs - Generic clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware.
+ *  Statistics counters are clear on read.
+ **/
+s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw)
+{
+	u16 i = 0;
+
+	rd32(hw, TXGBE_RX_CRC_ERROR_FRAMES_LOW);
+	for (i = 0; i < 8; i++)
+		rd32(hw, TXGBE_RDB_MPCNT(i));
+
+	rd32(hw, TXGBE_RX_LEN_ERROR_FRAMES_LOW);
+	rd32(hw, TXGBE_RDB_LXONTXC);
+	rd32(hw, TXGBE_RDB_LXOFFTXC);
+	rd32(hw, TXGBE_MAC_LXONRXC);
+	rd32(hw, TXGBE_MAC_LXOFFRXC);
+
+	for (i = 0; i < 8; i++) {
+		rd32(hw, TXGBE_RDB_PXONTXC(i));
+		rd32(hw, TXGBE_RDB_PXOFFTXC(i));
+		rd32(hw, TXGBE_MAC_PXONRXC(i));
+		wr32m(hw, TXGBE_MMC_CONTROL,
+		      TXGBE_MMC_CONTROL_UP, i << 16);
+		rd32(hw, TXGBE_MAC_PXOFFRXC);
+	}
+	for (i = 0; i < 8; i++)
+		rd32(hw, TXGBE_RDB_PXON2OFFCNT(i));
+	for (i = 0; i < 128; i++)
+		wr32(hw, TXGBE_PX_MPRC(i), 0);
+
+	rd32(hw, TXGBE_PX_GPRC);
+	rd32(hw, TXGBE_PX_GPTC);
+	rd32(hw, TXGBE_PX_GORC_MSB);
+	rd32(hw, TXGBE_PX_GOTC_MSB);
+
+	rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW);
+	rd32(hw, TXGBE_RX_UNDERSIZE_FRAMES_GOOD);
+	rd32(hw, TXGBE_RX_OVERSIZE_FRAMES_GOOD);
+	rd32(hw, TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW);
+	rd32(hw, TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW);
+	rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW);
+	rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW);
+	rd32(hw, TXGBE_RDM_DRP_PKT);
+	return 0;
+}
+
 /**
  *  txgbe_read_pba_string - Reads part number string from EEPROM
  *  @hw: pointer to hardware structure
@@ -2164,6 +2214,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw)
 
 	/* MAC */
 	mac->ops.init_hw = txgbe_init_hw;
+	mac->ops.clear_hw_cntrs = txgbe_clear_hw_cntrs;
 	mac->ops.get_mac_addr = txgbe_get_mac_addr;
 	mac->ops.stop_adapter = txgbe_stop_adapter;
 	mac->ops.get_bus_info = txgbe_get_bus_info;
@@ -3555,6 +3606,9 @@ s32 txgbe_start_hw(struct txgbe_hw *hw)
 	/* Clear the VLAN filter table */
 	TCALL(hw, mac.ops.clear_vfta);
 
+	/* Clear statistics registers */
+	TCALL(hw, mac.ops.clear_hw_cntrs);
+
 	TXGBE_WRITE_FLUSH(hw);
 
 	/* Clear the rate limiters */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index 9c3ab32ae608..7124f04dfa76 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -80,6 +80,7 @@ extern struct txgbe_dptype txgbe_ptype_lookup[256];
 u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw);
 s32 txgbe_init_hw(struct txgbe_hw *hw);
 s32 txgbe_start_hw(struct txgbe_hw *hw);
+s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw);
 s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num,
 			  u32 pba_num_size);
 s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 3adbe3bbddac..33a2c681bb1f 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -3403,6 +3403,193 @@ static void txgbe_shutdown(struct pci_dev *pdev)
 	}
 }
 
+/**
+ * txgbe_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: storage space for 64bit statistics
+ *
+ * Returns 64bit statistics, for use in the ndo_get_stats64 callback.
+ */
+static void txgbe_get_stats64(struct net_device *netdev,
+			      struct rtnl_link_stats64 *stats)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct txgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_irq(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes   = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_irq(&ring->syncp,
+				 start));
+			stats->rx_packets += packets;
+			stats->rx_bytes   += bytes;
+		}
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct txgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_irq(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes   = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_irq(&ring->syncp,
+				 start));
+			stats->tx_packets += packets;
+			stats->tx_bytes   += bytes;
+		}
+	}
+	rcu_read_unlock();
+	/* following stats updated by txgbe_watchdog_subtask() */
+	stats->multicast        = netdev->stats.multicast;
+	stats->rx_errors        = netdev->stats.rx_errors;
+	stats->rx_length_errors = netdev->stats.rx_length_errors;
+	stats->rx_crc_errors    = netdev->stats.rx_crc_errors;
+	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+}
+
+/**
+ * txgbe_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void txgbe_update_stats(struct txgbe_adapter *adapter)
+{
+	struct net_device_stats *net_stats = &adapter->netdev->stats;
+	struct txgbe_hw_stats *hwstats = &adapter->stats;
+	struct txgbe_hw *hw = &adapter->hw;
+	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+	u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0;
+	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff;
+	u64 bytes = 0, packets = 0;
+	u64 total_mpc = 0;
+
+	if (test_bit(__TXGBE_DOWN, &adapter->state) ||
+	    test_bit(__TXGBE_RESETTING, &adapter->state))
+		return;
+
+	if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) {
+		u64 rsc_count = 0;
+		u64 rsc_flush = 0;
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
+		}
+		adapter->rsc_total_count = rsc_count;
+		adapter->rsc_total_flush = rsc_flush;
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct txgbe_ring *rx_ring = adapter->rx_ring[i];
+
+		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+		hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt;
+		bytes += rx_ring->stats.bytes;
+		packets += rx_ring->stats.packets;
+	}
+	adapter->non_eop_descs = non_eop_descs;
+	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+	adapter->hw_csum_rx_error = hw_csum_rx_error;
+	adapter->hw_csum_rx_good = hw_csum_rx_good;
+	net_stats->rx_bytes = bytes;
+	net_stats->rx_packets = packets;
+
+	bytes = 0;
+	packets = 0;
+	/* gather some stats to the adapter struct that are per queue */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct txgbe_ring *tx_ring = adapter->tx_ring[i];
+
+		restart_queue += tx_ring->tx_stats.restart_queue;
+		tx_busy += tx_ring->tx_stats.tx_busy;
+		bytes += tx_ring->stats.bytes;
+		packets += tx_ring->stats.packets;
+	}
+	adapter->restart_queue = restart_queue;
+	adapter->tx_busy = tx_busy;
+	net_stats->tx_bytes = bytes;
+	net_stats->tx_packets = packets;
+
+	hwstats->crcerrs += rd32(hw, TXGBE_RX_CRC_ERROR_FRAMES_LOW);
+
+	/* 8 register reads */
+	for (i = 0; i < 8; i++) {
+		/* for packet buffers not used, the register should read 0 */
+		mpc = rd32(hw, TXGBE_RDB_MPCNT(i));
+		missed_rx += mpc;
+		hwstats->mpc[i] += mpc;
+		total_mpc += hwstats->mpc[i];
+		hwstats->pxontxc[i] += rd32(hw, TXGBE_RDB_PXONTXC(i));
+		hwstats->pxofftxc[i] += rd32(hw, TXGBE_RDB_PXOFFTXC(i));
+		hwstats->pxonrxc[i] += rd32(hw, TXGBE_MAC_PXONRXC(i));
+	}
+
+	hwstats->gprc += rd32(hw, TXGBE_PX_GPRC);
+
+	hwstats->o2bgptc += rd32(hw, TXGBE_TDM_OS2BMC_CNT);
+	if (txgbe_check_mng_access(&adapter->hw)) {
+		hwstats->o2bspc += rd32(hw, TXGBE_MNG_OS2BMC_CNT);
+		hwstats->b2ospc += rd32(hw, TXGBE_MNG_BMC2OS_CNT);
+	}
+	hwstats->b2ogprc += rd32(hw, TXGBE_RDM_BMC2OS_CNT);
+	hwstats->gorc += rd32(hw, TXGBE_PX_GORC_LSB);
+	hwstats->gorc += (u64)rd32(hw, TXGBE_PX_GORC_MSB) << 32;
+
+	hwstats->gotc += rd32(hw, TXGBE_PX_GOTC_LSB);
+	hwstats->gotc += (u64)rd32(hw, TXGBE_PX_GOTC_MSB) << 32;
+
+	adapter->hw_rx_no_dma_resources += rd32(hw, TXGBE_RDM_DRP_PKT);
+	hwstats->lxonrxc += rd32(hw, TXGBE_MAC_LXONRXC);
+
+	bprc = rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW);
+	hwstats->bprc += bprc;
+	hwstats->mprc = 0;
+
+	for (i = 0; i < 128; i++)
+		hwstats->mprc += rd32(hw, TXGBE_PX_MPRC(i));
+
+	hwstats->roc += rd32(hw, TXGBE_RX_OVERSIZE_FRAMES_GOOD);
+	hwstats->rlec += rd32(hw, TXGBE_RX_LEN_ERROR_FRAMES_LOW);
+	lxon = rd32(hw, TXGBE_RDB_LXONTXC);
+	hwstats->lxontxc += lxon;
+	lxoff = rd32(hw, TXGBE_RDB_LXOFFTXC);
+	hwstats->lxofftxc += lxoff;
+
+	hwstats->gptc += rd32(hw, TXGBE_PX_GPTC);
+	hwstats->mptc += rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW);
+	hwstats->ruc += rd32(hw, TXGBE_RX_UNDERSIZE_FRAMES_GOOD);
+	hwstats->tpr += rd32(hw, TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW);
+	hwstats->bptc += rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW);
+	/* Fill out the OS statistics structure */
+	net_stats->multicast = hwstats->mprc;
+
+	/* Rx Errors */
+	net_stats->rx_errors = hwstats->crcerrs + hwstats->rlec;
+	net_stats->rx_dropped = 0;
+	net_stats->rx_length_errors = hwstats->rlec;
+	net_stats->rx_crc_errors = hwstats->crcerrs;
+	net_stats->rx_missed_errors = total_mpc;
+}
+
 /**
  * txgbe_watchdog_update_link - update the link status
  * @adapter: pointer to the device adapter structure
@@ -3574,6 +3761,8 @@ static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter)
 	else
 		txgbe_watchdog_link_is_down(adapter);
 
+	txgbe_update_stats(adapter);
+
 	txgbe_watchdog_flush_tx(adapter);
 }
 
@@ -4783,6 +4972,7 @@ static const struct net_device_ops txgbe_netdev_ops = {
 	.ndo_change_mtu		= txgbe_change_mtu,
 	.ndo_vlan_rx_add_vid    = txgbe_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid   = txgbe_vlan_rx_kill_vid,
+	.ndo_get_stats64        = txgbe_get_stats64,
 	.ndo_features_check     = txgbe_features_check,
 	.ndo_set_features       = txgbe_set_features,
 	.ndo_fix_features       = txgbe_fix_features,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 58fa3b19b3a7..3f6381aaa7d5 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -471,6 +471,16 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_TDM_RP_RATE_MIN(v) ((0x3FFF & (v)))
 #define TXGBE_TDM_RP_RATE_MAX(v) ((0x3FFF & (v)) << 16)
 
+/* statistic */
+#define TXGBE_TDM_SEC_DRP       0x18304
+#define TXGBE_TDM_PKT_CNT       0x18308
+#define TXGBE_TDM_OS2BMC_CNT    0x18314
+
+/**************************** Receive DMA registers **************************/
+/* statistic */
+#define TXGBE_RDM_DRP_PKT           0x12500
+#define TXGBE_RDM_BMC2OS_CNT        0x12510
+
 /***************************** RDB registers *********************************/
 /* receive packet buffer */
 #define TXGBE_RDB_PB_WRAP           0x19004
@@ -896,6 +906,18 @@ enum {
 #define TXGBE_PX_RR_CFG_RR_SZ           0x0000007EU
 #define TXGBE_PX_RR_CFG_RR_EN           0x00000001U
 
+/* statistic */
+#define TXGBE_PX_MPRC(_i)               (0x1020 + ((_i) * 64))
+
+#define TXGBE_PX_GPRC                   0x12504
+#define TXGBE_PX_GPTC                   0x18308
+
+#define TXGBE_PX_GORC_LSB               0x12508
+#define TXGBE_PX_GORC_MSB               0x1250C
+
+#define TXGBE_PX_GOTC_LSB               0x1830C
+#define TXGBE_PX_GOTC_MSB               0x18310
+
 /* Part Number String Length */
 #define TXGBE_PBANUM_LENGTH     32
 
@@ -1408,6 +1430,73 @@ struct txgbe_bus_info {
 	u16 lan_id;
 };
 
+/* Statistics counters collected by the MAC */
+struct txgbe_hw_stats {
+	u64 crcerrs;
+	u64 illerrc;
+	u64 errbc;
+	u64 mspdc;
+	u64 mpctotal;
+	u64 mpc[8];
+	u64 mlfc;
+	u64 mrfc;
+	u64 rlec;
+	u64 lxontxc;
+	u64 lxonrxc;
+	u64 lxofftxc;
+	u64 lxoffrxc;
+	u64 pxontxc[8];
+	u64 pxonrxc[8];
+	u64 pxofftxc[8];
+	u64 pxoffrxc[8];
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc[8];
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mngprc;
+	u64 mngpdc;
+	u64 mngptc;
+	u64 tor;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 xec;
+	u64 qprc[16];
+	u64 qptc[16];
+	u64 qbrc[16];
+	u64 qbtc[16];
+	u64 qprdc[16];
+	u64 pxon2offc[8];
+	u64 fccrc;
+	u64 fclast;
+	u64 ldpcec;
+	u64 pcrc8ec;
+	u64 b2ospc;
+	u64 b2ogprc;
+	u64 o2bgptc;
+	u64 o2bspc;
+};
+
 /* forward declaration */
 struct txgbe_hw;
 
@@ -1429,6 +1518,7 @@ struct txgbe_mac_operations {
 	s32 (*init_hw)(struct txgbe_hw *hw);
 	s32 (*reset_hw)(struct txgbe_hw *hw);
 	s32 (*start_hw)(struct txgbe_hw *hw);
+	s32 (*clear_hw_cntrs)(struct txgbe_hw *hw);
 	enum txgbe_media_type (*get_media_type)(struct txgbe_hw *hw);
 	s32 (*get_mac_addr)(struct txgbe_hw *hw, u8 *mac_addr);
 	s32 (*get_san_mac_addr)(struct txgbe_hw *hw, u8 *san_mac_addr);
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [RFC PATCH net-next 16/16] net: txgbe: support to respond Tx hang
  2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
                   ` (14 preceding siblings ...)
  2022-08-10  8:55 ` [RFC PATCH net-next 15/16] net: txgbe: Support to get system network statistics Jiawen Wu
@ 2022-08-10  8:55 ` Jiawen Wu
  15 siblings, 0 replies; 18+ messages in thread
From: Jiawen Wu @ 2022-08-10  8:55 UTC (permalink / raw)
  To: netdev; +Cc: Jiawen Wu

Check for Tx hangs, and determine whether a detected hang was caused by
PCIe link loss.
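
When a transmit queue stalls, the networking core watchdog invokes the
driver's ndo_tx_timeout callback; it is implemented here to dump the
registers relevant to diagnosing the hang, and reads back the PCI
vendor ID to tell a genuine Tx hang apart from a lost PCIe link.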

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ethernet/wangxun/txgbe/txgbe.h    |  10 +
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 186 ++++++++++++++++++
 2 files changed, 196 insertions(+)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 584b9542f768..7646cdfa1c67 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -122,6 +122,7 @@ struct txgbe_queue_stats {
 struct txgbe_tx_queue_stats {
 	u64 restart_queue;
 	u64 tx_busy;
+	u64 tx_done_old;
 };
 
 struct txgbe_rx_queue_stats {
@@ -135,6 +136,8 @@ struct txgbe_rx_queue_stats {
 };
 
 enum txgbe_ring_state_t {
+	__TXGBE_TX_DETECT_HANG,
+	__TXGBE_HANG_CHECK_ARMED,
 	__TXGBE_RX_RSC_ENABLED,
 };
 
@@ -143,6 +146,12 @@ struct txgbe_fwd_adapter {
 	struct txgbe_adapter *adapter;
 };
 
+#define check_for_tx_hang(ring) \
+	test_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state)
 #define ring_is_rsc_enabled(ring) \
 	test_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state)
 #define set_ring_rsc_enabled(ring) \
@@ -364,6 +373,7 @@ struct txgbe_adapter {
 
 	u64 restart_queue;
 	u64 lsc_int;
+	u32 tx_timeout_count;
 
 	/* RX */
 	struct txgbe_ring *rx_ring[TXGBE_MAX_RX_QUEUES];
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 33a2c681bb1f..806901db5a0b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -219,6 +219,130 @@ void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring,
 	/* tx_buffer must be completely set up in the transmit path */
 }
 
+static u64 txgbe_get_tx_completed(struct txgbe_ring *ring)
+{
+	return ring->stats.packets;
+}
+
+static u64 txgbe_get_tx_pending(struct txgbe_ring *ring)
+{
+	struct txgbe_adapter *adapter;
+	struct txgbe_hw *hw;
+	u32 head, tail;
+
+	if (ring->accel)
+		adapter = ring->accel->adapter;
+	else
+		adapter = ring->q_vector->adapter;
+
+	hw = &adapter->hw;
+	head = rd32(hw, TXGBE_PX_TR_RP(ring->reg_idx));
+	tail = rd32(hw, TXGBE_PX_TR_WP(ring->reg_idx));
+
+	return ((head <= tail) ? tail : tail + ring->count) - head;
+}
+
+static inline bool txgbe_check_tx_hang(struct txgbe_ring *tx_ring)
+{
+	u64 tx_done = txgbe_get_tx_completed(tx_ring);
+	u64 tx_done_old = tx_ring->tx_stats.tx_done_old;
+	u64 tx_pending = txgbe_get_tx_pending(tx_ring);
+
+	clear_check_for_tx_hang(tx_ring);
+
+	/* Check for a hung queue, but be thorough. This verifies
+	 * that a transmit has been completed since the previous
+	 * check AND there is at least one packet pending. The
+	 * ARMED bit is set to indicate a potential hang. The
+	 * bit is cleared if a pause frame is received to remove
+	 * false hang detection due to PFC or 802.3x frames. By
+	 * requiring this to fail twice we avoid races with
+	 * pfc clearing the ARMED bit and conditions where we
+	 * run the check_tx_hang logic with a transmit completion
+	 * pending but without time to complete it yet.
+	 */
+	if (tx_done_old == tx_done && tx_pending)
+		/* make sure it is true for two checks in a row */
+		return test_and_set_bit(__TXGBE_HANG_CHECK_ARMED,
+					&tx_ring->state);
+	/* update completed stats and continue */
+	tx_ring->tx_stats.tx_done_old = tx_done;
+	/* reset the countdown */
+	clear_bit(__TXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+
+	return false;
+}
+
+/**
+ * txgbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
+ **/
+static void txgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	bool real_tx_hang = false;
+	u16 value = 0;
+	u32 value2 = 0;
+	u32 value3 = 0;
+	u32 head, tail;
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct txgbe_ring *tx_ring = adapter->tx_ring[i];
+
+		if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring))
+			real_tx_hang = true;
+	}
+
+	if (real_tx_hang)
+		netif_warn(adapter, drv, netdev, "Real Tx hang.\n");
+
+	/* Dump the relevant registers to determine the cause of a timeout event. */
+	pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value);
+	netif_warn(adapter, drv, netdev, "pci vendor id: 0x%x\n", value);
+	pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+	netif_warn(adapter, drv, netdev, "pci command reg: 0x%x.\n", value);
+
+	value2 = rd32(&adapter->hw, 0x10000);
+	netif_warn(adapter, drv, netdev, "reg mis_pwr: 0x%08x\n", value2);
+	value2 = rd32(&adapter->hw, 0x180d0);
+	netif_warn(adapter, drv, netdev, "tdm desc 0 fatal: 0x%08x\n", value2);
+	value2 = rd32(&adapter->hw, 0x180d4);
+	netif_warn(adapter, drv, netdev, "tdm desc 1 fatal: 0x%08x\n", value2);
+	value2 = rd32(&adapter->hw, 0x180d8);
+	netif_warn(adapter, drv, netdev, "tdm desc 2 fatal: 0x%08x\n", value2);
+	value2 = rd32(&adapter->hw, 0x180dc);
+	netif_warn(adapter, drv, netdev, "tdm desc 3 fatal: 0x%08x\n", value2);
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		head = rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx));
+		tail = rd32(&adapter->hw, TXGBE_PX_TR_WP(adapter->tx_ring[i]->reg_idx));
+
+		netif_warn(adapter, drv, netdev,
+			   "tx ring %d next_to_use is %d, next_to_clean is %d\n",
+			   i, adapter->tx_ring[i]->next_to_use,
+			   adapter->tx_ring[i]->next_to_clean);
+		netif_warn(adapter, drv, netdev,
+			   "tx ring %d hw rp is 0x%x, wp is 0x%x\n",
+			   i, head, tail);
+	}
+
+	value2 = rd32(&adapter->hw, TXGBE_PX_IMS(0));
+	value3 = rd32(&adapter->hw, TXGBE_PX_IMS(1));
+	netif_warn(adapter, drv, netdev,
+		   "PX_IMS0 value is 0x%08x, PX_IMS1 value is 0x%08x\n",
+		   value2, value3);
+
+	if (value2 || value3) {
+		netif_warn(adapter, drv, netdev, "clear interrupt mask.\n");
+		wr32(&adapter->hw, TXGBE_PX_ICS(0), value2);
+		wr32(&adapter->hw, TXGBE_PX_IMC(0), value2);
+		wr32(&adapter->hw, TXGBE_PX_ICS(1), value3);
+		wr32(&adapter->hw, TXGBE_PX_IMC(1), value3);
+	}
+}
+
 /**
  * txgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: structure containing interrupt and ring information
@@ -322,6 +446,39 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector,
 	q_vector->tx.total_bytes += total_bytes;
 	q_vector->tx.total_packets += total_packets;
 
+	/* schedule immediate reset if we believe we hung */
+	if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring)) {
+		struct txgbe_hw *hw = &adapter->hw;
+		u16 value = 0;
+
+		netif_err(adapter, drv, adapter->netdev,
+			  "Detected Tx Unit Hang\n"
+			  "  Tx Queue             <%d>\n"
+			  "  TDH, TDT             <%x>, <%x>\n"
+			  "  next_to_use          <%x>\n"
+			  "  next_to_clean        <%x>\n"
+			  "tx_buffer_info[next_to_clean]\n"
+			  "  jiffies              <%lx>\n",
+			  tx_ring->queue_index,
+			  rd32(hw, TXGBE_PX_TR_RP(tx_ring->reg_idx)),
+			  rd32(hw, TXGBE_PX_TR_WP(tx_ring->reg_idx)),
+			  tx_ring->next_to_use, i, jiffies);
+
+		pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value);
+		if (value == TXGBE_FAILED_READ_CFG_WORD)
+			netif_info(adapter, hw, adapter->netdev,
+				   "pcie link has been lost.\n");
+
+		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+		netif_info(adapter, probe, adapter->netdev,
+			   "tx hang %d detected on queue %d, resetting adapter\n",
+			   adapter->tx_timeout_count + 1, tx_ring->queue_index);
+
+		/* the adapter is about to reset, no point in enabling stuff */
+		return true;
+	}
+
 	netdev_tx_completed_queue(txring_txq(tx_ring),
 				  total_packets, total_bytes);
 
@@ -3590,6 +3747,32 @@ void txgbe_update_stats(struct txgbe_adapter *adapter)
 	net_stats->rx_missed_errors = total_mpc;
 }
 
+/**
+ * txgbe_check_hang_subtask - arm the Tx hang check on the active queues
+ * @adapter: pointer to the device adapter structure
+ *
+ * Sets the ring state bits needed to check for Tx hangs, so that the
+ * next Tx completion cleanup can immediately determine whether a hang
+ * has occurred.
+ */
+static void txgbe_check_hang_subtask(struct txgbe_adapter *adapter)
+{
+	int i;
+
+	/* If we're down or resetting, just bail */
+	if (test_bit(__TXGBE_DOWN, &adapter->state) ||
+	    test_bit(__TXGBE_REMOVING, &adapter->state) ||
+	    test_bit(__TXGBE_RESETTING, &adapter->state))
+		return;
+
+	/* Force detection of hung controller */
+	if (netif_carrier_ok(adapter->netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			set_check_for_tx_hang(adapter->tx_ring[i]);
+	}
+}
+
 /**
  * txgbe_watchdog_update_link - update the link status
  * @adapter: pointer to the device adapter structure
@@ -3913,6 +4096,7 @@ static void txgbe_reset_subtask(struct txgbe_adapter *adapter)
 		return;
 
 	netdev_err(adapter->netdev, "Reset adapter\n");
+	adapter->tx_timeout_count++;
 
 	rtnl_lock();
 	if (adapter->flags2 & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) {
@@ -3993,6 +4177,7 @@ static void txgbe_service_task(struct work_struct *work)
 	txgbe_sfp_link_config_subtask(adapter);
 	txgbe_check_overtemp_subtask(adapter);
 	txgbe_watchdog_subtask(adapter);
+	txgbe_check_hang_subtask(adapter);
 
 	txgbe_service_event_complete(adapter);
 }
@@ -4970,6 +5155,7 @@ static const struct net_device_ops txgbe_netdev_ops = {
 	.ndo_set_rx_mode        = txgbe_set_rx_mode,
 	.ndo_validate_addr      = eth_validate_addr,
 	.ndo_change_mtu		= txgbe_change_mtu,
+	.ndo_tx_timeout         = txgbe_tx_timeout,
 	.ndo_vlan_rx_add_vid    = txgbe_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid   = txgbe_vlan_rx_kill_vid,
 	.ndo_get_stats64        = txgbe_get_stats64,
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* Re: [RFC PATCH net-next 01/16] net: txgbe: Store PCI info
  2022-08-10  8:55 ` [RFC PATCH net-next 01/16] net: txgbe: Store PCI info Jiawen Wu
@ 2022-08-11  1:55   ` Andrew Lunn
  0 siblings, 0 replies; 18+ messages in thread
From: Andrew Lunn @ 2022-08-11  1:55 UTC (permalink / raw)
  To: Jiawen Wu; +Cc: netdev

> +/**
> + *  txgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
> + *  @hw: pointer to the HW structure
> + *
> + *  Determines the LAN function id by reading memory-mapped registers
> + *  and swaps the port value if requested.
> + **/
> +s32 txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw)
> +{
> +	struct txgbe_bus_info *bus = &hw->bus;
> +	u32 reg;
> +
> +	reg = rd32(hw, TXGBE_CFG_PORT_ST);
> +	bus->lan_id = TXGBE_CFG_PORT_ST_LAN_ID(reg);
> +
> +	/* check for a port swap */
> +	reg = rd32(hw, TXGBE_MIS_PWR);
> +	if (TXGBE_MIS_PWR_LAN_ID(reg) == TXGBE_MIS_PWR_LAN_ID_1)
> +		bus->func = 0;
> +	else
> +		bus->func = bus->lan_id;
> +
> +	return 0;

If there is nothing useful to return, and there is nothing which can
go wrong, make functions void.
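
i.e.

	void txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw)

and drop the 'return 0'.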

> +}
> +
> +/* cmd_addr is used for some special command:
> + * 1. to be sector address, when implemented erase sector command
> + * 2. to be flash address when implemented read, write flash address
> + */
> +u8 fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr)
> +{
> +	u32 cmd_val = 0;
> +	u32 time_out = 0;
> +
> +	cmd_val = (cmd << SPI_CLK_CMD_OFFSET) |
> +		  (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET) | cmd_addr;
> +	wr32(hw, SPI_H_CMD_REG_ADDR, cmd_val);
> +	while (1) {
> +		if (rd32(hw, SPI_H_STA_REG_ADDR) & 0x1)
> +			break;
> +
> +		if (time_out == SPI_TIME_OUT_VALUE)
> +			return 1;
> +
> +		time_out = time_out + 1;
> +		usleep_range(10, 20);
> +	}

Please use iopoll.h for code which looks like this.

> +
> +	return 0;

Don't use 0 or 1 as return values. Return -ETIMEDOUT on error, since
you want that error code to be returned to user space.
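
Something along these lines would cover both points (untested sketch,
keeping the existing rd32()/wr32() helpers and register names):

	#include <linux/iopoll.h>

	int fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr)
	{
		u32 cmd_val, sta;

		cmd_val = (cmd << SPI_CLK_CMD_OFFSET) |
			  (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET) | cmd_addr;
		wr32(hw, SPI_H_CMD_REG_ADDR, cmd_val);

		/* polls every 10us with roughly the same overall budget as
		 * the open-coded loop; returns 0 or -ETIMEDOUT
		 */
		return read_poll_timeout(rd32, sta, sta & 0x1, 10,
					 10 * SPI_TIME_OUT_VALUE, false,
					 hw, SPI_H_STA_REG_ADDR);
	}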

> +}
> +
> +u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr)
> +{
> +	u8 status = fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr);
> +
> +	if (status)
> +		return (u32)status;

Avoid casts wherever possible. Casts like this suggest your API
design is wrong between your helpers.

> +
> +	return rd32(hw, SPI_H_DAT_REG_ADDR);
> +}

How is the caller of this function meant to decide if the flash
contained 0x1, or the read timed out?
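
One way to make that unambiguous (untested sketch, assuming
fmgr_cmd_op() is reworked to return 0 / -ETIMEDOUT as above) is to
return the status and hand the data back through a pointer:

	int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data)
	{
		int err = fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr);

		if (err)
			return err;

		*data = rd32(hw, SPI_H_DAT_REG_ADDR);
		return 0;
	}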

> +int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
> +{
> +	u32 i = 0, reg = 0;
> +	int err = 0;
> +
> +	/* if there's flash existing */
> +	if (!(rd32(hw, TXGBE_SPI_STATUS) &
> +	      TXGBE_SPI_STATUS_FLASH_BYPASS)) {
> +		/* wait hw load flash done */
> +		for (i = 0; i < TXGBE_MAX_FLASH_LOAD_POLL_TIME; i++) {
> +			reg = rd32(hw, TXGBE_SPI_ILDR_STATUS);
> +			if (!(reg & check_bit)) {
> +				/* done */
> +				break;
> +			}
> +			msleep(200);
> +		}
> +		if (i == TXGBE_MAX_FLASH_LOAD_POLL_TIME)
> +			err = TXGBE_ERR_FLASH_LOADING_FAILED;

Use standard error codes, ETIMEDOUT.

> +/**
> + * txgbe_enumerate_functions - Get the number of ports this device has
> + * @adapter: adapter structure
> + *
> + * This function enumerates the phsyical functions co-located on a single slot,
> + * in order to determine how many ports a device has. This is most useful in
> + * determining the required GT/s of PCIe bandwidth necessary for optimal
> + * performance.
> + **/
> +static inline int txgbe_enumerate_functions(struct txgbe_adapter *adapter)

No inline functions. Let the compiler decide.

> +{
> +	struct pci_dev *entry, *pdev = adapter->pdev;
> +	int physfns = 0;
> +
> +	list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
> +		/* When the devices on the bus don't all match our device ID,
> +		 * we can't reliably determine the correct number of
> +		 * functions. This can occur if a function has been direct
> +		 * attached to a virtual machine using VT-d, for example. In
> +		 * this case, simply return -1 to indicate this.
> +		 */
> +		if (entry->vendor != pdev->vendor ||
> +		    entry->device != pdev->device)
> +			return -1;

EINVAL? ENODEV?

> + *  txgbe_init_shared_code - Initialize the shared code
> + *  @hw: pointer to hardware structure
> + *
> + *  This will assign function pointers and assign the MAC type and PHY code.
> + **/
> +s32 txgbe_init_shared_code(struct txgbe_hw *hw)
> +{
> +	s32 status;
> +
> +	status = txgbe_init_ops(hw);
> +	return status;

just

	return txgbe_init_ops(hw);

> +}
> +
> +/**
> + * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter)
> + * @adapter: board private structure to initialize
> + **/
> +static int txgbe_sw_init(struct txgbe_adapter *adapter)
> +{
> +	struct pci_dev *pdev = adapter->pdev;
> +	struct txgbe_hw *hw = &adapter->hw;
> +	u32 ssid = 0;
> +	int err = 0;
> +
> +	/* PCI config space info */
> +	hw->vendor_id = pdev->vendor;
> +	hw->device_id = pdev->device;
> +	hw->revision_id = pdev->revision;
> +	hw->oem_svid = pdev->subsystem_vendor;
> +	hw->oem_ssid = pdev->subsystem_device;
> +
> +	if (hw->oem_svid == PCI_VENDOR_ID_WANGXUN) {
> +		hw->subsystem_vendor_id = pdev->subsystem_vendor;
> +		hw->subsystem_device_id = pdev->subsystem_device;
> +	} else {
> +		ssid = txgbe_flash_read_dword(hw, 0xfffdc);
> +		if (ssid == 0x1) {

This is where you cannot differentiate between a timeout and a FLASH
containing 0x1.

> +			netif_err(adapter, probe, adapter->netdev,
> +				  "read of internal subsystem device id failed\n");
> +			return -ENODEV;
> +		}
> +		hw->subsystem_device_id = (u16)ssid >> 8 | (u16)ssid << 8;
> +	}
> +
> +	err = txgbe_init_shared_code(hw);
> +	if (err) {
> +		netif_err(adapter, probe, adapter->netdev,
> +			  "init_shared_code failed: %d\n", err);
> +		return err;
> +	}
> +
> +	return 0;
> +}
> +
>  static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
>  {
>  	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
> @@ -67,8 +192,9 @@ static int txgbe_probe(struct pci_dev *pdev,
>  		       const struct pci_device_id __always_unused *ent)
>  {
>  	struct txgbe_adapter *adapter = NULL;
> +	struct txgbe_hw *hw = NULL;
>  	struct net_device *netdev;
> -	int err;
> +	int err, expected_gts;
>  
>  	err = pci_enable_device_mem(pdev);
>  	if (err)
> @@ -107,6 +233,8 @@ static int txgbe_probe(struct pci_dev *pdev,
>  	adapter = netdev_priv(netdev);
>  	adapter->netdev = netdev;
>  	adapter->pdev = pdev;
> +	hw = &adapter->hw;
> +	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
>  
>  	adapter->io_addr = devm_ioremap(&pdev->dev,
>  					pci_resource_start(pdev, 0),
> @@ -115,11 +243,44 @@ static int txgbe_probe(struct pci_dev *pdev,
>  		err = -EIO;
>  		goto err_pci_release_regions;
>  	}
> +	hw->hw_addr = adapter->io_addr;
> +
> +	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
> +
> +	/* setup the private structure */
> +	err = txgbe_sw_init(adapter);
> +	if (err)
> +		goto err_pci_release_regions;
> +
> +	TCALL(hw, mac.ops.set_lan_id);

Don't use macros like this.
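
Call the op directly, e.g. (assuming set_lan_id is always populated
for this device):

	hw->mac.ops.set_lan_id(hw);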

> +
> +	/* check if flash load is done after hw power up */
> +	err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PERST);
> +	if (err)
> +		goto err_pci_release_regions;
> +	err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PWRRST);
> +	if (err)
> +		goto err_pci_release_regions;
>  
>  	netdev->features |= NETIF_F_HIGHDMA;
>  
> +	/* pick up the PCI bus settings for reporting later */
> +	TCALL(hw, mac.ops.get_bus_info);
> +
>  	pci_set_drvdata(pdev, adapter);
>  
> +	/* calculate the expected PCIe bandwidth required for optimal
> +	 * performance. Note that some older parts will never have enough
> +	 * bandwidth due to being older generation PCIe parts. We clamp these
> +	 * parts to ensure that no warning is displayed, as this could confuse
> +	 * users otherwise.
> +	 */
> +	expected_gts = txgbe_enumerate_functions(adapter) * 10;
> +
> +	/* don't check link if we failed to enumerate functions */
> +	if (expected_gts > 0)
> +		txgbe_check_minimum_link(adapter);

What about expected_gts == -1?
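
A hypothetical rework that makes the error path explicit:

	int num_fns = txgbe_enumerate_functions(adapter);

	if (num_fns < 0)
		dev_warn(&pdev->dev,
			 "could not enumerate functions, skipping link check\n");
	else
		txgbe_check_minimum_link(adapter);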

> +
> +/* PCI bus speeds */
> +enum txgbe_bus_speed {
> +	txgbe_bus_speed_unknown	= 0,
> +	txgbe_bus_speed_33	= 33,
> +	txgbe_bus_speed_66	= 66,
> +	txgbe_bus_speed_100	= 100,
> +	txgbe_bus_speed_120	= 120,
> +	txgbe_bus_speed_133	= 133,
> +	txgbe_bus_speed_2500	= 2500,
> +	txgbe_bus_speed_5000	= 5000,
> +	txgbe_bus_speed_8000	= 8000,
> +	txgbe_bus_speed_reserved

Could you use enum pci_bus_speed from include/linux/pci.h?


> +};
> +
> +/* PCI bus widths */
> +enum txgbe_bus_width {
> +	txgbe_bus_width_unknown	= 0,
> +	txgbe_bus_width_pcie_x1	= 1,
> +	txgbe_bus_width_pcie_x2	= 2,
> +	txgbe_bus_width_pcie_x4	= 4,
> +	txgbe_bus_width_pcie_x8	= 8,
> +	txgbe_bus_width_32	= 32,
> +	txgbe_bus_width_64	= 64,
> +	txgbe_bus_width_reserved

pcie_link_width?

It is much better to use existing enums than to invent your own.
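
For the minimum-link warning itself, the PCI core may already cover
this (assuming its output is acceptable here):

	/* logs the available bandwidth and warns when the device is
	 * limited by a slower upstream link
	 */
	pcie_print_link_status(pdev);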

> +/* Error Codes */
> +#define TXGBE_ERR                                100
> +#define TXGBE_NOT_IMPLEMENTED                    0x7FFFFFFF
> +/* (-TXGBE_ERR, TXGBE_ERR): reserved for non-txgbe defined error code */
> +#define TXGBE_ERR_NOSUPP                        -(TXGBE_ERR + 0)
> +#define TXGBE_ERR_EEPROM                        -(TXGBE_ERR + 1)
> +#define TXGBE_ERR_EEPROM_CHECKSUM               -(TXGBE_ERR + 2)
> +#define TXGBE_ERR_PHY                           -(TXGBE_ERR + 3)

Use standard error codes, which you can return to user space.
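
For example (one possible mapping, pick whatever fits each call
site):

	-EOPNOTSUPP	instead of TXGBE_ERR_NOSUPP
	-EIO		instead of TXGBE_ERR_EEPROM
	-EBADMSG	instead of TXGBE_ERR_EEPROM_CHECKSUM
	-ENXIO		instead of TXGBE_ERR_PHY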

> +
> +static inline bool TXGBE_REMOVED(void __iomem *addr)
> +{
> +	return unlikely(!addr);
> +}

This needs a comment to explain it!

> +
> +static inline u32
> +txgbe_rd32(u8 __iomem *base)
> +{
> +	return readl(base);
> +}

Pointless wrapper. Just use readl()!

> +
> +static inline u32
> +rd32(struct txgbe_hw *hw, u32 reg)
> +{
> +	u8 __iomem *base = READ_ONCE(hw->hw_addr);

It is very unusual for the hardware to change its address after
probe. In fact, it is very unusual for the hardware to change its
address ever.  I find this READ_ONCE very suspicious. Please explain.

> +	u32 val = TXGBE_FAILED_READ_REG;
> +
> +	if (unlikely(!base))
> +		return val;

Can this happen? If it can, -ENODEV or -EIO would be the correct
return value.

Please go through the whole driver and fix up your function return
types and values, and the error checking.

In general, functions should return int: 0 on success, or a negative
error code on failure. Callers should always check for errors and
return them up the call stack.
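
i.e. the usual pattern:

	err = txgbe_do_something(hw);
	if (err)
		return err;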

     Andrew

^ permalink raw reply	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2022-08-11  1:55 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-08-10  8:55 [RFC PATCH net-next 00/16] net: WangXun txgbe ethernet driver Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 01/16] net: txgbe: Store PCI info Jiawen Wu
2022-08-11  1:55   ` Andrew Lunn
2022-08-10  8:55 ` [RFC PATCH net-next 02/16] net: txgbe: Reset hardware Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 03/16] net: txgbe: Set MAC address and register netdev Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 04/16] net: txgbe: Add operations to interact with firmware Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 05/16] net: txgbe: Identify PHY and SFP module Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 06/16] net: txgbe: Initialize service task Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 07/16] net: txgbe: Support to setup link Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 08/16] net: txgbe: Add interrupt support Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 09/16] net: txgbe: Handle various event interrupts Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 10/16] net: txgbe: Configure Rx and Tx unit of the MAC Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 11/16] net: txgbe: Allocate Rx and Tx resources Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 12/16] net: txgbe: Add Rx and Tx cleanup routine Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 13/16] net: txgbe: Add device Rx features Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 14/16] net: txgbe: Add transmit path to process packets Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 15/16] net: txgbe: Support to get system network statistics Jiawen Wu
2022-08-10  8:55 ` [RFC PATCH net-next 16/16] net: txgbe: support to respond Tx hang Jiawen Wu
