* [trivial-mods:20200310_fallthrough_2 158/491] drivers/net/ethernet/broadcom/tg3.c:10720 tg3_reset_hw() warn: inconsistent indenting
@ 2020-03-11 21:58 kbuild test robot
  2020-03-11 23:58 ` Joe Perches
  0 siblings, 1 reply; 2+ messages in thread
From: kbuild test robot @ 2020-03-11 21:58 UTC (permalink / raw)
  To: kbuild-all

tree:   https://repo.or.cz/linux-2.6/trivial-mods.git 20200310_fallthrough_2
head:   71c55e51125d74e9bd8cce382679ee762d9a86fd
commit: f29d60f7f1429e115b81d2e47e605a727861e10f [158/491] BROADCOM TG3 GIGABIT ETHERNET DRIVER: Use fallthrough;

If you fix the issue, kindly add the following tag:
Reported-by: kbuild test robot <lkp@intel.com>

New smatch warnings:
drivers/net/ethernet/broadcom/tg3.c:10720 tg3_reset_hw() warn: inconsistent indenting
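
For readers unfamiliar with this check: smatch warns about "inconsistent
indenting" when a statement's leading whitespace does not match the block it
sits in. It is a layout heuristic only and says nothing about the compiled
code. A minimal, purely illustrative C sketch of the shape it reacts to,
borrowing names from the quoted function but not the actual tg3.c hunk:

	if (limit == 16) {
		tw32(MAC_RCV_RULE_15, 0);
			tw32(MAC_RCV_VALUE_15, 0);	/* one tab deeper than its block: smatch warns here */
	}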

Old smatch warnings:
drivers/net/ethernet/broadcom/tg3.c:2605 tg3_phy_reset_5703_4_5() error: uninitialized symbol 'phy9_orig'.
drivers/net/ethernet/broadcom/tg3.c:5988 tg3_setup_fiber_mii_phy() error: uninitialized symbol 'local_adv'.
drivers/net/ethernet/broadcom/tg3.c:5988 tg3_setup_fiber_mii_phy() error: uninitialized symbol 'remote_adv'.
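
The "uninitialized symbol" warnings are a different class of check: smatch has
found at least one path on which a local is read before any assignment. A
minimal sketch of that shape, using hypothetical helpers rather than the
actual tg3 code:

	u32 local_adv;				/* name borrowed from the warning for illustration */

	if (link_negotiated(tp))		/* hypothetical predicate */
		local_adv = read_adv_reg(tp);	/* hypothetical helper */
	report_adv(local_adv);			/* on the "false" path this reads an uninitialized value */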

vim +10720 drivers/net/ethernet/broadcom/tg3.c

  9859	
  9860	/* tp->lock is held. */
  9861	static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
  9862	{
  9863		u32 val, rdmac_mode;
  9864		int i, err, limit;
  9865		struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
  9866	
  9867		tg3_disable_ints(tp);
  9868	
  9869		tg3_stop_fw(tp);
  9870	
  9871		tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
  9872	
  9873		if (tg3_flag(tp, INIT_COMPLETE))
  9874			tg3_abort_hw(tp, 1);
  9875	
  9876		if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
  9877		    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
  9878			tg3_phy_pull_config(tp);
  9879			tg3_eee_pull_config(tp, NULL);
  9880			tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
  9881		}
  9882	
  9883		/* Enable MAC control of LPI */
  9884		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
  9885			tg3_setup_eee(tp);
  9886	
  9887		if (reset_phy)
  9888			tg3_phy_reset(tp);
  9889	
  9890		err = tg3_chip_reset(tp);
  9891		if (err)
  9892			return err;
  9893	
  9894		tg3_write_sig_legacy(tp, RESET_KIND_INIT);
  9895	
  9896		if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
  9897			val = tr32(TG3_CPMU_CTRL);
  9898			val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
  9899			tw32(TG3_CPMU_CTRL, val);
  9900	
  9901			val = tr32(TG3_CPMU_LSPD_10MB_CLK);
  9902			val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
  9903			val |= CPMU_LSPD_10MB_MACCLK_6_25;
  9904			tw32(TG3_CPMU_LSPD_10MB_CLK, val);
  9905	
  9906			val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
  9907			val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
  9908			val |= CPMU_LNK_AWARE_MACCLK_6_25;
  9909			tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
  9910	
  9911			val = tr32(TG3_CPMU_HST_ACC);
  9912			val &= ~CPMU_HST_ACC_MACCLK_MASK;
  9913			val |= CPMU_HST_ACC_MACCLK_6_25;
  9914			tw32(TG3_CPMU_HST_ACC, val);
  9915		}
  9916	
  9917		if (tg3_asic_rev(tp) == ASIC_REV_57780) {
  9918			val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
  9919			val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
  9920			       PCIE_PWR_MGMT_L1_THRESH_4MS;
  9921			tw32(PCIE_PWR_MGMT_THRESH, val);
  9922	
  9923			val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
  9924			tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
  9925	
  9926			tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
  9927	
  9928			val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
  9929			tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
  9930		}
  9931	
  9932		if (tg3_flag(tp, L1PLLPD_EN)) {
  9933			u32 grc_mode = tr32(GRC_MODE);
  9934	
  9935			/* Access the lower 1K of PL PCIE block registers. */
  9936			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
  9937			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
  9938	
  9939			val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
  9940			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
  9941			     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
  9942	
  9943			tw32(GRC_MODE, grc_mode);
  9944		}
  9945	
  9946		if (tg3_flag(tp, 57765_CLASS)) {
  9947			if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
  9948				u32 grc_mode = tr32(GRC_MODE);
  9949	
  9950				/* Access the lower 1K of PL PCIE block registers. */
  9951				val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
  9952				tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
  9953	
  9954				val = tr32(TG3_PCIE_TLDLPL_PORT +
  9955					   TG3_PCIE_PL_LO_PHYCTL5);
  9956				tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
  9957				     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
  9958	
  9959				tw32(GRC_MODE, grc_mode);
  9960			}
  9961	
  9962			if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
  9963				u32 grc_mode;
  9964	
  9965				/* Fix transmit hangs */
  9966				val = tr32(TG3_CPMU_PADRNG_CTL);
  9967				val |= TG3_CPMU_PADRNG_CTL_RDIV2;
  9968				tw32(TG3_CPMU_PADRNG_CTL, val);
  9969	
  9970				grc_mode = tr32(GRC_MODE);
  9971	
  9972				/* Access the lower 1K of DL PCIE block registers. */
  9973				val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
  9974				tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
  9975	
  9976				val = tr32(TG3_PCIE_TLDLPL_PORT +
  9977					   TG3_PCIE_DL_LO_FTSMAX);
  9978				val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
  9979				tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
  9980				     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
  9981	
  9982				tw32(GRC_MODE, grc_mode);
  9983			}
  9984	
  9985			val = tr32(TG3_CPMU_LSPD_10MB_CLK);
  9986			val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
  9987			val |= CPMU_LSPD_10MB_MACCLK_6_25;
  9988			tw32(TG3_CPMU_LSPD_10MB_CLK, val);
  9989		}
  9990	
  9991		/* This works around an issue with Athlon chipsets on
  9992		 * B3 tigon3 silicon.  This bit has no effect on any
  9993		 * other revision.  But do not set this on PCI Express
  9994		 * chips and don't even touch the clocks if the CPMU is present.
  9995		 */
  9996		if (!tg3_flag(tp, CPMU_PRESENT)) {
  9997			if (!tg3_flag(tp, PCI_EXPRESS))
  9998				tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
  9999			tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
 10000		}
 10001	
 10002		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
 10003		    tg3_flag(tp, PCIX_MODE)) {
 10004			val = tr32(TG3PCI_PCISTATE);
 10005			val |= PCISTATE_RETRY_SAME_DMA;
 10006			tw32(TG3PCI_PCISTATE, val);
 10007		}
 10008	
 10009		if (tg3_flag(tp, ENABLE_APE)) {
 10010			/* Allow reads and writes to the
 10011			 * APE register and memory space.
 10012			 */
 10013			val = tr32(TG3PCI_PCISTATE);
 10014			val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
 10015			       PCISTATE_ALLOW_APE_SHMEM_WR |
 10016			       PCISTATE_ALLOW_APE_PSPACE_WR;
 10017			tw32(TG3PCI_PCISTATE, val);
 10018		}
 10019	
 10020		if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
 10021			/* Enable some hw fixes.  */
 10022			val = tr32(TG3PCI_MSI_DATA);
 10023			val |= (1 << 26) | (1 << 28) | (1 << 29);
 10024			tw32(TG3PCI_MSI_DATA, val);
 10025		}
 10026	
 10027		/* Descriptor ring init may make accesses to the
 10028		 * NIC SRAM area to setup the TX descriptors, so we
 10029		 * can only do this after the hardware has been
 10030		 * successfully reset.
 10031		 */
 10032		err = tg3_init_rings(tp);
 10033		if (err)
 10034			return err;
 10035	
 10036		if (tg3_flag(tp, 57765_PLUS)) {
 10037			val = tr32(TG3PCI_DMA_RW_CTRL) &
 10038			      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
 10039			if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
 10040				val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
 10041			if (!tg3_flag(tp, 57765_CLASS) &&
 10042			    tg3_asic_rev(tp) != ASIC_REV_5717 &&
 10043			    tg3_asic_rev(tp) != ASIC_REV_5762)
 10044				val |= DMA_RWCTRL_TAGGED_STAT_WA;
 10045			tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
 10046		} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
 10047			   tg3_asic_rev(tp) != ASIC_REV_5761) {
 10048			/* This value is determined during the probe time DMA
 10049			 * engine test, tg3_test_dma.
 10050			 */
 10051			tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 10052		}
 10053	
 10054		tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
 10055				  GRC_MODE_4X_NIC_SEND_RINGS |
 10056				  GRC_MODE_NO_TX_PHDR_CSUM |
 10057				  GRC_MODE_NO_RX_PHDR_CSUM);
 10058		tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
 10059	
 10060		/* Pseudo-header checksum is done by hardware logic and not
 10061		 * the offload processers, so make the chip do the pseudo-
 10062		 * header checksums on receive.  For transmit it is more
 10063		 * convenient to do the pseudo-header checksum in software
 10064		 * as Linux does that on transmit for us in all cases.
 10065		 */
 10066		tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
 10067	
 10068		val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
 10069		if (tp->rxptpctl)
 10070			tw32(TG3_RX_PTP_CTL,
 10071			     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
 10072	
 10073		if (tg3_flag(tp, PTP_CAPABLE))
 10074			val |= GRC_MODE_TIME_SYNC_ENABLE;
 10075	
 10076		tw32(GRC_MODE, tp->grc_mode | val);
 10077	
 10078		/* On one of the AMD platform, MRRS is restricted to 4000 because of
 10079		 * south bridge limitation. As a workaround, Driver is setting MRRS
 10080		 * to 2048 instead of default 4096.
 10081		 */
 10082		if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
 10083		    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
 10084			val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
 10085			tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
 10086		}
 10087	
 10088		/* Setup the timer prescalar register.  Clock is always 66Mhz. */
 10089		val = tr32(GRC_MISC_CFG);
 10090		val &= ~0xff;
 10091		val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
 10092		tw32(GRC_MISC_CFG, val);
 10093	
 10094		/* Initialize MBUF/DESC pool. */
 10095		if (tg3_flag(tp, 5750_PLUS)) {
 10096			/* Do nothing.  */
 10097		} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
 10098			tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
 10099			if (tg3_asic_rev(tp) == ASIC_REV_5704)
 10100				tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
 10101			else
 10102				tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
 10103			tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
 10104			tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
 10105		} else if (tg3_flag(tp, TSO_CAPABLE)) {
 10106			int fw_len;
 10107	
 10108			fw_len = tp->fw_len;
 10109			fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
 10110			tw32(BUFMGR_MB_POOL_ADDR,
 10111			     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
 10112			tw32(BUFMGR_MB_POOL_SIZE,
 10113			     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
 10114		}
 10115	
 10116		if (tp->dev->mtu <= ETH_DATA_LEN) {
 10117			tw32(BUFMGR_MB_RDMA_LOW_WATER,
 10118			     tp->bufmgr_config.mbuf_read_dma_low_water);
 10119			tw32(BUFMGR_MB_MACRX_LOW_WATER,
 10120			     tp->bufmgr_config.mbuf_mac_rx_low_water);
 10121			tw32(BUFMGR_MB_HIGH_WATER,
 10122			     tp->bufmgr_config.mbuf_high_water);
 10123		} else {
 10124			tw32(BUFMGR_MB_RDMA_LOW_WATER,
 10125			     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
 10126			tw32(BUFMGR_MB_MACRX_LOW_WATER,
 10127			     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
 10128			tw32(BUFMGR_MB_HIGH_WATER,
 10129			     tp->bufmgr_config.mbuf_high_water_jumbo);
 10130		}
 10131		tw32(BUFMGR_DMA_LOW_WATER,
 10132		     tp->bufmgr_config.dma_low_water);
 10133		tw32(BUFMGR_DMA_HIGH_WATER,
 10134		     tp->bufmgr_config.dma_high_water);
 10135	
 10136		val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
 10137		if (tg3_asic_rev(tp) == ASIC_REV_5719)
 10138			val |= BUFMGR_MODE_NO_TX_UNDERRUN;
 10139		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 10140		    tg3_asic_rev(tp) == ASIC_REV_5762 ||
 10141		    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
 10142		    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
 10143			val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
 10144		tw32(BUFMGR_MODE, val);
 10145		for (i = 0; i < 2000; i++) {
 10146			if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
 10147				break;
 10148			udelay(10);
 10149		}
 10150		if (i >= 2000) {
 10151			netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
 10152			return -ENODEV;
 10153		}
 10154	
 10155		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
 10156			tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
 10157	
 10158		tg3_setup_rxbd_thresholds(tp);
 10159	
 10160		/* Initialize TG3_BDINFO's at:
 10161		 *  RCVDBDI_STD_BD:	standard eth size rx ring
 10162		 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
 10163		 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
 10164		 *
 10165		 * like so:
 10166		 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
 10167		 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
 10168		 *                              ring attribute flags
 10169		 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
 10170		 *
 10171		 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
 10172		 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
 10173		 *
 10174		 * The size of each ring is fixed in the firmware, but the location is
 10175		 * configurable.
 10176		 */
 10177		tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
 10178		     ((u64) tpr->rx_std_mapping >> 32));
 10179		tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
 10180		     ((u64) tpr->rx_std_mapping & 0xffffffff));
 10181		if (!tg3_flag(tp, 5717_PLUS))
 10182			tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
 10183			     NIC_SRAM_RX_BUFFER_DESC);
 10184	
 10185		/* Disable the mini ring */
 10186		if (!tg3_flag(tp, 5705_PLUS))
 10187			tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
 10188			     BDINFO_FLAGS_DISABLED);
 10189	
 10190		/* Program the jumbo buffer descriptor ring control
 10191		 * blocks on those devices that have them.
 10192		 */
 10193		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
 10194		    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
 10195	
 10196			if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
 10197				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
 10198				     ((u64) tpr->rx_jmb_mapping >> 32));
 10199				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
 10200				     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
 10201				val = TG3_RX_JMB_RING_SIZE(tp) <<
 10202				      BDINFO_FLAGS_MAXLEN_SHIFT;
 10203				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 10204				     val | BDINFO_FLAGS_USE_EXT_RECV);
 10205				if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
 10206				    tg3_flag(tp, 57765_CLASS) ||
 10207				    tg3_asic_rev(tp) == ASIC_REV_5762)
 10208					tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
 10209					     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
 10210			} else {
 10211				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 10212				     BDINFO_FLAGS_DISABLED);
 10213			}
 10214	
 10215			if (tg3_flag(tp, 57765_PLUS)) {
 10216				val = TG3_RX_STD_RING_SIZE(tp);
 10217				val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
 10218				val |= (TG3_RX_STD_DMA_SZ << 2);
 10219			} else
 10220				val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
 10221		} else
 10222			val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
 10223	
 10224		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
 10225	
 10226		tpr->rx_std_prod_idx = tp->rx_pending;
 10227		tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 10228	
 10229		tpr->rx_jmb_prod_idx =
 10230			tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
 10231		tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 10232	
 10233		tg3_rings_reset(tp);
 10234	
 10235		/* Initialize MAC address and backoff seed. */
 10236		__tg3_set_mac_addr(tp, false);
 10237	
 10238		/* MTU + ethernet header + FCS + optional VLAN tag */
 10239		tw32(MAC_RX_MTU_SIZE,
 10240		     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
 10241	
 10242		/* The slot time is changed by tg3_setup_phy if we
 10243		 * run at gigabit with half duplex.
 10244		 */
 10245		val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 10246		      (6 << TX_LENGTHS_IPG_SHIFT) |
 10247		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
 10248	
 10249		if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
 10250		    tg3_asic_rev(tp) == ASIC_REV_5762)
 10251			val |= tr32(MAC_TX_LENGTHS) &
 10252			       (TX_LENGTHS_JMB_FRM_LEN_MSK |
 10253				TX_LENGTHS_CNT_DWN_VAL_MSK);
 10254	
 10255		tw32(MAC_TX_LENGTHS, val);
 10256	
 10257		/* Receive rules. */
 10258		tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
 10259		tw32(RCVLPC_CONFIG, 0x0181);
 10260	
 10261		/* Calculate RDMAC_MODE setting early, we need it to determine
 10262		 * the RCVLPC_STATE_ENABLE mask.
 10263		 */
 10264		rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
 10265			      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
 10266			      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
 10267			      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
 10268			      RDMAC_MODE_LNGREAD_ENAB);
 10269	
 10270		if (tg3_asic_rev(tp) == ASIC_REV_5717)
 10271			rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
 10272	
 10273		if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
 10274		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
 10275		    tg3_asic_rev(tp) == ASIC_REV_57780)
 10276			rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
 10277				      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
 10278				      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
 10279	
 10280		if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
 10281		    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
 10282			if (tg3_flag(tp, TSO_CAPABLE) &&
 10283			    tg3_asic_rev(tp) == ASIC_REV_5705) {
 10284				rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
 10285			} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
 10286				   !tg3_flag(tp, IS_5788)) {
 10287				rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 10288			}
 10289		}
 10290	
 10291		if (tg3_flag(tp, PCI_EXPRESS))
 10292			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 10293	
 10294		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
 10295			tp->dma_limit = 0;
 10296			if (tp->dev->mtu <= ETH_DATA_LEN) {
 10297				rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
 10298				tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
 10299			}
 10300		}
 10301	
 10302		if (tg3_flag(tp, HW_TSO_1) ||
 10303		    tg3_flag(tp, HW_TSO_2) ||
 10304		    tg3_flag(tp, HW_TSO_3))
 10305			rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
 10306	
 10307		if (tg3_flag(tp, 57765_PLUS) ||
 10308		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
 10309		    tg3_asic_rev(tp) == ASIC_REV_57780)
 10310			rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
 10311	
 10312		if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
 10313		    tg3_asic_rev(tp) == ASIC_REV_5762)
 10314			rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
 10315	
 10316		if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
 10317		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
 10318		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
 10319		    tg3_asic_rev(tp) == ASIC_REV_57780 ||
 10320		    tg3_flag(tp, 57765_PLUS)) {
 10321			u32 tgtreg;
 10322	
 10323			if (tg3_asic_rev(tp) == ASIC_REV_5762)
 10324				tgtreg = TG3_RDMA_RSRVCTRL_REG2;
 10325			else
 10326				tgtreg = TG3_RDMA_RSRVCTRL_REG;
 10327	
 10328			val = tr32(tgtreg);
 10329			if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
 10330			    tg3_asic_rev(tp) == ASIC_REV_5762) {
 10331				val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
 10332					 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
 10333					 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
 10334				val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
 10335				       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
 10336				       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
 10337			}
 10338			tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
 10339		}
 10340	
 10341		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
 10342		    tg3_asic_rev(tp) == ASIC_REV_5720 ||
 10343		    tg3_asic_rev(tp) == ASIC_REV_5762) {
 10344			u32 tgtreg;
 10345	
 10346			if (tg3_asic_rev(tp) == ASIC_REV_5762)
 10347				tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
 10348			else
 10349				tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
 10350	
 10351			val = tr32(tgtreg);
 10352			tw32(tgtreg, val |
 10353			     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
 10354			     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
 10355		}
 10356	
 10357		/* Receive/send statistics. */
 10358		if (tg3_flag(tp, 5750_PLUS)) {
 10359			val = tr32(RCVLPC_STATS_ENABLE);
 10360			val &= ~RCVLPC_STATSENAB_DACK_FIX;
 10361			tw32(RCVLPC_STATS_ENABLE, val);
 10362		} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
 10363			   tg3_flag(tp, TSO_CAPABLE)) {
 10364			val = tr32(RCVLPC_STATS_ENABLE);
 10365			val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
 10366			tw32(RCVLPC_STATS_ENABLE, val);
 10367		} else {
 10368			tw32(RCVLPC_STATS_ENABLE, 0xffffff);
 10369		}
 10370		tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
 10371		tw32(SNDDATAI_STATSENAB, 0xffffff);
 10372		tw32(SNDDATAI_STATSCTRL,
 10373		     (SNDDATAI_SCTRL_ENABLE |
 10374		      SNDDATAI_SCTRL_FASTUPD));
 10375	
 10376		/* Setup host coalescing engine. */
 10377		tw32(HOSTCC_MODE, 0);
 10378		for (i = 0; i < 2000; i++) {
 10379			if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
 10380				break;
 10381			udelay(10);
 10382		}
 10383	
 10384		__tg3_set_coalesce(tp, &tp->coal);
 10385	
 10386		if (!tg3_flag(tp, 5705_PLUS)) {
 10387			/* Status/statistics block address.  See tg3_timer,
 10388			 * the tg3_periodic_fetch_stats call there, and
 10389			 * tg3_get_stats to see how this works for 5705/5750 chips.
 10390			 */
 10391			tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 10392			     ((u64) tp->stats_mapping >> 32));
 10393			tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
 10394			     ((u64) tp->stats_mapping & 0xffffffff));
 10395			tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
 10396	
 10397			tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
 10398	
 10399			/* Clear statistics and status block memory areas */
 10400			for (i = NIC_SRAM_STATS_BLK;
 10401			     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
 10402			     i += sizeof(u32)) {
 10403				tg3_write_mem(tp, i, 0);
 10404				udelay(40);
 10405			}
 10406		}
 10407	
 10408		tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
 10409	
 10410		tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
 10411		tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
 10412		if (!tg3_flag(tp, 5705_PLUS))
 10413			tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
 10414	
 10415		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
 10416			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 10417			/* reset to prevent losing 1st rx packet intermittently */
 10418			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 10419			udelay(10);
 10420		}
 10421	
 10422		tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
 10423				MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
 10424				MAC_MODE_FHDE_ENABLE;
 10425		if (tg3_flag(tp, ENABLE_APE))
 10426			tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
 10427		if (!tg3_flag(tp, 5705_PLUS) &&
 10428		    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 10429		    tg3_asic_rev(tp) != ASIC_REV_5700)
 10430			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 10431		tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
 10432		udelay(40);
 10433	
 10434		/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
 10435		 * If TG3_FLAG_IS_NIC is zero, we should read the
 10436		 * register to preserve the GPIO settings for LOMs. The GPIOs,
 10437		 * whether used as inputs or outputs, are set by boot code after
 10438		 * reset.
 10439		 */
 10440		if (!tg3_flag(tp, IS_NIC)) {
 10441			u32 gpio_mask;
 10442	
 10443			gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
 10444				    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
 10445				    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
 10446	
 10447			if (tg3_asic_rev(tp) == ASIC_REV_5752)
 10448				gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
 10449					     GRC_LCLCTRL_GPIO_OUTPUT3;
 10450	
 10451			if (tg3_asic_rev(tp) == ASIC_REV_5755)
 10452				gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
 10453	
 10454			tp->grc_local_ctrl &= ~gpio_mask;
 10455			tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
 10456	
 10457			/* GPIO1 must be driven high for eeprom write protect */
 10458			if (tg3_flag(tp, EEPROM_WRITE_PROT))
 10459				tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
 10460						       GRC_LCLCTRL_GPIO_OUTPUT1);
 10461		}
 10462		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 10463		udelay(100);
 10464	
 10465		if (tg3_flag(tp, USING_MSIX)) {
 10466			val = tr32(MSGINT_MODE);
 10467			val |= MSGINT_MODE_ENABLE;
 10468			if (tp->irq_cnt > 1)
 10469				val |= MSGINT_MODE_MULTIVEC_EN;
 10470			if (!tg3_flag(tp, 1SHOT_MSI))
 10471				val |= MSGINT_MODE_ONE_SHOT_DISABLE;
 10472			tw32(MSGINT_MODE, val);
 10473		}
 10474	
 10475		if (!tg3_flag(tp, 5705_PLUS)) {
 10476			tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
 10477			udelay(40);
 10478		}
 10479	
 10480		val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
 10481		       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
 10482		       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
 10483		       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
 10484		       WDMAC_MODE_LNGREAD_ENAB);
 10485	
 10486		if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
 10487		    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
 10488			if (tg3_flag(tp, TSO_CAPABLE) &&
 10489			    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
 10490			     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
 10491				/* nothing */
 10492			} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
 10493				   !tg3_flag(tp, IS_5788)) {
 10494				val |= WDMAC_MODE_RX_ACCEL;
 10495			}
 10496		}
 10497	
 10498		/* Enable host coalescing bug fix */
 10499		if (tg3_flag(tp, 5755_PLUS))
 10500			val |= WDMAC_MODE_STATUS_TAG_FIX;
 10501	
 10502		if (tg3_asic_rev(tp) == ASIC_REV_5785)
 10503			val |= WDMAC_MODE_BURST_ALL_DATA;
 10504	
 10505		tw32_f(WDMAC_MODE, val);
 10506		udelay(40);
 10507	
 10508		if (tg3_flag(tp, PCIX_MODE)) {
 10509			u16 pcix_cmd;
 10510	
 10511			pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 10512					     &pcix_cmd);
 10513			if (tg3_asic_rev(tp) == ASIC_REV_5703) {
 10514				pcix_cmd &= ~PCI_X_CMD_MAX_READ;
 10515				pcix_cmd |= PCI_X_CMD_READ_2K;
 10516			} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
 10517				pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
 10518				pcix_cmd |= PCI_X_CMD_READ_2K;
 10519			}
 10520			pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 10521					      pcix_cmd);
 10522		}
 10523	
 10524		tw32_f(RDMAC_MODE, rdmac_mode);
 10525		udelay(40);
 10526	
 10527		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
 10528		    tg3_asic_rev(tp) == ASIC_REV_5720) {
 10529			for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
 10530				if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
 10531					break;
 10532			}
 10533			if (i < TG3_NUM_RDMA_CHANNELS) {
 10534				val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
 10535				val |= tg3_lso_rd_dma_workaround_bit(tp);
 10536				tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
 10537				tg3_flag_set(tp, 5719_5720_RDMA_BUG);
 10538			}
 10539		}
 10540	
 10541		tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
 10542		if (!tg3_flag(tp, 5705_PLUS))
 10543			tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
 10544	
 10545		if (tg3_asic_rev(tp) == ASIC_REV_5761)
 10546			tw32(SNDDATAC_MODE,
 10547			     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
 10548		else
 10549			tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
 10550	
 10551		tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
 10552		tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
 10553		val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
 10554		if (tg3_flag(tp, LRG_PROD_RING_CAP))
 10555			val |= RCVDBDI_MODE_LRG_RING_SZ;
 10556		tw32(RCVDBDI_MODE, val);
 10557		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
 10558		if (tg3_flag(tp, HW_TSO_1) ||
 10559		    tg3_flag(tp, HW_TSO_2) ||
 10560		    tg3_flag(tp, HW_TSO_3))
 10561			tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
 10562		val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
 10563		if (tg3_flag(tp, ENABLE_TSS))
 10564			val |= SNDBDI_MODE_MULTI_TXQ_EN;
 10565		tw32(SNDBDI_MODE, val);
 10566		tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
 10567	
 10568		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
 10569			err = tg3_load_5701_a0_firmware_fix(tp);
 10570			if (err)
 10571				return err;
 10572		}
 10573	
 10574		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
 10575			/* Ignore any errors for the firmware download. If download
 10576			 * fails, the device will operate with EEE disabled
 10577			 */
 10578			tg3_load_57766_firmware(tp);
 10579		}
 10580	
 10581		if (tg3_flag(tp, TSO_CAPABLE)) {
 10582			err = tg3_load_tso_firmware(tp);
 10583			if (err)
 10584				return err;
 10585		}
 10586	
 10587		tp->tx_mode = TX_MODE_ENABLE;
 10588	
 10589		if (tg3_flag(tp, 5755_PLUS) ||
 10590		    tg3_asic_rev(tp) == ASIC_REV_5906)
 10591			tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
 10592	
 10593		if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
 10594		    tg3_asic_rev(tp) == ASIC_REV_5762) {
 10595			val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
 10596			tp->tx_mode &= ~val;
 10597			tp->tx_mode |= tr32(MAC_TX_MODE) & val;
 10598		}
 10599	
 10600		tw32_f(MAC_TX_MODE, tp->tx_mode);
 10601		udelay(100);
 10602	
 10603		if (tg3_flag(tp, ENABLE_RSS)) {
 10604			u32 rss_key[10];
 10605	
 10606			tg3_rss_write_indir_tbl(tp);
 10607	
 10608			netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
 10609	
 10610			for (i = 0; i < 10 ; i++)
 10611				tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
 10612		}
 10613	
 10614		tp->rx_mode = RX_MODE_ENABLE;
 10615		if (tg3_flag(tp, 5755_PLUS))
 10616			tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
 10617	
 10618		if (tg3_asic_rev(tp) == ASIC_REV_5762)
 10619			tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
 10620	
 10621		if (tg3_flag(tp, ENABLE_RSS))
 10622			tp->rx_mode |= RX_MODE_RSS_ENABLE |
 10623				       RX_MODE_RSS_ITBL_HASH_BITS_7 |
 10624				       RX_MODE_RSS_IPV6_HASH_EN |
 10625				       RX_MODE_RSS_TCP_IPV6_HASH_EN |
 10626				       RX_MODE_RSS_IPV4_HASH_EN |
 10627				       RX_MODE_RSS_TCP_IPV4_HASH_EN;
 10628	
 10629		tw32_f(MAC_RX_MODE, tp->rx_mode);
 10630		udelay(10);
 10631	
 10632		tw32(MAC_LED_CTRL, tp->led_ctrl);
 10633	
 10634		tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
 10635		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 10636			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 10637			udelay(10);
 10638		}
 10639		tw32_f(MAC_RX_MODE, tp->rx_mode);
 10640		udelay(10);
 10641	
 10642		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 10643			if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
 10644			    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
 10645				/* Set drive transmission level to 1.2V  */
 10646				/* only if the signal pre-emphasis bit is not set  */
 10647				val = tr32(MAC_SERDES_CFG);
 10648				val &= 0xfffff000;
 10649				val |= 0x880;
 10650				tw32(MAC_SERDES_CFG, val);
 10651			}
 10652			if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
 10653				tw32(MAC_SERDES_CFG, 0x616000);
 10654		}
 10655	
 10656		/* Prevent chip from dropping frames when flow control
 10657		 * is enabled.
 10658		 */
 10659		if (tg3_flag(tp, 57765_CLASS))
 10660			val = 1;
 10661		else
 10662			val = 2;
 10663		tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
 10664	
 10665		if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
 10666		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 10667			/* Use hardware link auto-negotiation */
 10668			tg3_flag_set(tp, HW_AUTONEG);
 10669		}
 10670	
 10671		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 10672		    tg3_asic_rev(tp) == ASIC_REV_5714) {
 10673			u32 tmp;
 10674	
 10675			tmp = tr32(SERDES_RX_CTRL);
 10676			tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
 10677			tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
 10678			tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
 10679			tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 10680		}
 10681	
 10682		if (!tg3_flag(tp, USE_PHYLIB)) {
 10683			if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 10684				tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
 10685	
 10686			err = tg3_setup_phy(tp, false);
 10687			if (err)
 10688				return err;
 10689	
 10690			if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 10691			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 10692				u32 tmp;
 10693	
 10694				/* Clear CRC stats. */
 10695				if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
 10696					tg3_writephy(tp, MII_TG3_TEST1,
 10697						     tmp | MII_TG3_TEST1_CRC_EN);
 10698					tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
 10699				}
 10700			}
 10701		}
 10702	
 10703		__tg3_set_rx_mode(tp->dev);
 10704	
 10705		/* Initialize receive rules. */
 10706		tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
 10707		tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
 10708		tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
 10709		tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
 10710	
 10711		if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
 10712			limit = 8;
 10713		else
 10714			limit = 16;
 10715		if (tg3_flag(tp, ENABLE_ASF))
 10716			limit -= 4;
 10717		switch (limit) {
 10718		case 16:
 10719			tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
 10720			fallthrough;
 10721		case 15:
 10722			tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
 10723			fallthrough;
 10724		case 14:
 10725			tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
 10726			fallthrough;
 10727		case 13:
 10728			tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
 10729			fallthrough;
 10730		case 12:
 10731			tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
 10732			fallthrough;
 10733		case 11:
 10734			tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
 10735			fallthrough;
 10736		case 10:
 10737			tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
 10738			fallthrough;
 10739		case 9:
 10740			tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
 10741			fallthrough;
 10742		case 8:
 10743			tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
 10744			fallthrough;
 10745		case 7:
 10746			tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
 10747			fallthrough;
 10748		case 6:
 10749			tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
 10750			fallthrough;
 10751		case 5:
 10752			tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
 10753			fallthrough;
 10754		case 4:
 10755			/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
 10756		case 3:
 10757			/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
 10758		case 2:
 10759		case 1:
 10760	
 10761		default:
 10762			break;
 10763		}
 10764	
 10765		if (tg3_flag(tp, ENABLE_APE))
 10766			/* Write our heartbeat update interval to APE. */
 10767			tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
 10768					APE_HOST_HEARTBEAT_INT_5SEC);
 10769	
 10770		tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 10771	
 10772		return 0;
 10773	}
 10774	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

* Re: [trivial-mods:20200310_fallthrough_2 158/491] drivers/net/ethernet/broadcom/tg3.c:10720 tg3_reset_hw() warn: inconsistent indenting
  2020-03-11 21:58 [trivial-mods:20200310_fallthrough_2 158/491] drivers/net/ethernet/broadcom/tg3.c:10720 tg3_reset_hw() warn: inconsistent indenting kbuild test robot
@ 2020-03-11 23:58 ` Joe Perches
  0 siblings, 0 replies; 2+ messages in thread
From: Joe Perches @ 2020-03-11 23:58 UTC (permalink / raw)
  To: kbuild-all

On Thu, 2020-03-12 at 05:58 +0800, kbuild test robot wrote:
> tree:   https://repo.or.cz/linux-2.6/trivial-mods.git 20200310_fallthrough_2
> head:   71c55e51125d74e9bd8cce382679ee762d9a86fd
> commit: f29d60f7f1429e115b81d2e47e605a727861e10f [158/491] BROADCOM TG3 GIGABIT ETHERNET DRIVER: Use fallthrough;
> 
> If you fix the issue, kindly add the following tag:
> Reported-by: kbuild test robot <lkp@intel.com>
> 
> New smatch warnings:
> drivers/net/ethernet/broadcom/tg3.c:10720 tg3_reset_hw() warn: inconsistent indenting

Not a real issue IMO.
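
The line smatch points at is the first fallthrough; in the receive-rule
switch, so the heuristic is most likely reacting to the mechanically
converted statements rather than to anything functionally wrong. One
plausible shape that provokes it, sketched here for illustration and not
necessarily the exact indentation in f29d60f7f142:

	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
			fallthrough;	/* deeper than the tw32() calls: smatch flags this */
	case 15: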

> vim +10720 drivers/net/ethernet/broadcom/tg3.c
[]
>  10717		switch (limit) {
>  10718		case 16:
>  10719			tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
>  10720			fallthrough;
>  10721		case 15:
>  10722			tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
>  10723			fallthrough;
>  10724		case 14:
>  10725			tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
>  10726			fallthrough;
>  10727		case 13:
>  10728			tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
>  10729			fallthrough;
>  10730		case 12:
>  10731			tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
>  10732			fallthrough;
>  10733		case 11:
>  10734			tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
>  10735			fallthrough;
>  10736		case 10:
>  10737			tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
>  10738			fallthrough;
>  10739		case 9:
>  10740			tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
>  10741			fallthrough;
>  10742		case 8:
>  10743			tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
>  10744			fallthrough;
>  10745		case 7:
>  10746			tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
>  10747			fallthrough;
>  10748		case 6:
>  10749			tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
>  10750			fallthrough;
>  10751		case 5:
>  10752			tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
>  10753			fallthrough;
>  10754		case 4:
>  10755			/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
>  10756		case 3:
>  10757			/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
>  10758		case 2:
>  10759		case 1:

