Merge branch 'master' of github.com:davem330/net
author    David S. Miller <davem@davemloft.net>
          Thu, 22 Sep 2011 07:23:13 +0000 (03:23 -0400)
committer David S. Miller <davem@davemloft.net>
          Thu, 22 Sep 2011 07:23:13 +0000 (03:23 -0400)
Conflicts:
MAINTAINERS
drivers/net/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/wireless/iwlwifi/iwl-pci.c
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/wl12xx/main.c

73 files changed:
Documentation/networking/ip-sysctl.txt
MAINTAINERS
drivers/bcma/main.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/aeroflex/greth.h
drivers/net/ethernet/amd/am79c961a.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/io.h
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/workarounds.h
drivers/net/ethernet/sun/cassini.c
drivers/net/ppp/ppp_generic.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlegacy/iwl-3945-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/wl12xx/acx.c
drivers/net/wireless/wl12xx/sdio.c
drivers/scsi/fcoe/fcoe.c
include/linux/skbuff.h
include/net/cfg80211.h
include/net/tcp.h
net/bridge/br_if.c
net/can/af_can.c
net/core/dev.c
net/core/fib_rules.c
net/core/neighbour.c
net/core/netpoll.c
net/core/skbuff.c
net/ipv4/igmp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/ipv6/raw.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/main.c
net/mac80211/sta_info.c
net/sctp/sm_statefuns.c
net/socket.c
net/wireless/core.c
net/wireless/reg.c

diff --cc MAINTAINERS
@@@ -1582,10 -1573,9 +1581,9 @@@ F:     drivers/scsi/bfa
  
  BROCADE BNA 10 GIGABIT ETHERNET DRIVER
  M:    Rasesh Mody <rmody@brocade.com>
- M:    Debashis Dutt <ddutt@brocade.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/bna/
 +F:    drivers/net/ethernet/brocade/bna/
  
  BSG (block layer generic sg v4 driver)
  M:    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
diff --cc drivers/net/ethernet/aeroflex/greth.c
index bc3bd34,0000000..6715bf5
mode 100644,000000..100644
--- /dev/null
@@@ -1,1633 -1,0 +1,1641 @@@
-       status |= GRETH_TXBD_CSALL;
 +/*
 + * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 + *
 + * 2005-2010 (c) Aeroflex Gaisler AB
 + *
 + * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 + * available in the GRLIB VHDL IP core library.
 + *
 + * Full documentation of both cores can be found here:
 + * http://www.gaisler.com/products/grlib/grip.pdf
 + *
 + * The Gigabit version supports scatter/gather DMA, any alignment of
 + * buffers and checksum offloading.
 + *
 + * This program is free software; you can redistribute it and/or modify it
 + * under the terms of the GNU General Public License as published by the
 + * Free Software Foundation; either version 2 of the License, or (at your
 + * option) any later version.
 + *
 + * Contributors: Kristoffer Glembo
 + *               Daniel Hellstrom
 + *               Marko Isomaki
 + */
 +
 +#include <linux/dma-mapping.h>
 +#include <linux/module.h>
 +#include <linux/uaccess.h>
 +#include <linux/init.h>
 +#include <linux/interrupt.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/ethtool.h>
 +#include <linux/skbuff.h>
 +#include <linux/io.h>
 +#include <linux/crc32.h>
 +#include <linux/mii.h>
 +#include <linux/of_device.h>
 +#include <linux/of_platform.h>
 +#include <linux/slab.h>
 +#include <asm/cacheflush.h>
 +#include <asm/byteorder.h>
 +
 +#ifdef CONFIG_SPARC
 +#include <asm/idprom.h>
 +#endif
 +
 +#include "greth.h"
 +
 +#define GRETH_DEF_MSG_ENABLE    \
 +      (NETIF_MSG_DRV          | \
 +       NETIF_MSG_PROBE        | \
 +       NETIF_MSG_LINK         | \
 +       NETIF_MSG_IFDOWN       | \
 +       NETIF_MSG_IFUP         | \
 +       NETIF_MSG_RX_ERR       | \
 +       NETIF_MSG_TX_ERR)
 +
 +static int greth_debug = -1;  /* -1 == use GRETH_DEF_MSG_ENABLE as value */
 +module_param(greth_debug, int, 0);
 +MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
 +
 +/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
 +static int macaddr[6];
 +module_param_array(macaddr, int, NULL, 0);
 +MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
 +
 +static int greth_edcl = 1;
 +module_param(greth_edcl, int, 0);
 +MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
 +
 +static int greth_open(struct net_device *dev);
 +static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
 +         struct net_device *dev);
 +static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
 +         struct net_device *dev);
 +static int greth_rx(struct net_device *dev, int limit);
 +static int greth_rx_gbit(struct net_device *dev, int limit);
 +static void greth_clean_tx(struct net_device *dev);
 +static void greth_clean_tx_gbit(struct net_device *dev);
 +static irqreturn_t greth_interrupt(int irq, void *dev_id);
 +static int greth_close(struct net_device *dev);
 +static int greth_set_mac_add(struct net_device *dev, void *p);
 +static void greth_set_multicast_list(struct net_device *dev);
 +
 +#define GRETH_REGLOAD(a)          (be32_to_cpu(__raw_readl(&(a))))
 +#define GRETH_REGSAVE(a, v)         (__raw_writel(cpu_to_be32(v), &(a)))
 +#define GRETH_REGORIN(a, v)         (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
 +#define GRETH_REGANDIN(a, v)        (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
 +
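 +/* Descriptor ring index helpers: the ring sizes are powers of two, so
 + * advancing an index is a masked increment.
 + */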
 +#define NEXT_TX(N)      (((N) + 1) & GRETH_TXBD_NUM_MASK)
 +#define SKIP_TX(N, C)   (((N) + C) & GRETH_TXBD_NUM_MASK)
 +#define NEXT_RX(N)      (((N) + 1) & GRETH_RXBD_NUM_MASK)
 +
 +static void greth_print_rx_packet(void *addr, int len)
 +{
 +      print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
 +                      addr, len, true);
 +}
 +
 +static void greth_print_tx_packet(struct sk_buff *skb)
 +{
 +      int i;
 +      int length;
 +
 +      if (skb_shinfo(skb)->nr_frags == 0)
 +              length = skb->len;
 +      else
 +              length = skb_headlen(skb);
 +
 +      print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
 +                      skb->data, length, true);
 +
 +      for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 +
 +              print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
 +                             skb_frag_address(&skb_shinfo(skb)->frags[i]),
 +                             skb_shinfo(skb)->frags[i].size, true);
 +      }
 +}
 +
 +static inline void greth_enable_tx(struct greth_private *greth)
 +{
 +      wmb();
 +      GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
 +}
 +
 +static inline void greth_disable_tx(struct greth_private *greth)
 +{
 +      GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
 +}
 +
 +static inline void greth_enable_rx(struct greth_private *greth)
 +{
 +      wmb();
 +      GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
 +}
 +
 +static inline void greth_disable_rx(struct greth_private *greth)
 +{
 +      GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
 +}
 +
 +static inline void greth_enable_irqs(struct greth_private *greth)
 +{
 +      GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
 +}
 +
 +static inline void greth_disable_irqs(struct greth_private *greth)
 +{
 +      GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
 +}
 +
 +static inline void greth_write_bd(u32 *bd, u32 val)
 +{
 +      __raw_writel(cpu_to_be32(val), bd);
 +}
 +
 +static inline u32 greth_read_bd(u32 *bd)
 +{
 +      return be32_to_cpu(__raw_readl(bd));
 +}
 +
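 +/* Undo greth_init_rings(): free every RX/TX buffer and drop its DMA
 + * mapping. The gigabit and 10/100 variants differ in how buffers are
 + * attached to the descriptors.
 + */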
 +static void greth_clean_rings(struct greth_private *greth)
 +{
 +      int i;
 +      struct greth_bd *rx_bdp = greth->rx_bd_base;
 +      struct greth_bd *tx_bdp = greth->tx_bd_base;
 +
 +      if (greth->gbit_mac) {
 +
 +              /* Free and unmap RX buffers */
 +              for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
 +                      if (greth->rx_skbuff[i] != NULL) {
 +                              dev_kfree_skb(greth->rx_skbuff[i]);
 +                              dma_unmap_single(greth->dev,
 +                                               greth_read_bd(&rx_bdp->addr),
 +                                               MAX_FRAME_SIZE+NET_IP_ALIGN,
 +                                               DMA_FROM_DEVICE);
 +                      }
 +              }
 +
 +              /* TX buffers */
 +              while (greth->tx_free < GRETH_TXBD_NUM) {
 +
 +                      struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
 +                      int nr_frags = skb_shinfo(skb)->nr_frags;
 +                      tx_bdp = greth->tx_bd_base + greth->tx_last;
 +                      greth->tx_last = NEXT_TX(greth->tx_last);
 +
 +                      dma_unmap_single(greth->dev,
 +                                       greth_read_bd(&tx_bdp->addr),
 +                                       skb_headlen(skb),
 +                                       DMA_TO_DEVICE);
 +
 +                      for (i = 0; i < nr_frags; i++) {
 +                              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +                              tx_bdp = greth->tx_bd_base + greth->tx_last;
 +
 +                              dma_unmap_page(greth->dev,
 +                                             greth_read_bd(&tx_bdp->addr),
 +                                             frag->size,
 +                                             DMA_TO_DEVICE);
 +
 +                              greth->tx_last = NEXT_TX(greth->tx_last);
 +                      }
 +                      greth->tx_free += nr_frags+1;
 +                      dev_kfree_skb(skb);
 +              }
 +
 +
 +      } else { /* 10/100 Mbps MAC */
 +
 +              for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
 +                      kfree(greth->rx_bufs[i]);
 +                      dma_unmap_single(greth->dev,
 +                                       greth_read_bd(&rx_bdp->addr),
 +                                       MAX_FRAME_SIZE,
 +                                       DMA_FROM_DEVICE);
 +              }
 +              for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
 +                      kfree(greth->tx_bufs[i]);
 +                      dma_unmap_single(greth->dev,
 +                                       greth_read_bd(&tx_bdp->addr),
 +                                       MAX_FRAME_SIZE,
 +                                       DMA_TO_DEVICE);
 +              }
 +      }
 +}
 +
 +static int greth_init_rings(struct greth_private *greth)
 +{
 +      struct sk_buff *skb;
 +      struct greth_bd *rx_bd, *tx_bd;
 +      u32 dma_addr;
 +      int i;
 +
 +      rx_bd = greth->rx_bd_base;
 +      tx_bd = greth->tx_bd_base;
 +
 +      /* Initialize descriptor rings and buffers */
 +      if (greth->gbit_mac) {
 +
 +              for (i = 0; i < GRETH_RXBD_NUM; i++) {
 +                      skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
 +                      if (skb == NULL) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Error allocating DMA ring.\n");
 +                              goto cleanup;
 +                      }
 +                      skb_reserve(skb, NET_IP_ALIGN);
 +                      dma_addr = dma_map_single(greth->dev,
 +                                                skb->data,
 +                                                MAX_FRAME_SIZE+NET_IP_ALIGN,
 +                                                DMA_FROM_DEVICE);
 +
 +                      if (dma_mapping_error(greth->dev, dma_addr)) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Could not create initial DMA mapping\n");
 +                              goto cleanup;
 +                      }
 +                      greth->rx_skbuff[i] = skb;
 +                      greth_write_bd(&rx_bd[i].addr, dma_addr);
 +                      greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
 +              }
 +
 +      } else {
 +
 +              /* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
 +              for (i = 0; i < GRETH_RXBD_NUM; i++) {
 +
 +                      greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
 +
 +                      if (greth->rx_bufs[i] == NULL) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Error allocating DMA ring.\n");
 +                              goto cleanup;
 +                      }
 +
 +                      dma_addr = dma_map_single(greth->dev,
 +                                                greth->rx_bufs[i],
 +                                                MAX_FRAME_SIZE,
 +                                                DMA_FROM_DEVICE);
 +
 +                      if (dma_mapping_error(greth->dev, dma_addr)) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Could not create initial DMA mapping\n");
 +                              goto cleanup;
 +                      }
 +                      greth_write_bd(&rx_bd[i].addr, dma_addr);
 +                      greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
 +              }
 +              for (i = 0; i < GRETH_TXBD_NUM; i++) {
 +
 +                      greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
 +
 +                      if (greth->tx_bufs[i] == NULL) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Error allocating DMA ring.\n");
 +                              goto cleanup;
 +                      }
 +
 +                      dma_addr = dma_map_single(greth->dev,
 +                                                greth->tx_bufs[i],
 +                                                MAX_FRAME_SIZE,
 +                                                DMA_TO_DEVICE);
 +
 +                      if (dma_mapping_error(greth->dev, dma_addr)) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Could not create initial DMA mapping\n");
 +                              goto cleanup;
 +                      }
 +                      greth_write_bd(&tx_bd[i].addr, dma_addr);
 +                      greth_write_bd(&tx_bd[i].stat, 0);
 +              }
 +      }
 +      greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
 +                     greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);
 +
 +      /* Initialize pointers. */
 +      greth->rx_cur = 0;
 +      greth->tx_next = 0;
 +      greth->tx_last = 0;
 +      greth->tx_free = GRETH_TXBD_NUM;
 +
 +      /* Initialize descriptor base address */
 +      GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
 +      GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);
 +
 +      return 0;
 +
 +cleanup:
 +      greth_clean_rings(greth);
 +      return -ENOMEM;
 +}
 +
 +static int greth_open(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      int err;
 +
 +      err = greth_init_rings(greth);
 +      if (err) {
 +              if (netif_msg_ifup(greth))
 +                      dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
 +              return err;
 +      }
 +
 +      err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
 +      if (err) {
 +              if (netif_msg_ifup(greth))
 +                      dev_err(&dev->dev, "Could not allocate interrupt %d\n", greth->irq);
 +              greth_clean_rings(greth);
 +              return err;
 +      }
 +
 +      if (netif_msg_ifup(greth))
 +              dev_dbg(&dev->dev, " starting queue\n");
 +      netif_start_queue(dev);
 +
 +      GRETH_REGSAVE(greth->regs->status, 0xFF);
 +
 +      napi_enable(&greth->napi);
 +
 +      greth_enable_irqs(greth);
 +      greth_enable_tx(greth);
 +      greth_enable_rx(greth);
 +      return 0;
 +
 +}
 +
 +static int greth_close(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +
 +      napi_disable(&greth->napi);
 +
 +      greth_disable_irqs(greth);
 +      greth_disable_tx(greth);
 +      greth_disable_rx(greth);
 +
 +      netif_stop_queue(dev);
 +
 +      free_irq(greth->irq, (void *) dev);
 +
 +      greth_clean_rings(greth);
 +
 +      return 0;
 +}
 +
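 +/* 10/100 transmit path: the frame is copied into the descriptor's
 + * preallocated DMA buffer, so no per-skb mapping is set up here.
 + */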
 +static netdev_tx_t
 +greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct greth_bd *bdp;
 +      int err = NETDEV_TX_OK;
 +      u32 status, dma_addr, ctrl;
 +      unsigned long flags;
 +
 +      /* Clean TX Ring */
 +      greth_clean_tx(greth->netdev);
 +
 +      if (unlikely(greth->tx_free <= 0)) {
 +              spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
 +              ctrl = GRETH_REGLOAD(greth->regs->control);
 +              /* Enable TX IRQ only if not already in poll() routine */
 +              if (ctrl & GRETH_RXI)
 +                      GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 +              netif_stop_queue(dev);
 +              spin_unlock_irqrestore(&greth->devlock, flags);
 +              return NETDEV_TX_BUSY;
 +      }
 +
 +      if (netif_msg_pktdata(greth))
 +              greth_print_tx_packet(skb);
 +
 +
 +      if (unlikely(skb->len > MAX_FRAME_SIZE)) {
 +              dev->stats.tx_errors++;
 +              goto out;
 +      }
 +
 +      bdp = greth->tx_bd_base + greth->tx_next;
 +      dma_addr = greth_read_bd(&bdp->addr);
 +
 +      memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
 +
 +      dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 +
 +      status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
++      greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
 +
 +      /* Wrap around descriptor ring */
 +      if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
 +              status |= GRETH_BD_WR;
 +      }
 +
 +      greth->tx_next = NEXT_TX(greth->tx_next);
 +      greth->tx_free--;
 +
 +      /* Write descriptor control word and enable transmission */
 +      greth_write_bd(&bdp->stat, status);
 +      spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 +      greth_enable_tx(greth);
 +      spin_unlock_irqrestore(&greth->devlock, flags);
 +
 +out:
 +      dev_kfree_skb(skb);
 +      return err;
 +}
 +
 +
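 +/* Gigabit transmit path with scatter/gather: the linear part and every
 + * page fragment get their own descriptor. EN is set on the first
 + * descriptor last, so the hardware never starts on a half-built chain.
 + */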
 +static netdev_tx_t
 +greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct greth_bd *bdp;
 +      u32 status = 0, dma_addr, ctrl;
 +      int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
 +      unsigned long flags;
 +
 +      nr_frags = skb_shinfo(skb)->nr_frags;
 +
 +      /* Clean TX Ring */
 +      greth_clean_tx_gbit(dev);
 +
 +      if (greth->tx_free < nr_frags + 1) {
 +              spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
 +              ctrl = GRETH_REGLOAD(greth->regs->control);
 +              /* Enable TX IRQ only if not already in poll() routine */
 +              if (ctrl & GRETH_RXI)
 +                      GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 +              netif_stop_queue(dev);
 +              spin_unlock_irqrestore(&greth->devlock, flags);
 +              err = NETDEV_TX_BUSY;
 +              goto out;
 +      }
 +
 +      if (netif_msg_pktdata(greth))
 +              greth_print_tx_packet(skb);
 +
 +      if (unlikely(skb->len > MAX_FRAME_SIZE)) {
 +              dev->stats.tx_errors++;
 +              goto out;
 +      }
 +
 +      /* Save skb pointer. */
 +      greth->tx_skbuff[greth->tx_next] = skb;
 +
 +      /* Linear buf */
 +      if (nr_frags != 0)
 +              status = GRETH_TXBD_MORE;
 +
-               status = GRETH_TXBD_CSALL | GRETH_BD_EN;
++      if (skb->ip_summed == CHECKSUM_PARTIAL)
++              status |= GRETH_TXBD_CSALL;
 +      status |= skb_headlen(skb) & GRETH_BD_LEN;
 +      if (greth->tx_next == GRETH_TXBD_NUM_MASK)
 +              status |= GRETH_BD_WR;
 +
 +
 +      bdp = greth->tx_bd_base + greth->tx_next;
 +      greth_write_bd(&bdp->stat, status);
 +      dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
 +
 +      if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
 +              goto map_error;
 +
 +      greth_write_bd(&bdp->addr, dma_addr);
 +
 +      curr_tx = NEXT_TX(greth->tx_next);
 +
 +      /* Frags */
 +      for (i = 0; i < nr_frags; i++) {
 +              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +              greth->tx_skbuff[curr_tx] = NULL;
 +              bdp = greth->tx_bd_base + curr_tx;
 +
++              status = GRETH_BD_EN;
++              if (skb->ip_summed == CHECKSUM_PARTIAL)
++                      status |= GRETH_TXBD_CSALL;
 +              status |= frag->size & GRETH_BD_LEN;
 +
 +              /* Wrap around descriptor ring */
 +              if (curr_tx == GRETH_TXBD_NUM_MASK)
 +                      status |= GRETH_BD_WR;
 +
 +              /* More fragments left */
 +              if (i < nr_frags - 1)
 +                      status |= GRETH_TXBD_MORE;
 +              else
 +                      status |= GRETH_BD_IE; /* enable IRQ on last fragment */
 +
 +              greth_write_bd(&bdp->stat, status);
 +
 +              dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size,
 +                                          DMA_TO_DEVICE);
 +
 +              if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
 +                      goto frag_map_error;
 +
 +              greth_write_bd(&bdp->addr, dma_addr);
 +
 +              curr_tx = NEXT_TX(curr_tx);
 +      }
 +
 +      wmb();
 +
 +      /* Enable the descriptor chain by enabling the first descriptor */
 +      bdp = greth->tx_bd_base + greth->tx_next;
 +      greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
 +      greth->tx_next = curr_tx;
 +      greth->tx_free -= nr_frags + 1;
 +
 +      wmb();
 +
 +      spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 +      greth_enable_tx(greth);
 +      spin_unlock_irqrestore(&greth->devlock, flags);
 +
 +      return NETDEV_TX_OK;
 +
 +frag_map_error:
 +      /* Unmap SKB mappings that succeeded and disable descriptor */
 +      for (i = 0; greth->tx_next + i != curr_tx; i++) {
 +              bdp = greth->tx_bd_base + greth->tx_next + i;
 +              dma_unmap_single(greth->dev,
 +                               greth_read_bd(&bdp->addr),
 +                               greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
 +                               DMA_TO_DEVICE);
 +              greth_write_bd(&bdp->stat, 0);
 +      }
 +map_error:
 +      if (net_ratelimit())
 +              dev_warn(greth->dev, "Could not create TX DMA mapping\n");
 +      dev_kfree_skb(skb);
 +out:
 +      return err;
 +}
 +
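 +/* Top-half interrupt handler: if an enabled RX/TX event is pending,
 + * mask the interrupts and defer all processing to NAPI poll.
 + */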
 +static irqreturn_t greth_interrupt(int irq, void *dev_id)
 +{
 +      struct net_device *dev = dev_id;
 +      struct greth_private *greth;
 +      u32 status, ctrl;
 +      irqreturn_t retval = IRQ_NONE;
 +
 +      greth = netdev_priv(dev);
 +
 +      spin_lock(&greth->devlock);
 +
 +      /* Get the interrupt events that caused us to be here. */
 +      status = GRETH_REGLOAD(greth->regs->status);
 +
 +      /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
 +       * set regardless of whether IRQ is enabled or not. Especially
 +       * important when shared IRQ.
 +       */
 +      ctrl = GRETH_REGLOAD(greth->regs->control);
 +
 +      /* Handle rx and tx interrupts through poll */
 +      if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
 +          ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
 +              retval = IRQ_HANDLED;
 +
 +              /* Disable interrupts and schedule poll() */
 +              greth_disable_irqs(greth);
 +              napi_schedule(&greth->napi);
 +      }
 +
 +      mmiowb();
 +      spin_unlock(&greth->devlock);
 +
 +      return retval;
 +}
 +
 +static void greth_clean_tx(struct net_device *dev)
 +{
 +      struct greth_private *greth;
 +      struct greth_bd *bdp;
 +      u32 stat;
 +
 +      greth = netdev_priv(dev);
 +
 +      while (1) {
 +              bdp = greth->tx_bd_base + greth->tx_last;
 +              GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
 +              mb();
 +              stat = greth_read_bd(&bdp->stat);
 +
 +              if (unlikely(stat & GRETH_BD_EN))
 +                      break;
 +
 +              if (greth->tx_free == GRETH_TXBD_NUM)
 +                      break;
 +
 +              /* Check status for errors */
 +              if (unlikely(stat & GRETH_TXBD_STATUS)) {
 +                      dev->stats.tx_errors++;
 +                      if (stat & GRETH_TXBD_ERR_AL)
 +                              dev->stats.tx_aborted_errors++;
 +                      if (stat & GRETH_TXBD_ERR_UE)
 +                              dev->stats.tx_fifo_errors++;
 +              }
 +              dev->stats.tx_packets++;
++              dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
 +              greth->tx_last = NEXT_TX(greth->tx_last);
 +              greth->tx_free++;
 +      }
 +
 +      if (greth->tx_free > 0) {
 +              netif_wake_queue(dev);
 +      }
 +
 +}
 +
 +static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
 +{
 +      /* Check status for errors */
 +      if (unlikely(stat & GRETH_TXBD_STATUS)) {
 +              dev->stats.tx_errors++;
 +              if (stat & GRETH_TXBD_ERR_AL)
 +                      dev->stats.tx_aborted_errors++;
 +              if (stat & GRETH_TXBD_ERR_UE)
 +                      dev->stats.tx_fifo_errors++;
 +              if (stat & GRETH_TXBD_ERR_LC)
 +                      dev->stats.tx_aborted_errors++;
 +      }
 +      dev->stats.tx_packets++;
 +}
 +
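 +/* Reclaim completed gigabit TX descriptors. An skb is freed only once
 + * the descriptor of its last fragment has been released by the hardware
 + * (EN bit cleared).
 + */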
 +static void greth_clean_tx_gbit(struct net_device *dev)
 +{
 +      struct greth_private *greth;
 +      struct greth_bd *bdp, *bdp_last_frag;
 +      struct sk_buff *skb;
 +      u32 stat;
 +      int nr_frags, i;
 +
 +      greth = netdev_priv(dev);
 +
 +      while (greth->tx_free < GRETH_TXBD_NUM) {
 +
 +              skb = greth->tx_skbuff[greth->tx_last];
 +
 +              nr_frags = skb_shinfo(skb)->nr_frags;
 +
 +              /* We only clean fully completed SKBs */
 +              bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
 +
 +              GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
 +              mb();
 +              stat = greth_read_bd(&bdp_last_frag->stat);
 +
 +              if (stat & GRETH_BD_EN)
 +                      break;
 +
 +              greth->tx_skbuff[greth->tx_last] = NULL;
 +
 +              greth_update_tx_stats(dev, stat);
++              dev->stats.tx_bytes += skb->len;
 +
 +              bdp = greth->tx_bd_base + greth->tx_last;
 +
 +              greth->tx_last = NEXT_TX(greth->tx_last);
 +
 +              dma_unmap_single(greth->dev,
 +                               greth_read_bd(&bdp->addr),
 +                               skb_headlen(skb),
 +                               DMA_TO_DEVICE);
 +
 +              for (i = 0; i < nr_frags; i++) {
 +                      skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +                      bdp = greth->tx_bd_base + greth->tx_last;
 +
 +                      dma_unmap_page(greth->dev,
 +                                     greth_read_bd(&bdp->addr),
 +                                     frag->size,
 +                                     DMA_TO_DEVICE);
 +
 +                      greth->tx_last = NEXT_TX(greth->tx_last);
 +              }
 +              greth->tx_free += nr_frags+1;
 +              dev_kfree_skb(skb);
 +      }
 +
 +      if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
 +              netif_wake_queue(dev);
 +}
 +
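 +/* 10/100 receive path: frames are copied out of the fixed DMA buffers
 + * into freshly allocated skbs and the descriptor is handed straight back
 + * to the hardware.
 + */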
 +static int greth_rx(struct net_device *dev, int limit)
 +{
 +      struct greth_private *greth;
 +      struct greth_bd *bdp;
 +      struct sk_buff *skb;
 +      int pkt_len;
 +      int bad, count;
 +      u32 status, dma_addr;
 +      unsigned long flags;
 +
 +      greth = netdev_priv(dev);
 +
 +      for (count = 0; count < limit; ++count) {
 +
 +              bdp = greth->rx_bd_base + greth->rx_cur;
 +              GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
 +              mb();
 +              status = greth_read_bd(&bdp->stat);
 +
 +              if (unlikely(status & GRETH_BD_EN)) {
 +                      break;
 +              }
 +
 +              dma_addr = greth_read_bd(&bdp->addr);
 +              bad = 0;
 +
 +              /* Check status for errors. */
 +              if (unlikely(status & GRETH_RXBD_STATUS)) {
 +                      if (status & GRETH_RXBD_ERR_FT) {
 +                              dev->stats.rx_length_errors++;
 +                              bad = 1;
 +                      }
 +                      if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
 +                              dev->stats.rx_frame_errors++;
 +                              bad = 1;
 +                      }
 +                      if (status & GRETH_RXBD_ERR_CRC) {
 +                              dev->stats.rx_crc_errors++;
 +                              bad = 1;
 +                      }
 +              }
 +              if (unlikely(bad)) {
 +                      dev->stats.rx_errors++;
 +
 +              } else {
 +
 +                      pkt_len = status & GRETH_BD_LEN;
 +
 +                      skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 +
 +                      if (unlikely(skb == NULL)) {
 +
 +                              if (net_ratelimit())
 +                                      dev_warn(&dev->dev, "low on memory - " "packet dropped\n");
 +
 +                              dev->stats.rx_dropped++;
 +
 +                      } else {
 +                              skb_reserve(skb, NET_IP_ALIGN);
 +                              skb->dev = dev;
 +
 +                              dma_sync_single_for_cpu(greth->dev,
 +                                                      dma_addr,
 +                                                      pkt_len,
 +                                                      DMA_FROM_DEVICE);
 +
 +                              if (netif_msg_pktdata(greth))
 +                                      greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
 +
 +                              memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
 +
 +                              skb->protocol = eth_type_trans(skb, dev);
++                              dev->stats.rx_bytes += pkt_len;
 +                              dev->stats.rx_packets++;
 +                              netif_receive_skb(skb);
 +                      }
 +              }
 +
 +              status = GRETH_BD_EN | GRETH_BD_IE;
 +              if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
 +                      status |= GRETH_BD_WR;
 +              }
 +
 +              wmb();
 +              greth_write_bd(&bdp->stat, status);
 +
 +              dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
 +
 +              spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
 +              greth_enable_rx(greth);
 +              spin_unlock_irqrestore(&greth->devlock, flags);
 +
 +              greth->rx_cur = NEXT_RX(greth->rx_cur);
 +      }
 +
 +      return count;
 +}
 +
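 +/* Nonzero when the hardware validated the frame: not an IP fragment and
 + * no IP/UDP/TCP checksum error reported in the descriptor status.
 + */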
 +static inline int hw_checksummed(u32 status)
 +{
 +
 +      if (status & GRETH_RXBD_IP_FRAG)
 +              return 0;
 +
 +      if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
 +              return 0;
 +
 +      if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
 +              return 0;
 +
 +      if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
 +              return 0;
 +
 +      return 1;
 +}
 +
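 +/* Gigabit receive path: the filled skb is passed up the stack and
 + * replaced with a newly mapped one. On allocation or mapping failure the
 + * old buffer is reused and the frame is counted as dropped.
 + */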
 +static int greth_rx_gbit(struct net_device *dev, int limit)
 +{
 +      struct greth_private *greth;
 +      struct greth_bd *bdp;
 +      struct sk_buff *skb, *newskb;
 +      int pkt_len;
 +      int bad, count = 0;
 +      u32 status, dma_addr;
 +      unsigned long flags;
 +
 +      greth = netdev_priv(dev);
 +
 +      for (count = 0; count < limit; ++count) {
 +
 +              bdp = greth->rx_bd_base + greth->rx_cur;
 +              skb = greth->rx_skbuff[greth->rx_cur];
 +              GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
 +              mb();
 +              status = greth_read_bd(&bdp->stat);
 +              bad = 0;
 +
 +              if (status & GRETH_BD_EN)
 +                      break;
 +
 +              /* Check status for errors. */
 +              if (unlikely(status & GRETH_RXBD_STATUS)) {
 +
 +                      if (status & GRETH_RXBD_ERR_FT) {
 +                              dev->stats.rx_length_errors++;
 +                              bad = 1;
 +                      } else if (status &
 +                                 (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
 +                              dev->stats.rx_frame_errors++;
 +                              bad = 1;
 +                      } else if (status & GRETH_RXBD_ERR_CRC) {
 +                              dev->stats.rx_crc_errors++;
 +                              bad = 1;
 +                      }
 +              }
 +
 +              /* Allocate new skb to replace current, not needed if the
 +               * current skb can be reused */
 +              if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
 +                      skb_reserve(newskb, NET_IP_ALIGN);
 +
 +                      dma_addr = dma_map_single(greth->dev,
 +                                                    newskb->data,
 +                                                    MAX_FRAME_SIZE + NET_IP_ALIGN,
 +                                                    DMA_FROM_DEVICE);
 +
 +                      if (!dma_mapping_error(greth->dev, dma_addr)) {
 +                              /* Process the incoming frame. */
 +                              pkt_len = status & GRETH_BD_LEN;
 +
 +                              dma_unmap_single(greth->dev,
 +                                               greth_read_bd(&bdp->addr),
 +                                               MAX_FRAME_SIZE + NET_IP_ALIGN,
 +                                               DMA_FROM_DEVICE);
 +
 +                              if (netif_msg_pktdata(greth))
 +                                      greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);
 +
 +                              skb_put(skb, pkt_len);
 +
 +                              if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
 +                                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +                              else
 +                                      skb_checksum_none_assert(skb);
 +
 +                              skb->protocol = eth_type_trans(skb, dev);
 +                              dev->stats.rx_packets++;
++                              dev->stats.rx_bytes += pkt_len;
 +                              netif_receive_skb(skb);
 +
 +                              greth->rx_skbuff[greth->rx_cur] = newskb;
 +                              greth_write_bd(&bdp->addr, dma_addr);
 +                      } else {
 +                              if (net_ratelimit())
 +                                      dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
 +                              dev_kfree_skb(newskb);
 +                              /* reusing current skb, so it is a drop */
 +                              dev->stats.rx_dropped++;
 +                      }
 +              } else if (bad) {
 +                      /* Bad Frame transfer, the skb is reused */
 +                      dev->stats.rx_dropped++;
 +              } else {
 +                      /* Failed allocating a new skb. This is rather stupid
 +                       * but the current "filled" skb is reused, as if the
 +                       * transfer had failed. One could argue that RX
 +                       * descriptor table handling should be divided into
 +                       * cleaning and filling, as in the TX part of the
 +                       * driver.
 +                       */
 +                      if (net_ratelimit())
 +                              dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
 +                      /* reusing current skb, so it is a drop */
 +                      dev->stats.rx_dropped++;
 +              }
 +
 +              status = GRETH_BD_EN | GRETH_BD_IE;
 +              if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
 +                      status |= GRETH_BD_WR;
 +              }
 +
 +              wmb();
 +              greth_write_bd(&bdp->stat, status);
 +              spin_lock_irqsave(&greth->devlock, flags);
 +              greth_enable_rx(greth);
 +              spin_unlock_irqrestore(&greth->devlock, flags);
 +              greth->rx_cur = NEXT_RX(greth->rx_cur);
 +      }
 +
 +      return count;
 +
 +}
 +
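 +/* NAPI poll: reclaim TX if the queue is stopped, receive up to the
 + * budget, then re-enable interrupts, looping again if new events raced
 + * in before completion.
 + */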
 +static int greth_poll(struct napi_struct *napi, int budget)
 +{
 +      struct greth_private *greth;
 +      int work_done = 0;
 +      unsigned long flags;
 +      u32 mask, ctrl;
 +      greth = container_of(napi, struct greth_private, napi);
 +
 +restart_txrx_poll:
 +      if (netif_queue_stopped(greth->netdev)) {
 +              if (greth->gbit_mac)
 +                      greth_clean_tx_gbit(greth->netdev);
 +              else
 +                      greth_clean_tx(greth->netdev);
 +      }
 +
 +      if (greth->gbit_mac) {
 +              work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 +      } else {
 +              work_done += greth_rx(greth->netdev, budget - work_done);
 +      }
 +
 +      if (work_done < budget) {
 +
 +              spin_lock_irqsave(&greth->devlock, flags);
 +
 +              ctrl = GRETH_REGLOAD(greth->regs->control);
 +              if (netif_queue_stopped(greth->netdev)) {
 +                      GRETH_REGSAVE(greth->regs->control,
 +                                      ctrl | GRETH_TXI | GRETH_RXI);
 +                      mask = GRETH_INT_RX | GRETH_INT_RE |
 +                             GRETH_INT_TX | GRETH_INT_TE;
 +              } else {
 +                      GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
 +                      mask = GRETH_INT_RX | GRETH_INT_RE;
 +              }
 +
 +              if (GRETH_REGLOAD(greth->regs->status) & mask) {
 +                      GRETH_REGSAVE(greth->regs->control, ctrl);
 +                      spin_unlock_irqrestore(&greth->devlock, flags);
 +                      goto restart_txrx_poll;
 +              } else {
 +                      __napi_complete(napi);
 +                      spin_unlock_irqrestore(&greth->devlock, flags);
 +              }
 +      }
 +
 +      return work_done;
 +}
 +
 +static int greth_set_mac_add(struct net_device *dev, void *p)
 +{
 +      struct sockaddr *addr = p;
 +      struct greth_private *greth;
 +      struct greth_regs *regs;
 +
 +      greth = netdev_priv(dev);
 +      regs = (struct greth_regs *) greth->regs;
 +
 +      if (!is_valid_ether_addr(addr->sa_data))
 +              return -EINVAL;
 +
 +      memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 +      GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
 +      GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
 +                    dev->dev_addr[4] << 8 | dev->dev_addr[5]);
 +
 +      return 0;
 +}
 +
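 +/* The 64-bit multicast hash filter is indexed by the low six bits of the
 + * Ethernet CRC of the address.
 + */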
 +static u32 greth_hash_get_index(__u8 *addr)
 +{
 +      return (ether_crc(6, addr)) & 0x3F;
 +}
 +
 +static void greth_set_hash_filter(struct net_device *dev)
 +{
 +      struct netdev_hw_addr *ha;
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct greth_regs *regs = (struct greth_regs *) greth->regs;
 +      u32 mc_filter[2];
 +      unsigned int bitnr;
 +
 +      mc_filter[0] = mc_filter[1] = 0;
 +
 +      netdev_for_each_mc_addr(ha, dev) {
 +              bitnr = greth_hash_get_index(ha->addr);
 +              mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
 +      }
 +
 +      GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
 +      GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
 +}
 +
 +static void greth_set_multicast_list(struct net_device *dev)
 +{
 +      int cfg;
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct greth_regs *regs = (struct greth_regs *) greth->regs;
 +
 +      cfg = GRETH_REGLOAD(regs->control);
 +      if (dev->flags & IFF_PROMISC)
 +              cfg |= GRETH_CTRL_PR;
 +      else
 +              cfg &= ~GRETH_CTRL_PR;
 +
 +      if (greth->multicast) {
 +              if (dev->flags & IFF_ALLMULTI) {
 +                      GRETH_REGSAVE(regs->hash_msb, -1);
 +                      GRETH_REGSAVE(regs->hash_lsb, -1);
 +                      cfg |= GRETH_CTRL_MCEN;
 +                      GRETH_REGSAVE(regs->control, cfg);
 +                      return;
 +              }
 +
 +              if (netdev_mc_empty(dev)) {
 +                      cfg &= ~GRETH_CTRL_MCEN;
 +                      GRETH_REGSAVE(regs->control, cfg);
 +                      return;
 +              }
 +
 +              /* Setup multicast filter */
 +              greth_set_hash_filter(dev);
 +              cfg |= GRETH_CTRL_MCEN;
 +      }
 +      GRETH_REGSAVE(regs->control, cfg);
 +}
 +
 +static u32 greth_get_msglevel(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      return greth->msg_enable;
 +}
 +
 +static void greth_set_msglevel(struct net_device *dev, u32 value)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      greth->msg_enable = value;
 +}
 +static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct phy_device *phy = greth->phy;
 +
 +      if (!phy)
 +              return -ENODEV;
 +
 +      return phy_ethtool_gset(phy, cmd);
 +}
 +
 +static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct phy_device *phy = greth->phy;
 +
 +      if (!phy)
 +              return -ENODEV;
 +
 +      return phy_ethtool_sset(phy, cmd);
 +}
 +
 +static int greth_get_regs_len(struct net_device *dev)
 +{
 +      return sizeof(struct greth_regs);
 +}
 +
 +static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +
 +      strncpy(info->driver, dev_driver_string(greth->dev), 32);
 +      strncpy(info->version, "revision: 1.0", 32);
 +      strncpy(info->bus_info, greth->dev->bus->name, 32);
 +      strncpy(info->fw_version, "N/A", 32);
 +      info->eedump_len = 0;
 +      info->regdump_len = sizeof(struct greth_regs);
 +}
 +
 +static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
 +{
 +      int i;
 +      struct greth_private *greth = netdev_priv(dev);
 +      u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
 +      u32 *buff = p;
 +
 +      for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
 +              buff[i] = greth_read_bd(&greth_regs[i]);
 +}
 +
 +static const struct ethtool_ops greth_ethtool_ops = {
 +      .get_msglevel           = greth_get_msglevel,
 +      .set_msglevel           = greth_set_msglevel,
 +      .get_settings           = greth_get_settings,
 +      .set_settings           = greth_set_settings,
 +      .get_drvinfo            = greth_get_drvinfo,
 +      .get_regs_len           = greth_get_regs_len,
 +      .get_regs               = greth_get_regs,
 +      .get_link               = ethtool_op_get_link,
 +};
 +
 +static struct net_device_ops greth_netdev_ops = {
 +      .ndo_open               = greth_open,
 +      .ndo_stop               = greth_close,
 +      .ndo_start_xmit         = greth_start_xmit,
 +      .ndo_set_mac_address    = greth_set_mac_add,
 +      .ndo_validate_addr      = eth_validate_addr,
 +};
 +
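 +/* Busy-wait (at most about 40 ms) for the MDIO interface to become idle;
 + * returns 0 on timeout.
 + */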
 +static inline int wait_for_mdio(struct greth_private *greth)
 +{
 +      unsigned long timeout = jiffies + 4*HZ/100;
 +      while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
 +              if (time_after(jiffies, timeout))
 +                      return 0;
 +      }
 +      return 1;
 +}
 +
 +static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
 +{
 +      struct greth_private *greth = bus->priv;
 +      int data;
 +
 +      if (!wait_for_mdio(greth))
 +              return -EBUSY;
 +
 +      GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
 +
 +      if (!wait_for_mdio(greth))
 +              return -EBUSY;
 +
 +      if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
 +              data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
 +              return data;
 +
 +      } else {
 +              return -1;
 +      }
 +}
 +
 +static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
 +{
 +      struct greth_private *greth = bus->priv;
 +
 +      if (!wait_for_mdio(greth))
 +              return -EBUSY;
 +
 +      GRETH_REGSAVE(greth->regs->mdio,
 +                    ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);
 +
 +      if (!wait_for_mdio(greth))
 +              return -EBUSY;
 +
 +      return 0;
 +}
 +
 +static int greth_mdio_reset(struct mii_bus *bus)
 +{
 +      return 0;
 +}
 +
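 +/* PHY adjust_link callback: reprogram the duplex/speed/gigabit bits of
 + * the control register whenever the negotiated link parameters change.
 + */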
 +static void greth_link_change(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct phy_device *phydev = greth->phy;
 +      unsigned long flags;
 +      int status_change = 0;
 +      u32 ctrl;
 +
 +      spin_lock_irqsave(&greth->devlock, flags);
 +
 +      if (phydev->link) {
 +
 +              if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
 +                      ctrl = GRETH_REGLOAD(greth->regs->control) &
 +                             ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
 +
 +                      if (phydev->duplex)
 +                              ctrl |= GRETH_CTRL_FD;
 +
 +                      if (phydev->speed == SPEED_100)
 +                              ctrl |= GRETH_CTRL_SP;
 +                      else if (phydev->speed == SPEED_1000)
 +                              ctrl |= GRETH_CTRL_GB;
 +
 +                      GRETH_REGSAVE(greth->regs->control, ctrl);
 +                      greth->speed = phydev->speed;
 +                      greth->duplex = phydev->duplex;
 +                      status_change = 1;
 +              }
 +      }
 +
 +      if (phydev->link != greth->link) {
 +              if (!phydev->link) {
 +                      greth->speed = 0;
 +                      greth->duplex = -1;
 +              }
 +              greth->link = phydev->link;
 +
 +              status_change = 1;
 +      }
 +
 +      spin_unlock_irqrestore(&greth->devlock, flags);
 +
 +      if (status_change) {
 +              if (phydev->link)
 +                      pr_debug("%s: link up (%d/%s)\n",
 +                              dev->name, phydev->speed,
 +                              DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
 +              else
 +                      pr_debug("%s: link down\n", dev->name);
 +      }
 +}
 +
 +static int greth_mdio_probe(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct phy_device *phy = NULL;
 +      int ret;
 +
 +      /* Find the first PHY */
 +      phy = phy_find_first(greth->mdio);
 +
 +      if (!phy) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(&dev->dev, "no PHY found\n");
 +              return -ENXIO;
 +      }
 +
 +      ret = phy_connect_direct(dev, phy, &greth_link_change,
 +                      0, greth->gbit_mac ?
 +                      PHY_INTERFACE_MODE_GMII :
 +                      PHY_INTERFACE_MODE_MII);
 +      if (ret) {
 +              if (netif_msg_ifup(greth))
 +                      dev_err(&dev->dev, "could not attach to PHY\n");
 +              return ret;
 +      }
 +
 +      if (greth->gbit_mac)
 +              phy->supported &= PHY_GBIT_FEATURES;
 +      else
 +              phy->supported &= PHY_BASIC_FEATURES;
 +
 +      phy->advertising = phy->supported;
 +
 +      greth->link = 0;
 +      greth->speed = 0;
 +      greth->duplex = -1;
 +      greth->phy = phy;
 +
 +      return 0;
 +}
 +
 +static inline int phy_aneg_done(struct phy_device *phydev)
 +{
 +      int retval;
 +
 +      retval = phy_read(phydev, MII_BMSR);
 +
 +      return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
 +}
 +
 +static int greth_mdio_init(struct greth_private *greth)
 +{
 +      int ret, phy;
 +      unsigned long timeout;
 +
 +      greth->mdio = mdiobus_alloc();
 +      if (!greth->mdio) {
 +              return -ENOMEM;
 +      }
 +
 +      greth->mdio->name = "greth-mdio";
 +      snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
 +      greth->mdio->read = greth_mdio_read;
 +      greth->mdio->write = greth_mdio_write;
 +      greth->mdio->reset = greth_mdio_reset;
 +      greth->mdio->priv = greth;
 +
 +      greth->mdio->irq = greth->mdio_irqs;
 +
 +      for (phy = 0; phy < PHY_MAX_ADDR; phy++)
 +              greth->mdio->irq[phy] = PHY_POLL;
 +
 +      ret = mdiobus_register(greth->mdio);
 +      if (ret) {
 +              goto error;
 +      }
 +
 +      ret = greth_mdio_probe(greth->netdev);
 +      if (ret) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
 +              goto unreg_mdio;
 +      }
 +
 +      phy_start(greth->phy);
 +
 +      /* If Ethernet debug link is used make autoneg happen right away */
 +      if (greth->edcl && greth_edcl == 1) {
 +              phy_start_aneg(greth->phy);
 +              timeout = jiffies + 6*HZ;
 +              while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
 +              }
 +              genphy_read_status(greth->phy);
 +              greth_link_change(greth->netdev);
 +      }
 +
 +      return 0;
 +
 +unreg_mdio:
 +      mdiobus_unregister(greth->mdio);
 +error:
 +      mdiobus_free(greth->mdio);
 +      return ret;
 +}
 +
 +/* Initialize the GRETH MAC */
 +static int __devinit greth_of_probe(struct platform_device *ofdev)
 +{
 +      struct net_device *dev;
 +      struct greth_private *greth;
 +      struct greth_regs *regs;
 +
 +      int i;
 +      int err;
 +      int tmp;
 +      unsigned long timeout;
 +
 +      dev = alloc_etherdev(sizeof(struct greth_private));
 +
 +      if (dev == NULL)
 +              return -ENOMEM;
 +
 +      greth = netdev_priv(dev);
 +      greth->netdev = dev;
 +      greth->dev = &ofdev->dev;
 +
 +      if (greth_debug > 0)
 +              greth->msg_enable = greth_debug;
 +      else
 +              greth->msg_enable = GRETH_DEF_MSG_ENABLE;
 +
 +      spin_lock_init(&greth->devlock);
 +
 +      greth->regs = of_ioremap(&ofdev->resource[0], 0,
 +                               resource_size(&ofdev->resource[0]),
 +                               "grlib-greth regs");
 +
 +      if (greth->regs == NULL) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "ioremap failure.\n");
 +              err = -EIO;
 +              goto error1;
 +      }
 +
 +      regs = (struct greth_regs *) greth->regs;
 +      greth->irq = ofdev->archdata.irqs[0];
 +
 +      dev_set_drvdata(greth->dev, dev);
 +      SET_NETDEV_DEV(dev, greth->dev);
 +
 +      if (netif_msg_probe(greth))
 +              dev_dbg(greth->dev, "resetting controller.\n");
 +
 +      /* Reset the controller. */
 +      GRETH_REGSAVE(regs->control, GRETH_RESET);
 +
 +      /* Wait for MAC to reset itself */
 +      timeout = jiffies + HZ/100;
 +      while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
 +              if (time_after(jiffies, timeout)) {
 +                      err = -EIO;
 +                      if (netif_msg_probe(greth))
 +                              dev_err(greth->dev, "timeout when waiting for reset.\n");
 +                      goto error2;
 +              }
 +      }
 +
 +      /* Get default PHY address  */
 +      greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
 +
 +      /* Check if we have GBIT capable MAC */
 +      tmp = GRETH_REGLOAD(regs->control);
 +      greth->gbit_mac = (tmp >> 27) & 1;
 +
 +      /* Check for multicast capability */
 +      greth->multicast = (tmp >> 25) & 1;
 +
 +      greth->edcl = (tmp >> 31) & 1;
 +
 +      /* If we have EDCL we disable the EDCL speed-duplex FSM so
 +       * it doesn't interfere with the software */
 +      if (greth->edcl != 0)
 +              GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
 +
 +      /* Check if MAC can handle MDIO interrupts */
 +      greth->mdio_int_en = (tmp >> 26) & 1;
 +
 +      err = greth_mdio_init(greth);
 +      if (err) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "failed to register MDIO bus\n");
 +              goto error2;
 +      }
 +
 +      /* Allocate TX descriptor ring in coherent memory */
 +      greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
 +                                                                 1024,
 +                                                                 &greth->tx_bd_base_phys,
 +                                                                 GFP_KERNEL);
 +
 +      if (!greth->tx_bd_base) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(&dev->dev, "could not allocate descriptor memory.\n");
 +              err = -ENOMEM;
 +              goto error3;
 +      }
 +
 +      memset(greth->tx_bd_base, 0, 1024);
 +
 +      /* Allocate RX descriptor ring in coherent memory */
 +      greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
 +                                                                 1024,
 +                                                                 &greth->rx_bd_base_phys,
 +                                                                 GFP_KERNEL);
 +
 +      if (!greth->rx_bd_base) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "could not allocate descriptor memory.\n");
 +              err = -ENOMEM;
 +              goto error4;
 +      }
 +
 +      memset(greth->rx_bd_base, 0, 1024);
 +
 +      /* Get MAC address from: module param, OF property or ID prom */
 +      for (i = 0; i < 6; i++) {
 +              if (macaddr[i] != 0)
 +                      break;
 +      }
 +      if (i == 6) {
 +              const unsigned char *addr;
 +              int len;
 +              addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
 +                                      &len);
 +              if (addr != NULL && len == 6) {
 +                      for (i = 0; i < 6; i++)
 +                              macaddr[i] = (unsigned int) addr[i];
 +              } else {
 +#ifdef CONFIG_SPARC
 +                      for (i = 0; i < 6; i++)
 +                              macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
 +#endif
 +              }
 +      }
 +
 +      for (i = 0; i < 6; i++)
 +              dev->dev_addr[i] = macaddr[i];
 +
 +      macaddr[5]++;
 +
 +      if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "no valid ethernet address, aborting.\n");
 +              err = -EINVAL;
 +              goto error5;
 +      }
 +
 +      GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
 +      GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
 +                    dev->dev_addr[4] << 8 | dev->dev_addr[5]);
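 +      /* Illustrative example: for the address 00:11:22:33:44:55 the two
 +       * writes above load 0x0011 into esa_msb and 0x22334455 into esa_lsb. */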
 +
 +      /* Clear all pending interrupts except PHY irq */
 +      GRETH_REGSAVE(regs->status, 0xFF);
 +
 +      if (greth->gbit_mac) {
 +              dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
 +                      NETIF_F_RXCSUM;
 +              dev->features = dev->hw_features | NETIF_F_HIGHDMA;
 +              greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
 +      }
 +
 +      if (greth->multicast) {
 +              greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
 +              dev->flags |= IFF_MULTICAST;
 +      } else {
 +              dev->flags &= ~IFF_MULTICAST;
 +      }
 +
 +      dev->netdev_ops = &greth_netdev_ops;
 +      dev->ethtool_ops = &greth_ethtool_ops;
 +
 +      err = register_netdev(dev);
 +      if (err) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "netdevice registration failed.\n");
 +              goto error5;
 +      }
 +
 +      /* setup NAPI */
 +      netif_napi_add(dev, &greth->napi, greth_poll, 64);
 +
 +      return 0;
 +
 +error5:
 +      dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
 +error4:
 +      dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
 +error3:
 +      mdiobus_unregister(greth->mdio);
 +error2:
 +      of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
 +error1:
 +      free_netdev(dev);
 +      return err;
 +}
 +
 +static int __devexit greth_of_remove(struct platform_device *of_dev)
 +{
 +      struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
 +      struct greth_private *greth = netdev_priv(ndev);
 +
 +      /* Free descriptor areas */
 +      dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
 +
 +      dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
 +
 +      dev_set_drvdata(&of_dev->dev, NULL);
 +
 +      if (greth->phy)
 +              phy_stop(greth->phy);
 +      mdiobus_unregister(greth->mdio);
 +
 +      unregister_netdev(ndev);
 +      free_netdev(ndev);
 +
 +      of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
 +
 +      return 0;
 +}
 +
 +static struct of_device_id greth_of_match[] = {
 +      {
 +       .name = "GAISLER_ETHMAC",
 +       },
 +      {
 +       .name = "01_01d",
 +       },
 +      {},
 +};
 +
 +MODULE_DEVICE_TABLE(of, greth_of_match);
 +
 +static struct platform_driver greth_of_driver = {
 +      .driver = {
 +              .name = "grlib-greth",
 +              .owner = THIS_MODULE,
 +              .of_match_table = greth_of_match,
 +      },
 +      .probe = greth_of_probe,
 +      .remove = __devexit_p(greth_of_remove),
 +};
 +
 +static int __init greth_init(void)
 +{
 +      return platform_driver_register(&greth_of_driver);
 +}
 +
 +static void __exit greth_cleanup(void)
 +{
 +      platform_driver_unregister(&greth_of_driver);
 +}
 +
 +module_init(greth_init);
 +module_exit(greth_cleanup);
 +
 +MODULE_AUTHOR("Aeroflex Gaisler AB.");
 +MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
 +MODULE_LICENSE("GPL");
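
For reference, the capability probing done in greth_of_probe() above amounts to
reading single bits out of the GRETH control register. A minimal stand-alone
sketch of that decode (illustrative only; the greth_caps struct and helper are
hypothetical and not part of the driver, and only <linux/types.h> is assumed):

    #include <linux/types.h>

    /* Capability bits read from the control register in the probe path
     * above: bit 31 = EDCL present, bit 27 = gigabit MAC, bit 26 = MDIO
     * interrupt support, bit 25 = multicast filtering. */
    struct greth_caps {
            unsigned int edcl:1;
            unsigned int gbit_mac:1;
            unsigned int mdio_int_en:1;
            unsigned int multicast:1;
    };

    static inline struct greth_caps greth_decode_caps(u32 ctrl)
    {
            struct greth_caps caps = {
                    .edcl        = (ctrl >> 31) & 1,
                    .gbit_mac    = (ctrl >> 27) & 1,
                    .mdio_int_en = (ctrl >> 26) & 1,
                    .multicast   = (ctrl >> 25) & 1,
            };
            return caps;
    }
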
Simple merge
index c2b630c,0000000..7d5ded8
mode 100644,000000..100644
--- /dev/null
@@@ -1,767 -1,0 +1,770 @@@
 +/*
 + *  linux/drivers/net/am79c961.c
 + *
 + *  by Russell King <rmk@arm.linux.org.uk> 1995-2001.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 as
 + * published by the Free Software Foundation.
 + *
 + * Derived from various things including skeleton.c
 + *
 + * This is a special driver for the am79c961A Lance chip used in the
 + * Intel (formerly Digital Equipment Corp) EBSA110 platform.  Please
 + * note that this can not be built as a module (it doesn't make sense).
 + */
 +#include <linux/kernel.h>
 +#include <linux/types.h>
 +#include <linux/interrupt.h>
 +#include <linux/ioport.h>
 +#include <linux/slab.h>
 +#include <linux/string.h>
 +#include <linux/errno.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/delay.h>
 +#include <linux/init.h>
 +#include <linux/crc32.h>
 +#include <linux/bitops.h>
 +#include <linux/platform_device.h>
 +#include <linux/io.h>
 +
 +#include <mach/hardware.h>
 +#include <asm/system.h>
 +
 +#define TX_BUFFERS 15
 +#define RX_BUFFERS 25
 +
 +#include "am79c961a.h"
 +
 +static irqreturn_t
 +am79c961_interrupt (int irq, void *dev_id);
 +
 +static unsigned int net_debug = NET_DEBUG;
 +
 +static const char version[] =
 +      "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
 +
 +/* --------------------------------------------------------------------------- */
 +
 +#ifdef __arm__
 +static void write_rreg(u_long base, u_int reg, u_int val)
 +{
 +      asm volatile(
 +      "str%?h %1, [%2]        @ NET_RAP\n\t"
 +      "str%?h %0, [%2, #-4]   @ NET_RDP"
 +      :
 +      : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
 +}
 +
 +static inline unsigned short read_rreg(u_long base_addr, u_int reg)
 +{
 +      unsigned short v;
 +      asm volatile(
 +      "str%?h %1, [%2]        @ NET_RAP\n\t"
 +      "ldr%?h %0, [%2, #-4]   @ NET_RDP"
 +      : "=r" (v)
 +      : "r" (reg), "r" (ISAIO_BASE + 0x0464));
 +      return v;
 +}
 +
 +static inline void write_ireg(u_long base, u_int reg, u_int val)
 +{
 +      asm volatile(
 +      "str%?h %1, [%2]        @ NET_RAP\n\t"
 +      "str%?h %0, [%2, #8]    @ NET_IDP"
 +      :
 +      : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
 +}
 +
 +static inline unsigned short read_ireg(u_long base_addr, u_int reg)
 +{
 +      u_short v;
 +      asm volatile(
 +      "str%?h %1, [%2]        @ NET_RAP\n\t"
 +      "ldr%?h %0, [%2, #8]    @ NET_IDP\n\t"
 +      : "=r" (v)
 +      : "r" (reg), "r" (ISAIO_BASE + 0x0464));
 +      return v;
 +}
 +
 +#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
 +#define am_readword(dev,off)      __raw_readw(ISAMEM_BASE + ((off) << 1))
 +
 +static void
 +am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
 +{
 +      offset = ISAMEM_BASE + (offset << 1);
 +      length = (length + 1) & ~1;
 +      if ((int)buf & 2) {
 +              asm volatile("str%?h    %2, [%0], #4"
 +               : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
 +              buf += 2;
 +              length -= 2;
 +      }
 +      while (length > 8) {
 +              register unsigned int tmp asm("r2"), tmp2 asm("r3");
 +              asm volatile(
 +                      "ldm%?ia        %0!, {%1, %2}"
 +                      : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
 +              length -= 8;
 +              asm volatile(
 +                      "str%?h %1, [%0], #4\n\t"
 +                      "mov%?  %1, %1, lsr #16\n\t"
 +                      "str%?h %1, [%0], #4\n\t"
 +                      "str%?h %2, [%0], #4\n\t"
 +                      "mov%?  %2, %2, lsr #16\n\t"
 +                      "str%?h %2, [%0], #4"
 +              : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
 +      }
 +      while (length > 0) {
 +              asm volatile("str%?h    %2, [%0], #4"
 +               : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
 +              buf += 2;
 +              length -= 2;
 +      }
 +}
 +
 +static void
 +am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
 +{
 +      offset = ISAMEM_BASE + (offset << 1);
 +      length = (length + 1) & ~1;
 +      if ((int)buf & 2) {
 +              unsigned int tmp;
 +              asm volatile(
 +                      "ldr%?h %2, [%0], #4\n\t"
 +                      "str%?b %2, [%1], #1\n\t"
 +                      "mov%?  %2, %2, lsr #8\n\t"
 +                      "str%?b %2, [%1], #1"
 +              : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
 +              length -= 2;
 +      }
 +      while (length > 8) {
 +              register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
 +              asm volatile(
 +                      "ldr%?h %2, [%0], #4\n\t"
 +                      "ldr%?h %4, [%0], #4\n\t"
 +                      "ldr%?h %3, [%0], #4\n\t"
 +                      "orr%?  %2, %2, %4, lsl #16\n\t"
 +                      "ldr%?h %4, [%0], #4\n\t"
 +                      "orr%?  %3, %3, %4, lsl #16\n\t"
 +                      "stm%?ia        %1!, {%2, %3}"
 +              : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
 +              : "0" (offset), "1" (buf));
 +              length -= 8;
 +      }
 +      while (length > 0) {
 +              unsigned int tmp;
 +              asm volatile(
 +                      "ldr%?h %2, [%0], #4\n\t"
 +                      "str%?b %2, [%1], #1\n\t"
 +                      "mov%?  %2, %2, lsr #8\n\t"
 +                      "str%?b %2, [%1], #1"
 +              : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
 +              length -= 2;
 +      }
 +}
 +#else
 +#error Not compatible
 +#endif
 +
 +static int
 +am79c961_ramtest(struct net_device *dev, unsigned int val)
 +{
 +      unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
 +      int i, error = 0, errorcount = 0;
 +
 +      if (!buffer)
 +              return 0;
 +      memset (buffer, val, 65536);
 +      am_writebuffer(dev, 0, buffer, 65536);
 +      memset (buffer, val ^ 255, 65536);
 +      am_readbuffer(dev, 0, buffer, 65536);
 +      for (i = 0; i < 65536; i++) {
 +              if (buffer[i] != val && !error) {
 +                      printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
 +                      error = 1;
 +                      errorcount ++;
 +              } else if (error && buffer[i] == val) {
 +                      printk ("%05X\n", i);
 +                      error = 0;
 +              }
 +      }
 +      if (error)
 +              printk ("10000\n");
 +      kfree (buffer);
 +      return errorcount;
 +}
 +
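 +/*
 + * Worked example for the hash below (illustrative only): the top two bits
 + * of the little-endian CRC select one of the four 16-bit LADR words
 + * (idx = crc >> 30) and the next four bits select the bit within that word
 + * (bit = (crc >> 26) & 15), so each multicast address sets exactly one bit
 + * of the chip's 64-bit logical address filter.
 + */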
 +static void am79c961_mc_hash(char *addr, u16 *hash)
 +{
 +      int idx, bit;
 +      u32 crc;
 +
 +      crc = ether_crc_le(ETH_ALEN, addr);
 +
 +      idx = crc >> 30;
 +      bit = (crc >> 26) & 15;
 +
 +      hash[idx] |= 1 << bit;
 +}
 +
 +static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
 +{
 +      unsigned int mode = MODE_PORT_10BT;
 +
 +      if (dev->flags & IFF_PROMISC) {
 +              mode |= MODE_PROMISC;
 +              memset(hash, 0xff, 4 * sizeof(*hash));
 +      } else if (dev->flags & IFF_ALLMULTI) {
 +              memset(hash, 0xff, 4 * sizeof(*hash));
 +      } else {
 +              struct netdev_hw_addr *ha;
 +
 +              memset(hash, 0, 4 * sizeof(*hash));
 +
 +              netdev_for_each_mc_addr(ha, dev)
 +                      am79c961_mc_hash(ha->addr, hash);
 +      }
 +
 +      return mode;
 +}
 +
 +static void
 +am79c961_init_for_open(struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned long flags;
 +      unsigned char *p;
 +      u_int hdr_addr, first_free_addr;
 +      u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
 +      int i;
 +
 +      /*
 +       * Stop the chip.
 +       */
 +      spin_lock_irqsave(&priv->chip_lock, flags);
 +      write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
 +      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +
 +      write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
 +      write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
 +      write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
 +      write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
 +
 +      for (i = LADRL; i <= LADRH; i++)
 +              write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
 +
 +      for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
 +              write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
 +
 +      write_rreg (dev->base_addr, MODE, mode);
 +      write_rreg (dev->base_addr, POLLINT, 0);
 +      write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
 +      write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
 +
 +      first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
 +      hdr_addr = 0;
 +
 +      priv->rxhead = 0;
 +      priv->rxtail = 0;
 +      priv->rxhdr = hdr_addr;
 +
 +      for (i = 0; i < RX_BUFFERS; i++) {
 +              priv->rxbuffer[i] = first_free_addr;
 +              am_writeword (dev, hdr_addr, first_free_addr);
 +              am_writeword (dev, hdr_addr + 2, RMD_OWN);
 +              am_writeword (dev, hdr_addr + 4, (-1600));
 +              am_writeword (dev, hdr_addr + 6, 0);
 +              first_free_addr += 1600;
 +              hdr_addr += 8;
 +      }
 +      priv->txhead = 0;
 +      priv->txtail = 0;
 +      priv->txhdr = hdr_addr;
 +      for (i = 0; i < TX_BUFFERS; i++) {
 +              priv->txbuffer[i] = first_free_addr;
 +              am_writeword (dev, hdr_addr, first_free_addr);
 +              am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
 +              am_writeword (dev, hdr_addr + 4, 0xf000);
 +              am_writeword (dev, hdr_addr + 6, 0);
 +              first_free_addr += 1600;
 +              hdr_addr += 8;
 +      }
 +
 +      write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
 +      write_rreg (dev->base_addr, BASERXH, 0);
 +      write_rreg (dev->base_addr, BASETXL, priv->txhdr);
 +      write_rreg (dev->base_addr, BASETXH, 0);
 +      write_rreg (dev->base_addr, CSR0, CSR0_STOP);
 +      write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
 +      write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
 +      write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
 +}
 +
 +static void am79c961_timer(unsigned long data)
 +{
 +      struct net_device *dev = (struct net_device *)data;
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned int lnkstat, carrier;
++      unsigned long flags;
 +
++      spin_lock_irqsave(&priv->chip_lock, flags);
 +      lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
++      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +      carrier = netif_carrier_ok(dev);
 +
 +      if (lnkstat && !carrier) {
 +              netif_carrier_on(dev);
 +              printk("%s: link up\n", dev->name);
 +      } else if (!lnkstat && carrier) {
 +              netif_carrier_off(dev);
 +              printk("%s: link down\n", dev->name);
 +      }
 +
 +      mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
 +}
 +
 +/*
 + * Open/initialize the board.
 + */
 +static int
 +am79c961_open(struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      int ret;
 +
 +      ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
 +      if (ret)
 +              return ret;
 +
 +      am79c961_init_for_open(dev);
 +
 +      netif_carrier_off(dev);
 +
 +      priv->timer.expires = jiffies;
 +      add_timer(&priv->timer);
 +
 +      netif_start_queue(dev);
 +
 +      return 0;
 +}
 +
 +/*
 + * The inverse routine to am79c961_open().
 + */
 +static int
 +am79c961_close(struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned long flags;
 +
 +      del_timer_sync(&priv->timer);
 +
 +      netif_stop_queue(dev);
 +      netif_carrier_off(dev);
 +
 +      spin_lock_irqsave(&priv->chip_lock, flags);
 +      write_rreg (dev->base_addr, CSR0, CSR0_STOP);
 +      write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
 +      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +
 +      free_irq (dev->irq, dev);
 +
 +      return 0;
 +}
 +
 +/*
 + * Set or clear promiscuous/multicast mode filter for this adapter.
 + */
 +static void am79c961_setmulticastlist (struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned long flags;
 +      u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
 +      int i, stopped;
 +
 +      spin_lock_irqsave(&priv->chip_lock, flags);
 +
 +      stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
 +
 +      if (!stopped) {
 +              /*
 +               * Put the chip into suspend mode
 +               */
 +              write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
 +
 +              /*
 +               * Spin waiting for chip to report suspend mode
 +               */
 +              while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
 +                      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +                      nop();
 +                      spin_lock_irqsave(&priv->chip_lock, flags);
 +              }
 +      }
 +
 +      /*
 +       * Update the multicast hash table
 +       */
 +      for (i = 0; i < ARRAY_SIZE(multi_hash); i++)
 +              write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
 +
 +      /*
 +       * Write the mode register
 +       */
 +      write_rreg(dev->base_addr, MODE, mode);
 +
 +      if (!stopped) {
 +              /*
 +               * Put the chip back into running mode
 +               */
 +              write_rreg(dev->base_addr, CTRL1, 0);
 +      }
 +
 +      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +}
 +
 +static void am79c961_timeout(struct net_device *dev)
 +{
 +      printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
 +              dev->name);
 +
 +      /*
 +       * ought to do some setup of the tx side here
 +       */
 +
 +      netif_wake_queue(dev);
 +}
 +
 +/*
 + * Transmit a packet
 + */
 +static int
 +am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned int hdraddr, bufaddr;
 +      unsigned int head;
 +      unsigned long flags;
 +
 +      head = priv->txhead;
 +      hdraddr = priv->txhdr + (head << 3);
 +      bufaddr = priv->txbuffer[head];
 +      head += 1;
 +      if (head >= TX_BUFFERS)
 +              head = 0;
 +
 +      am_writebuffer (dev, bufaddr, skb->data, skb->len);
 +      am_writeword (dev, hdraddr + 4, -skb->len);
 +      am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
 +      priv->txhead = head;
 +
 +      spin_lock_irqsave(&priv->chip_lock, flags);
 +      write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
 +      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +
 +      /*
 +       * If the next packet is owned by the ethernet device,
 +       * then the tx ring is full and we can't add another
 +       * packet.
 +       */
 +      if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
 +              netif_stop_queue(dev);
 +
 +      dev_kfree_skb(skb);
 +
 +      return NETDEV_TX_OK;
 +}
 +
 +/*
 + * If we have a good packet(s), get it/them out of the buffers.
 + */
 +static void
 +am79c961_rx(struct net_device *dev, struct dev_priv *priv)
 +{
 +      do {
 +              struct sk_buff *skb;
 +              u_int hdraddr;
 +              u_int pktaddr;
 +              u_int status;
 +              int len;
 +
 +              hdraddr = priv->rxhdr + (priv->rxtail << 3);
 +              pktaddr = priv->rxbuffer[priv->rxtail];
 +
 +              status = am_readword (dev, hdraddr + 2);
 +              if (status & RMD_OWN) /* do we own it? */
 +                      break;
 +
 +              priv->rxtail ++;
 +              if (priv->rxtail >= RX_BUFFERS)
 +                      priv->rxtail = 0;
 +
 +              if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
 +                      am_writeword (dev, hdraddr + 2, RMD_OWN);
 +                      dev->stats.rx_errors++;
 +                      if (status & RMD_ERR) {
 +                              if (status & RMD_FRAM)
 +                                      dev->stats.rx_frame_errors++;
 +                              if (status & RMD_CRC)
 +                                      dev->stats.rx_crc_errors++;
 +                      } else if (status & RMD_STP)
 +                              dev->stats.rx_length_errors++;
 +                      continue;
 +              }
 +
 +              len = am_readword(dev, hdraddr + 6);
 +              skb = dev_alloc_skb(len + 2);
 +
 +              if (skb) {
 +                      skb_reserve(skb, 2);
 +
 +                      am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
 +                      am_writeword(dev, hdraddr + 2, RMD_OWN);
 +                      skb->protocol = eth_type_trans(skb, dev);
 +                      netif_rx(skb);
 +                      dev->stats.rx_bytes += len;
 +                      dev->stats.rx_packets++;
 +              } else {
 +                      am_writeword (dev, hdraddr + 2, RMD_OWN);
 +                      printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
 +                      dev->stats.rx_dropped++;
 +                      break;
 +              }
 +      } while (1);
 +}
 +
 +/*
 + * Update stats for the transmitted packet
 + */
 +static void
 +am79c961_tx(struct net_device *dev, struct dev_priv *priv)
 +{
 +      do {
 +              short len;
 +              u_int hdraddr;
 +              u_int status;
 +
 +              hdraddr = priv->txhdr + (priv->txtail << 3);
 +              status = am_readword (dev, hdraddr + 2);
 +              if (status & TMD_OWN)
 +                      break;
 +
 +              priv->txtail ++;
 +              if (priv->txtail >= TX_BUFFERS)
 +                      priv->txtail = 0;
 +
 +              if (status & TMD_ERR) {
 +                      u_int status2;
 +
 +                      dev->stats.tx_errors++;
 +
 +                      status2 = am_readword (dev, hdraddr + 6);
 +
 +                      /*
 +                       * Clear the error byte
 +                       */
 +                      am_writeword (dev, hdraddr + 6, 0);
 +
 +                      if (status2 & TST_RTRY)
 +                              dev->stats.collisions += 16;
 +                      if (status2 & TST_LCOL)
 +                              dev->stats.tx_window_errors++;
 +                      if (status2 & TST_LCAR)
 +                              dev->stats.tx_carrier_errors++;
 +                      if (status2 & TST_UFLO)
 +                              dev->stats.tx_fifo_errors++;
 +                      continue;
 +              }
 +              dev->stats.tx_packets++;
 +              len = am_readword (dev, hdraddr + 4);
 +              dev->stats.tx_bytes += -len;
 +      } while (priv->txtail != priv->txhead);
 +
 +      netif_wake_queue(dev);
 +}
 +
 +static irqreturn_t
 +am79c961_interrupt(int irq, void *dev_id)
 +{
 +      struct net_device *dev = (struct net_device *)dev_id;
 +      struct dev_priv *priv = netdev_priv(dev);
 +      u_int status, n = 100;
 +      int handled = 0;
 +
 +      do {
 +              status = read_rreg(dev->base_addr, CSR0);
 +              write_rreg(dev->base_addr, CSR0, status &
 +                         (CSR0_IENA|CSR0_TINT|CSR0_RINT|
 +                          CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
 +
 +              if (status & CSR0_RINT) {
 +                      handled = 1;
 +                      am79c961_rx(dev, priv);
 +              }
 +              if (status & CSR0_TINT) {
 +                      handled = 1;
 +                      am79c961_tx(dev, priv);
 +              }
 +              if (status & CSR0_MISS) {
 +                      handled = 1;
 +                      dev->stats.rx_dropped++;
 +              }
 +              if (status & CSR0_CERR) {
 +                      handled = 1;
 +                      mod_timer(&priv->timer, jiffies);
 +              }
 +      } while (--n && status & (CSR0_RINT | CSR0_TINT));
 +
 +      return IRQ_RETVAL(handled);
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void am79c961_poll_controller(struct net_device *dev)
 +{
 +      unsigned long flags;
 +      local_irq_save(flags);
 +      am79c961_interrupt(dev->irq, dev);
 +      local_irq_restore(flags);
 +}
 +#endif
 +
 +/*
 + * Initialise the chip.  Note that we always expect
 + * to be entered with interrupts enabled.
 + */
 +static int
 +am79c961_hw_init(struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +
 +      spin_lock_irq(&priv->chip_lock);
 +      write_rreg (dev->base_addr, CSR0, CSR0_STOP);
 +      write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
 +      spin_unlock_irq(&priv->chip_lock);
 +
 +      am79c961_ramtest(dev, 0x66);
 +      am79c961_ramtest(dev, 0x99);
 +
 +      return 0;
 +}
 +
 +static void __init am79c961_banner(void)
 +{
 +      static unsigned version_printed;
 +
 +      if (net_debug && version_printed++ == 0)
 +              printk(KERN_INFO "%s", version);
 +}
 +static const struct net_device_ops am79c961_netdev_ops = {
 +      .ndo_open               = am79c961_open,
 +      .ndo_stop               = am79c961_close,
 +      .ndo_start_xmit         = am79c961_sendpacket,
 +      .ndo_set_rx_mode        = am79c961_setmulticastlist,
 +      .ndo_tx_timeout         = am79c961_timeout,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_change_mtu         = eth_change_mtu,
 +      .ndo_set_mac_address    = eth_mac_addr,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = am79c961_poll_controller,
 +#endif
 +};
 +
 +static int __devinit am79c961_probe(struct platform_device *pdev)
 +{
 +      struct resource *res;
 +      struct net_device *dev;
 +      struct dev_priv *priv;
 +      int i, ret;
 +
 +      res = platform_get_resource(pdev, IORESOURCE_IO, 0);
 +      if (!res)
 +              return -ENODEV;
 +
 +      dev = alloc_etherdev(sizeof(struct dev_priv));
 +      ret = -ENOMEM;
 +      if (!dev)
 +              goto out;
 +
 +      SET_NETDEV_DEV(dev, &pdev->dev);
 +
 +      priv = netdev_priv(dev);
 +
 +      /*
 +       * Fixed address and IRQ lines here.
 +       * The PNP initialisation should have been
 +       * done by the ether bootp loader.
 +       */
 +      dev->base_addr = res->start;
 +      ret = platform_get_irq(pdev, 0);
 +
 +      if (ret < 0) {
 +              ret = -ENODEV;
 +              goto nodev;
 +      }
 +      dev->irq = ret;
 +
 +      ret = -ENODEV;
 +      if (!request_region(dev->base_addr, 0x18, dev->name))
 +              goto nodev;
 +
 +      /*
 +       * Reset the device.
 +       */
 +      inb(dev->base_addr + NET_RESET);
 +      udelay(5);
 +
 +      /*
 +       * Check the manufacturer part of the
 +       * ether address.
 +       */
 +      if (inb(dev->base_addr) != 0x08 ||
 +          inb(dev->base_addr + 2) != 0x00 ||
 +          inb(dev->base_addr + 4) != 0x2b)
 +              goto release;
 +
 +      for (i = 0; i < 6; i++)
 +              dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
 +
 +      am79c961_banner();
 +
 +      spin_lock_init(&priv->chip_lock);
 +      init_timer(&priv->timer);
 +      priv->timer.data = (unsigned long)dev;
 +      priv->timer.function = am79c961_timer;
 +
 +      if (am79c961_hw_init(dev))
 +              goto release;
 +
 +      dev->netdev_ops = &am79c961_netdev_ops;
 +
 +      ret = register_netdev(dev);
 +      if (ret == 0) {
 +              printk(KERN_INFO "%s: ether address %pM\n",
 +                     dev->name, dev->dev_addr);
 +              return 0;
 +      }
 +
 +release:
 +      release_region(dev->base_addr, 0x18);
 +nodev:
 +      free_netdev(dev);
 +out:
 +      return ret;
 +}
 +
 +static struct platform_driver am79c961_driver = {
 +      .probe          = am79c961_probe,
 +      .driver         = {
 +              .name   = "am79c961",
 +      },
 +};
 +
 +static int __init am79c961_init(void)
 +{
 +      return platform_driver_register(&am79c961_driver);
 +}
 +
 +__initcall(am79c961_init);
index f127768,0000000..2f92487
mode 100644,000000..100644
--- /dev/null
@@@ -1,2009 -1,0 +1,2069 @@@
- #define MAX_RX_SGE_CNT                (RX_SGE_CNT - 2)
 +/* bnx2x.h: Broadcom Everest network driver.
 + *
 + * Copyright (c) 2007-2011 Broadcom Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation.
 + *
 + * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 + * Written by: Eliezer Tamir
 + * Based on code from Michael Chan's bnx2 driver
 + */
 +
 +#ifndef BNX2X_H
 +#define BNX2X_H
 +#include <linux/netdevice.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/types.h>
 +
 +/* compilation time flags */
 +
 +/* define this to make the driver freeze on error to allow getting debug info
 + * (you will need to reboot afterwards) */
 +/* #define BNX2X_STOP_ON_ERROR */
 +
 +#define DRV_MODULE_VERSION      "1.70.00-0"
 +#define DRV_MODULE_RELDATE      "2011/06/13"
 +#define BNX2X_BC_VER            0x040200
 +
 +#if defined(CONFIG_DCB)
 +#define BCM_DCBNL
 +#endif
 +#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 +#define BCM_CNIC 1
 +#include "../cnic_if.h"
 +#endif
 +
 +#ifdef BCM_CNIC
 +#define BNX2X_MIN_MSIX_VEC_CNT 3
 +#define BNX2X_MSIX_VEC_FP_START 2
 +#else
 +#define BNX2X_MIN_MSIX_VEC_CNT 2
 +#define BNX2X_MSIX_VEC_FP_START 1
 +#endif
 +
 +#include <linux/mdio.h>
 +
 +#include "bnx2x_reg.h"
 +#include "bnx2x_fw_defs.h"
 +#include "bnx2x_hsi.h"
 +#include "bnx2x_link.h"
 +#include "bnx2x_sp.h"
 +#include "bnx2x_dcb.h"
 +#include "bnx2x_stats.h"
 +
 +/* error/debug prints */
 +
 +#define DRV_MODULE_NAME               "bnx2x"
 +
 +/* for messages that are currently off */
 +#define BNX2X_MSG_OFF                 0
 +#define BNX2X_MSG_MCP                 0x010000 /* was: NETIF_MSG_HW */
 +#define BNX2X_MSG_STATS                       0x020000 /* was: NETIF_MSG_TIMER */
 +#define BNX2X_MSG_NVM                 0x040000 /* was: NETIF_MSG_HW */
 +#define BNX2X_MSG_DMAE                        0x080000 /* was: NETIF_MSG_HW */
 +#define BNX2X_MSG_SP                  0x100000 /* was: NETIF_MSG_INTR */
 +#define BNX2X_MSG_FP                  0x200000 /* was: NETIF_MSG_INTR */
 +
 +/* regular debug print */
 +#define DP(__mask, fmt, ...)                                  \
 +do {                                                          \
 +      if (bp->msg_enable & (__mask))                          \
 +              pr_notice("[%s:%d(%s)]" fmt,                    \
 +                        __func__, __LINE__,                   \
 +                        bp->dev ? (bp->dev->name) : "?",      \
 +                        ##__VA_ARGS__);                       \
 +} while (0)
 +
 +#define DP_CONT(__mask, fmt, ...)                             \
 +do {                                                          \
 +      if (bp->msg_enable & (__mask))                          \
 +              pr_cont(fmt, ##__VA_ARGS__);                    \
 +} while (0)
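 +/* Example usage (illustrative): DP(BNX2X_MSG_SP, "ramrod completed\n"); */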
 +
 +/* errors debug print */
 +#define BNX2X_DBG_ERR(fmt, ...)                                       \
 +do {                                                          \
 +      if (netif_msg_probe(bp))                                \
 +              pr_err("[%s:%d(%s)]" fmt,                       \
 +                     __func__, __LINE__,                      \
 +                     bp->dev ? (bp->dev->name) : "?",         \
 +                     ##__VA_ARGS__);                          \
 +} while (0)
 +
 +/* for errors (never masked) */
 +#define BNX2X_ERR(fmt, ...)                                   \
 +do {                                                          \
 +      pr_err("[%s:%d(%s)]" fmt,                               \
 +             __func__, __LINE__,                              \
 +             bp->dev ? (bp->dev->name) : "?",                 \
 +             ##__VA_ARGS__);                                  \
 +} while (0)
 +
 +#define BNX2X_ERROR(fmt, ...)                                 \
 +      pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
 +
 +
 +/* before we have a dev->name use dev_info() */
 +#define BNX2X_DEV_INFO(fmt, ...)                               \
 +do {                                                           \
 +      if (netif_msg_probe(bp))                                 \
 +              dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__);    \
 +} while (0)
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +void bnx2x_int_disable(struct bnx2x *bp);
 +#define bnx2x_panic()                         \
 +do {                                          \
 +      bp->panic = 1;                          \
 +      BNX2X_ERR("driver assert\n");           \
 +      bnx2x_int_disable(bp);                  \
 +      bnx2x_panic_dump(bp);                   \
 +} while (0)
 +#else
 +#define bnx2x_panic()                         \
 +do {                                          \
 +      bp->panic = 1;                          \
 +      BNX2X_ERR("driver assert\n");           \
 +      bnx2x_panic_dump(bp);                   \
 +} while (0)
 +#endif
 +
 +#define bnx2x_mc_addr(ha)      ((ha)->addr)
 +#define bnx2x_uc_addr(ha)      ((ha)->addr)
 +
 +#define U64_LO(x)                     (u32)(((u64)(x)) & 0xffffffff)
 +#define U64_HI(x)                     (u32)(((u64)(x)) >> 32)
 +#define HILO_U64(hi, lo)              ((((u64)(hi)) << 32) + (lo))
 +
 +
 +#define REG_ADDR(bp, offset)          ((bp->regview) + (offset))
 +
 +#define REG_RD(bp, offset)            readl(REG_ADDR(bp, offset))
 +#define REG_RD8(bp, offset)           readb(REG_ADDR(bp, offset))
 +#define REG_RD16(bp, offset)          readw(REG_ADDR(bp, offset))
 +
 +#define REG_WR(bp, offset, val)               writel((u32)val, REG_ADDR(bp, offset))
 +#define REG_WR8(bp, offset, val)      writeb((u8)val, REG_ADDR(bp, offset))
 +#define REG_WR16(bp, offset, val)     writew((u16)val, REG_ADDR(bp, offset))
 +
 +#define REG_RD_IND(bp, offset)                bnx2x_reg_rd_ind(bp, offset)
 +#define REG_WR_IND(bp, offset, val)   bnx2x_reg_wr_ind(bp, offset, val)
 +
 +#define REG_RD_DMAE(bp, offset, valp, len32) \
 +      do { \
 +              bnx2x_read_dmae(bp, offset, len32);\
 +              memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
 +      } while (0)
 +
 +#define REG_WR_DMAE(bp, offset, valp, len32) \
 +      do { \
 +              memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
 +              bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
 +                               offset, len32); \
 +      } while (0)
 +
 +#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
 +      REG_WR_DMAE(bp, offset, valp, len32)
 +
 +#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
 +      do { \
 +              memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
 +              bnx2x_write_big_buf_wb(bp, addr, len32); \
 +      } while (0)
 +
 +#define SHMEM_ADDR(bp, field)         (bp->common.shmem_base + \
 +                                       offsetof(struct shmem_region, field))
 +#define SHMEM_RD(bp, field)           REG_RD(bp, SHMEM_ADDR(bp, field))
 +#define SHMEM_WR(bp, field, val)      REG_WR(bp, SHMEM_ADDR(bp, field), val)
 +
 +#define SHMEM2_ADDR(bp, field)                (bp->common.shmem2_base + \
 +                                       offsetof(struct shmem2_region, field))
 +#define SHMEM2_RD(bp, field)          REG_RD(bp, SHMEM2_ADDR(bp, field))
 +#define SHMEM2_WR(bp, field, val)     REG_WR(bp, SHMEM2_ADDR(bp, field), val)
 +#define MF_CFG_ADDR(bp, field)                (bp->common.mf_cfg_base + \
 +                                       offsetof(struct mf_cfg, field))
 +#define MF2_CFG_ADDR(bp, field)               (bp->common.mf2_cfg_base + \
 +                                       offsetof(struct mf2_cfg, field))
 +
 +#define MF_CFG_RD(bp, field)          REG_RD(bp, MF_CFG_ADDR(bp, field))
 +#define MF_CFG_WR(bp, field, val)     REG_WR(bp,\
 +                                             MF_CFG_ADDR(bp, field), (val))
 +#define MF2_CFG_RD(bp, field)         REG_RD(bp, MF2_CFG_ADDR(bp, field))
 +
 +#define SHMEM2_HAS(bp, field)         ((bp)->common.shmem2_base &&    \
 +                                       (SHMEM2_RD((bp), size) >       \
 +                                       offsetof(struct shmem2_region, field)))
 +
 +#define EMAC_RD(bp, reg)              REG_RD(bp, emac_base + reg)
 +#define EMAC_WR(bp, reg, val)         REG_WR(bp, emac_base + reg, val)
 +
 +/* SP SB indices */
 +
 +/* General SP events - stats query, cfc delete, etc  */
 +#define HC_SP_INDEX_ETH_DEF_CONS              3
 +
 +/* EQ completions */
 +#define HC_SP_INDEX_EQ_CONS                   7
 +
 +/* FCoE L2 connection completions */
 +#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS               6
 +#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS               4
 +/* iSCSI L2 */
 +#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS         5
 +#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS      1
 +
 +/* Special clients parameters */
 +
 +/* SB indices */
 +/* FCoE L2 */
 +#define BNX2X_FCOE_L2_RX_INDEX \
 +      (&bp->def_status_blk->sp_sb.\
 +      index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
 +
 +#define BNX2X_FCOE_L2_TX_INDEX \
 +      (&bp->def_status_blk->sp_sb.\
 +      index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
 +
 +/**
 + *  CIDs and CLIDs:
 + *  The CLIDs below are the CLIDs for func 0; the CLIDs for other
 + *  functions are calculated by the formula:
 + *
 + *  FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
 + *
 + */
 +/* iSCSI L2 */
 +#define BNX2X_ISCSI_ETH_CL_ID_IDX     1
 +#define BNX2X_ISCSI_ETH_CID           49
 +
 +/* FCoE L2 */
 +#define BNX2X_FCOE_ETH_CL_ID_IDX      2
 +#define BNX2X_FCOE_ETH_CID            50
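 +
 +/*
 + * Worked example (illustrative): per the CLID formula above, function 0
 + * uses BNX2X_ISCSI_ETH_CL_ID_IDX (1) as its iSCSI client id index, while
 + * function 1 would use NUM_SPECIAL_CLIENTS + 1, and so on; the FCoE index
 + * follows the same pattern starting from BNX2X_FCOE_ETH_CL_ID_IDX (2).
 + */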
 +
 +/** Additional rings budgeting */
 +#ifdef BCM_CNIC
 +#define CNIC_PRESENT                  1
 +#define FCOE_PRESENT                  1
 +#else
 +#define CNIC_PRESENT                  0
 +#define FCOE_PRESENT                  0
 +#endif /* BCM_CNIC */
 +#define NON_ETH_CONTEXT_USE   (FCOE_PRESENT)
 +
 +#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
 +      AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
 +
 +#define SM_RX_ID                      0
 +#define SM_TX_ID                      1
 +
 +/* defines for multiple tx priority indices */
 +#define FIRST_TX_ONLY_COS_INDEX               1
 +#define FIRST_TX_COS_INDEX            0
 +
 +/* defines for decoding the fastpath index and the cos index out of the
 + * transmission queue index
 + */
 +#define MAX_TXQS_PER_COS      FP_SB_MAX_E1x
 +
 +#define TXQ_TO_FP(txq_index)  ((txq_index) % MAX_TXQS_PER_COS)
 +#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
 +
 +/* rules for calculating the cids of tx-only connections */
 +#define CID_TO_FP(cid)                ((cid) % MAX_TXQS_PER_COS)
 +#define CID_COS_TO_TX_ONLY_CID(cid, cos)      (cid + cos * MAX_TXQS_PER_COS)
 +
 +/* fp index inside class of service range */
 +#define FP_COS_TO_TXQ(fp, cos)    ((fp)->index + cos * MAX_TXQS_PER_COS)
 +
 +/*
 + * 0..15 eth cos0
 + * 16..31 eth cos1 if applicable
 + * 32..47 eth cos2 if applicable
 + * fcoe queue follows eth queues (16, 32, 48 depending on cos)
 + */
 +#define MAX_ETH_TXQ_IDX(bp)   (MAX_TXQS_PER_COS * (bp)->max_cos)
 +#define FCOE_TXQ_IDX(bp)      (MAX_ETH_TXQ_IDX(bp))
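 +
 +/*
 + * Worked example (illustrative): with 16 queues per class of service, as in
 + * the 0..15 / 16..31 / 32..47 layout above, txq_index 20 decodes to
 + * TXQ_TO_FP(20) == 4 and TXQ_TO_COS(20) == 1, i.e. fastpath 4 in cos 1.
 + */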
 +
 +/* fast path */
 +struct sw_rx_bd {
 +      struct sk_buff  *skb;
 +      DEFINE_DMA_UNMAP_ADDR(mapping);
 +};
 +
 +struct sw_tx_bd {
 +      struct sk_buff  *skb;
 +      u16             first_bd;
 +      u8              flags;
 +/* Set on the first BD descriptor when there is a split BD */
 +#define BNX2X_TSO_SPLIT_BD            (1<<0)
 +};
 +
 +struct sw_rx_page {
 +      struct page     *page;
 +      DEFINE_DMA_UNMAP_ADDR(mapping);
 +};
 +
 +union db_prod {
 +      struct doorbell_set_prod data;
 +      u32             raw;
 +};
 +
++/* dropless fc FW/HW related params */
++#define BRB_SIZE(bp)          (CHIP_IS_E3(bp) ? 1024 : 512)
++#define MAX_AGG_QS(bp)                (CHIP_IS_E1(bp) ? \
++                                      ETH_MAX_AGGREGATION_QUEUES_E1 :\
++                                      ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
++#define FW_DROP_LEVEL(bp)     (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
++#define FW_PREFETCH_CNT               16
++#define DROPLESS_FC_HEADROOM  100
 +
 +/* MC hsi */
 +#define BCM_PAGE_SHIFT                12
 +#define BCM_PAGE_SIZE         (1 << BCM_PAGE_SHIFT)
 +#define BCM_PAGE_MASK         (~(BCM_PAGE_SIZE - 1))
 +#define BCM_PAGE_ALIGN(addr)  (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
 +
 +#define PAGES_PER_SGE_SHIFT   0
 +#define PAGES_PER_SGE         (1 << PAGES_PER_SGE_SHIFT)
 +#define SGE_PAGE_SIZE         PAGE_SIZE
 +#define SGE_PAGE_SHIFT                PAGE_SHIFT
 +#define SGE_PAGE_ALIGN(addr)  PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
 +
 +/* SGE ring related macros */
 +#define NUM_RX_SGE_PAGES      2
 +#define RX_SGE_CNT            (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-                                 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
++#define NEXT_PAGE_SGE_DESC_CNT        2
++#define MAX_RX_SGE_CNT                (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
 +/* RX_SGE_CNT is promised to be a power of 2 */
 +#define RX_SGE_MASK           (RX_SGE_CNT - 1)
 +#define NUM_RX_SGE            (RX_SGE_CNT * NUM_RX_SGE_PAGES)
 +#define MAX_RX_SGE            (NUM_RX_SGE - 1)
 +#define NEXT_SGE_IDX(x)               ((((x) & RX_SGE_MASK) == \
- #define MAX_TX_DESC_CNT               (TX_DESC_CNT - 1)
++                                (MAX_RX_SGE_CNT - 1)) ? \
++                                      (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
++                                      (x) + 1)
 +#define RX_SGE(x)             ((x) & MAX_RX_SGE)
 +
++/*
++ * The number of required SGEs is the sum of two terms:
++ * 1. The number of aggregations that may be open (the next packet for
++ *    each of these aggregations will probably consume an SGE immediately)
++ * 2. The rest of the BRB blocks divided by 2 (a block consumes a new SGE
++ *    only after placement on a BD for a new TPA aggregation)
++ *
++ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
++ */
++#define NUM_SGE_REQ           (MAX_AGG_QS(bp) + \
++                                      (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
++#define NUM_SGE_PG_REQ                ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
++                                              MAX_RX_SGE_CNT)
++#define SGE_TH_LO(bp)         (NUM_SGE_REQ + \
++                               NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
++#define SGE_TH_HI(bp)         (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
++
 +/* Manipulate a bit vector defined as an array of u64 */
 +
 +/* Number of bits in one sge_mask array element */
 +#define BIT_VEC64_ELEM_SZ             64
 +#define BIT_VEC64_ELEM_SHIFT          6
 +#define BIT_VEC64_ELEM_MASK           ((u64)BIT_VEC64_ELEM_SZ - 1)
 +
 +
 +#define __BIT_VEC64_SET_BIT(el, bit) \
 +      do { \
 +              el = ((el) | ((u64)0x1 << (bit))); \
 +      } while (0)
 +
 +#define __BIT_VEC64_CLEAR_BIT(el, bit) \
 +      do { \
 +              el = ((el) & (~((u64)0x1 << (bit)))); \
 +      } while (0)
 +
 +
 +#define BIT_VEC64_SET_BIT(vec64, idx) \
 +      __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
 +                         (idx) & BIT_VEC64_ELEM_MASK)
 +
 +#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
 +      __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
 +                           (idx) & BIT_VEC64_ELEM_MASK)
 +
 +#define BIT_VEC64_TEST_BIT(vec64, idx) \
 +      (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
 +      ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
 +
 +/* Creates a bitmask of all ones in the less significant bits.
 +   idx - index of the most significant bit in the created mask */
 +#define BIT_VEC64_ONES_MASK(idx) \
 +              (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
 +#define BIT_VEC64_ELEM_ONE_MASK       ((u64)(~0))
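 +
 +/*
 + * Worked examples (illustrative): BIT_VEC64_ONES_MASK(5) evaluates to
 + * (1ULL << 6) - 1 == 0x3f (bits 0..5 set), and BIT_VEC64_SET_BIT(vec, 70)
 + * sets bit 6 of vec[1], since 70 >> 6 == 1 and 70 & 63 == 6.
 + */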
 +
 +/*******************************************************/
 +
 +
 +
 +/* Number of u64 elements in SGE mask array */
 +#define RX_SGE_MASK_LEN                       ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
 +                                       BIT_VEC64_ELEM_SZ)
 +#define RX_SGE_MASK_LEN_MASK          (RX_SGE_MASK_LEN - 1)
 +#define NEXT_SGE_MASK_ELEM(el)                (((el) + 1) & RX_SGE_MASK_LEN_MASK)
 +
 +union host_hc_status_block {
 +      /* pointer to fp status block e1x */
 +      struct host_hc_status_block_e1x *e1x_sb;
 +      /* pointer to fp status block e2 */
 +      struct host_hc_status_block_e2  *e2_sb;
 +};
 +
 +struct bnx2x_agg_info {
 +      /*
 +       * The first aggregation buffer is an skb; the following ones are pages.
 +       * We will preallocate the skbs for each aggregation when
 +       * we open the interface and will replace the BD at the consumer
 +       * with this one when we receive the TPA_START CQE in order to
 +       * keep the Rx BD ring consistent.
 +       */
 +      struct sw_rx_bd         first_buf;
 +      u8                      tpa_state;
 +#define BNX2X_TPA_START                       1
 +#define BNX2X_TPA_STOP                        2
 +#define BNX2X_TPA_ERROR                       3
 +      u8                      placement_offset;
 +      u16                     parsing_flags;
 +      u16                     vlan_tag;
 +      u16                     len_on_bd;
 +};
 +
 +#define Q_STATS_OFFSET32(stat_name) \
 +                      (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
 +
 +struct bnx2x_fp_txdata {
 +
 +      struct sw_tx_bd         *tx_buf_ring;
 +
 +      union eth_tx_bd_types   *tx_desc_ring;
 +      dma_addr_t              tx_desc_mapping;
 +
 +      u32                     cid;
 +
 +      union db_prod           tx_db;
 +
 +      u16                     tx_pkt_prod;
 +      u16                     tx_pkt_cons;
 +      u16                     tx_bd_prod;
 +      u16                     tx_bd_cons;
 +
 +      unsigned long           tx_pkt;
 +
 +      __le16                  *tx_cons_sb;
 +
 +      int                     txq_index;
 +};
 +
 +struct bnx2x_fastpath {
 +      struct bnx2x            *bp; /* parent */
 +
 +#define BNX2X_NAPI_WEIGHT       128
 +      struct napi_struct      napi;
 +      union host_hc_status_block      status_blk;
 +      /* chip independent shortcuts into sb structure */
 +      __le16                  *sb_index_values;
 +      __le16                  *sb_running_index;
 +      /* chip independent shortcut into rx_prods_offset memory */
 +      u32                     ustorm_rx_prods_offset;
 +
 +      u32                     rx_buf_size;
 +
 +      dma_addr_t              status_blk_mapping;
 +
 +      u8                      max_cos; /* actual number of active tx coses */
 +      struct bnx2x_fp_txdata  txdata[BNX2X_MULTI_TX_COS];
 +
 +      struct sw_rx_bd         *rx_buf_ring;   /* BDs mappings ring */
 +      struct sw_rx_page       *rx_page_ring;  /* SGE pages mappings ring */
 +
 +      struct eth_rx_bd        *rx_desc_ring;
 +      dma_addr_t              rx_desc_mapping;
 +
 +      union eth_rx_cqe        *rx_comp_ring;
 +      dma_addr_t              rx_comp_mapping;
 +
 +      /* SGE ring */
 +      struct eth_rx_sge       *rx_sge_ring;
 +      dma_addr_t              rx_sge_mapping;
 +
 +      u64                     sge_mask[RX_SGE_MASK_LEN];
 +
 +      u32                     cid;
 +
 +      __le16                  fp_hc_idx;
 +
 +      u8                      index;          /* number in fp array */
 +      u8                      cl_id;          /* eth client id */
 +      u8                      cl_qzone_id;
 +      u8                      fw_sb_id;       /* status block number in FW */
 +      u8                      igu_sb_id;      /* status block number in HW */
 +
 +      u16                     rx_bd_prod;
 +      u16                     rx_bd_cons;
 +      u16                     rx_comp_prod;
 +      u16                     rx_comp_cons;
 +      u16                     rx_sge_prod;
 +      /* The last maximal completed SGE */
 +      u16                     last_max_sge;
 +      __le16                  *rx_cons_sb;
 +      unsigned long           rx_pkt,
 +                              rx_calls;
 +
 +      /* TPA related */
 +      struct bnx2x_agg_info   tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
 +      u8                      disable_tpa;
 +#ifdef BNX2X_STOP_ON_ERROR
 +      u64                     tpa_queue_used;
 +#endif
 +
 +      struct tstorm_per_queue_stats old_tclient;
 +      struct ustorm_per_queue_stats old_uclient;
 +      struct xstorm_per_queue_stats old_xclient;
 +      struct bnx2x_eth_q_stats eth_q_stats;
 +
 +      /* The size is calculated using the following:
 +           sizeof name field from netdev structure +
 +           4 ('-Xx-' string) +
 +           4 (for the digits and to make it DWORD aligned) */
 +#define FP_NAME_SIZE          (sizeof(((struct net_device *)0)->name) + 8)
 +      char                    name[FP_NAME_SIZE];
 +
 +      /* MACs object */
 +      struct bnx2x_vlan_mac_obj mac_obj;
 +
 +      /* Queue State object */
 +      struct bnx2x_queue_sp_obj q_obj;
 +
 +};
 +
 +#define bnx2x_fp(bp, nr, var)         (bp->fp[nr].var)
 +
 +/* Use 2500 as a mini-jumbo MTU for FCoE */
 +#define BNX2X_FCOE_MINI_JUMBO_MTU     2500
 +
 +/* FCoE L2 `fastpath' entry is right after the eth entries */
 +#define FCOE_IDX                      BNX2X_NUM_ETH_QUEUES(bp)
 +#define bnx2x_fcoe_fp(bp)             (&bp->fp[FCOE_IDX])
 +#define bnx2x_fcoe(bp, var)           (bnx2x_fcoe_fp(bp)->var)
 +#define bnx2x_fcoe_tx(bp, var)                (bnx2x_fcoe_fp(bp)-> \
 +                                              txdata[FIRST_TX_COS_INDEX].var)
 +
 +
 +#define IS_ETH_FP(fp)                 (fp->index < \
 +                                       BNX2X_NUM_ETH_QUEUES(fp->bp))
 +#ifdef BCM_CNIC
 +#define IS_FCOE_FP(fp)                        (fp->index == FCOE_IDX)
 +#define IS_FCOE_IDX(idx)              ((idx) == FCOE_IDX)
 +#else
 +#define IS_FCOE_FP(fp)                false
 +#define IS_FCOE_IDX(idx)      false
 +#endif
 +
 +
 +/* MC hsi */
 +#define MAX_FETCH_BD          13      /* HW max BDs per packet */
 +#define RX_COPY_THRESH                92
 +
 +#define NUM_TX_RINGS          16
 +#define TX_DESC_CNT           (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-                                 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
++#define NEXT_PAGE_TX_DESC_CNT 1
++#define MAX_TX_DESC_CNT               (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
 +#define NUM_TX_BD             (TX_DESC_CNT * NUM_TX_RINGS)
 +#define MAX_TX_BD             (NUM_TX_BD - 1)
 +#define MAX_TX_AVAIL          (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
 +#define NEXT_TX_IDX(x)                ((((x) & MAX_TX_DESC_CNT) == \
- #define MAX_RX_DESC_CNT               (RX_DESC_CNT - 2)
++                                (MAX_TX_DESC_CNT - 1)) ? \
++                                      (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
++                                      (x) + 1)
 +#define TX_BD(x)              ((x) & MAX_TX_BD)
 +#define TX_BD_POFF(x)         ((x) & MAX_TX_DESC_CNT)
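 +
 +/*
 + * Illustrative note: MAX_TX_DESC_CNT leaves NEXT_PAGE_TX_DESC_CNT "next
 + * page" descriptors at the end of each page, so NEXT_TX_IDX() advances by
 + * 1 + NEXT_PAGE_TX_DESC_CNT when the index sits on the last usable
 + * descriptor of a page and by 1 otherwise.
 + */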
 +
 +/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
 +#define NUM_RX_RINGS          8
 +#define RX_DESC_CNT           (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
- #define MIN_RX_AVAIL          128
++#define NEXT_PAGE_RX_DESC_CNT 2
++#define MAX_RX_DESC_CNT               (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
 +#define RX_DESC_MASK          (RX_DESC_CNT - 1)
 +#define NUM_RX_BD             (RX_DESC_CNT * NUM_RX_RINGS)
 +#define MAX_RX_BD             (NUM_RX_BD - 1)
 +#define MAX_RX_AVAIL          (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-                                 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
++
++/* dropless fc calculations for BDs
++ *
++ * The number of BDs should equal the number of buffers in the BRB:
++ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
++ * "next" elements on each page
++ */
++#define NUM_BD_REQ            BRB_SIZE(bp)
++#define NUM_BD_PG_REQ         ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
++                                            MAX_RX_DESC_CNT)
++#define BD_TH_LO(bp)          (NUM_BD_REQ + \
++                               NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
++                               FW_DROP_LEVEL(bp))
++#define BD_TH_HI(bp)          (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
++
++#define MIN_RX_AVAIL          ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
 +
 +#define MIN_RX_SIZE_TPA_HW    (CHIP_IS_E1(bp) ? \
 +                                      ETH_MIN_RX_CQES_WITH_TPA_E1 : \
 +                                      ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
 +#define MIN_RX_SIZE_NONTPA_HW   ETH_MIN_RX_CQES_WITHOUT_TPA
 +#define MIN_RX_SIZE_TPA               (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
 +#define MIN_RX_SIZE_NONTPA    (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
 +                                                              MIN_RX_AVAIL))
 +
 +#define NEXT_RX_IDX(x)                ((((x) & RX_DESC_MASK) == \
- #define MAX_RCQ_DESC_CNT      (RCQ_DESC_CNT - 1)
++                                (MAX_RX_DESC_CNT - 1)) ? \
++                                      (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
++                                      (x) + 1)
 +#define RX_BD(x)              ((x) & MAX_RX_BD)
 +
 +/*
 + * As long as CQE is X times bigger than BD entry we have to allocate X times
 + * more pages for CQ ring in order to keep it balanced with BD ring
 + */
 +#define CQE_BD_REL    (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
 +#define NUM_RCQ_RINGS         (NUM_RX_RINGS * CQE_BD_REL)
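 +
 +/*
 + * Illustrative example: if a CQE were twice the size of an RX BD,
 + * CQE_BD_REL would be 2 and NUM_RCQ_RINGS twice NUM_RX_RINGS, keeping the
 + * completion queue ring roughly as deep (in entries) as the RX BD ring.
 + */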
 +#define RCQ_DESC_CNT          (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-                                 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
++#define NEXT_PAGE_RCQ_DESC_CNT        1
++#define MAX_RCQ_DESC_CNT      (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
 +#define NUM_RCQ_BD            (RCQ_DESC_CNT * NUM_RCQ_RINGS)
 +#define MAX_RCQ_BD            (NUM_RCQ_BD - 1)
 +#define MAX_RCQ_AVAIL         (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
 +#define NEXT_RCQ_IDX(x)               ((((x) & MAX_RCQ_DESC_CNT) == \
- #define HC_INDEX_TOE_RX_CQ_CONS               0 /* Formerly Ustorm TOE CQ index */
-                                         /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */
- #define HC_INDEX_ETH_RX_CQ_CONS               1 /* Formerly Ustorm ETH CQ index */
-                                         /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */
- #define HC_INDEX_ETH_RX_BD_CONS               2 /* Formerly Ustorm ETH BD index */
-                                         /* (HC_INDEX_U_ETH_RX_BD_CONS)  */
- #define HC_INDEX_TOE_TX_CQ_CONS               4 /* Formerly Cstorm TOE CQ index   */
-                                         /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS0  5 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS1  6 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS2  7 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
++                                (MAX_RCQ_DESC_CNT - 1)) ? \
++                                      (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
++                                      (x) + 1)
 +#define RCQ_BD(x)             ((x) & MAX_RCQ_BD)
 +
++/* dropless fc calculations for RCQs
++ *
++ * The number of RCQs should equal the number of buffers in the BRB:
++ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
++ * "next" elements on each page
++ */
++#define NUM_RCQ_REQ           BRB_SIZE(bp)
++#define NUM_RCQ_PG_REQ                ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
++                                            MAX_RCQ_DESC_CNT)
++#define RCQ_TH_LO(bp)         (NUM_RCQ_REQ + \
++                               NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
++                               FW_DROP_LEVEL(bp))
++#define RCQ_TH_HI(bp)         (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
++
 +
 +/* This is needed for determining of last_max */
 +#define SUB_S16(a, b)         (s16)((s16)(a) - (s16)(b))
 +#define SUB_S32(a, b)         (s32)((s32)(a) - (s32)(b))
 +
 +
 +#define BNX2X_SWCID_SHIFT     17
 +#define BNX2X_SWCID_MASK      ((0x1 << BNX2X_SWCID_SHIFT) - 1)
 +
 +/* used on a CID received from the HW */
 +#define SW_CID(x)                     (le32_to_cpu(x) & BNX2X_SWCID_MASK)
 +#define CQE_CMD(x)                    (le32_to_cpu(x) >> \
 +                                      COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
 +
 +#define BD_UNMAP_ADDR(bd)             HILO_U64(le32_to_cpu((bd)->addr_hi), \
 +                                               le32_to_cpu((bd)->addr_lo))
 +#define BD_UNMAP_LEN(bd)              (le16_to_cpu((bd)->nbytes))
 +
 +#define BNX2X_DB_MIN_SHIFT            3       /* 8 bytes */
 +#define BNX2X_DB_SHIFT                        7       /* 128 bytes*/
 +#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
 +#error "Min DB doorbell stride is 8"
 +#endif
 +#define DPM_TRIGER_TYPE                       0x40
 +#define DOORBELL(bp, cid, val) \
 +      do { \
 +              writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
 +                     DPM_TRIGER_TYPE); \
 +      } while (0)
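 +
 +/* A minimal usage sketch of the doorbell macro (the txdata field names below
 + * are illustrative, not lifted from a specific call site):
 + *
 + *      DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
 + *
 + * The write lands at doorbells + db_size * cid + DPM_TRIGER_TYPE, i.e. every
 + * connection (CID) owns its own db_size-sized doorbell cell.
 + */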
 +
 +
 +/* TX CSUM helpers */
 +#define SKB_CS_OFF(skb)               (offsetof(struct tcphdr, check) - \
 +                               skb->csum_offset)
 +#define SKB_CS(skb)           (*(u16 *)(skb_transport_header(skb) + \
 +                                        skb->csum_offset))
 +
 +#define pbd_tcp_flags(skb)    (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
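 +
 +/* Worked example for the TX checksum helpers, assuming standard header
 + * layouts: for TCP, skb->csum_offset == offsetof(struct tcphdr, check), so
 + * SKB_CS_OFF() is 0; for UDP, skb->csum_offset is
 + * offsetof(struct udphdr, check) = 6, giving SKB_CS_OFF() = 16 - 6 = 10,
 + * i.e. how far the real checksum field sits from where a TCP checksum would
 + * be.  SKB_CS() simply reads the 16-bit checksum value stored at
 + * skb_transport_header(skb) + skb->csum_offset.
 + */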
 +
 +#define XMIT_PLAIN                    0
 +#define XMIT_CSUM_V4                  0x1
 +#define XMIT_CSUM_V6                  0x2
 +#define XMIT_CSUM_TCP                 0x4
 +#define XMIT_GSO_V4                   0x8
 +#define XMIT_GSO_V6                   0x10
 +
 +#define XMIT_CSUM                     (XMIT_CSUM_V4 | XMIT_CSUM_V6)
 +#define XMIT_GSO                      (XMIT_GSO_V4 | XMIT_GSO_V6)
 +
 +
 +/* stuff added to make the code fit 80Col */
 +#define CQE_TYPE(cqe_fp_flags)         ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
 +#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
 +#define CQE_TYPE_STOP(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
 +#define CQE_TYPE_SLOW(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
 +#define CQE_TYPE_FAST(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
 +
 +#define ETH_RX_ERROR_FALGS            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 +
 +#define BNX2X_IP_CSUM_ERR(cqe) \
 +                      (!((cqe)->fast_path_cqe.status_flags & \
 +                         ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
 +                       ((cqe)->fast_path_cqe.type_error_flags & \
 +                        ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
 +
 +#define BNX2X_L4_CSUM_ERR(cqe) \
 +                      (!((cqe)->fast_path_cqe.status_flags & \
 +                         ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
 +                       ((cqe)->fast_path_cqe.type_error_flags & \
 +                        ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
 +
 +#define BNX2X_RX_CSUM_OK(cqe) \
 +                      (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
 +
 +#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
 +                              (((le16_to_cpu(flags) & \
 +                                 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
 +                                PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
 +                               == PRS_FLAG_OVERETH_IPV4)
 +#define BNX2X_RX_SUM_FIX(cqe) \
 +      BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
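 +
 +/* A minimal sketch (not a verbatim copy of the RX path) of how the checksum
 + * macros above are typically consulted before a packet is passed up:
 + *
 + *      if (likely(BNX2X_RX_CSUM_OK(cqe)))
 + *              skb->ip_summed = CHECKSUM_UNNECESSARY;
 + *
 + * BNX2X_RX_SUM_FIX() flags IPv4-over-Ethernet packets whose pseudo-header
 + * checksum may still need a software fix-up.
 + */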
 +
 +
 +#define FP_USB_FUNC_OFF       \
 +                      offsetof(struct cstorm_status_block_u, func)
 +#define FP_CSB_FUNC_OFF       \
 +                      offsetof(struct cstorm_status_block_c, func)
 +
- #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
++#define HC_INDEX_ETH_RX_CQ_CONS               1
 +
- #define BP_E1HVN(bp)                  (bp->pfid >> 1)
- #define BP_VN(bp)                     (BP_E1HVN(bp)) /*remove when approved*/
- #define BP_L_ID(bp)                   (BP_E1HVN(bp) << 2)
- #define BP_FW_MB_IDX(bp)              (BP_PORT(bp) +\
-         BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
++#define HC_INDEX_OOO_TX_CQ_CONS               4
 +
++#define HC_INDEX_ETH_TX_CQ_CONS_COS0  5
++
++#define HC_INDEX_ETH_TX_CQ_CONS_COS1  6
++
++#define HC_INDEX_ETH_TX_CQ_CONS_COS2  7
++
++#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
 +
 +#define BNX2X_RX_SB_INDEX \
 +      (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
 +
 +#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
 +
 +#define BNX2X_TX_SB_INDEX_COS0 \
 +      (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
 +
 +/* end of fast path */
 +
 +/* common */
 +
 +struct bnx2x_common {
 +
 +      u32                     chip_id;
 +/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
 +#define CHIP_ID(bp)                   (bp->common.chip_id & 0xfffffff0)
 +
 +#define CHIP_NUM(bp)                  (bp->common.chip_id >> 16)
 +#define CHIP_NUM_57710                        0x164e
 +#define CHIP_NUM_57711                        0x164f
 +#define CHIP_NUM_57711E                       0x1650
 +#define CHIP_NUM_57712                        0x1662
 +#define CHIP_NUM_57712_MF             0x1663
 +#define CHIP_NUM_57713                        0x1651
 +#define CHIP_NUM_57713E                       0x1652
 +#define CHIP_NUM_57800                        0x168a
 +#define CHIP_NUM_57800_MF             0x16a5
 +#define CHIP_NUM_57810                        0x168e
 +#define CHIP_NUM_57810_MF             0x16ae
 +#define CHIP_NUM_57840                        0x168d
 +#define CHIP_NUM_57840_MF             0x16ab
 +#define CHIP_IS_E1(bp)                        (CHIP_NUM(bp) == CHIP_NUM_57710)
 +#define CHIP_IS_57711(bp)             (CHIP_NUM(bp) == CHIP_NUM_57711)
 +#define CHIP_IS_57711E(bp)            (CHIP_NUM(bp) == CHIP_NUM_57711E)
 +#define CHIP_IS_57712(bp)             (CHIP_NUM(bp) == CHIP_NUM_57712)
 +#define CHIP_IS_57712_MF(bp)          (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
 +#define CHIP_IS_57800(bp)             (CHIP_NUM(bp) == CHIP_NUM_57800)
 +#define CHIP_IS_57800_MF(bp)          (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
 +#define CHIP_IS_57810(bp)             (CHIP_NUM(bp) == CHIP_NUM_57810)
 +#define CHIP_IS_57810_MF(bp)          (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
 +#define CHIP_IS_57840(bp)             (CHIP_NUM(bp) == CHIP_NUM_57840)
 +#define CHIP_IS_57840_MF(bp)          (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
 +#define CHIP_IS_E1H(bp)                       (CHIP_IS_57711(bp) || \
 +                                       CHIP_IS_57711E(bp))
 +#define CHIP_IS_E2(bp)                        (CHIP_IS_57712(bp) || \
 +                                       CHIP_IS_57712_MF(bp))
 +#define CHIP_IS_E3(bp)                        (CHIP_IS_57800(bp) || \
 +                                       CHIP_IS_57800_MF(bp) || \
 +                                       CHIP_IS_57810(bp) || \
 +                                       CHIP_IS_57810_MF(bp) || \
 +                                       CHIP_IS_57840(bp) || \
 +                                       CHIP_IS_57840_MF(bp))
 +#define CHIP_IS_E1x(bp)                       (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
 +#define USES_WARPCORE(bp)             (CHIP_IS_E3(bp))
 +#define IS_E1H_OFFSET                 (!CHIP_IS_E1(bp))
 +
 +#define CHIP_REV_SHIFT                        12
 +#define CHIP_REV_MASK                 (0xF << CHIP_REV_SHIFT)
 +#define CHIP_REV_VAL(bp)              (bp->common.chip_id & CHIP_REV_MASK)
 +#define CHIP_REV_Ax                   (0x0 << CHIP_REV_SHIFT)
 +#define CHIP_REV_Bx                   (0x1 << CHIP_REV_SHIFT)
 +/* assume maximum 5 revisions */
 +#define CHIP_REV_IS_SLOW(bp)          (CHIP_REV_VAL(bp) > 0x00005000)
 +/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
 +#define CHIP_REV_IS_EMUL(bp)          ((CHIP_REV_IS_SLOW(bp)) && \
 +                                       !(CHIP_REV_VAL(bp) & 0x00001000))
 +/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
 +#define CHIP_REV_IS_FPGA(bp)          ((CHIP_REV_IS_SLOW(bp)) && \
 +                                       (CHIP_REV_VAL(bp) & 0x00001000))
 +
 +#define CHIP_TIME(bp)                 ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
 +                                      ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
 +
 +#define CHIP_METAL(bp)                        (bp->common.chip_id & 0x00000ff0)
 +#define CHIP_BOND_ID(bp)              (bp->common.chip_id & 0x0000000f)
 +#define CHIP_REV_SIM(bp)              (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
 +                                         (CHIP_REV_SHIFT + 1)) \
 +                                              << CHIP_REV_SHIFT)
 +#define CHIP_REV(bp)                  (CHIP_REV_IS_SLOW(bp) ? \
 +                                              CHIP_REV_SIM(bp) :\
 +                                              CHIP_REV_VAL(bp))
 +#define CHIP_IS_E3B0(bp)              (CHIP_IS_E3(bp) && \
 +                                       (CHIP_REV(bp) == CHIP_REV_Bx))
 +#define CHIP_IS_E3A0(bp)              (CHIP_IS_E3(bp) && \
 +                                       (CHIP_REV(bp) == CHIP_REV_Ax))
 +
 +      int                     flash_size;
 +#define BNX2X_NVRAM_1MB_SIZE                  0x20000 /* 1M bit in bytes */
 +#define BNX2X_NVRAM_TIMEOUT_COUNT             30000
 +#define BNX2X_NVRAM_PAGE_SIZE                 256
 +
 +      u32                     shmem_base;
 +      u32                     shmem2_base;
 +      u32                     mf_cfg_base;
 +      u32                     mf2_cfg_base;
 +
 +      u32                     hw_config;
 +
 +      u32                     bc_ver;
 +
 +      u8                      int_block;
 +#define INT_BLOCK_HC                  0
 +#define INT_BLOCK_IGU                 1
 +#define INT_BLOCK_MODE_NORMAL         0
 +#define INT_BLOCK_MODE_BW_COMP                2
 +#define CHIP_INT_MODE_IS_NBC(bp)              \
 +                      (!CHIP_IS_E1x(bp) &&    \
 +                      !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
 +#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
 +
 +      u8                      chip_port_mode;
 +#define CHIP_4_PORT_MODE                      0x0
 +#define CHIP_2_PORT_MODE                      0x1
 +#define CHIP_PORT_MODE_NONE                   0x2
 +#define CHIP_MODE(bp)                 (bp->common.chip_port_mode)
 +#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
 +};
 +
 +/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
 +#define BNX2X_IGU_STAS_MSG_VF_CNT 64
 +#define BNX2X_IGU_STAS_MSG_PF_CNT 4
 +
 +/* end of common */
 +
 +/* port */
 +
 +struct bnx2x_port {
 +      u32                     pmf;
 +
 +      u32                     link_config[LINK_CONFIG_SIZE];
 +
 +      u32                     supported[LINK_CONFIG_SIZE];
 +/* link settings - missing defines */
 +#define SUPPORTED_2500baseX_Full      (1 << 15)
 +
 +      u32                     advertising[LINK_CONFIG_SIZE];
 +/* link settings - missing defines */
 +#define ADVERTISED_2500baseX_Full     (1 << 15)
 +
 +      u32                     phy_addr;
 +
 +      /* used to synchronize phy accesses */
 +      struct mutex            phy_mutex;
 +      int                     need_hw_lock;
 +
 +      u32                     port_stx;
 +
 +      struct nig_stats        old_nig_stats;
 +};
 +
 +/* end of port */
 +
 +#define STATS_OFFSET32(stat_name) \
 +                      (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
 +
 +/* slow path */
 +
 +/* slow path work-queue */
 +extern struct workqueue_struct *bnx2x_wq;
 +
 +#define BNX2X_MAX_NUM_OF_VFS  64
 +#define BNX2X_VF_ID_INVALID   0xFF
 +
 +/*
 + * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
 + * controlled by the number of fast-path status blocks supported by the
 + * device (HW/FW). Each fast-path status block (FP-SB), aka non-default
 + * status block, represents an independent interrupt context that can
 + * serve a regular L2 networking queue. However, special L2 queues such
 + * as the FCoE queue do not require an FP-SB, and other components like
 + * the CNIC may consume an FP-SB, reducing the number of possible L2 queues
 + *
 + * If the maximum number of FP-SB available is X then:
 + * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
 + *    regular L2 queues is Y=X-1
 + * b. In MF mode the actual number of L2 queues is Y = (X-1)/MF_factor
 + * c. If the FCoE L2 queue is supported the actual number of L2 queues
 + *    is Y+1
 + * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
 + *    slow-path interrupts) or Y+2 if CNIC is supported (one additional
 + *    FP interrupt context for the CNIC).
 + * e. The number of HW contexts (CID count) is always X, or X+1 if the FCoE
 + *    L2 queue is supported. The CID for the FCoE L2 queue is always X.
 + */
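 +
 +/* Plugging illustrative numbers into the scheme above (X = 16 FP-SBs, CNIC
 + * and FCoE both enabled):
 + *   a. CNIC takes one FP-SB           -> Y = 15 regular L2 queues
 + *   c. the FCoE L2 queue is added     -> 16 L2 queues in total
 + *   d. MSIX vectors = Y + 2 = 17      (one slow-path + one CNIC FP context)
 + *   e. HW contexts (CIDs) = X + 1 = 17, the FCoE CID being fixed at X = 16
 + */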
 +
 +/* fast-path interrupt contexts E1x */
 +#define FP_SB_MAX_E1x         16
 +/* fast-path interrupt contexts E2 */
 +#define FP_SB_MAX_E2          HC_SB_MAX_SB_E2
 +
 +union cdu_context {
 +      struct eth_context eth;
 +      char pad[1024];
 +};
 +
 +/* CDU host DB constants */
 +#define CDU_ILT_PAGE_SZ_HW    3
 +#define CDU_ILT_PAGE_SZ               (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
 +#define ILT_PAGE_CIDS         (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
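 +
 +/* With the values visible here the CDU ILT math works out as follows
 + * (sizeof(union cdu_context) is padded to 1024 bytes above):
 + *   CDU_ILT_PAGE_SZ = 8192 << 3 = 64K
 + *   ILT_PAGE_CIDS   = 65536 / 1024 = 64 connection contexts per ILT page
 + */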
 +
 +#ifdef BCM_CNIC
 +#define CNIC_ISCSI_CID_MAX    256
 +#define CNIC_FCOE_CID_MAX     2048
 +#define CNIC_CID_MAX          (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
 +#define CNIC_ILT_LINES                DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
 +#endif
 +
 +#define QM_ILT_PAGE_SZ_HW     0
 +#define QM_ILT_PAGE_SZ                (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
 +#define QM_CID_ROUND          1024
 +
 +#ifdef BCM_CNIC
 +/* TM (timers) host DB constants */
 +#define TM_ILT_PAGE_SZ_HW     0
 +#define TM_ILT_PAGE_SZ                (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
 +/* #define TM_CONN_NUM                (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
 +#define TM_CONN_NUM           1024
 +#define TM_ILT_SZ             (8 * TM_CONN_NUM)
 +#define TM_ILT_LINES          DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
 +
 +/* SRC (Searcher) host DB constants */
 +#define SRC_ILT_PAGE_SZ_HW    0
 +#define SRC_ILT_PAGE_SZ               (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
 +#define SRC_HASH_BITS         10
 +#define SRC_CONN_NUM          (1 << SRC_HASH_BITS) /* 1024 */
 +#define SRC_ILT_SZ            (sizeof(struct src_ent) * SRC_CONN_NUM)
 +#define SRC_T2_SZ             SRC_ILT_SZ
 +#define SRC_ILT_LINES         DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
 +
 +#endif
 +
 +#define MAX_DMAE_C            8
 +
 +/* DMA memory not used in fastpath */
 +struct bnx2x_slowpath {
 +      union {
 +              struct mac_configuration_cmd            e1x;
 +              struct eth_classify_rules_ramrod_data   e2;
 +      } mac_rdata;
 +
 +
 +      union {
 +              struct tstorm_eth_mac_filter_config     e1x;
 +              struct eth_filter_rules_ramrod_data     e2;
 +      } rx_mode_rdata;
 +
 +      union {
 +              struct mac_configuration_cmd            e1;
 +              struct eth_multicast_rules_ramrod_data  e2;
 +      } mcast_rdata;
 +
 +      struct eth_rss_update_ramrod_data       rss_rdata;
 +
 +      /* Queue State related ramrods are always sent under rtnl_lock */
 +      union {
 +              struct client_init_ramrod_data  init_data;
 +              struct client_update_ramrod_data update_data;
 +      } q_rdata;
 +
 +      union {
 +              struct function_start_data      func_start;
 +              /* pfc configuration for DCBX ramrod */
 +              struct flow_control_configuration pfc_config;
 +      } func_rdata;
 +
 +      /* used by dmae command executer */
 +      struct dmae_command             dmae[MAX_DMAE_C];
 +
 +      u32                             stats_comp;
 +      union mac_stats                 mac_stats;
 +      struct nig_stats                nig_stats;
 +      struct host_port_stats          port_stats;
 +      struct host_func_stats          func_stats;
 +      struct host_func_stats          func_stats_base;
 +
 +      u32                             wb_comp;
 +      u32                             wb_data[4];
 +};
 +
 +#define bnx2x_sp(bp, var)             (&bp->slowpath->var)
 +#define bnx2x_sp_mapping(bp, var) \
 +              (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
 +
 +
 +/* attn group wiring */
 +#define MAX_DYNAMIC_ATTN_GRPS         8
 +
 +struct attn_route {
 +      u32 sig[5];
 +};
 +
 +struct iro {
 +      u32 base;
 +      u16 m1;
 +      u16 m2;
 +      u16 m3;
 +      u16 size;
 +};
 +
 +struct hw_context {
 +      union cdu_context *vcxt;
 +      dma_addr_t cxt_mapping;
 +      size_t size;
 +};
 +
 +/* forward */
 +struct bnx2x_ilt;
 +
 +
 +enum bnx2x_recovery_state {
 +      BNX2X_RECOVERY_DONE,
 +      BNX2X_RECOVERY_INIT,
 +      BNX2X_RECOVERY_WAIT,
 +      BNX2X_RECOVERY_FAILED
 +};
 +
 +/*
 + * Event queue (EQ or event ring) MC hsi
 + * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
 + */
 +#define NUM_EQ_PAGES          1
 +#define EQ_DESC_CNT_PAGE      (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
 +#define EQ_DESC_MAX_PAGE      (EQ_DESC_CNT_PAGE - 1)
 +#define NUM_EQ_DESC           (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
 +#define EQ_DESC_MASK          (NUM_EQ_DESC - 1)
 +#define MAX_EQ_AVAIL          (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
 +
 +/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
 +#define NEXT_EQ_IDX(x)                ((((x) & EQ_DESC_MAX_PAGE) == \
 +                                (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
 +
 +/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
 +#define EQ_DESC(x)            ((x) & EQ_DESC_MASK)
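 +
 +/* The "+ 2" in NEXT_EQ_IDX() plays the same role as the next-page skip in
 + * the RX/RCQ rings: the last element of each EQ page is reserved as a link,
 + * so once the index reaches EQ_DESC_MAX_PAGE - 1 it jumps over it.  For an
 + * assumed 4K page and 32-byte union event_ring_elem, EQ_DESC_CNT_PAGE = 128
 + * and the index sequence runs ..., 125, 126, 128 (127 is skipped).
 + */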
 +
 +#define BNX2X_EQ_INDEX \
 +      (&bp->def_status_blk->sp_sb.\
 +      index_values[HC_SP_INDEX_EQ_CONS])
 +
 +/* This is the data that will be used to create a link report message.
 + * We will keep the data used for the last link report in order
 + * to prevent reporting the same link parameters twice.
 + */
 +struct bnx2x_link_report_data {
 +      u16 line_speed;                 /* Effective line speed */
 +      unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
 +};
 +
 +enum {
 +      BNX2X_LINK_REPORT_FD,           /* Full DUPLEX */
 +      BNX2X_LINK_REPORT_LINK_DOWN,
 +      BNX2X_LINK_REPORT_RX_FC_ON,
 +      BNX2X_LINK_REPORT_TX_FC_ON,
 +};
 +
 +enum {
 +      BNX2X_PORT_QUERY_IDX,
 +      BNX2X_PF_QUERY_IDX,
 +      BNX2X_FIRST_QUEUE_QUERY_IDX,
 +};
 +
 +struct bnx2x_fw_stats_req {
 +      struct stats_query_header hdr;
 +      struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
 +};
 +
 +struct bnx2x_fw_stats_data {
 +      struct stats_counter    storm_counters;
 +      struct per_port_stats   port;
 +      struct per_pf_stats     pf;
 +      struct per_queue_stats  queue_stats[1];
 +};
 +
 +/* Public slow path states */
 +enum {
 +      BNX2X_SP_RTNL_SETUP_TC,
 +      BNX2X_SP_RTNL_TX_TIMEOUT,
 +};
 +
 +
 +struct bnx2x {
 +      /* Fields used in the tx and intr/napi performance paths
 +       * are grouped together in the beginning of the structure
 +       */
 +      struct bnx2x_fastpath   *fp;
 +      void __iomem            *regview;
 +      void __iomem            *doorbells;
 +      u16                     db_size;
 +
 +      u8                      pf_num; /* absolute PF number */
 +      u8                      pfid;   /* per-path PF number */
 +      int                     base_fw_ndsb; /**/
 +#define BP_PATH(bp)                   (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
 +#define BP_PORT(bp)                   (bp->pfid & 1)
 +#define BP_FUNC(bp)                   (bp->pfid)
 +#define BP_ABS_FUNC(bp)                       (bp->pf_num)
-                                        BP_E1HVN(bp))
++#define BP_VN(bp)                     ((bp)->pfid >> 1)
++#define BP_MAX_VN_NUM(bp)             (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
++#define BP_L_ID(bp)                   (BP_VN(bp) << 2)
++#define BP_FW_MB_IDX_VN(bp, vn)               (BP_PORT(bp) +\
++        (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
++#define BP_FW_MB_IDX(bp)              BP_FW_MB_IDX_VN(bp, BP_VN(bp))
 +
 +      struct net_device       *dev;
 +      struct pci_dev          *pdev;
 +
 +      const struct iro        *iro_arr;
 +#define IRO (bp->iro_arr)
 +
 +      enum bnx2x_recovery_state recovery_state;
 +      int                     is_leader;
 +      struct msix_entry       *msix_table;
 +
 +      int                     tx_ring_size;
 +
 +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 +#define ETH_OVREHEAD          (ETH_HLEN + 8 + 8)
 +#define ETH_MIN_PACKET_SIZE           60
 +#define ETH_MAX_PACKET_SIZE           1500
 +#define ETH_MAX_JUMBO_PACKET_SIZE     9600
 +
 +      /* Max supported alignment is 256 (8 shift) */
 +#define BNX2X_RX_ALIGN_SHIFT          ((L1_CACHE_SHIFT < 8) ? \
 +                                       L1_CACHE_SHIFT : 8)
 +      /* FW use 2 Cache lines Alignment for start packet and size  */
 +#define BNX2X_FW_RX_ALIGN             (2 << BNX2X_RX_ALIGN_SHIFT)
 +#define BNX2X_PXP_DRAM_ALIGN          (BNX2X_RX_ALIGN_SHIFT - 5)
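 +
 +/* On a machine with 64-byte cache lines (L1_CACHE_SHIFT = 6) the alignment
 + * macros above evaluate to:
 + *   BNX2X_RX_ALIGN_SHIFT = 6                 (capped at 8, i.e. 256 bytes)
 + *   BNX2X_FW_RX_ALIGN    = 2 << 6 = 128      (two cache lines)
 + *   BNX2X_PXP_DRAM_ALIGN = 6 - 5 = 1
 + */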
 +
 +      struct host_sp_status_block *def_status_blk;
 +#define DEF_SB_IGU_ID                 16
 +#define DEF_SB_ID                     HC_SP_SB_ID
 +      __le16                  def_idx;
 +      __le16                  def_att_idx;
 +      u32                     attn_state;
 +      struct attn_route       attn_group[MAX_DYNAMIC_ATTN_GRPS];
 +
 +      /* slow path ring */
 +      struct eth_spe          *spq;
 +      dma_addr_t              spq_mapping;
 +      u16                     spq_prod_idx;
 +      struct eth_spe          *spq_prod_bd;
 +      struct eth_spe          *spq_last_bd;
 +      __le16                  *dsb_sp_prod;
 +      atomic_t                cq_spq_left; /* ETH_XXX ramrods credit */
 +      /* used to synchronize spq accesses */
 +      spinlock_t              spq_lock;
 +
 +      /* event queue */
 +      union event_ring_elem   *eq_ring;
 +      dma_addr_t              eq_mapping;
 +      u16                     eq_prod;
 +      u16                     eq_cons;
 +      __le16                  *eq_cons_sb;
 +      atomic_t                eq_spq_left; /* COMMON_XXX ramrods credit */
 +
 +
 +
 +      /* Counter for marking that there is a STAT_QUERY ramrod pending */
 +      u16                     stats_pending;
 +      /*  Counter for completed statistics ramrods */
 +      u16                     stats_comp;
 +
 +      /* End of fields used in the performance code paths */
 +
 +      int                     panic;
 +      int                     msg_enable;
 +
 +      u32                     flags;
 +#define PCIX_FLAG                     (1 << 0)
 +#define PCI_32BIT_FLAG                        (1 << 1)
 +#define ONE_PORT_FLAG                 (1 << 2)
 +#define NO_WOL_FLAG                   (1 << 3)
 +#define USING_DAC_FLAG                        (1 << 4)
 +#define USING_MSIX_FLAG                       (1 << 5)
 +#define USING_MSI_FLAG                        (1 << 6)
 +#define DISABLE_MSI_FLAG              (1 << 7)
 +#define TPA_ENABLE_FLAG                       (1 << 8)
 +#define NO_MCP_FLAG                   (1 << 9)
 +
 +#define BP_NOMCP(bp)                  (bp->flags & NO_MCP_FLAG)
 +#define MF_FUNC_DIS                   (1 << 11)
 +#define OWN_CNIC_IRQ                  (1 << 12)
 +#define NO_ISCSI_OOO_FLAG             (1 << 13)
 +#define NO_ISCSI_FLAG                 (1 << 14)
 +#define NO_FCOE_FLAG                  (1 << 15)
 +
 +#define NO_ISCSI(bp)          ((bp)->flags & NO_ISCSI_FLAG)
 +#define NO_ISCSI_OOO(bp)      ((bp)->flags & NO_ISCSI_OOO_FLAG)
 +#define NO_FCOE(bp)           ((bp)->flags & NO_FCOE_FLAG)
 +
 +      int                     pm_cap;
 +      int                     mrrs;
 +
 +      struct delayed_work     sp_task;
 +      struct delayed_work     sp_rtnl_task;
 +
 +      struct delayed_work     period_task;
 +      struct timer_list       timer;
 +      int                     current_interval;
 +
 +      u16                     fw_seq;
 +      u16                     fw_drv_pulse_wr_seq;
 +      u32                     func_stx;
 +
 +      struct link_params      link_params;
 +      struct link_vars        link_vars;
 +      u32                     link_cnt;
 +      struct bnx2x_link_report_data last_reported_link;
 +
 +      struct mdio_if_info     mdio;
 +
 +      struct bnx2x_common     common;
 +      struct bnx2x_port       port;
 +
 +      struct cmng_struct_per_port cmng;
 +      u32                     vn_weight_sum;
 +      u32                     mf_config[E1HVN_MAX];
 +      u32                     mf2_config[E2_FUNC_MAX];
 +      u32                     path_has_ovlan; /* E3 */
 +      u16                     mf_ov;
 +      u8                      mf_mode;
 +#define IS_MF(bp)             (bp->mf_mode != 0)
 +#define IS_MF_SI(bp)          (bp->mf_mode == MULTI_FUNCTION_SI)
 +#define IS_MF_SD(bp)          (bp->mf_mode == MULTI_FUNCTION_SD)
 +
 +      u8                      wol;
 +
 +      int                     rx_ring_size;
 +
 +      u16                     tx_quick_cons_trip_int;
 +      u16                     tx_quick_cons_trip;
 +      u16                     tx_ticks_int;
 +      u16                     tx_ticks;
 +
 +      u16                     rx_quick_cons_trip_int;
 +      u16                     rx_quick_cons_trip;
 +      u16                     rx_ticks_int;
 +      u16                     rx_ticks;
 +/* Maximal coalescing timeout in us */
 +#define BNX2X_MAX_COALESCE_TOUT               (0xf0*12)
 +
 +      u32                     lin_cnt;
 +
 +      u16                     state;
 +#define BNX2X_STATE_CLOSED            0
 +#define BNX2X_STATE_OPENING_WAIT4_LOAD        0x1000
 +#define BNX2X_STATE_OPENING_WAIT4_PORT        0x2000
 +#define BNX2X_STATE_OPEN              0x3000
 +#define BNX2X_STATE_CLOSING_WAIT4_HALT        0x4000
 +#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
 +
 +#define BNX2X_STATE_DIAG              0xe000
 +#define BNX2X_STATE_ERROR             0xf000
 +
 +      int                     multi_mode;
 +#define BNX2X_MAX_PRIORITY            8
 +#define BNX2X_MAX_ENTRIES_PER_PRI     16
 +#define BNX2X_MAX_COS                 3
 +#define BNX2X_MAX_TX_COS              2
 +      int                     num_queues;
 +      int                     disable_tpa;
 +
 +      u32                     rx_mode;
 +#define BNX2X_RX_MODE_NONE            0
 +#define BNX2X_RX_MODE_NORMAL          1
 +#define BNX2X_RX_MODE_ALLMULTI                2
 +#define BNX2X_RX_MODE_PROMISC         3
 +#define BNX2X_MAX_MULTICAST           64
 +
 +      u8                      igu_dsb_id;
 +      u8                      igu_base_sb;
 +      u8                      igu_sb_cnt;
 +      dma_addr_t              def_status_blk_mapping;
 +
 +      struct bnx2x_slowpath   *slowpath;
 +      dma_addr_t              slowpath_mapping;
 +
 +      /* Total number of FW statistics requests */
 +      u8                      fw_stats_num;
 +
 +      /*
 +       * This is a memory buffer that will contain both statistics
 +       * ramrod request and data.
 +       */
 +      void                    *fw_stats;
 +      dma_addr_t              fw_stats_mapping;
 +
 +      /*
 +       * FW statistics request shortcut (points at the
 +       * beginning of fw_stats buffer).
 +       */
 +      struct bnx2x_fw_stats_req       *fw_stats_req;
 +      dma_addr_t                      fw_stats_req_mapping;
 +      int                             fw_stats_req_sz;
 +
 +      /*
 +       * FW statistics data shortcut (points at the beginning of
 +       * fw_stats buffer + fw_stats_req_sz).
 +       */
 +      struct bnx2x_fw_stats_data      *fw_stats_data;
 +      dma_addr_t                      fw_stats_data_mapping;
 +      int                             fw_stats_data_sz;
 +
 +      struct hw_context       context;
 +
 +      struct bnx2x_ilt        *ilt;
 +#define BP_ILT(bp)            ((bp)->ilt)
 +#define ILT_MAX_LINES         256
 +/*
 + * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
 + * to CNIC.
 + */
 +#define BNX2X_MAX_RSS_COUNT(bp)       ((bp)->igu_sb_cnt - CNIC_PRESENT)
 +
 +/*
 + * Maximum CID count that might be required by the bnx2x:
 + * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
 + */
 +#define BNX2X_L2_CID_COUNT(bp)        (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
 +                                      NON_ETH_CONTEXT_USE + CNIC_PRESENT)
 +#define L2_ILT_LINES(bp)      (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
 +                                      ILT_PAGE_CIDS))
 +#define BNX2X_DB_SIZE(bp)     (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
 +
 +      int                     qm_cid_count;
 +
 +      int                     dropless_fc;
 +
 +#ifdef BCM_CNIC
 +      u32                     cnic_flags;
 +#define BNX2X_CNIC_FLAG_MAC_SET               1
 +      void                    *t2;
 +      dma_addr_t              t2_mapping;
 +      struct cnic_ops __rcu   *cnic_ops;
 +      void                    *cnic_data;
 +      u32                     cnic_tag;
 +      struct cnic_eth_dev     cnic_eth_dev;
 +      union host_hc_status_block cnic_sb;
 +      dma_addr_t              cnic_sb_mapping;
 +      struct eth_spe          *cnic_kwq;
 +      struct eth_spe          *cnic_kwq_prod;
 +      struct eth_spe          *cnic_kwq_cons;
 +      struct eth_spe          *cnic_kwq_last;
 +      u16                     cnic_kwq_pending;
 +      u16                     cnic_spq_pending;
 +      u8                      fip_mac[ETH_ALEN];
 +      struct mutex            cnic_mutex;
 +      struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
 +
 +      /* Start index of the "special" (CNIC related) L2 clients */
 +      u8                              cnic_base_cl_id;
 +#endif
 +
 +      int                     dmae_ready;
 +      /* used to synchronize dmae accesses */
 +      spinlock_t              dmae_lock;
 +
 +      /* used to protect the FW mail box */
 +      struct mutex            fw_mb_mutex;
 +
 +      /* used to synchronize stats collecting */
 +      int                     stats_state;
 +
 +      /* used for synchronization of concurrent threads statistics handling */
 +      spinlock_t              stats_lock;
 +
 +      /* used by dmae command loader */
 +      struct dmae_command     stats_dmae;
 +      int                     executer_idx;
 +
 +      u16                     stats_counter;
 +      struct bnx2x_eth_stats  eth_stats;
 +
 +      struct z_stream_s       *strm;
 +      void                    *gunzip_buf;
 +      dma_addr_t              gunzip_mapping;
 +      int                     gunzip_outlen;
 +#define FW_BUF_SIZE                   0x8000
 +#define GUNZIP_BUF(bp)                        (bp->gunzip_buf)
 +#define GUNZIP_PHYS(bp)                       (bp->gunzip_mapping)
 +#define GUNZIP_OUTLEN(bp)             (bp->gunzip_outlen)
 +
 +      struct raw_op           *init_ops;
 +      /* Init blocks offsets inside init_ops */
 +      u16                     *init_ops_offsets;
 +      /* Data blob - has 32 bit granularity */
 +      u32                     *init_data;
 +      u32                     init_mode_flags;
 +#define INIT_MODE_FLAGS(bp)   (bp->init_mode_flags)
 +      /* Zipped PRAM blobs - raw data */
 +      const u8                *tsem_int_table_data;
 +      const u8                *tsem_pram_data;
 +      const u8                *usem_int_table_data;
 +      const u8                *usem_pram_data;
 +      const u8                *xsem_int_table_data;
 +      const u8                *xsem_pram_data;
 +      const u8                *csem_int_table_data;
 +      const u8                *csem_pram_data;
 +#define INIT_OPS(bp)                  (bp->init_ops)
 +#define INIT_OPS_OFFSETS(bp)          (bp->init_ops_offsets)
 +#define INIT_DATA(bp)                 (bp->init_data)
 +#define INIT_TSEM_INT_TABLE_DATA(bp)  (bp->tsem_int_table_data)
 +#define INIT_TSEM_PRAM_DATA(bp)               (bp->tsem_pram_data)
 +#define INIT_USEM_INT_TABLE_DATA(bp)  (bp->usem_int_table_data)
 +#define INIT_USEM_PRAM_DATA(bp)               (bp->usem_pram_data)
 +#define INIT_XSEM_INT_TABLE_DATA(bp)  (bp->xsem_int_table_data)
 +#define INIT_XSEM_PRAM_DATA(bp)               (bp->xsem_pram_data)
 +#define INIT_CSEM_INT_TABLE_DATA(bp)  (bp->csem_int_table_data)
 +#define INIT_CSEM_PRAM_DATA(bp)               (bp->csem_pram_data)
 +
 +#define PHY_FW_VER_LEN                        20
 +      char                    fw_ver[32];
 +      const struct firmware   *firmware;
 +
 +      /* DCB support on/off */
 +      u16 dcb_state;
 +#define BNX2X_DCB_STATE_OFF                   0
 +#define BNX2X_DCB_STATE_ON                    1
 +
 +      /* DCBX engine mode */
 +      int dcbx_enabled;
 +#define BNX2X_DCBX_ENABLED_OFF                        0
 +#define BNX2X_DCBX_ENABLED_ON_NEG_OFF         1
 +#define BNX2X_DCBX_ENABLED_ON_NEG_ON          2
 +#define BNX2X_DCBX_ENABLED_INVALID            (-1)
 +
 +      bool dcbx_mode_uset;
 +
 +      struct bnx2x_config_dcbx_params         dcbx_config_params;
 +      struct bnx2x_dcbx_port_params           dcbx_port_params;
 +      int                                     dcb_version;
 +
 +      /* CAM credit pools */
 +      struct bnx2x_credit_pool_obj            macs_pool;
 +
 +      /* RX_MODE object */
 +      struct bnx2x_rx_mode_obj                rx_mode_obj;
 +
 +      /* MCAST object */
 +      struct bnx2x_mcast_obj                  mcast_obj;
 +
 +      /* RSS configuration object */
 +      struct bnx2x_rss_config_obj             rss_conf_obj;
 +
 +      /* Function State controlling object */
 +      struct bnx2x_func_sp_obj                func_obj;
 +
 +      unsigned long                           sp_state;
 +
 +      /* operation indication for the sp_rtnl task */
 +      unsigned long                           sp_rtnl_state;
 +
 +      /* DCBX Negotation results */
 +      struct dcbx_features                    dcbx_local_feat;
 +      u32                                     dcbx_error;
 +
 +#ifdef BCM_DCBNL
 +      struct dcbx_features                    dcbx_remote_feat;
 +      u32                                     dcbx_remote_flags;
 +#endif
 +      u32                                     pending_max;
 +
 +      /* multiple tx classes of service */
 +      u8                                      max_cos;
 +
 +      /* priority to cos mapping */
 +      u8                                      prio_to_cos[8];
 +};
 +
 +/* Tx queues may be less or equal to Rx queues */
 +extern int num_queues;
 +#define BNX2X_NUM_QUEUES(bp)  (bp->num_queues)
 +#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
 +#define BNX2X_NUM_RX_QUEUES(bp)       BNX2X_NUM_QUEUES(bp)
 +
 +#define is_multi(bp)          (BNX2X_NUM_QUEUES(bp) > 1)
 +
 +#define BNX2X_MAX_QUEUES(bp)  BNX2X_MAX_RSS_COUNT(bp)
 +/* #define is_eth_multi(bp)   (BNX2X_NUM_ETH_QUEUES(bp) > 1) */
 +
 +#define RSS_IPV4_CAP_MASK                                             \
 +      TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
 +
 +#define RSS_IPV4_TCP_CAP_MASK                                         \
 +      TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
 +
 +#define RSS_IPV6_CAP_MASK                                             \
 +      TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
 +
 +#define RSS_IPV6_TCP_CAP_MASK                                         \
 +      TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 +
 +/* func init flags */
 +#define FUNC_FLG_RSS          0x0001
 +#define FUNC_FLG_STATS                0x0002
 +/* removed  FUNC_FLG_UNMATCHED        0x0004 */
 +#define FUNC_FLG_TPA          0x0008
 +#define FUNC_FLG_SPQ          0x0010
 +#define FUNC_FLG_LEADING      0x0020  /* PF only */
 +
 +
 +struct bnx2x_func_init_params {
 +      /* dma */
 +      dma_addr_t      fw_stat_map;    /* valid iff FUNC_FLG_STATS */
 +      dma_addr_t      spq_map;        /* valid iff FUNC_FLG_SPQ */
 +
 +      u16             func_flgs;
 +      u16             func_id;        /* abs fid */
 +      u16             pf_id;
 +      u16             spq_prod;       /* valid iff FUNC_FLG_SPQ */
 +};
 +
 +#define for_each_eth_queue(bp, var) \
 +      for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
 +
 +#define for_each_nondefault_eth_queue(bp, var) \
 +      for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
 +
 +#define for_each_queue(bp, var) \
 +      for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 +              if (skip_queue(bp, var))        \
 +                      continue;               \
 +              else
 +
 +/* Skip forwarding FP */
 +#define for_each_rx_queue(bp, var) \
 +      for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 +              if (skip_rx_queue(bp, var))     \
 +                      continue;               \
 +              else
 +
 +/* Skip OOO FP */
 +#define for_each_tx_queue(bp, var) \
 +      for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 +              if (skip_tx_queue(bp, var))     \
 +                      continue;               \
 +              else
 +
 +#define for_each_nondefault_queue(bp, var) \
 +      for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 +              if (skip_queue(bp, var))        \
 +                      continue;               \
 +              else
 +
 +#define for_each_cos_in_tx_queue(fp, var) \
 +      for ((var) = 0; (var) < (fp)->max_cos; (var)++)
 +
 +/* skip rx queue
 + * if FCOE l2 support is disabled and this is the fcoe L2 queue
 + */
 +#define skip_rx_queue(bp, idx)        (NO_FCOE(bp) && IS_FCOE_IDX(idx))
 +
 +/* skip tx queue
 + * if FCOE l2 support is disabled and this is the fcoe L2 queue
 + */
 +#define skip_tx_queue(bp, idx)        (NO_FCOE(bp) && IS_FCOE_IDX(idx))
 +
 +#define skip_queue(bp, idx)   (NO_FCOE(bp) && IS_FCOE_IDX(idx))
 +
 +
 +
 +
 +/**
 + * bnx2x_set_mac_one - configure a single MAC address
 + *
 + * @bp:                       driver handle
 + * @mac:              MAC to configure
 + * @obj:              MAC object handle
 + * @set:              if 'true' add a new MAC, otherwise - delete
 + * @mac_type:         the type of the MAC to configure (e.g. ETH, UC list)
 + * @ramrod_flags:     RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
 + *
 + * Configures one MAC according to provided parameters or continues the
 + * execution of previously scheduled commands if RAMROD_CONT is set in
 + * ramrod_flags.
 + *
 + * Returns zero if the operation has completed successfully, a positive value
 + * if it has been successfully scheduled, and a negative value if the
 + * requested operation has failed.
 + */
 +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
 +                    struct bnx2x_vlan_mac_obj *obj, bool set,
 +                    int mac_type, unsigned long *ramrod_flags);
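 +
 +/* A hedged usage sketch; the MAC object and type below only illustrate the
 + * calling pattern and are not taken from a specific call site:
 + *
 + *      unsigned long ramrod_flags = 0;
 + *
 + *      __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 + *      rc = bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj,
 + *                             true, BNX2X_ETH_MAC, &ramrod_flags);
 + *
 + * i.e. the caller picks the MAC object, asks for addition (set == true) and
 + * decides whether to block until the ramrod completes.
 + */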
 +/**
 + * Deletes all MACs configured for the specific MAC object.
 + *
 + * @param bp Function driver instance
 + * @param mac_obj MAC object to cleanup
 + *
 + * @return zero if all MACs were cleaned
 + */
 +
 +/**
 + * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
 + *
 + * @bp:                       driver handle
 + * @mac_obj:          MAC object handle
 + * @mac_type:         type of the MACs to clear (BNX2X_XXX_MAC)
 + * @wait_for_comp:    if 'true' block until completion
 + *
 + * Deletes all MACs of the specific type (e.g. ETH, UC list).
 + *
 + * Returns zero if the operation has completed successfully, a positive value
 + * if it has been successfully scheduled, and a negative value if the
 + * requested operation has failed.
 + */
 +int bnx2x_del_all_macs(struct bnx2x *bp,
 +                     struct bnx2x_vlan_mac_obj *mac_obj,
 +                     int mac_type, bool wait_for_comp);
 +
 +/* Init Function API  */
 +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
 +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
 +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
 +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 +void bnx2x_read_mf_cfg(struct bnx2x *bp);
 +
 +
 +/* dmae */
 +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 +                    u32 len32);
 +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
 +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
 +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
 +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 +                    bool with_comp, u8 comp_type);
 +
 +
 +void bnx2x_calc_fc_adv(struct bnx2x *bp);
 +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 +                u32 data_hi, u32 data_lo, int cmd_type);
 +void bnx2x_update_coalesce(struct bnx2x *bp);
 +int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
 +
 +static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 +                         int wait)
 +{
 +      u32 val;
 +
 +      do {
 +              val = REG_RD(bp, reg);
 +              if (val == expected)
 +                      break;
 +              ms -= wait;
 +              msleep(wait);
 +
 +      } while (ms > 0);
 +
 +      return val;
 +}
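 +
 +/* reg_poll() re-reads a register every 'wait' milliseconds until it matches
 + * 'expected' or the 'ms' budget is exhausted, and returns the last value
 + * read.  A hypothetical call (the register name is made up for illustration):
 + *
 + *      if (reg_poll(bp, SOME_STATUS_REG, 1, 200, 10) != 1)
 + *              BNX2X_ERR("timed out waiting for SOME_STATUS_REG\n");
 + */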
 +
 +#define BNX2X_ILT_ZALLOC(x, y, size) \
 +      do { \
 +              x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 +              if (x) \
 +                      memset(x, 0, size); \
 +      } while (0)
 +
 +#define BNX2X_ILT_FREE(x, y, size) \
 +      do { \
 +              if (x) { \
 +                      dma_free_coherent(&bp->pdev->dev, size, x, y); \
 +                      x = NULL; \
 +                      y = 0; \
 +              } \
 +      } while (0)
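 +
 +/* These helpers pair a zeroed coherent DMA allocation with the matching
 + * free; a sketch of the intended pattern (field names illustrative only):
 + *
 + *      BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, page_sz);
 + *      ...
 + *      BNX2X_ILT_FREE(line->page, line->page_mapping, page_sz);
 + *
 + * BNX2X_ILT_FREE() also clears the pointer and the DMA handle, so calling it
 + * twice on the same line is harmless.
 + */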
 +
 +#define ILOG2(x)      (ilog2((x)))
 +
 +#define ILT_NUM_PAGE_ENTRIES  (3072)
 +/* In 57710/11 we use the whole table since we have 8 functions.
 + * In 57712 there are only 4 functions, but the same size per function is
 + * used, so only half of the table is in use
 + */
 +#define ILT_PER_FUNC          (ILT_NUM_PAGE_ENTRIES/8)
 +
 +#define FUNC_ILT_BASE(func)   (func * ILT_PER_FUNC)
 +/*
 + * The physical address is shifted right by 12 bits and a '1' (valid) bit is
 + * added at the 53rd bit of the result.
 + * Since this is a wide register(TM),
 + * we split the value into two 32-bit writes
 + */
 +#define ONCHIP_ADDR1(x)               ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
 +#define ONCHIP_ADDR2(x)               ((u32)((1 << 20) | ((u64)x >> 44)))
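 +
 +/* Worked example of the split for a DMA address a = 0x0000001234567000:
 + *   ONCHIP_ADDR1(a) = (a >> 12) & 0xFFFFFFFF = 0x01234567   (low word)
 + *   ONCHIP_ADDR2(a) = (1 << 20) | (a >> 44)  = 0x00100000   (valid bit set,
 + *                                                            high bits zero)
 + */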
 +
 +/* load/unload mode */
 +#define LOAD_NORMAL                   0
 +#define LOAD_OPEN                     1
 +#define LOAD_DIAG                     2
 +#define UNLOAD_NORMAL                 0
 +#define UNLOAD_CLOSE                  1
 +#define UNLOAD_RECOVERY                       2
 +
 +
 +/* DMAE command defines */
 +#define DMAE_TIMEOUT                  -1
 +#define DMAE_PCI_ERROR                        -2      /* E2 and onward */
 +#define DMAE_NOT_RDY                  -3
 +#define DMAE_PCI_ERR_FLAG             0x80000000
 +
 +#define DMAE_SRC_PCI                  0
 +#define DMAE_SRC_GRC                  1
 +
 +#define DMAE_DST_NONE                 0
 +#define DMAE_DST_PCI                  1
 +#define DMAE_DST_GRC                  2
 +
 +#define DMAE_COMP_PCI                 0
 +#define DMAE_COMP_GRC                 1
 +
 +/* E2 and onward - PCI error handling in the completion */
 +
 +#define DMAE_COMP_REGULAR             0
 +#define DMAE_COM_SET_ERR              1
 +
 +#define DMAE_CMD_SRC_PCI              (DMAE_SRC_PCI << \
 +                                              DMAE_COMMAND_SRC_SHIFT)
 +#define DMAE_CMD_SRC_GRC              (DMAE_SRC_GRC << \
 +                                              DMAE_COMMAND_SRC_SHIFT)
 +
 +#define DMAE_CMD_DST_PCI              (DMAE_DST_PCI << \
 +                                              DMAE_COMMAND_DST_SHIFT)
 +#define DMAE_CMD_DST_GRC              (DMAE_DST_GRC << \
 +                                              DMAE_COMMAND_DST_SHIFT)
 +
 +#define DMAE_CMD_C_DST_PCI            (DMAE_COMP_PCI << \
 +                                              DMAE_COMMAND_C_DST_SHIFT)
 +#define DMAE_CMD_C_DST_GRC            (DMAE_COMP_GRC << \
 +                                              DMAE_COMMAND_C_DST_SHIFT)
 +
 +#define DMAE_CMD_C_ENABLE             DMAE_COMMAND_C_TYPE_ENABLE
 +
 +#define DMAE_CMD_ENDIANITY_NO_SWAP    (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
 +#define DMAE_CMD_ENDIANITY_B_SWAP     (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
 +#define DMAE_CMD_ENDIANITY_DW_SWAP    (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
 +#define DMAE_CMD_ENDIANITY_B_DW_SWAP  (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
 +
 +#define DMAE_CMD_PORT_0                       0
 +#define DMAE_CMD_PORT_1                       DMAE_COMMAND_PORT
 +
 +#define DMAE_CMD_SRC_RESET            DMAE_COMMAND_SRC_RESET
 +#define DMAE_CMD_DST_RESET            DMAE_COMMAND_DST_RESET
 +#define DMAE_CMD_E1HVN_SHIFT          DMAE_COMMAND_E1HVN_SHIFT
 +
 +#define DMAE_SRC_PF                   0
 +#define DMAE_SRC_VF                   1
 +
 +#define DMAE_DST_PF                   0
 +#define DMAE_DST_VF                   1
 +
 +#define DMAE_C_SRC                    0
 +#define DMAE_C_DST                    1
 +
 +#define DMAE_LEN32_RD_MAX             0x80
 +#define DMAE_LEN32_WR_MAX(bp)         (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
 +
 +#define DMAE_COMP_VAL                 0x60d0d0ae /* E2 and on - upper bit
 +                                                      indicates error */
 +
 +#define MAX_DMAE_C_PER_PORT           8
 +#define INIT_DMAE_C(bp)                       (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
-                                        (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
++                                       BP_VN(bp))
 +#define PMF_DMAE_C(bp)                        (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
 +                                       E1HVN_MAX)
 +
 +/* PCIE link and speed */
 +#define PCICFG_LINK_WIDTH             0x1f00000
 +#define PCICFG_LINK_WIDTH_SHIFT               20
 +#define PCICFG_LINK_SPEED             0xf0000
 +#define PCICFG_LINK_SPEED_SHIFT               16
 +
 +
 +#define BNX2X_NUM_TESTS                       7
 +
 +#define BNX2X_PHY_LOOPBACK            0
 +#define BNX2X_MAC_LOOPBACK            1
 +#define BNX2X_PHY_LOOPBACK_FAILED     1
 +#define BNX2X_MAC_LOOPBACK_FAILED     2
 +#define BNX2X_LOOPBACK_FAILED         (BNX2X_MAC_LOOPBACK_FAILED | \
 +                                       BNX2X_PHY_LOOPBACK_FAILED)
 +
 +
 +#define STROM_ASSERT_ARRAY_SIZE               50
 +
 +
 +/* must be used on a CID before placing it on a HW ring */
 +#define HW_CID(bp, x)                 ((BP_PORT(bp) << 23) | \
++                                       (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
 +                                       (x))
 +
 +#define SP_DESC_CNT           (BCM_PAGE_SIZE / sizeof(struct eth_spe))
 +#define MAX_SP_DESC_CNT                       (SP_DESC_CNT - 1)
 +
 +
 +#define BNX2X_BTR                     4
 +#define MAX_SPQ_PENDING                       8
 +
 +/* CMNG constants, as derived from system spec calculations */
 +/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
 +#define DEF_MIN_RATE                                  100
 +/* resolution of the rate shaping timer - 400 usec */
 +#define RS_PERIODIC_TIMEOUT_USEC                      400
 +/* number of bytes in single QM arbitration cycle -
 + * coefficient for calculating the fairness timer */
 +#define QM_ARB_BYTES                                  160000
 +/* resolution of Min algorithm 1:100 */
 +#define MIN_RES                                               100
 +/* how many bytes above threshold for the minimal credit of Min algorithm*/
 +#define MIN_ABOVE_THRESH                              32768
 +/* Fairness algorithm integration time coefficient -
 + * for calculating the actual Tfair */
 +#define T_FAIR_COEF   ((MIN_ABOVE_THRESH +  QM_ARB_BYTES) * 8 * MIN_RES)
 +/* Memory of fairness algorithm: 2 cycles */
 +#define FAIR_MEM                                      2
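 +
 +/* Plugging in the constants above:
 + *   T_FAIR_COEF = (32768 + 160000) * 8 * 100 = 154,214,400
 + * a value the congestion-management code later combines with the configured
 + * line speed to obtain the actual Tfair.
 + */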
 +
 +
 +#define ATTN_NIG_FOR_FUNC             (1L << 8)
 +#define ATTN_SW_TIMER_4_FUNC          (1L << 9)
 +#define GPIO_2_FUNC                   (1L << 10)
 +#define GPIO_3_FUNC                   (1L << 11)
 +#define GPIO_4_FUNC                   (1L << 12)
 +#define ATTN_GENERAL_ATTN_1           (1L << 13)
 +#define ATTN_GENERAL_ATTN_2           (1L << 14)
 +#define ATTN_GENERAL_ATTN_3           (1L << 15)
 +#define ATTN_GENERAL_ATTN_4           (1L << 13)
 +#define ATTN_GENERAL_ATTN_5           (1L << 14)
 +#define ATTN_GENERAL_ATTN_6           (1L << 15)
 +
 +#define ATTN_HARD_WIRED_MASK          0xff00
 +#define ATTENTION_ID                  4
 +
 +
 +/* stuff added to make the code fit 80Col */
 +
 +#define BNX2X_PMF_LINK_ASSERT \
 +      GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
 +
 +#define BNX2X_MC_ASSERT_BITS \
 +      (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
 +       GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
 +       GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
 +       GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
 +
 +#define BNX2X_MCP_ASSERT \
 +      GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
 +
 +#define BNX2X_GRC_TIMEOUT     GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
 +#define BNX2X_GRC_RSV         (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
 +
 +#define HW_INTERRUT_ASSERT_SET_0 \
 +                              (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
 +#define HW_PRTY_ASSERT_SET_0  (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
 +#define HW_INTERRUT_ASSERT_SET_1 \
 +                              (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
 +#define HW_PRTY_ASSERT_SET_1  (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
 +                           AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
 +#define HW_INTERRUT_ASSERT_SET_2 \
 +                              (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
 +                      AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
 +                               AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
 +#define HW_PRTY_ASSERT_SET_2  (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
 +                      AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
 +
 +#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
 +              AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
 +              AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
 +              AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
 +
 +#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
 +                            AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
 +
 +#define RSS_FLAGS(bp) \
 +              (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
 +               TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
 +               TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
 +               TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
 +               (bp->multi_mode << \
 +                TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 +#define MULTI_MASK                    0x7f
 +
 +
 +#define DEF_USB_FUNC_OFF      offsetof(struct cstorm_def_status_block_u, func)
 +#define DEF_CSB_FUNC_OFF      offsetof(struct cstorm_def_status_block_c, func)
 +#define DEF_XSB_FUNC_OFF      offsetof(struct xstorm_def_status_block, func)
 +#define DEF_TSB_FUNC_OFF      offsetof(struct tstorm_def_status_block, func)
 +
 +#define DEF_USB_IGU_INDEX_OFF \
 +                      offsetof(struct cstorm_def_status_block_u, igu_index)
 +#define DEF_CSB_IGU_INDEX_OFF \
 +                      offsetof(struct cstorm_def_status_block_c, igu_index)
 +#define DEF_XSB_IGU_INDEX_OFF \
 +                      offsetof(struct xstorm_def_status_block, igu_index)
 +#define DEF_TSB_IGU_INDEX_OFF \
 +                      offsetof(struct tstorm_def_status_block, igu_index)
 +
 +#define DEF_USB_SEGMENT_OFF \
 +                      offsetof(struct cstorm_def_status_block_u, segment)
 +#define DEF_CSB_SEGMENT_OFF \
 +                      offsetof(struct cstorm_def_status_block_c, segment)
 +#define DEF_XSB_SEGMENT_OFF \
 +                      offsetof(struct xstorm_def_status_block, segment)
 +#define DEF_TSB_SEGMENT_OFF \
 +                      offsetof(struct tstorm_def_status_block, segment)
 +
 +#define BNX2X_SP_DSB_INDEX \
 +              (&bp->def_status_blk->sp_sb.\
 +                                      index_values[HC_SP_INDEX_ETH_DEF_CONS])
 +
 +#define SET_FLAG(value, mask, flag) \
 +      do {\
 +              (value) &= ~(mask);\
 +              (value) |= ((flag) << (mask##_SHIFT));\
 +      } while (0)
 +
 +#define GET_FLAG(value, mask) \
 +      (((value) & (mask)) >> (mask##_SHIFT))
 +
 +#define GET_FIELD(value, fname) \
 +      (((value) & (fname##_MASK)) >> (fname##_SHIFT))
 +
 +#define CAM_IS_INVALID(x) \
 +      (GET_FLAG(x.flags, \
 +      MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
 +      (T_ETH_MAC_COMMAND_INVALIDATE))
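A minimal usage sketch for the SET_FLAG/GET_FLAG helpers above, showing how they rely on a <NAME>/<NAME>_SHIFT macro pair via token pasting (FOO_MASK and FOO_MASK_SHIFT below are made-up names, not part of the driver):

    #define FOO_MASK        0x00f0
    #define FOO_MASK_SHIFT  4

    u32 value = 0;
    SET_FLAG(value, FOO_MASK, 0x3); /* value &= ~0xf0; value |= 0x3 << 4; value == 0x30 */
    GET_FLAG(value, FOO_MASK);      /* (0x30 & 0xf0) >> 4 == 0x3 */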
 +
 +/* Number of u32 elements in MC hash array */
 +#define MC_HASH_SIZE                  8
 +#define MC_HASH_OFFSET(bp, i)         (BAR_TSTRORM_INTMEM + \
 +      TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
 +
 +
 +#ifndef PXP2_REG_PXP2_INT_STS
 +#define PXP2_REG_PXP2_INT_STS         PXP2_REG_PXP2_INT_STS_0
 +#endif
 +
 +#ifndef ETH_MAX_RX_CLIENTS_E2
 +#define ETH_MAX_RX_CLIENTS_E2         ETH_MAX_RX_CLIENTS_E1H
 +#endif
 +
 +#define BNX2X_VPD_LEN                 128
 +#define VENDOR_ID_LEN                 4
 +
 +/* Congestion management fairness mode */
 +#define CMNG_FNS_NONE         0
 +#define CMNG_FNS_MINMAX               1
 +
 +#define HC_SEG_ACCESS_DEF             0   /*Driver decision 0-3*/
 +#define HC_SEG_ACCESS_ATTN            4
 +#define HC_SEG_ACCESS_NORM            0   /*Driver decision 0-1*/
 +
 +static const u32 dmae_reg_go_c[] = {
 +      DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
 +      DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
 +      DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
 +      DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
 +};
 +
 +void bnx2x_set_ethtool_ops(struct net_device *netdev);
 +void bnx2x_notify_link_changed(struct bnx2x *bp);
 +#endif /* bnx2x.h */
index 5c3eb17,0000000..e575e89
mode 100644,000000..100644
--- /dev/null
@@@ -1,3597 -1,0 +1,3598 @@@
-       int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                             ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
 +/* bnx2x_cmn.c: Broadcom Everest network driver.
 + *
 + * Copyright (c) 2007-2011 Broadcom Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation.
 + *
 + * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 + * Written by: Eliezer Tamir
 + * Based on code from Michael Chan's bnx2 driver
 + * UDP CSUM errata workaround by Arik Gendelman
 + * Slowpath and fastpath rework by Vladislav Zolotarov
 + * Statistics and Link management by Yitchak Gertner
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/etherdevice.h>
 +#include <linux/if_vlan.h>
 +#include <linux/interrupt.h>
 +#include <linux/ip.h>
 +#include <net/ipv6.h>
 +#include <net/ip6_checksum.h>
 +#include <linux/firmware.h>
 +#include <linux/prefetch.h>
 +#include "bnx2x_cmn.h"
 +#include "bnx2x_init.h"
 +#include "bnx2x_sp.h"
 +
 +
 +
 +/**
 + * bnx2x_bz_fp - zero content of the fastpath structure.
 + *
 + * @bp:               driver handle
 + * @index:    fastpath index to be zeroed
 + *
 + * Makes sure the contents of bp->fp[index].napi are kept
 + * intact.
 + */
 +static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
 +{
 +      struct bnx2x_fastpath *fp = &bp->fp[index];
 +      struct napi_struct orig_napi = fp->napi;
 +      /* bzero bnx2x_fastpath contents */
 +      memset(fp, 0, sizeof(*fp));
 +
 +      /* Restore the NAPI object as it has been already initialized */
 +      fp->napi = orig_napi;
 +
 +      fp->bp = bp;
 +      fp->index = index;
 +      if (IS_ETH_FP(fp))
 +              fp->max_cos = bp->max_cos;
 +      else
 +              /* Special queues support only one CoS */
 +              fp->max_cos = 1;
 +
 +      /*
 +       * set the tpa flag for each queue. The tpa flag determines the queue's
 +       * minimal size, so it must be set prior to queue memory allocation
 +       */
 +      fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
 +
 +#ifdef BCM_CNIC
 +      /* We don't want TPA on an FCoE L2 ring */
 +      if (IS_FCOE_FP(fp))
 +              fp->disable_tpa = 1;
 +#endif
 +}
 +
 +/**
 + * bnx2x_move_fp - move content of the fastpath structure.
 + *
 + * @bp:               driver handle
 + * @from:     source FP index
 + * @to:               destination FP index
 + *
 + * Makes sure the contents of bp->fp[to].napi are kept
 + * intact.
 + */
 +static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 +{
 +      struct bnx2x_fastpath *from_fp = &bp->fp[from];
 +      struct bnx2x_fastpath *to_fp = &bp->fp[to];
 +      struct napi_struct orig_napi = to_fp->napi;
 +      /* Move bnx2x_fastpath contents */
 +      memcpy(to_fp, from_fp, sizeof(*to_fp));
 +      to_fp->index = to;
 +
 +      /* Restore the NAPI object as it has been already initialized */
 +      to_fp->napi = orig_napi;
 +}
 +
 +int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 +
 +/* free skb in the packet ring at pos idx
 + * return idx of last bd freed
 + */
 +static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 +                           u16 idx)
 +{
 +      struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 +      struct eth_tx_start_bd *tx_start_bd;
 +      struct eth_tx_bd *tx_data_bd;
 +      struct sk_buff *skb = tx_buf->skb;
 +      u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 +      int nbd;
 +
 +      /* prefetch skb end pointer to speedup dev_kfree_skb() */
 +      prefetch(&skb->end);
 +
 +      DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
 +         txdata->txq_index, idx, tx_buf, skb);
 +
 +      /* unmap first bd */
 +      DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
 +      tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
 +      dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 +                       BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 +
 +
 +      nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
 +              BNX2X_ERR("BAD nbd!\n");
 +              bnx2x_panic();
 +      }
 +#endif
 +      new_cons = nbd + tx_buf->first_bd;
 +
 +      /* Get the next bd */
 +      bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +
 +      /* Skip a parse bd... */
 +      --nbd;
 +      bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +
 +      /* ...and the TSO split header bd since they have no mapping */
 +      if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 +              --nbd;
 +              bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +      }
 +
 +      /* now free frags */
 +      while (nbd > 0) {
 +
 +              DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
 +              tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 +              dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
 +                             BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 +              if (--nbd)
 +                      bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +      }
 +
 +      /* release skb */
 +      WARN_ON(!skb);
 +      dev_kfree_skb_any(skb);
 +      tx_buf->first_bd = 0;
 +      tx_buf->skb = NULL;
 +
 +      return new_cons;
 +}
 +
 +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 +{
 +      struct netdev_queue *txq;
 +      u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return -1;
 +#endif
 +
 +      txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 +      hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
 +      sw_cons = txdata->tx_pkt_cons;
 +
 +      while (sw_cons != hw_cons) {
 +              u16 pkt_cons;
 +
 +              pkt_cons = TX_BD(sw_cons);
 +
 +              DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
 +                                    " pkt_cons %u\n",
 +                 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 +
 +              bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
 +              sw_cons++;
 +      }
 +
 +      txdata->tx_pkt_cons = sw_cons;
 +      txdata->tx_bd_cons = bd_cons;
 +
 +      /* Need to make the tx_bd_cons update visible to start_xmit()
 +       * before checking for netif_tx_queue_stopped().  Without the
 +       * memory barrier, there is a small possibility that
 +       * start_xmit() will miss it and cause the queue to be stopped
 +       * forever.
 +       * On the other hand we need an rmb() here to ensure the proper
 +       * ordering of bit testing in the following
 +       * netif_tx_queue_stopped(txq) call.
 +       */
 +      smp_mb();
 +
 +      if (unlikely(netif_tx_queue_stopped(txq))) {
 +              /* Taking tx_lock() is needed to prevent reenabling the queue
 +               * while it's empty. This could have happened if rx_action() gets
 +               * suspended in bnx2x_tx_int() after the condition before
 +               * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
 +               *
 +               * stops the queue->sees fresh tx_bd_cons->releases the queue->
 +               * sends some packets consuming the whole queue again->
 +               * stops the queue
 +               */
 +
 +              __netif_tx_lock(txq, smp_processor_id());
 +
 +              if ((netif_tx_queue_stopped(txq)) &&
 +                  (bp->state == BNX2X_STATE_OPEN) &&
 +                  (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
 +                      netif_tx_wake_queue(txq);
 +
 +              __netif_tx_unlock(txq);
 +      }
 +      return 0;
 +}
 +
 +static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 +                                           u16 idx)
 +{
 +      u16 last_max = fp->last_max_sge;
 +
 +      if (SUB_S16(idx, last_max) > 0)
 +              fp->last_max_sge = idx;
 +}
 +
 +static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 +                                struct eth_fast_path_rx_cqe *fp_cqe)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
 +                                   le16_to_cpu(fp_cqe->len_on_bd)) >>
 +                    SGE_PAGE_SHIFT;
 +      u16 last_max, last_elem, first_elem;
 +      u16 delta = 0;
 +      u16 i;
 +
 +      if (!sge_len)
 +              return;
 +
 +      /* First mark all used pages */
 +      for (i = 0; i < sge_len; i++)
 +              BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 +                      RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
 +
 +      DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 +         sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 +
 +      /* Here we assume that the last SGE index is the biggest */
 +      prefetch((void *)(fp->sge_mask));
 +      bnx2x_update_last_max_sge(fp,
 +              le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 +
 +      last_max = RX_SGE(fp->last_max_sge);
 +      last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
 +      first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 +
 +      /* If ring is not full */
 +      if (last_elem + 1 != first_elem)
 +              last_elem++;
 +
 +      /* Now update the prod */
 +      for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
 +              if (likely(fp->sge_mask[i]))
 +                      break;
 +
 +              fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
 +              delta += BIT_VEC64_ELEM_SZ;
 +      }
 +
 +      if (delta > 0) {
 +              fp->rx_sge_prod += delta;
 +              /* clear page-end entries */
 +              bnx2x_clear_sge_mask_next_elems(fp);
 +      }
 +
 +      DP(NETIF_MSG_RX_STATUS,
 +         "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
 +         fp->last_max_sge, fp->rx_sge_prod);
 +}
 +
 +static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 +                          struct sk_buff *skb, u16 cons, u16 prod,
 +                          struct eth_fast_path_rx_cqe *cqe)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 +      struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 +      struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 +      dma_addr_t mapping;
 +      struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 +      struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 +
 +      /* print error if current state != stop */
 +      if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 +              BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 +
 +      /* Try to map an empty skb from the aggregation info  */
 +      mapping = dma_map_single(&bp->pdev->dev,
 +                               first_buf->skb->data,
 +                               fp->rx_buf_size, DMA_FROM_DEVICE);
 +      /*
 +       *  ...if it fails - move the skb from the consumer to the producer
 +       *  and set the current aggregation state as ERROR to drop it
 +       *  when TPA_STOP arrives.
 +       */
 +
 +      if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 +              /* Move the BD from the consumer to the producer */
 +              bnx2x_reuse_rx_skb(fp, cons, prod);
 +              tpa_info->tpa_state = BNX2X_TPA_ERROR;
 +              return;
 +      }
 +
 +      /* move empty skb from pool to prod */
 +      prod_rx_buf->skb = first_buf->skb;
 +      dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 +      /* point prod_bd to new skb */
 +      prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +      prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +
 +      /* move partial skb from cons to pool (don't unmap yet) */
 +      *first_buf = *cons_rx_buf;
 +
 +      /* mark bin state as START */
 +      tpa_info->parsing_flags =
 +              le16_to_cpu(cqe->pars_flags.flags);
 +      tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
 +      tpa_info->tpa_state = BNX2X_TPA_START;
 +      tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 +      tpa_info->placement_offset = cqe->placement_offset;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      fp->tpa_queue_used |= (1 << queue);
 +#ifdef _ASM_GENERIC_INT_L64_H
 +      DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
 +#else
 +      DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
 +#endif
 +         fp->tpa_queue_used);
 +#endif
 +}
 +
 +/* Timestamp option length allowed for TPA aggregation:
 + *
 + *            nop nop kind length echo val
 + */
 +#define TPA_TSTAMP_OPT_LEN    12
 +/**
 + * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 + *
 + * @bp:                       driver handle
 + * @parsing_flags:    parsing flags from the START CQE
 + * @len_on_bd:                total length of the first packet for the
 + *                    aggregation.
 + *
 + * Approximate value of the MSS for this aggregation calculated using
 + * the first packet of it.
 + */
 +static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 +                                  u16 len_on_bd)
 +{
 +      /*
 +       * TPA aggregation won't have either IP options or TCP options
 +       * other than timestamp or IPv6 extension headers.
 +       */
 +      u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
 +
 +      if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
 +          PRS_FLAG_OVERETH_IPV6)
 +              hdrs_len += sizeof(struct ipv6hdr);
 +      else /* IPv4 */
 +              hdrs_len += sizeof(struct iphdr);
 +
 +
 +      /* Check if there was a TCP timestamp; if there is, it will
 +       * always be 12 bytes long: nop nop kind length echo val.
 +       *
 +       * Otherwise FW would close the aggregation.
 +       */
 +      if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 +              hdrs_len += TPA_TSTAMP_OPT_LEN;
 +
 +      return len_on_bd - hdrs_len;
 +}
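A worked example of the arithmetic above (illustration only, assuming an untagged Ethernet frame and the standard 20-byte IPv4 and TCP headers): for an IPv4 aggregation whose first packet carries TCP timestamps and has len_on_bd == 1514,

    hdrs_len = 14 (ETH_HLEN) + 20 (tcphdr) + 20 (iphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66
    mss      = 1514 - 66 = 1448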
 +
 +static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +                             u16 queue, struct sk_buff *skb,
 +                             struct eth_end_agg_rx_cqe *cqe,
 +                             u16 cqe_idx)
 +{
 +      struct sw_rx_page *rx_pg, old_rx_pg;
 +      u32 i, frag_len, frag_size, pages;
 +      int err;
 +      int j;
 +      struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 +      u16 len_on_bd = tpa_info->len_on_bd;
 +
 +      frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 +      pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
 +
 +      /* This is needed in order to enable forwarding support */
 +      if (frag_size)
 +              skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
 +                                      tpa_info->parsing_flags, len_on_bd);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
 +              BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 +                        pages, cqe_idx);
 +              BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 +              bnx2x_panic();
 +              return -EINVAL;
 +      }
 +#endif
 +
 +      /* Run through the SGL and compose the fragmented skb */
 +      for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 +              u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 +
 +              /* FW gives the indices of the SGE as if the ring is an array
 +                 (meaning that "next" element will consume 2 indices) */
 +              frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
 +              rx_pg = &fp->rx_page_ring[sge_idx];
 +              old_rx_pg = *rx_pg;
 +
 +              /* If we fail to allocate a substitute page, we simply stop
 +                 where we are and drop the whole packet */
 +              err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 +              if (unlikely(err)) {
 +                      fp->eth_q_stats.rx_skb_alloc_failed++;
 +                      return err;
 +              }
 +
 +              /* Unmap the page as we are going to pass it to the stack */
 +              dma_unmap_page(&bp->pdev->dev,
 +                             dma_unmap_addr(&old_rx_pg, mapping),
 +                             SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 +
 +              /* Add one frag and update the appropriate fields in the skb */
 +              skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
 +
 +              skb->data_len += frag_len;
 +              skb->truesize += frag_len;
 +              skb->len += frag_len;
 +
 +              frag_size -= frag_len;
 +      }
 +
 +      return 0;
 +}
 +
 +static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +                         u16 queue, struct eth_end_agg_rx_cqe *cqe,
 +                         u16 cqe_idx)
 +{
 +      struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 +      struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 +      u8 pad = tpa_info->placement_offset;
 +      u16 len = tpa_info->len_on_bd;
 +      struct sk_buff *skb = rx_buf->skb;
 +      /* alloc new skb */
 +      struct sk_buff *new_skb;
 +      u8 old_tpa_state = tpa_info->tpa_state;
 +
 +      tpa_info->tpa_state = BNX2X_TPA_STOP;
 +
 +      /* If there was an error during the handling of the TPA_START -
 +       * drop this aggregation.
 +       */
 +      if (old_tpa_state == BNX2X_TPA_ERROR)
 +              goto drop;
 +
 +      /* Try to allocate the new skb */
 +      new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 +
 +      /* Unmap skb in the pool anyway, as we are going to change
 +         pool entry status to BNX2X_TPA_STOP even if new skb allocation
 +         fails. */
 +      dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 +                       fp->rx_buf_size, DMA_FROM_DEVICE);
 +
 +      if (likely(new_skb)) {
 +              prefetch(skb);
 +              prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +              if (pad + len > fp->rx_buf_size) {
 +                      BNX2X_ERR("skb_put is about to fail...  "
 +                                "pad %d  len %d  rx_buf_size %d\n",
 +                                pad, len, fp->rx_buf_size);
 +                      bnx2x_panic();
 +                      return;
 +              }
 +#endif
 +
 +              skb_reserve(skb, pad);
 +              skb_put(skb, len);
 +
 +              skb->protocol = eth_type_trans(skb, bp->dev);
 +              skb->ip_summed = CHECKSUM_UNNECESSARY;
 +
 +              if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
 +                      if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 +                              __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
 +                      napi_gro_receive(&fp->napi, skb);
 +              } else {
 +                      DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
 +                         " - dropping packet!\n");
 +                      dev_kfree_skb_any(skb);
 +              }
 +
 +
 +              /* put new skb in bin */
 +              rx_buf->skb = new_skb;
 +
 +              return;
 +      }
 +
 +drop:
 +      /* drop the packet and keep the buffer in the bin */
 +      DP(NETIF_MSG_RX_STATUS,
 +         "Failed to allocate or map a new skb - dropping packet!\n");
 +      fp->eth_q_stats.rx_skb_alloc_failed++;
 +}
 +
 +/* Set Toeplitz hash value in the skb using the value from the
 + * CQE (calculated by HW).
 + */
 +static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
 +                                      struct sk_buff *skb)
 +{
 +      /* Set Toeplitz hash from CQE */
 +      if ((bp->dev->features & NETIF_F_RXHASH) &&
 +          (cqe->fast_path_cqe.status_flags &
 +           ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
 +              skb->rxhash =
 +              le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
 +}
 +
 +int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 +      u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
 +      int rx_pkt = 0;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return 0;
 +#endif
 +
 +      /* CQ "next element" is the same size as a regular element,
 +         that's why it's ok here */
 +      hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
 +      if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
 +              hw_comp_cons++;
 +
 +      bd_cons = fp->rx_bd_cons;
 +      bd_prod = fp->rx_bd_prod;
 +      bd_prod_fw = bd_prod;
 +      sw_comp_cons = fp->rx_comp_cons;
 +      sw_comp_prod = fp->rx_comp_prod;
 +
 +      /* Memory barrier necessary as speculative reads of the rx
 +       * buffer can be ahead of the index in the status block
 +       */
 +      rmb();
 +
 +      DP(NETIF_MSG_RX_STATUS,
 +         "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
 +         fp->index, hw_comp_cons, sw_comp_cons);
 +
 +      while (sw_comp_cons != hw_comp_cons) {
 +              struct sw_rx_bd *rx_buf = NULL;
 +              struct sk_buff *skb;
 +              union eth_rx_cqe *cqe;
 +              struct eth_fast_path_rx_cqe *cqe_fp;
 +              u8 cqe_fp_flags;
 +              enum eth_rx_cqe_type cqe_fp_type;
 +              u16 len, pad;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +              if (unlikely(bp->panic))
 +                      return 0;
 +#endif
 +
 +              comp_ring_cons = RCQ_BD(sw_comp_cons);
 +              bd_prod = RX_BD(bd_prod);
 +              bd_cons = RX_BD(bd_cons);
 +
 +              /* Prefetch the page containing the BD descriptor
 +                 at producer's index. It will be needed when a new skb is
 +                 allocated */
 +              prefetch((void *)(PAGE_ALIGN((unsigned long)
 +                                           (&fp->rx_desc_ring[bd_prod])) -
 +                                PAGE_SIZE + 1));
 +
 +              cqe = &fp->rx_comp_ring[comp_ring_cons];
 +              cqe_fp = &cqe->fast_path_cqe;
 +              cqe_fp_flags = cqe_fp->type_error_flags;
 +              cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 +
 +              DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
 +                 "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
 +                 cqe_fp_flags, cqe_fp->status_flags,
 +                 le32_to_cpu(cqe_fp->rss_hash_result),
 +                 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
 +
 +              /* is this a slowpath msg? */
 +              if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 +                      bnx2x_sp_event(fp, cqe);
 +                      goto next_cqe;
 +
 +              /* this is an rx packet */
 +              } else {
 +                      rx_buf = &fp->rx_buf_ring[bd_cons];
 +                      skb = rx_buf->skb;
 +                      prefetch(skb);
 +
 +                      if (!CQE_TYPE_FAST(cqe_fp_type)) {
 +#ifdef BNX2X_STOP_ON_ERROR
 +                              /* sanity check */
 +                              if (fp->disable_tpa &&
 +                                  (CQE_TYPE_START(cqe_fp_type) ||
 +                                   CQE_TYPE_STOP(cqe_fp_type)))
 +                                      BNX2X_ERR("START/STOP packet while "
 +                                                "disable_tpa type %x\n",
 +                                                CQE_TYPE(cqe_fp_type));
 +#endif
 +
 +                              if (CQE_TYPE_START(cqe_fp_type)) {
 +                                      u16 queue = cqe_fp->queue_index;
 +                                      DP(NETIF_MSG_RX_STATUS,
 +                                         "calling tpa_start on queue %d\n",
 +                                         queue);
 +
 +                                      bnx2x_tpa_start(fp, queue, skb,
 +                                                      bd_cons, bd_prod,
 +                                                      cqe_fp);
 +
 +                                      /* Set Toeplitz hash for LRO skb */
 +                                      bnx2x_set_skb_rxhash(bp, cqe, skb);
 +
 +                                      goto next_rx;
 +
 +                              } else {
 +                                      u16 queue =
 +                                              cqe->end_agg_cqe.queue_index;
 +                                      DP(NETIF_MSG_RX_STATUS,
 +                                         "calling tpa_stop on queue %d\n",
 +                                         queue);
 +
 +                                      bnx2x_tpa_stop(bp, fp, queue,
 +                                                     &cqe->end_agg_cqe,
 +                                                     comp_ring_cons);
 +#ifdef BNX2X_STOP_ON_ERROR
 +                                      if (bp->panic)
 +                                              return 0;
 +#endif
 +
 +                                      bnx2x_update_sge_prod(fp, cqe_fp);
 +                                      goto next_cqe;
 +                              }
 +                      }
 +                      /* non TPA */
 +                      len = le16_to_cpu(cqe_fp->pkt_len);
 +                      pad = cqe_fp->placement_offset;
 +                      dma_sync_single_for_cpu(&bp->pdev->dev,
 +                                      dma_unmap_addr(rx_buf, mapping),
 +                                                     pad + RX_COPY_THRESH,
 +                                                     DMA_FROM_DEVICE);
 +                      prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 +
 +                      /* is this an error packet? */
 +                      if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
 +                              DP(NETIF_MSG_RX_ERR,
 +                                 "ERROR  flags %x  rx packet %u\n",
 +                                 cqe_fp_flags, sw_comp_cons);
 +                              fp->eth_q_stats.rx_err_discard_pkt++;
 +                              goto reuse_rx;
 +                      }
 +
 +                      /* Since we don't have a jumbo ring
 +                       * copy small packets if mtu > 1500
 +                       */
 +                      if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
 +                          (len <= RX_COPY_THRESH)) {
 +                              struct sk_buff *new_skb;
 +
 +                              new_skb = netdev_alloc_skb(bp->dev, len + pad);
 +                              if (new_skb == NULL) {
 +                                      DP(NETIF_MSG_RX_ERR,
 +                                         "ERROR  packet dropped "
 +                                         "because of alloc failure\n");
 +                                      fp->eth_q_stats.rx_skb_alloc_failed++;
 +                                      goto reuse_rx;
 +                              }
 +
 +                              /* aligned copy */
 +                              skb_copy_from_linear_data_offset(skb, pad,
 +                                                  new_skb->data + pad, len);
 +                              skb_reserve(new_skb, pad);
 +                              skb_put(new_skb, len);
 +
 +                              bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
 +
 +                              skb = new_skb;
 +
 +                      } else
 +                      if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 +                              dma_unmap_single(&bp->pdev->dev,
 +                                      dma_unmap_addr(rx_buf, mapping),
 +                                               fp->rx_buf_size,
 +                                               DMA_FROM_DEVICE);
 +                              skb_reserve(skb, pad);
 +                              skb_put(skb, len);
 +
 +                      } else {
 +                              DP(NETIF_MSG_RX_ERR,
 +                                 "ERROR  packet dropped because "
 +                                 "of alloc failure\n");
 +                              fp->eth_q_stats.rx_skb_alloc_failed++;
 +reuse_rx:
 +                              bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
 +                              goto next_rx;
 +                      }
 +
 +                      skb->protocol = eth_type_trans(skb, bp->dev);
 +
 +                      /* Set Toeplitz hash for a non-LRO skb */
 +                      bnx2x_set_skb_rxhash(bp, cqe, skb);
 +
 +                      skb_checksum_none_assert(skb);
 +
 +                      if (bp->dev->features & NETIF_F_RXCSUM) {
 +
 +                              if (likely(BNX2X_RX_CSUM_OK(cqe)))
 +                                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +                              else
 +                                      fp->eth_q_stats.hw_csum_err++;
 +                      }
 +              }
 +
 +              skb_record_rx_queue(skb, fp->index);
 +
 +              if (le16_to_cpu(cqe_fp->pars_flags.flags) &
 +                  PARSING_FLAGS_VLAN)
 +                      __vlan_hwaccel_put_tag(skb,
 +                                             le16_to_cpu(cqe_fp->vlan_tag));
 +              napi_gro_receive(&fp->napi, skb);
 +
 +
 +next_rx:
 +              rx_buf->skb = NULL;
 +
 +              bd_cons = NEXT_RX_IDX(bd_cons);
 +              bd_prod = NEXT_RX_IDX(bd_prod);
 +              bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
 +              rx_pkt++;
 +next_cqe:
 +              sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
 +              sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
 +
 +              if (rx_pkt == budget)
 +                      break;
 +      } /* while */
 +
 +      fp->rx_bd_cons = bd_cons;
 +      fp->rx_bd_prod = bd_prod_fw;
 +      fp->rx_comp_cons = sw_comp_cons;
 +      fp->rx_comp_prod = sw_comp_prod;
 +
 +      /* Update producers */
 +      bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 +                           fp->rx_sge_prod);
 +
 +      fp->rx_pkt += rx_pkt;
 +      fp->rx_calls++;
 +
 +      return rx_pkt;
 +}
 +
 +static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 +{
 +      struct bnx2x_fastpath *fp = fp_cookie;
 +      struct bnx2x *bp = fp->bp;
 +      u8 cos;
 +
 +      DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
 +                       "[fp %d fw_sd %d igusb %d]\n",
 +         fp->index, fp->fw_sb_id, fp->igu_sb_id);
 +      bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return IRQ_HANDLED;
 +#endif
 +
 +      /* Handle Rx and Tx according to MSI-X vector */
 +      prefetch(fp->rx_cons_sb);
 +
 +      for_each_cos_in_tx_queue(fp, cos)
 +              prefetch(fp->txdata[cos].tx_cons_sb);
 +
 +      prefetch(&fp->sb_running_index[SM_RX_ID]);
 +      napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 +
 +      return IRQ_HANDLED;
 +}
 +
 +/* HW Lock for shared dual port PHYs */
 +void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 +{
 +      mutex_lock(&bp->port.phy_mutex);
 +
 +      if (bp->port.need_hw_lock)
 +              bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 +}
 +
 +void bnx2x_release_phy_lock(struct bnx2x *bp)
 +{
 +      if (bp->port.need_hw_lock)
 +              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 +
 +      mutex_unlock(&bp->port.phy_mutex);
 +}
 +
 +/* calculates MF speed according to current linespeed and MF configuration */
 +u16 bnx2x_get_mf_speed(struct bnx2x *bp)
 +{
 +      u16 line_speed = bp->link_vars.line_speed;
 +      if (IS_MF(bp)) {
 +              u16 maxCfg = bnx2x_extract_max_cfg(bp,
 +                                                 bp->mf_config[BP_VN(bp)]);
 +
 +              /* Calculate the current MAX line speed limit for the MF
 +               * devices
 +               */
 +              if (IS_MF_SI(bp))
 +                      line_speed = (line_speed * maxCfg) / 100;
 +              else { /* SD mode */
 +                      u16 vn_max_rate = maxCfg * 100;
 +
 +                      if (vn_max_rate < line_speed)
 +                              line_speed = vn_max_rate;
 +              }
 +      }
 +
 +      return line_speed;
 +}
 +
 +/**
 + * bnx2x_fill_report_data - fill link report data to report
 + *
 + * @bp:               driver handle
 + * @data:     link state to update
 + *
 + * It uses non-atomic bit operations because it is called under the mutex.
 + */
 +static inline void bnx2x_fill_report_data(struct bnx2x *bp,
 +                                        struct bnx2x_link_report_data *data)
 +{
 +      u16 line_speed = bnx2x_get_mf_speed(bp);
 +
 +      memset(data, 0, sizeof(*data));
 +
 +      /* Fill the report data: effective line speed */
 +      data->line_speed = line_speed;
 +
 +      /* Link is down */
 +      if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
 +              __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                        &data->link_report_flags);
 +
 +      /* Full DUPLEX */
 +      if (bp->link_vars.duplex == DUPLEX_FULL)
 +              __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
 +
 +      /* Rx Flow Control is ON */
 +      if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
 +              __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
 +
 +      /* Tx Flow Control is ON */
 +      if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
 +              __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
 +}
 +
 +/**
 + * bnx2x_link_report - report link status to OS.
 + *
 + * @bp:               driver handle
 + *
 + * Calls __bnx2x_link_report() under the same locking scheme
 + * as the link/PHY state managing code to ensure consistent link
 + * reporting.
 + */
 +
 +void bnx2x_link_report(struct bnx2x *bp)
 +{
 +      bnx2x_acquire_phy_lock(bp);
 +      __bnx2x_link_report(bp);
 +      bnx2x_release_phy_lock(bp);
 +}
 +
 +/**
 + * __bnx2x_link_report - report link status to OS.
 + *
 + * @bp:               driver handle
 + *
 + * Non-atomic implementation.
 + * Should be called under the phy_lock.
 + */
 +void __bnx2x_link_report(struct bnx2x *bp)
 +{
 +      struct bnx2x_link_report_data cur_data;
 +
 +      /* reread mf_cfg */
 +      if (!CHIP_IS_E1(bp))
 +              bnx2x_read_mf_cfg(bp);
 +
 +      /* Read the current link report info */
 +      bnx2x_fill_report_data(bp, &cur_data);
 +
 +      /* Don't report link down or exactly the same link status twice */
 +      if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
 +          (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                    &bp->last_reported_link.link_report_flags) &&
 +           test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                    &cur_data.link_report_flags)))
 +              return;
 +
 +      bp->link_cnt++;
 +
 +      /* We are going to report new link parameters now -
 +       * remember the current data for the next time.
 +       */
 +      memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
 +
 +      if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                   &cur_data.link_report_flags)) {
 +              netif_carrier_off(bp->dev);
 +              netdev_err(bp->dev, "NIC Link is Down\n");
 +              return;
 +      } else {
 +              const char *duplex;
 +              const char *flow;
 +
 +              netif_carrier_on(bp->dev);
 +
 +              if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
 +                                     &cur_data.link_report_flags))
 +                      duplex = "full";
 +              else
 +                      duplex = "half";
 +
 +              /* Handle the FC at the end so that only these flags can
 +               * possibly be set. This way we can easily check whether FC
 +               * is enabled.
 +               */
 +              if (cur_data.link_report_flags) {
 +                      if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
 +                                   &cur_data.link_report_flags)) {
 +                              if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
 +                                   &cur_data.link_report_flags))
 +                                      flow = "ON - receive & transmit";
 +                              else
 +                                      flow = "ON - receive";
 +                      } else {
 +                              flow = "ON - transmit";
 +                      }
 +              } else {
 +                      flow = "none";
 +              }
 +              netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
 +                          cur_data.line_speed, duplex, flow);
 +      }
 +}
 +
 +void bnx2x_init_rx_rings(struct bnx2x *bp)
 +{
 +      int func = BP_FUNC(bp);
-                       for (i = 0; i < max_agg_queues; i++) {
 +      u16 ring_prod;
 +      int i, j;
 +
 +      /* Allocate TPA resources */
 +      for_each_rx_queue(bp, j) {
 +              struct bnx2x_fastpath *fp = &bp->fp[j];
 +
 +              DP(NETIF_MSG_IFUP,
 +                 "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 +
 +              if (!fp->disable_tpa) {
 +                      /* Fill the per-aggregation pool */
-                                                           max_agg_queues);
++                      for (i = 0; i < MAX_AGG_QS(bp); i++) {
 +                              struct bnx2x_agg_info *tpa_info =
 +                                      &fp->tpa_info[i];
 +                              struct sw_rx_bd *first_buf =
 +                                      &tpa_info->first_buf;
 +
 +                              first_buf->skb = netdev_alloc_skb(bp->dev,
 +                                                     fp->rx_buf_size);
 +                              if (!first_buf->skb) {
 +                                      BNX2X_ERR("Failed to allocate TPA "
 +                                                "skb pool for queue[%d] - "
 +                                                "disabling TPA on this "
 +                                                "queue!\n", j);
 +                                      bnx2x_free_tpa_pool(bp, fp, i);
 +                                      fp->disable_tpa = 1;
 +                                      break;
 +                              }
 +                              dma_unmap_addr_set(first_buf, mapping, 0);
 +                              tpa_info->tpa_state = BNX2X_TPA_STOP;
 +                      }
 +
 +                      /* "next page" elements initialization */
 +                      bnx2x_set_next_page_sgl(fp);
 +
 +                      /* set SGEs bit mask */
 +                      bnx2x_init_sge_ring_bit_mask(fp);
 +
 +                      /* Allocate SGEs and initialize the ring elements */
 +                      for (i = 0, ring_prod = 0;
 +                           i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
 +
 +                              if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
 +                                      BNX2X_ERR("was only able to allocate "
 +                                                "%d rx sges\n", i);
 +                                      BNX2X_ERR("disabling TPA for "
 +                                                "queue[%d]\n", j);
 +                                      /* Cleanup already allocated elements */
 +                                      bnx2x_free_rx_sge_range(bp, fp,
 +                                                              ring_prod);
 +                                      bnx2x_free_tpa_pool(bp, fp,
-                       bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
-                                           ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                           ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
++                                                          MAX_AGG_QS(bp));
 +                                      fp->disable_tpa = 1;
 +                                      ring_prod = 0;
 +                                      break;
 +                              }
 +                              ring_prod = NEXT_SGE_IDX(ring_prod);
 +                      }
 +
 +                      fp->rx_sge_prod = ring_prod;
 +              }
 +      }
 +
 +      for_each_rx_queue(bp, j) {
 +              struct bnx2x_fastpath *fp = &bp->fp[j];
 +
 +              fp->rx_bd_cons = 0;
 +
 +              /* Activate BD ring */
 +              /* Warning!
 +               * this will generate an interrupt (to the TSTORM)
 +               * must only be done after chip is initialized
 +               */
 +              bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
 +                                   fp->rx_sge_prod);
 +
 +              if (j != 0)
 +                      continue;
 +
 +              if (CHIP_IS_E1(bp)) {
 +                      REG_WR(bp, BAR_USTRORM_INTMEM +
 +                             USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
 +                             U64_LO(fp->rx_comp_mapping));
 +                      REG_WR(bp, BAR_USTRORM_INTMEM +
 +                             USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
 +                             U64_HI(fp->rx_comp_mapping));
 +              }
 +      }
 +}
 +
 +static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 +{
 +      int i;
 +      u8 cos;
 +
 +      for_each_tx_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              for_each_cos_in_tx_queue(fp, cos) {
 +                      struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 +
 +                      u16 bd_cons = txdata->tx_bd_cons;
 +                      u16 sw_prod = txdata->tx_pkt_prod;
 +                      u16 sw_cons = txdata->tx_pkt_cons;
 +
 +                      while (sw_cons != sw_prod) {
 +                              bd_cons = bnx2x_free_tx_pkt(bp, txdata,
 +                                                          TX_BD(sw_cons));
 +                              sw_cons++;
 +                      }
 +              }
 +      }
 +}
 +
 +static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      int i;
 +
 +      /* ring wasn't allocated */
 +      if (fp->rx_buf_ring == NULL)
 +              return;
 +
 +      for (i = 0; i < NUM_RX_BD; i++) {
 +              struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
 +              struct sk_buff *skb = rx_buf->skb;
 +
 +              if (skb == NULL)
 +                      continue;
 +              dma_unmap_single(&bp->pdev->dev,
 +                               dma_unmap_addr(rx_buf, mapping),
 +                               fp->rx_buf_size, DMA_FROM_DEVICE);
 +
 +              rx_buf->skb = NULL;
 +              dev_kfree_skb(skb);
 +      }
 +}
 +
 +static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 +{
 +      int j;
 +
 +      for_each_rx_queue(bp, j) {
 +              struct bnx2x_fastpath *fp = &bp->fp[j];
 +
 +              bnx2x_free_rx_bds(fp);
 +
 +              if (!fp->disable_tpa)
-       int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
-                          MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
++                      bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
 +      }
 +}
 +
 +void bnx2x_free_skbs(struct bnx2x *bp)
 +{
 +      bnx2x_free_tx_skbs(bp);
 +      bnx2x_free_rx_skbs(bp);
 +}
 +
 +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
 +{
 +      /* load old values */
 +      u32 mf_cfg = bp->mf_config[BP_VN(bp)];
 +
 +      if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
 +              /* leave all but MAX value */
 +              mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
 +
 +              /* set new MAX value */
 +              mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
 +                              & FUNC_MF_CFG_MAX_BW_MASK;
 +
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
 +      }
 +}
 +
 +/**
 + * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 + *
 + * @bp:               driver handle
 + * @nvecs:    number of vectors to be released
 + */
 +static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
 +{
 +      int i, offset = 0;
 +
 +      if (nvecs == offset)
 +              return;
 +      free_irq(bp->msix_table[offset].vector, bp->dev);
 +      DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
 +         bp->msix_table[offset].vector);
 +      offset++;
 +#ifdef BCM_CNIC
 +      if (nvecs == offset)
 +              return;
 +      offset++;
 +#endif
 +
 +      for_each_eth_queue(bp, i) {
 +              if (nvecs == offset)
 +                      return;
 +              DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
 +                 "irq\n", i, bp->msix_table[offset].vector);
 +
 +              free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
 +      }
 +}
 +
 +void bnx2x_free_irq(struct bnx2x *bp)
 +{
 +      if (bp->flags & USING_MSIX_FLAG)
 +              bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
 +                                   CNIC_PRESENT + 1);
 +      else if (bp->flags & USING_MSI_FLAG)
 +              free_irq(bp->pdev->irq, bp->dev);
 +      else
 +              free_irq(bp->pdev->irq, bp->dev);
 +}
 +
 +int bnx2x_enable_msix(struct bnx2x *bp)
 +{
 +      int msix_vec = 0, i, rc, req_cnt;
 +
 +      bp->msix_table[msix_vec].entry = msix_vec;
 +      DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
 +         bp->msix_table[0].entry);
 +      msix_vec++;
 +
 +#ifdef BCM_CNIC
 +      bp->msix_table[msix_vec].entry = msix_vec;
 +      DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
 +         bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
 +      msix_vec++;
 +#endif
 +      /* We need separate vectors for ETH queues only (not FCoE) */
 +      for_each_eth_queue(bp, i) {
 +              bp->msix_table[msix_vec].entry = msix_vec;
 +              DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
 +                 "(fastpath #%u)\n", msix_vec, msix_vec, i);
 +              msix_vec++;
 +      }
 +
 +      req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
 +
 +      rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
 +
 +      /*
 +       * reconfigure number of tx/rx queues according to available
 +       * MSI-X vectors
 +       */
 +      if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
 +              /* how many fewer vectors will we have? */
 +              int diff = req_cnt - rc;
 +
 +              DP(NETIF_MSG_IFUP,
 +                 "Trying to use less MSI-X vectors: %d\n", rc);
 +
 +              rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
 +
 +              if (rc) {
 +                      DP(NETIF_MSG_IFUP,
 +                         "MSI-X is not attainable  rc %d\n", rc);
 +                      return rc;
 +              }
 +              /*
 +               * decrease number of queues by number of unallocated entries
 +               */
 +              bp->num_queues -= diff;
 +
 +              DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
 +                                bp->num_queues);
 +      } else if (rc) {
 +              /* fall to INTx if not enough memory */
 +              if (rc == -ENOMEM)
 +                      bp->flags |= DISABLE_MSI_FLAG;
 +              DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
 +              return rc;
 +      }
 +
 +      bp->flags |= USING_MSIX_FLAG;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 +{
 +      int i, rc, offset = 0;
 +
 +      rc = request_irq(bp->msix_table[offset++].vector,
 +                       bnx2x_msix_sp_int, 0,
 +                       bp->dev->name, bp->dev);
 +      if (rc) {
 +              BNX2X_ERR("request sp irq failed\n");
 +              return -EBUSY;
 +      }
 +
 +#ifdef BCM_CNIC
 +      offset++;
 +#endif
 +      for_each_eth_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
 +                       bp->dev->name, i);
 +
 +              rc = request_irq(bp->msix_table[offset].vector,
 +                               bnx2x_msix_fp_int, 0, fp->name, fp);
 +              if (rc) {
 +                      BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
 +                            bp->msix_table[offset].vector, rc);
 +                      bnx2x_free_msix_irqs(bp, offset);
 +                      return -EBUSY;
 +              }
 +
 +              offset++;
 +      }
 +
 +      i = BNX2X_NUM_ETH_QUEUES(bp);
 +      offset = 1 + CNIC_PRESENT;
 +      netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
 +             " ... fp[%d] %d\n",
 +             bp->msix_table[0].vector,
 +             0, bp->msix_table[offset].vector,
 +             i - 1, bp->msix_table[offset + i - 1].vector);
 +
 +      return 0;
 +}
 +
 +int bnx2x_enable_msi(struct bnx2x *bp)
 +{
 +      int rc;
 +
 +      rc = pci_enable_msi(bp->pdev);
 +      if (rc) {
 +              DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
 +              return -1;
 +      }
 +      bp->flags |= USING_MSI_FLAG;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_req_irq(struct bnx2x *bp)
 +{
 +      unsigned long flags;
 +      int rc;
 +
 +      if (bp->flags & USING_MSI_FLAG)
 +              flags = 0;
 +      else
 +              flags = IRQF_SHARED;
 +
 +      rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
 +                       bp->dev->name, bp->dev);
 +      return rc;
 +}
 +
 +static inline int bnx2x_setup_irqs(struct bnx2x *bp)
 +{
 +      int rc = 0;
 +      if (bp->flags & USING_MSIX_FLAG) {
 +              rc = bnx2x_req_msix_irqs(bp);
 +              if (rc)
 +                      return rc;
 +      } else {
 +              bnx2x_ack_int(bp);
 +              rc = bnx2x_req_irq(bp);
 +              if (rc) {
 +                      BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
 +                      return rc;
 +              }
 +              if (bp->flags & USING_MSI_FLAG) {
 +                      bp->dev->irq = bp->pdev->irq;
 +                      netdev_info(bp->dev, "using MSI  IRQ %d\n",
 +                             bp->pdev->irq);
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +static inline void bnx2x_napi_enable(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for_each_rx_queue(bp, i)
 +              napi_enable(&bnx2x_fp(bp, i, napi));
 +}
 +
 +static inline void bnx2x_napi_disable(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for_each_rx_queue(bp, i)
 +              napi_disable(&bnx2x_fp(bp, i, napi));
 +}
 +
 +void bnx2x_netif_start(struct bnx2x *bp)
 +{
 +      if (netif_running(bp->dev)) {
 +              bnx2x_napi_enable(bp);
 +              bnx2x_int_enable(bp);
 +              if (bp->state == BNX2X_STATE_OPEN)
 +                      netif_tx_wake_all_queues(bp->dev);
 +      }
 +}
 +
 +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 +{
 +      bnx2x_int_disable_sync(bp, disable_hw);
 +      bnx2x_napi_disable(bp);
 +}
 +
 +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +#ifdef BCM_CNIC
 +      if (!NO_FCOE(bp)) {
 +              struct ethhdr *hdr = (struct ethhdr *)skb->data;
 +              u16 ether_type = ntohs(hdr->h_proto);
 +
 +              /* Skip VLAN tag if present */
 +              if (ether_type == ETH_P_8021Q) {
 +                      struct vlan_ethhdr *vhdr =
 +                              (struct vlan_ethhdr *)skb->data;
 +
 +                      ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
 +              }
 +
 +              /* If ethertype is FCoE or FIP - use FCoE ring */
 +              if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
 +                      return bnx2x_fcoe_tx(bp, txq_index);
 +      }
 +#endif
 +      /* select a non-FCoE queue */
 +      return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
 +}
 +
 +void bnx2x_set_num_queues(struct bnx2x *bp)
 +{
 +      switch (bp->multi_mode) {
 +      case ETH_RSS_MODE_DISABLED:
 +              bp->num_queues = 1;
 +              break;
 +      case ETH_RSS_MODE_REGULAR:
 +              bp->num_queues = bnx2x_calc_num_queues(bp);
 +              break;
 +
 +      default:
 +              bp->num_queues = 1;
 +              break;
 +      }
 +
 +      /* Add special queues */
 +      bp->num_queues += NON_ETH_CONTEXT_USE;
 +}
 +
 +/**
 + * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 + *
 + * @bp:               Driver handle
 + *
 + * We currently support at most 16 Tx queues for each CoS, thus we will
 + * allocate a multiple of 16 for ETH L2 rings according to the value of the
 + * bp->max_cos.
 + *
 + * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 + * index after all ETH L2 indices.
 + *
 + * If the actual number of Tx queues (for each CoS) is less than 16 then there
 + * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
 + * 16..31,...) with indicies that are not coupled with any real Tx queue.
 + *
 + * The proper configuration of skb->queue_mapping is handled by
 + * bnx2x_select_queue() and __skb_tx_hash().
 + *
 + * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 + * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 + */
 +static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 +{
 +      int rc, tx, rx;
 +
 +      tx = MAX_TXQS_PER_COS * bp->max_cos;
 +      rx = BNX2X_NUM_ETH_QUEUES(bp);
 +
 +/* account for fcoe queue */
 +#ifdef BCM_CNIC
 +      if (!NO_FCOE(bp)) {
 +              rx += FCOE_PRESENT;
 +              tx += FCOE_PRESENT;
 +      }
 +#endif
 +
 +      rc = netif_set_real_num_tx_queues(bp->dev, tx);
 +      if (rc) {
 +              BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
 +              return rc;
 +      }
 +      rc = netif_set_real_num_rx_queues(bp->dev, rx);
 +      if (rc) {
 +              BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
 +              return rc;
 +      }
 +
 +      DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
 +                        tx, rx);
 +
 +      return rc;
 +}
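A rough worked example of the counts computed above (assuming MAX_TXQS_PER_COS is 16 as the comment above implies, bp->max_cos == 3, eight ETH L2 queues, and an FCoE L2 queue present): tx = 16 * 3 + 1 = 49 and rx = 8 + 1 = 9.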
 +
 +static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for_each_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +              /* Always use a mini-jumbo MTU for the FCoE L2 ring */
 +              if (IS_FCOE_IDX(i))
 +                      /*
 +                       * Although there are no IP frames expected to arrive
 +                       * on this ring, we still want to add an
 +                       * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
 +                       * overrun attack.
 +                       */
 +                      fp->rx_buf_size =
 +                              BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
 +                              BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
 +              else
 +                      fp->rx_buf_size =
 +                              bp->dev->mtu + ETH_OVREHEAD +
 +                              BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
 +      }
 +}
 +
 +static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
 +{
 +      int i;
 +      u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
 +      u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 +
 +      /*
 +       * Prepare the initial contents of the indirection table if RSS is
 +       * enabled.
 +       */
 +      if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
 +              for (i = 0; i < sizeof(ind_table); i++)
 +                      ind_table[i] =
 +                              bp->fp->cl_id + (i % num_eth_queues);
 +      }
 +
 +      /*
 +       * For 57710 and 57711 the SEARCHER configuration (rss_keys) is
 +       * per-port, so if explicit configuration is needed, do it only
 +       * for a PMF.
 +       *
 +       * For 57712 and newer, on the other hand, it's a per-function
 +       * configuration.
 +       */
 +      return bnx2x_config_rss_pf(bp, ind_table,
 +                                 bp->port.pmf || !CHIP_IS_E1x(bp));
 +}
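 +
 +/* Illustrative sketch (not part of the driver): with four ETH queues and an
 + * assumed base client id bp->fp->cl_id == 10, the loop above fills the
 + * indirection table with the repeating pattern
 + *
 + *     ind_table[] = { 10, 11, 12, 13, 10, 11, 12, 13, ... }
 + *
 + * i.e. the RSS hash buckets are spread round-robin across the ETH L2 clients.
 + */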
 +
 +int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
 +{
 +      struct bnx2x_config_rss_params params = {0};
 +      int i;
 +
 +      /* Although RSS is meaningless when there is a single HW queue, we
 +       * still need it enabled in order to have the HW Rx hash generated.
 +       *
 +       * if (!is_eth_multi(bp))
 +       *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
 +       */
 +
 +      params.rss_obj = &bp->rss_conf_obj;
 +
 +      __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 +
 +      /* RSS mode */
 +      switch (bp->multi_mode) {
 +      case ETH_RSS_MODE_DISABLED:
 +              __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
 +              break;
 +      case ETH_RSS_MODE_REGULAR:
 +              __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 +              break;
 +      case ETH_RSS_MODE_VLAN_PRI:
 +              __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
 +              break;
 +      case ETH_RSS_MODE_E1HOV_PRI:
 +              __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
 +              break;
 +      case ETH_RSS_MODE_IP_DSCP:
 +              __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
 +              break;
 +      default:
 +              BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
 +              return -EINVAL;
 +      }
 +
 +      /* If RSS is enabled */
 +      if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
 +              /* RSS configuration */
 +              __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
 +              __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
 +              __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
 +              __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
 +
 +              /* Hash bits */
 +              params.rss_result_mask = MULTI_MASK;
 +
 +              memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
 +
 +              if (config_hash) {
 +                      /* RSS keys */
 +                      for (i = 0; i < sizeof(params.rss_key) / 4; i++)
 +                              params.rss_key[i] = random32();
 +
 +                      __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
 +              }
 +      }
 +
 +      return bnx2x_config_rss(bp, &params);
 +}
 +
 +static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 +{
 +      struct bnx2x_func_state_params func_params = {0};
 +
 +      /* Prepare parameters for function state transitions */
 +      __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 +
 +      func_params.f_obj = &bp->func_obj;
 +      func_params.cmd = BNX2X_F_CMD_HW_INIT;
 +
 +      func_params.params.hw_init.load_phase = load_code;
 +
 +      return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +/*
 + * Cleans the objects that have internal lists without sending
 + * ramrods. Should be run when interrupts are disabled.
 + */
 +static void bnx2x_squeeze_objects(struct bnx2x *bp)
 +{
 +      int rc;
 +      unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
 +      struct bnx2x_mcast_ramrod_params rparam = {0};
 +      struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
 +
 +      /***************** Cleanup MACs' object first *************************/
 +
 +      /* Wait for completion of the requested commands */
 +      __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 +      /* Perform a dry cleanup */
 +      __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
 +
 +      /* Clean ETH primary MAC */
 +      __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
 +      rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
 +                               &ramrod_flags);
 +      if (rc != 0)
 +              BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
 +
 +      /* Cleanup UC list */
 +      vlan_mac_flags = 0;
 +      __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
 +      rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
 +                               &ramrod_flags);
 +      if (rc != 0)
 +              BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
 +
 +      /***************** Now clean mcast object *****************************/
 +      rparam.mcast_obj = &bp->mcast_obj;
 +      __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
 +
 +      /* Add a DEL command... */
 +      rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 +      if (rc < 0)
 +              BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
 +                        "object: %d\n", rc);
 +
 +      /* ...and wait until all pending commands are cleared */
 +      rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
 +      while (rc != 0) {
 +              if (rc < 0) {
 +                      BNX2X_ERR("Failed to clean multi-cast object: %d\n",
 +                                rc);
 +                      return;
 +              }
 +
 +              rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
 +      }
 +}
 +
 +#ifndef BNX2X_STOP_ON_ERROR
 +#define LOAD_ERROR_EXIT(bp, label) \
 +      do { \
 +              (bp)->state = BNX2X_STATE_ERROR; \
 +              goto label; \
 +      } while (0)
 +#else
 +#define LOAD_ERROR_EXIT(bp, label) \
 +      do { \
 +              (bp)->state = BNX2X_STATE_ERROR; \
 +              (bp)->panic = 1; \
 +              return -EBUSY; \
 +      } while (0)
 +#endif
 +
 +/* must be called with rtnl_lock */
 +int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 +{
 +      int port = BP_PORT(bp);
 +      u32 load_code;
 +      int i, rc;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return -EPERM;
 +#endif
 +
 +      bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 +
 +      /* Set the initial link reported state to link down */
 +      bnx2x_acquire_phy_lock(bp);
 +      memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
 +      __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +              &bp->last_reported_link.link_report_flags);
 +      bnx2x_release_phy_lock(bp);
 +
 +      /* must be called before memory allocation and HW init */
 +      bnx2x_ilt_set_info(bp);
 +
 +      /*
 +       * Zero fastpath structures preserving invariants like napi, which are
 +       * allocated only once, fp index, max_cos, bp pointer.
 +       * Also set fp->disable_tpa.
 +       */
 +      for_each_queue(bp, i)
 +              bnx2x_bz_fp(bp, i);
 +
 +
 +      /* Set the receive queues buffer size */
 +      bnx2x_set_rx_buf_size(bp);
 +
 +      if (bnx2x_alloc_mem(bp))
 +              return -ENOMEM;
 +
 +      /* As long as bnx2x_alloc_mem() may possibly update
 +       * bp->num_queues, bnx2x_set_real_num_queues() should always
 +       * come after it.
 +       */
 +      rc = bnx2x_set_real_num_queues(bp);
 +      if (rc) {
 +              BNX2X_ERR("Unable to set real_num_queues\n");
 +              LOAD_ERROR_EXIT(bp, load_error0);
 +      }
 +
 +      /* Configure multi-CoS mappings in the kernel.
 +       * This configuration may be overridden by a multi-class queue
 +       * discipline or by a DCBX negotiation result.
 +       */
 +      bnx2x_setup_tc(bp->dev, bp->max_cos);
 +
 +      bnx2x_napi_enable(bp);
 +
 +      /* Send LOAD_REQUEST command to MCP.
 +       * Returns the type of LOAD command: if it is the first port to be
 +       * initialized, common blocks should be initialized, otherwise not.
 +       */
 +      if (!BP_NOMCP(bp)) {
 +              load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 +              if (!load_code) {
 +                      BNX2X_ERR("MCP response failure, aborting\n");
 +                      rc = -EBUSY;
 +                      LOAD_ERROR_EXIT(bp, load_error1);
 +              }
 +              if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
 +                      rc = -EBUSY; /* other port in diagnostic mode */
 +                      LOAD_ERROR_EXIT(bp, load_error1);
 +              }
 +
 +      } else {
 +              int path = BP_PATH(bp);
 +
 +              DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
 +                 path, load_count[path][0], load_count[path][1],
 +                 load_count[path][2]);
 +              load_count[path][0]++;
 +              load_count[path][1 + port]++;
 +              DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
 +                 path, load_count[path][0], load_count[path][1],
 +                 load_count[path][2]);
 +              if (load_count[path][0] == 1)
 +                      load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
 +              else if (load_count[path][1 + port] == 1)
 +                      load_code = FW_MSG_CODE_DRV_LOAD_PORT;
 +              else
 +                      load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
 +      }
 +
 +      if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 +          (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
 +          (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
 +              bp->port.pmf = 1;
 +              /*
 +               * We need the barrier to ensure the ordering between the
 +               * writing to bp->port.pmf here and reading it from the
 +               * bnx2x_periodic_task().
 +               */
 +              smp_mb();
 +              queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 +      } else
 +              bp->port.pmf = 0;
 +
 +      DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 +
 +      /* Init Function state controlling object */
 +      bnx2x__init_func_obj(bp);
 +
 +      /* Initialize HW */
 +      rc = bnx2x_init_hw(bp, load_code);
 +      if (rc) {
 +              BNX2X_ERR("HW init failed, aborting\n");
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 +              LOAD_ERROR_EXIT(bp, load_error2);
 +      }
 +
 +      /* Connect to IRQs */
 +      rc = bnx2x_setup_irqs(bp);
 +      if (rc) {
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 +              LOAD_ERROR_EXIT(bp, load_error2);
 +      }
 +
 +      /* Setup NIC internals and enable interrupts */
 +      bnx2x_nic_init(bp, load_code);
 +
 +      /* Init per-function objects */
 +      bnx2x_init_bp_objs(bp);
 +
 +      if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 +          (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
 +          (bp->common.shmem2_base)) {
 +              if (SHMEM2_HAS(bp, dcc_support))
 +                      SHMEM2_WR(bp, dcc_support,
 +                                (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
 +                                 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
 +      }
 +
 +      bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
 +      rc = bnx2x_func_start(bp);
 +      if (rc) {
 +              BNX2X_ERR("Function start failed!\n");
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 +              LOAD_ERROR_EXIT(bp, load_error3);
 +      }
 +
 +      /* Send LOAD_DONE command to MCP */
 +      if (!BP_NOMCP(bp)) {
 +              load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 +              if (!load_code) {
 +                      BNX2X_ERR("MCP response failure, aborting\n");
 +                      rc = -EBUSY;
 +                      LOAD_ERROR_EXIT(bp, load_error3);
 +              }
 +      }
 +
 +      rc = bnx2x_setup_leading(bp);
 +      if (rc) {
 +              BNX2X_ERR("Setup leading failed!\n");
 +              LOAD_ERROR_EXIT(bp, load_error3);
 +      }
 +
 +#ifdef BCM_CNIC
 +      /* Enable Timer scan */
 +      REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
 +#endif
 +
 +      for_each_nondefault_queue(bp, i) {
 +              rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
 +              if (rc)
 +                      LOAD_ERROR_EXIT(bp, load_error4);
 +      }
 +
 +      rc = bnx2x_init_rss_pf(bp);
 +      if (rc)
 +              LOAD_ERROR_EXIT(bp, load_error4);
 +
 +      /* Now when Clients are configured we are ready to work */
 +      bp->state = BNX2X_STATE_OPEN;
 +
 +      /* Configure a ucast MAC */
 +      rc = bnx2x_set_eth_mac(bp, true);
 +      if (rc)
 +              LOAD_ERROR_EXIT(bp, load_error4);
 +
 +      if (bp->pending_max) {
 +              bnx2x_update_max_mf_config(bp, bp->pending_max);
 +              bp->pending_max = 0;
 +      }
 +
 +      if (bp->port.pmf)
 +              bnx2x_initial_phy_init(bp, load_mode);
 +
 +      /* Start fast path */
 +
 +      /* Initialize Rx filter. */
 +      netif_addr_lock_bh(bp->dev);
 +      bnx2x_set_rx_mode(bp->dev);
 +      netif_addr_unlock_bh(bp->dev);
 +
 +      /* Start the Tx */
 +      switch (load_mode) {
 +      case LOAD_NORMAL:
 +              /* Tx queues should only be re-enabled */
 +              netif_tx_wake_all_queues(bp->dev);
 +              break;
 +
 +      case LOAD_OPEN:
 +              netif_tx_start_all_queues(bp->dev);
 +              smp_mb__after_clear_bit();
 +              break;
 +
 +      case LOAD_DIAG:
 +              bp->state = BNX2X_STATE_DIAG;
 +              break;
 +
 +      default:
 +              break;
 +      }
 +
 +      if (!bp->port.pmf)
 +              bnx2x__link_status_update(bp);
 +
 +      /* start the timer */
 +      mod_timer(&bp->timer, jiffies + bp->current_interval);
 +
 +#ifdef BCM_CNIC
 +      bnx2x_setup_cnic_irq_info(bp);
 +      if (bp->state == BNX2X_STATE_OPEN)
 +              bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
 +#endif
 +      bnx2x_inc_load_cnt(bp);
 +
 +      /* Wait for all pending SP commands to complete */
 +      if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
 +              BNX2X_ERR("Timeout waiting for SP elements to complete\n");
 +              bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 +              return -EBUSY;
 +      }
 +
 +      bnx2x_dcbx_init(bp);
 +      return 0;
 +
 +#ifndef BNX2X_STOP_ON_ERROR
 +load_error4:
 +#ifdef BCM_CNIC
 +      /* Disable Timer scan */
 +      REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
 +#endif
 +load_error3:
 +      bnx2x_int_disable_sync(bp, 1);
 +
 +      /* Clean queueable objects */
 +      bnx2x_squeeze_objects(bp);
 +
 +      /* Free SKBs, SGEs, TPA pool and driver internals */
 +      bnx2x_free_skbs(bp);
 +      for_each_rx_queue(bp, i)
 +              bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 +
 +      /* Release IRQs */
 +      bnx2x_free_irq(bp);
 +load_error2:
 +      if (!BP_NOMCP(bp)) {
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 +      }
 +
 +      bp->port.pmf = 0;
 +load_error1:
 +      bnx2x_napi_disable(bp);
 +load_error0:
 +      bnx2x_free_mem(bp);
 +
 +      return rc;
 +#endif /* ! BNX2X_STOP_ON_ERROR */
 +}
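 +
 +/* Illustrative note (not part of the driver): the load_error* labels above
 + * unwind in reverse order of setup - load_error4 undoes the per-queue setup
 + * (and the CNIC timer scan), load_error3 disables interrupts and frees SKBs,
 + * SGEs and IRQs, load_error2 reports the unload to the MCP, load_error1
 + * disables NAPI and load_error0 frees the driver memory.
 + */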
 +
 +/* must be called with rtnl_lock */
 +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 +{
 +      int i;
 +      bool global = false;
 +
 +      if ((bp->state == BNX2X_STATE_CLOSED) ||
 +          (bp->state == BNX2X_STATE_ERROR)) {
 +              /* We can get here if the driver has been unloaded
 +               * during parity error recovery and is either waiting for a
 +               * leader to complete or for other functions to unload and
 +               * then ifdown has been issued. In this case we want to
 +               * unload and let other functions complete the recovery
 +               * process.
 +               */
 +              bp->recovery_state = BNX2X_RECOVERY_DONE;
 +              bp->is_leader = 0;
 +              bnx2x_release_leader_lock(bp);
 +              smp_mb();
 +
 +              DP(NETIF_MSG_HW, "Releasing a leadership...\n");
 +
 +              return -EINVAL;
 +      }
 +
 +      /*
 +       * It's important to set the bp->state to the value different from
 +       * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
 +       * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
 +       */
 +      bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
 +      smp_mb();
 +
 +      /* Stop Tx */
 +      bnx2x_tx_disable(bp);
 +
 +#ifdef BCM_CNIC
 +      bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
 +#endif
 +
 +      bp->rx_mode = BNX2X_RX_MODE_NONE;
 +
 +      del_timer_sync(&bp->timer);
 +
 +      /* Set ALWAYS_ALIVE bit in shmem */
 +      bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
 +
 +      bnx2x_drv_pulse(bp);
 +
 +      bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +
 +      /* Cleanup the chip if needed */
 +      if (unload_mode != UNLOAD_RECOVERY)
 +              bnx2x_chip_cleanup(bp, unload_mode);
 +      else {
 +              /* Send the UNLOAD_REQUEST to the MCP */
 +              bnx2x_send_unload_req(bp, unload_mode);
 +
 +              /*
 +               * Prevent transactions to the host from the functions on the
 +               * engine that doesn't reset global blocks in case of global
 +               * attention once global blocks are reset and gates are opened
 +               * (the engine whose leader will perform the recovery
 +               * last).
 +               */
 +              if (!CHIP_IS_E1x(bp))
 +                      bnx2x_pf_disable(bp);
 +
 +              /* Disable HW interrupts, NAPI */
 +              bnx2x_netif_stop(bp, 1);
 +
 +              /* Release IRQs */
 +              bnx2x_free_irq(bp);
 +
 +              /* Report UNLOAD_DONE to MCP */
 +              bnx2x_send_unload_done(bp);
 +      }
 +
 +      /*
 +       * At this stage no more interrupts will arrive, so we may safely clean
 +       * the queueable objects here in case they failed to get cleaned so far.
 +       */
 +      bnx2x_squeeze_objects(bp);
 +
 +      /* There should be no more pending SP commands at this stage */
 +      bp->sp_state = 0;
 +
 +      bp->port.pmf = 0;
 +
 +      /* Free SKBs, SGEs, TPA pool and driver internals */
 +      bnx2x_free_skbs(bp);
 +      for_each_rx_queue(bp, i)
 +              bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 +
 +      bnx2x_free_mem(bp);
 +
 +      bp->state = BNX2X_STATE_CLOSED;
 +
 +      /* Check if there are pending parity attentions. If there are - set
 +       * RECOVERY_IN_PROGRESS.
 +       */
 +      if (bnx2x_chk_parity_attn(bp, &global, false)) {
 +              bnx2x_set_reset_in_progress(bp);
 +
 +              /* Set RESET_IS_GLOBAL if needed */
 +              if (global)
 +                      bnx2x_set_reset_global(bp);
 +      }
 +
 +
 +      /* The last driver must disable "close the gate" if there is no
 +       * parity attention or "process kill" pending.
 +       */
 +      if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
 +              bnx2x_disable_close_the_gate(bp);
 +
 +      return 0;
 +}
 +
 +int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 +{
 +      u16 pmcsr;
 +
 +      /* If there is no power capability, silently succeed */
 +      if (!bp->pm_cap) {
 +              DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
 +              return 0;
 +      }
 +
 +      pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
 +
 +      switch (state) {
 +      case PCI_D0:
 +              pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
 +                                    ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
 +                                     PCI_PM_CTRL_PME_STATUS));
 +
 +              if (pmcsr & PCI_PM_CTRL_STATE_MASK)
 +                      /* delay required during transition out of D3hot */
 +                      msleep(20);
 +              break;
 +
 +      case PCI_D3hot:
 +              /* If there are other clients above, don't
 +                 shut down the power */
 +              if (atomic_read(&bp->pdev->enable_cnt) != 1)
 +                      return 0;
 +              /* Don't shut down the power for emulation and FPGA */
 +              if (CHIP_REV_IS_SLOW(bp))
 +                      return 0;
 +
 +              pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 +              pmcsr |= 3;
 +
 +              if (bp->wol)
 +                      pmcsr |= PCI_PM_CTRL_PME_ENABLE;
 +
 +              pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
 +                                    pmcsr);
 +
 +              /* No more memory access after this point until
 +              * device is brought back to D0.
 +              */
 +              break;
 +
 +      default:
 +              return -EINVAL;
 +      }
 +      return 0;
 +}
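 +
 +/* Illustrative note (not part of the driver): the value 3 written into the
 + * PCI_PM_CTRL_STATE_MASK field above is the standard PCI PM encoding of the
 + * D3hot power state; PCI_PM_CTRL_PME_ENABLE is additionally set when
 + * Wake-on-LAN is requested.
 + */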
 +
 +/*
 + * net_device service functions
 + */
 +int bnx2x_poll(struct napi_struct *napi, int budget)
 +{
 +      int work_done = 0;
 +      u8 cos;
 +      struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
 +                                               napi);
 +      struct bnx2x *bp = fp->bp;
 +
 +      while (1) {
 +#ifdef BNX2X_STOP_ON_ERROR
 +              if (unlikely(bp->panic)) {
 +                      napi_complete(napi);
 +                      return 0;
 +              }
 +#endif
 +
 +              for_each_cos_in_tx_queue(fp, cos)
 +                      if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
 +                              bnx2x_tx_int(bp, &fp->txdata[cos]);
 +
 +
 +              if (bnx2x_has_rx_work(fp)) {
 +                      work_done += bnx2x_rx_int(fp, budget - work_done);
 +
 +                      /* must not complete if we consumed full budget */
 +                      if (work_done >= budget)
 +                              break;
 +              }
 +
 +              /* Fall out from the NAPI loop if needed */
 +              if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 +#ifdef BCM_CNIC
 +                      /* No need to update SB for FCoE L2 ring as long as
 +                       * it's connected to the default SB and the SB
 +                       * has been updated when NAPI was scheduled.
 +                       */
 +                      if (IS_FCOE_FP(fp)) {
 +                              napi_complete(napi);
 +                              break;
 +                      }
 +#endif
 +
 +                      bnx2x_update_fpsb_idx(fp);
 +                      /* bnx2x_has_rx_work() reads the status block,
 +                       * thus we need to ensure that status block indices
 +                       * have been actually read (bnx2x_update_fpsb_idx)
 +                       * prior to this check (bnx2x_has_rx_work) so that
 +                       * we won't write the "newer" value of the status block
 +                       * to IGU (if there was a DMA right after
 +                       * bnx2x_has_rx_work and if there is no rmb, the memory
 +                       * reading (bnx2x_update_fpsb_idx) may be postponed
 +                       * to right before bnx2x_ack_sb). In this case there
 +                       * will never be another interrupt until there is
 +                       * another update of the status block, while there
 +                       * is still unhandled work.
 +                       */
 +                      rmb();
 +
 +                      if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 +                              napi_complete(napi);
 +                              /* Re-enable interrupts */
 +                              DP(NETIF_MSG_HW,
 +                                 "Update index to %d\n", fp->fp_hc_idx);
 +                              bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 +                                           le16_to_cpu(fp->fp_hc_idx),
 +                                           IGU_INT_ENABLE, 1);
 +                              break;
 +                      }
 +              }
 +      }
 +
 +      return work_done;
 +}
 +
 +/* We split the first BD into header and data BDs
 + * to ease the pain of our fellow microcode engineers;
 + * we use one mapping for both BDs.
 + * So far this has only been observed to happen
 + * in Other Operating Systems(TM).
 + */
 +static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
 +                                 struct bnx2x_fp_txdata *txdata,
 +                                 struct sw_tx_bd *tx_buf,
 +                                 struct eth_tx_start_bd **tx_bd, u16 hlen,
 +                                 u16 bd_prod, int nbd)
 +{
 +      struct eth_tx_start_bd *h_tx_bd = *tx_bd;
 +      struct eth_tx_bd *d_tx_bd;
 +      dma_addr_t mapping;
 +      int old_len = le16_to_cpu(h_tx_bd->nbytes);
 +
 +      /* first fix first BD */
 +      h_tx_bd->nbd = cpu_to_le16(nbd);
 +      h_tx_bd->nbytes = cpu_to_le16(hlen);
 +
 +      DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
 +         "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
 +         h_tx_bd->addr_lo, h_tx_bd->nbd);
 +
 +      /* now get a new data BD
 +       * (after the pbd) and fill it */
 +      bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +      d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
 +
 +      mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
 +                         le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
 +
 +      d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +      d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +      d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
 +
 +      /* this marks the BD as one that has no individual mapping */
 +      tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
 +
 +      DP(NETIF_MSG_TX_QUEUED,
 +         "TSO split data size is %d (%x:%x)\n",
 +         d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
 +
 +      /* update tx_bd */
 +      *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
 +
 +      return bd_prod;
 +}
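 +
 +/* Illustrative sketch (not part of the driver): for a TSO skb whose linear
 + * part holds 54 bytes of headers followed by 946 bytes of payload, the split
 + * above shrinks the start BD to nbytes = 54 and adds a data BD of
 + * 1000 - 54 = 946 bytes that points into the same DMA mapping at offset hlen,
 + * so no additional mapping is required.
 + */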
 +
 +static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
 +{
 +      if (fix > 0)
 +              csum = (u16) ~csum_fold(csum_sub(csum,
 +                              csum_partial(t_header - fix, fix, 0)));
 +
 +      else if (fix < 0)
 +              csum = (u16) ~csum_fold(csum_add(csum,
 +                              csum_partial(t_header, -fix, 0)));
 +
 +      return swab16(csum);
 +}
 +
 +static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 +{
 +      u32 rc;
 +
 +      if (skb->ip_summed != CHECKSUM_PARTIAL)
 +              rc = XMIT_PLAIN;
 +
 +      else {
 +              if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
 +                      rc = XMIT_CSUM_V6;
 +                      if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 +                              rc |= XMIT_CSUM_TCP;
 +
 +              } else {
 +                      rc = XMIT_CSUM_V4;
 +                      if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 +                              rc |= XMIT_CSUM_TCP;
 +              }
 +      }
 +
 +      if (skb_is_gso_v6(skb))
 +              rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
 +      else if (skb_is_gso(skb))
 +              rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
 +
 +      return rc;
 +}
 +
 +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 +/* Check if the packet requires linearization (packet is too fragmented).
 +   No need to check fragmentation if page size > 8K (there will be no
 +   violation of FW restrictions). */
 +static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
 +                           u32 xmit_type)
 +{
 +      int to_copy = 0;
 +      int hlen = 0;
 +      int first_bd_sz = 0;
 +
 +      /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
 +      if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
 +
 +              if (xmit_type & XMIT_GSO) {
 +                      unsigned short lso_mss = skb_shinfo(skb)->gso_size;
 +                      /* Check if LSO packet needs to be copied:
 +                         3 = 1 (for headers BD) + 2 (for PBD and last BD) */
 +                      int wnd_size = MAX_FETCH_BD - 3;
 +                      /* Number of windows to check */
 +                      int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
 +                      int wnd_idx = 0;
 +                      int frag_idx = 0;
 +                      u32 wnd_sum = 0;
 +
 +                      /* Headers length */
 +                      hlen = (int)(skb_transport_header(skb) - skb->data) +
 +                              tcp_hdrlen(skb);
 +
 +                      /* Amount of data (w/o headers) on linear part of SKB*/
 +                      first_bd_sz = skb_headlen(skb) - hlen;
 +
 +                      wnd_sum  = first_bd_sz;
 +
 +                      /* Calculate the first sum - it's special */
 +                      for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
 +                              wnd_sum +=
 +                                      skb_shinfo(skb)->frags[frag_idx].size;
 +
 +                      /* If there was data on linear skb data - check it */
 +                      if (first_bd_sz > 0) {
 +                              if (unlikely(wnd_sum < lso_mss)) {
 +                                      to_copy = 1;
 +                                      goto exit_lbl;
 +                              }
 +
 +                              wnd_sum -= first_bd_sz;
 +                      }
 +
 +                      /* Others are easier: run through the frag list and
 +                         check all windows */
 +                      for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
 +                              wnd_sum +=
 +                        skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
 +
 +                              if (unlikely(wnd_sum < lso_mss)) {
 +                                      to_copy = 1;
 +                                      break;
 +                              }
 +                              wnd_sum -=
 +                                      skb_shinfo(skb)->frags[wnd_idx].size;
 +                      }
 +              } else {
 +                      /* in the non-LSO case a too-fragmented packet should
 +                         always be linearized */
 +                      to_copy = 1;
 +              }
 +      }
 +
 +exit_lbl:
 +      if (unlikely(to_copy))
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "Linearization IS REQUIRED for %s packet. "
 +                 "num_frags %d  hlen %d  first_bd_sz %d\n",
 +                 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
 +                 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
 +
 +      return to_copy;
 +}
 +#endif
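 +
 +/* Illustrative sketch (not part of the driver): assuming MAX_FETCH_BD is 13,
 + * each FW window spans wnd_size = 10 BDs. For an LSO skb with
 + * gso_size = 1460, 200 payload bytes in the linear part and twelve 100-byte
 + * frags, the first window sums to 200 + 9 * 100 = 1100 < 1460, so
 + * bnx2x_pkt_req_lin() returns 1 and the skb is linearized before
 + * transmission.
 + */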
 +
 +static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
 +                                      u32 xmit_type)
 +{
 +      *parsing_data |= (skb_shinfo(skb)->gso_size <<
 +                            ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
 +                            ETH_TX_PARSE_BD_E2_LSO_MSS;
 +      if ((xmit_type & XMIT_GSO_V6) &&
 +          (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
 +              *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
 +}
 +
 +/**
 + * bnx2x_set_pbd_gso - update PBD in GSO case.
 + *
 + * @skb:      packet skb
 + * @pbd:      parse BD
 + * @xmit_type:        xmit flags
 + */
 +static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
 +                                   struct eth_tx_parse_bd_e1x *pbd,
 +                                   u32 xmit_type)
 +{
 +      pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 +      pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
 +      pbd->tcp_flags = pbd_tcp_flags(skb);
 +
 +      if (xmit_type & XMIT_GSO_V4) {
 +              pbd->ip_id = swab16(ip_hdr(skb)->id);
 +              pbd->tcp_pseudo_csum =
 +                      swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 +                                                ip_hdr(skb)->daddr,
 +                                                0, IPPROTO_TCP, 0));
 +
 +      } else
 +              pbd->tcp_pseudo_csum =
 +                      swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 +                                              &ipv6_hdr(skb)->daddr,
 +                                              0, IPPROTO_TCP, 0));
 +
 +      pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
 +}
 +
 +/**
 + * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 + *
 + * @bp:                       driver handle
 + * @skb:              packet skb
 + * @parsing_data:     data to be updated
 + * @xmit_type:                xmit flags
 + *
 + * 57712 related
 + */
 +static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 +      u32 *parsing_data, u32 xmit_type)
 +{
 +      *parsing_data |=
 +                      ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
 +
 +      if (xmit_type & XMIT_CSUM_TCP) {
 +              *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 +
 +              return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
 +      } else
 +              /* We support checksum offload for TCP and UDP only.
 +               * No need to pass the UDP header length - it's a constant.
 +               */
 +              return skb_transport_header(skb) +
 +                              sizeof(struct udphdr) - skb->data;
 +}
 +
 +static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 +      struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
 +{
 +      tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
 +
 +      if (xmit_type & XMIT_CSUM_V4)
 +              tx_start_bd->bd_flags.as_bitfield |=
 +                                      ETH_TX_BD_FLAGS_IP_CSUM;
 +      else
 +              tx_start_bd->bd_flags.as_bitfield |=
 +                                      ETH_TX_BD_FLAGS_IPV6;
 +
 +      if (!(xmit_type & XMIT_CSUM_TCP))
 +              tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
 +}
 +
 +/**
 + * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 + *
 + * @bp:               driver handle
 + * @skb:      packet skb
 + * @pbd:      parse BD to be updated
 + * @xmit_type:        xmit flags
 + */
 +static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 +      struct eth_tx_parse_bd_e1x *pbd,
 +      u32 xmit_type)
 +{
 +      u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
 +
 +      /* for now NS flag is not used in Linux */
 +      pbd->global_data =
 +              (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
 +                       ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
 +
 +      pbd->ip_hlen_w = (skb_transport_header(skb) -
 +                      skb_network_header(skb)) >> 1;
 +
 +      hlen += pbd->ip_hlen_w;
 +
 +      /* We support checksum offload for TCP and UDP only */
 +      if (xmit_type & XMIT_CSUM_TCP)
 +              hlen += tcp_hdrlen(skb) / 2;
 +      else
 +              hlen += sizeof(struct udphdr) / 2;
 +
 +      pbd->total_hlen_w = cpu_to_le16(hlen);
 +      hlen = hlen*2;
 +
 +      if (xmit_type & XMIT_CSUM_TCP) {
 +              pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
 +
 +      } else {
 +              s8 fix = SKB_CS_OFF(skb); /* signed! */
 +
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "hlen %d  fix %d  csum before fix %x\n",
 +                 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
 +
 +              /* HW bug: fixup the CSUM */
 +              pbd->tcp_pseudo_csum =
 +                      bnx2x_csum_fix(skb_transport_header(skb),
 +                                     SKB_CS(skb), fix);
 +
 +              DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
 +                 pbd->tcp_pseudo_csum);
 +      }
 +
 +      return hlen;
 +}
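 +
 +/* Illustrative sketch (not part of the driver): for a plain untagged IPv4/TCP
 + * frame with no options, the word (16-bit) lengths computed above are
 + *
 + *     14-byte Ethernet header  ->  7 words
 + *     20-byte IPv4 header      -> 10 words (ip_hlen_w)
 + *     20-byte TCP header       -> 10 words
 + *
 + * so total_hlen_w is 27 and the function returns hlen = 54 bytes.
 + */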
 +
 +/* called with netif_tx_lock
 + * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 + * netif_wake_queue()
 + */
 +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      struct bnx2x_fastpath *fp;
 +      struct netdev_queue *txq;
 +      struct bnx2x_fp_txdata *txdata;
 +      struct sw_tx_bd *tx_buf;
 +      struct eth_tx_start_bd *tx_start_bd, *first_bd;
 +      struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
 +      struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
 +      struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
 +      u32 pbd_e2_parsing_data = 0;
 +      u16 pkt_prod, bd_prod;
 +      int nbd, txq_index, fp_index, txdata_index;
 +      dma_addr_t mapping;
 +      u32 xmit_type = bnx2x_xmit_type(bp, skb);
 +      int i;
 +      u8 hlen = 0;
 +      __le16 pkt_size = 0;
 +      struct ethhdr *eth;
 +      u8 mac_type = UNICAST_ADDRESS;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return NETDEV_TX_BUSY;
 +#endif
 +
 +      txq_index = skb_get_queue_mapping(skb);
 +      txq = netdev_get_tx_queue(dev, txq_index);
 +
 +      BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
 +
 +      /* decode the fastpath index and the cos index from the txq */
 +      fp_index = TXQ_TO_FP(txq_index);
 +      txdata_index = TXQ_TO_COS(txq_index);
 +
 +#ifdef BCM_CNIC
 +      /*
 +       * Override the above for the FCoE queue:
 +       *   - FCoE fp entry is right after the ETH entries.
 +       *   - FCoE L2 queue uses bp->txdata[0] only.
 +       */
 +      if (unlikely(!NO_FCOE(bp) && (txq_index ==
 +                                    bnx2x_fcoe_tx(bp, txq_index)))) {
 +              fp_index = FCOE_IDX;
 +              txdata_index = 0;
 +      }
 +#endif
 +
 +      /* enable this debug print to view the transmission queue being used
 +      DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
 +         txq_index, fp_index, txdata_index); */
 +
 +      /* locate the fastpath and the txdata */
 +      fp = &bp->fp[fp_index];
 +      txdata = &fp->txdata[txdata_index];
 +
 +      /* enable this debug print to view the transmission details
 +      DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
 +                      " tx_data ptr %p fp pointer %p\n",
 +         txdata->cid, fp_index, txdata_index, txdata, fp); */
 +
 +      if (unlikely(bnx2x_tx_avail(bp, txdata) <
 +                   (skb_shinfo(skb)->nr_frags + 3))) {
 +              fp->eth_q_stats.driver_xoff++;
 +              netif_tx_stop_queue(txq);
 +              BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
 +              return NETDEV_TX_BUSY;
 +      }
 +
 +      DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
 +                              "protocol(%x,%x) gso type %x  xmit_type %x\n",
 +         txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
 +         ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
 +
 +      eth = (struct ethhdr *)skb->data;
 +
 +      /* set flag according to packet type (UNICAST_ADDRESS is default)*/
 +      if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
 +              if (is_broadcast_ether_addr(eth->h_dest))
 +                      mac_type = BROADCAST_ADDRESS;
 +              else
 +                      mac_type = MULTICAST_ADDRESS;
 +      }
 +
 +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 +      /* First, check if we need to linearize the skb (due to FW
 +         restrictions). No need to check fragmentation if page size > 8K
 +         (there will be no violation of FW restrictions) */
 +      if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
 +              /* Statistics of linearization */
 +              bp->lin_cnt++;
 +              if (skb_linearize(skb) != 0) {
 +                      DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
 +                         "silently dropping this SKB\n");
 +                      dev_kfree_skb_any(skb);
 +                      return NETDEV_TX_OK;
 +              }
 +      }
 +#endif
 +      /* Map skb linear data for DMA */
 +      mapping = dma_map_single(&bp->pdev->dev, skb->data,
 +                               skb_headlen(skb), DMA_TO_DEVICE);
 +      if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 +              DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
 +                 "silently dropping this SKB\n");
 +              dev_kfree_skb_any(skb);
 +              return NETDEV_TX_OK;
 +      }
 +      /*
 +      Please read carefully. First we use one BD which we mark as start,
 +      then we have a parsing info BD (used for TSO or xsum),
 +      and only then we have the rest of the TSO BDs.
 +      (don't forget to mark the last one as last,
 +      and to unmap only AFTER you write to the BD ...)
 +      And above all, all pbd sizes are in words - NOT DWORDS!
 +      */
 +
 +      /* get current pkt produced now - advance it just before sending packet
 +       * since mapping of pages may fail and cause packet to be dropped
 +       */
 +      pkt_prod = txdata->tx_pkt_prod;
 +      bd_prod = TX_BD(txdata->tx_bd_prod);
 +
 +      /* get a tx_buf and first BD
 +       * tx_start_bd may be changed during SPLIT,
 +       * but first_bd will always stay first
 +       */
 +      tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
 +      tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
 +      first_bd = tx_start_bd;
 +
 +      tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 +      SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
 +               mac_type);
 +
 +      /* header nbd */
 +      SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
 +
 +      /* remember the first BD of the packet */
 +      tx_buf->first_bd = txdata->tx_bd_prod;
 +      tx_buf->skb = skb;
 +      tx_buf->flags = 0;
 +
 +      DP(NETIF_MSG_TX_QUEUED,
 +         "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
 +         pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
 +
 +      if (vlan_tx_tag_present(skb)) {
 +              tx_start_bd->vlan_or_ethertype =
 +                  cpu_to_le16(vlan_tx_tag_get(skb));
 +              tx_start_bd->bd_flags.as_bitfield |=
 +                  (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
 +      } else
 +              tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
 +
 +      /* turn on parsing and get a BD */
 +      bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +
 +      if (xmit_type & XMIT_CSUM)
 +              bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
 +              memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
 +              /* Set PBD in checksum offload case */
 +              if (xmit_type & XMIT_CSUM)
 +                      hlen = bnx2x_set_pbd_csum_e2(bp, skb,
 +                                                   &pbd_e2_parsing_data,
 +                                                   xmit_type);
 +              if (IS_MF_SI(bp)) {
 +                      /*
 +                       * fill in the MAC addresses in the PBD - for local
 +                       * switching
 +                       */
 +                      bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
 +                                            &pbd_e2->src_mac_addr_mid,
 +                                            &pbd_e2->src_mac_addr_lo,
 +                                            eth->h_source);
 +                      bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
 +                                            &pbd_e2->dst_mac_addr_mid,
 +                                            &pbd_e2->dst_mac_addr_lo,
 +                                            eth->h_dest);
 +              }
 +      } else {
 +              pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
 +              memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
 +              /* Set PBD in checksum offload case */
 +              if (xmit_type & XMIT_CSUM)
 +                      hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
 +
 +      }
 +
 +      /* Setup the data pointer of the first BD of the packet */
 +      tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +      tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +      nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
 +      tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 +      pkt_size = tx_start_bd->nbytes;
 +
 +      DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
 +         "  nbytes %d  flags %x  vlan %x\n",
 +         tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
 +         le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
 +         tx_start_bd->bd_flags.as_bitfield,
 +         le16_to_cpu(tx_start_bd->vlan_or_ethertype));
 +
 +      if (xmit_type & XMIT_GSO) {
 +
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
 +                 skb->len, hlen, skb_headlen(skb),
 +                 skb_shinfo(skb)->gso_size);
 +
 +              tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
 +
 +              if (unlikely(skb_headlen(skb) > hlen))
 +                      bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
 +                                               &tx_start_bd, hlen,
 +                                               bd_prod, ++nbd);
 +              if (!CHIP_IS_E1x(bp))
 +                      bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
 +                                           xmit_type);
 +              else
 +                      bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
 +      }
 +
 +      /* Set the PBD's parsing_data field if not zero
 +       * (for the chips newer than 57711).
 +       */
 +      if (pbd_e2_parsing_data)
 +              pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
 +
 +      tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
 +
 +      /* Handle fragmented skb */
 +      for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 +              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +
 +              mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, frag->size,
 +                                         DMA_TO_DEVICE);
 +              if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 +
 +                      DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
 +                                              "dropping packet...\n");
 +
 +                      /* we need to unmap all buffers already mapped
 +                       * for this SKB;
 +                       * first_bd->nbd needs to be properly updated
 +                       * before the call to bnx2x_free_tx_pkt
 +                       */
 +                      first_bd->nbd = cpu_to_le16(nbd);
 +                      bnx2x_free_tx_pkt(bp, txdata,
 +                                        TX_BD(txdata->tx_pkt_prod));
 +                      return NETDEV_TX_OK;
 +              }
 +
 +              bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +              tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
 +              if (total_pkt_bd == NULL)
 +                      total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
 +
 +              tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +              tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +              tx_data_bd->nbytes = cpu_to_le16(frag->size);
 +              le16_add_cpu(&pkt_size, frag->size);
 +              nbd++;
 +
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
 +                 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
 +                 le16_to_cpu(tx_data_bd->nbytes));
 +      }
 +
 +      DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
 +
 +      /* update with actual num BDs */
 +      first_bd->nbd = cpu_to_le16(nbd);
 +
 +      bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +
 +      /* now send a tx doorbell, counting the next BD
 +       * if the packet contains or ends with it
 +       */
 +      if (TX_BD_POFF(bd_prod) < nbd)
 +              nbd++;
 +
 +      /* total_pkt_bytes should be set on the first data BD if
 +       * it's not an LSO packet and there is more than one
 +       * data BD. In this case pkt_size is limited by an MTU value.
 +       * However we prefer to set it for an LSO packet (while we don't
 +       * have to) in order to save some CPU cycles in a non-LSO
 +       * case, when we care much more about them.
 +       */
 +      if (total_pkt_bd != NULL)
 +              total_pkt_bd->total_pkt_bytes = pkt_size;
 +
 +      if (pbd_e1x)
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
 +                 "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
 +                 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
 +                 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
 +                 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
 +                  le16_to_cpu(pbd_e1x->total_hlen_w));
 +      if (pbd_e2)
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
 +                 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
 +                 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
 +                 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
 +                 pbd_e2->parsing_data);
 +      DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
 +
 +      txdata->tx_pkt_prod++;
 +      /*
 +       * Make sure that the BD data is updated before updating the producer
 +       * since FW might read the BD right after the producer is updated.
 +       * This is only applicable for weak-ordered memory model archs such
 +       * as IA-64. The following barrier is also mandatory since FW
 +       * assumes packets must have BDs.
 +       */
 +      wmb();
 +
 +      txdata->tx_db.data.prod += nbd;
 +      barrier();
 +
 +      DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
 +
 +      mmiowb();
 +
 +      txdata->tx_bd_prod += nbd;
 +
 +      if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
 +              netif_tx_stop_queue(txq);
 +
 +              /* paired memory barrier is in bnx2x_tx_int(), we have to keep
 +               * ordering of set_bit() in netif_tx_stop_queue() and read of
 +               * fp->bd_tx_cons */
 +              smp_mb();
 +
 +              fp->eth_q_stats.driver_xoff++;
 +              if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
 +                      netif_tx_wake_queue(txq);
 +      }
 +      txdata->tx_pkt++;
 +
 +      return NETDEV_TX_OK;
 +}
 +
 +/**
 + * bnx2x_setup_tc - routine to configure net_device for multi tc
 + *
 + * @netdev: net device to configure
 + * @tc: number of traffic classes to enable
 + *
 + * callback connected to the ndo_setup_tc function pointer
 + */
 +int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 +{
 +      int cos, prio, count, offset;
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      /* setup tc must be called under rtnl lock */
 +      ASSERT_RTNL();
 +
 +      /* no traffic classes requested. aborting */
 +      if (!num_tc) {
 +              netdev_reset_tc(dev);
 +              return 0;
 +      }
 +
 +      /* requested to support too many traffic classes */
 +      if (num_tc > bp->max_cos) {
 +              DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
 +                                   " requested: %d. max supported is %d\n",
 +                                   num_tc, bp->max_cos);
 +              return -EINVAL;
 +      }
 +
 +      /* declare amount of supported traffic classes */
 +      if (netdev_set_num_tc(dev, num_tc)) {
 +              DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
 +                                   num_tc);
 +              return -EINVAL;
 +      }
 +
 +      /* configure priority to traffic class mapping */
 +      for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
 +              netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
 +              DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
 +                 prio, bp->prio_to_cos[prio]);
 +      }
 +
 +
 +      /* Use this configuration to differentiate tc0 from other COSes.
 +         This can be used for ETS or PFC, and saves the effort of setting
 +         up a multi-class queue disc or negotiating DCBX with a switch:
 +      netdev_set_prio_tc_map(dev, 0, 0);
 +      DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
 +      for (prio = 1; prio < 16; prio++) {
 +              netdev_set_prio_tc_map(dev, prio, 1);
 +              DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
 +      } */
 +
 +      /* configure traffic class to transmission queue mapping */
 +      for (cos = 0; cos < bp->max_cos; cos++) {
 +              count = BNX2X_NUM_ETH_QUEUES(bp);
 +              offset = cos * MAX_TXQS_PER_COS;
 +              netdev_set_tc_queue(dev, cos, count, offset);
 +              DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
 +                 cos, offset, count);
 +      }
 +
 +      return 0;
 +}
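 +
 +/* Illustrative sketch (not part of the driver): assuming MAX_TXQS_PER_COS is
 + * 16, a call such as bnx2x_setup_tc(dev, 2) on an adapter with four ETH
 + * queues maps each priority to a class via bp->prio_to_cos[] and then assigns
 + * tc 0 the Tx queue range [0..3] and tc 1 the range [16..19], matching the
 + * 16-queue-per-CoS layout described above bnx2x_set_real_num_queues().
 + */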
 +
 +/* called with rtnl_lock */
 +int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 +{
 +      struct sockaddr *addr = p;
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int rc = 0;
 +
 +      if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
 +              return -EINVAL;
 +
 +      if (netif_running(dev))  {
 +              rc = bnx2x_set_eth_mac(bp, false);
 +              if (rc)
 +                      return rc;
 +      }
 +
 +      memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 +
 +      if (netif_running(dev))
 +              rc = bnx2x_set_eth_mac(bp, true);
 +
 +      return rc;
 +}
 +
 +static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 +{
 +      union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
 +      struct bnx2x_fastpath *fp = &bp->fp[fp_index];
 +      u8 cos;
 +
 +      /* Common */
 +#ifdef BCM_CNIC
 +      if (IS_FCOE_IDX(fp_index)) {
 +              memset(sb, 0, sizeof(union host_hc_status_block));
 +              fp->status_blk_mapping = 0;
 +
 +      } else {
 +#endif
 +              /* status blocks */
 +              if (!CHIP_IS_E1x(bp))
 +                      BNX2X_PCI_FREE(sb->e2_sb,
 +                                     bnx2x_fp(bp, fp_index,
 +                                              status_blk_mapping),
 +                                     sizeof(struct host_hc_status_block_e2));
 +              else
 +                      BNX2X_PCI_FREE(sb->e1x_sb,
 +                                     bnx2x_fp(bp, fp_index,
 +                                              status_blk_mapping),
 +                                     sizeof(struct host_hc_status_block_e1x));
 +#ifdef BCM_CNIC
 +      }
 +#endif
 +      /* Rx */
 +      if (!skip_rx_queue(bp, fp_index)) {
 +              bnx2x_free_rx_bds(fp);
 +
 +              /* fastpath rx rings: rx_buf rx_desc rx_comp */
 +              BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
 +              BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
 +                             bnx2x_fp(bp, fp_index, rx_desc_mapping),
 +                             sizeof(struct eth_rx_bd) * NUM_RX_BD);
 +
 +              BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
 +                             bnx2x_fp(bp, fp_index, rx_comp_mapping),
 +                             sizeof(struct eth_fast_path_rx_cqe) *
 +                             NUM_RCQ_BD);
 +
 +              /* SGE ring */
 +              BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
 +              BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
 +                             bnx2x_fp(bp, fp_index, rx_sge_mapping),
 +                             BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 +      }
 +
 +      /* Tx */
 +      if (!skip_tx_queue(bp, fp_index)) {
 +              /* fastpath tx rings: tx_buf tx_desc */
 +              for_each_cos_in_tx_queue(fp, cos) {
 +                      struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 +
 +                      DP(BNX2X_MSG_SP,
 +                         "freeing tx memory of fp %d cos %d cid %d\n",
 +                         fp_index, cos, txdata->cid);
 +
 +                      BNX2X_FREE(txdata->tx_buf_ring);
 +                      BNX2X_PCI_FREE(txdata->tx_desc_ring,
 +                              txdata->tx_desc_mapping,
 +                              sizeof(union eth_tx_bd_types) * NUM_TX_BD);
 +              }
 +      }
 +      /* end of fastpath */
 +}
 +
 +void bnx2x_free_fp_mem(struct bnx2x *bp)
 +{
 +      int i;
 +      for_each_queue(bp, i)
 +              bnx2x_free_fp_mem_at(bp, i);
 +}
 +
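 +/* Cache shortcut pointers to the index-value and running-index arrays of the
 + * chip-specific status block layout (E2 vs. E1x) for this fastpath.
 + */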
 +static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
 +{
 +      union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
 +      if (!CHIP_IS_E1x(bp)) {
 +              bnx2x_fp(bp, index, sb_index_values) =
 +                      (__le16 *)status_blk.e2_sb->sb.index_values;
 +              bnx2x_fp(bp, index, sb_running_index) =
 +                      (__le16 *)status_blk.e2_sb->sb.running_index;
 +      } else {
 +              bnx2x_fp(bp, index, sb_index_values) =
 +                      (__le16 *)status_blk.e1x_sb->sb.index_values;
 +              bnx2x_fp(bp, index, sb_running_index) =
 +                      (__le16 *)status_blk.e1x_sb->sb.running_index;
 +      }
 +}
 +
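 +/* Allocate per-queue resources for fastpath 'index': status block, one Tx
 + * ring per CoS and the Rx descriptor/completion/SGE rings.  On low memory the
 + * queue is kept as long as the FW-required minimum of Rx BDs was set up;
 + * otherwise its memory is released and -ENOMEM is returned.
 + */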
 +static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 +{
 +      union host_hc_status_block *sb;
 +      struct bnx2x_fastpath *fp = &bp->fp[index];
 +      int ring_size = 0;
 +      u8 cos;
++      int rx_ring_size = 0;
 +
 +      /* if rx_ring_size specified - use it */
-       /* allocate at least number of buffers required by FW */
-       rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
-                                                   MIN_RX_SIZE_TPA,
-                                 rx_ring_size);
++      if (!bp->rx_ring_size) {
 +
++              rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
++
++              /* allocate at least number of buffers required by FW */
++              rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
++                                   MIN_RX_SIZE_TPA, rx_ring_size);
++
++              bp->rx_ring_size = rx_ring_size;
++      } else
++              rx_ring_size = bp->rx_ring_size;
 +
 +      /* Common */
 +      sb = &bnx2x_fp(bp, index, status_blk);
 +#ifdef BCM_CNIC
 +      if (!IS_FCOE_IDX(index)) {
 +#endif
 +              /* status blocks */
 +              if (!CHIP_IS_E1x(bp))
 +                      BNX2X_PCI_ALLOC(sb->e2_sb,
 +                              &bnx2x_fp(bp, index, status_blk_mapping),
 +                              sizeof(struct host_hc_status_block_e2));
 +              else
 +                      BNX2X_PCI_ALLOC(sb->e1x_sb,
 +                              &bnx2x_fp(bp, index, status_blk_mapping),
 +                          sizeof(struct host_hc_status_block_e1x));
 +#ifdef BCM_CNIC
 +      }
 +#endif
 +
 +      /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
 +       * set shortcuts for it.
 +       */
 +      if (!IS_FCOE_IDX(index))
 +              set_sb_shortcuts(bp, index);
 +
 +      /* Tx */
 +      if (!skip_tx_queue(bp, index)) {
 +              /* fastpath tx rings: tx_buf tx_desc */
 +              for_each_cos_in_tx_queue(fp, cos) {
 +                      struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 +
 +                      DP(BNX2X_MSG_SP, "allocating tx memory of "
 +                                       "fp %d cos %d\n",
 +                         index, cos);
 +
 +                      BNX2X_ALLOC(txdata->tx_buf_ring,
 +                              sizeof(struct sw_tx_bd) * NUM_TX_BD);
 +                      BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
 +                              &txdata->tx_desc_mapping,
 +                              sizeof(union eth_tx_bd_types) * NUM_TX_BD);
 +              }
 +      }
 +
 +      /* Rx */
 +      if (!skip_rx_queue(bp, index)) {
 +              /* fastpath rx rings: rx_buf rx_desc rx_comp */
 +              BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
 +                              sizeof(struct sw_rx_bd) * NUM_RX_BD);
 +              BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
 +                              &bnx2x_fp(bp, index, rx_desc_mapping),
 +                              sizeof(struct eth_rx_bd) * NUM_RX_BD);
 +
 +              BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
 +                              &bnx2x_fp(bp, index, rx_comp_mapping),
 +                              sizeof(struct eth_fast_path_rx_cqe) *
 +                              NUM_RCQ_BD);
 +
 +              /* SGE ring */
 +              BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
 +                              sizeof(struct sw_rx_page) * NUM_RX_SGE);
 +              BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
 +                              &bnx2x_fp(bp, index, rx_sge_mapping),
 +                              BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 +              /* RX BD ring */
 +              bnx2x_set_next_page_rx_bd(fp);
 +
 +              /* CQ ring */
 +              bnx2x_set_next_page_rx_cq(fp);
 +
 +              /* BDs */
 +              ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
 +              if (ring_size < rx_ring_size)
 +                      goto alloc_mem_err;
 +      }
 +
 +      return 0;
 +
 +/* handles low memory cases */
 +alloc_mem_err:
 +      BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
 +                                              index, ring_size);
 +      /* FW will drop all packets if the queue is not big enough;
 +       * in that case we disable the queue.
 +       * Min size is different for OOO, TPA and non-TPA queues.
 +       */
 +      if (ring_size < (fp->disable_tpa ?
 +                              MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
 +                      /* release memory allocated for this queue */
 +                      bnx2x_free_fp_mem_at(bp, index);
 +                      return -ENOMEM;
 +      }
 +      return 0;
 +}
 +
 +int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      /**
 +       * 1. Allocate FP for leading - fatal if error
 +       * 2. {CNIC} Allocate FCoE FP - fatal if error
 +       * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
 +       * 4. Allocate RSS - fix number of queues if error
 +       */
 +
 +      /* leading */
 +      if (bnx2x_alloc_fp_mem_at(bp, 0))
 +              return -ENOMEM;
 +
 +#ifdef BCM_CNIC
 +      if (!NO_FCOE(bp))
 +              /* FCoE */
 +              if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
 +                      /* we will fail the load process instead of
 +                       * marking NO_FCOE_FLAG
 +                       */
 +                      return -ENOMEM;
 +#endif
 +
 +      /* RSS */
 +      for_each_nondefault_eth_queue(bp, i)
 +              if (bnx2x_alloc_fp_mem_at(bp, i))
 +                      break;
 +
 +      /* handle memory failures */
 +      if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
 +              int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
 +
 +              WARN_ON(delta < 0);
 +#ifdef BCM_CNIC
 +              /**
 +               * move non-eth FPs next to the last eth FP;
 +               * this must be done in the following order:
 +               * FCOE_IDX < FWD_IDX < OOO_IDX
 +               */
 +
 +              /* move FCoE fp even if NO_FCOE_FLAG is on */
 +              bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
 +#endif
 +              bp->num_queues -= delta;
 +              BNX2X_ERR("Adjusted num of queues from %d to %d\n",
 +                        bp->num_queues + delta, bp->num_queues);
 +      }
 +
 +      return 0;
 +}
 +
 +void bnx2x_free_mem_bp(struct bnx2x *bp)
 +{
 +      kfree(bp->fp);
 +      kfree(bp->msix_table);
 +      kfree(bp->ilt);
 +}
 +
 +int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 +{
 +      struct bnx2x_fastpath *fp;
 +      struct msix_entry *tbl;
 +      struct bnx2x_ilt *ilt;
 +      int msix_table_size = 0;
 +
 +      /*
 +       * The biggest MSI-X table we might need is as a maximum number of fast
 +       * path IGU SBs plus default SB (for PF).
 +       */
 +      msix_table_size = bp->igu_sb_cnt + 1;
 +
 +      /* fp array: RSS plus CNIC related L2 queues */
 +      fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
 +                   sizeof(*fp), GFP_KERNEL);
 +      if (!fp)
 +              goto alloc_err;
 +      bp->fp = fp;
 +
 +      /* msix table */
 +      tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
 +      if (!tbl)
 +              goto alloc_err;
 +      bp->msix_table = tbl;
 +
 +      /* ilt */
 +      ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
 +      if (!ilt)
 +              goto alloc_err;
 +      bp->ilt = ilt;
 +
 +      return 0;
 +alloc_err:
 +      bnx2x_free_mem_bp(bp);
 +      return -ENOMEM;
 +
 +}
 +
 +int bnx2x_reload_if_running(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (unlikely(!netif_running(dev)))
 +              return 0;
 +
 +      bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 +      return bnx2x_nic_load(bp, LOAD_NORMAL);
 +}
 +
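 +/* Return the index of the PHY that currently determines the link: the
 + * internal PHY on single-PHY boards, otherwise EXT_PHY1 or EXT_PHY2 depending
 + * on the active link (or on the configured PHY selection when the link is
 + * down).
 + */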
 +int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
 +{
 +      u32 sel_phy_idx = 0;
 +      if (bp->link_params.num_phys <= 1)
 +              return INT_PHY;
 +
 +      if (bp->link_vars.link_up) {
 +              sel_phy_idx = EXT_PHY1;
 +              /* In case link is SERDES, check if the EXT_PHY2 is the one */
 +              if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
 +                  (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
 +                      sel_phy_idx = EXT_PHY2;
 +      } else {
 +
 +              switch (bnx2x_phy_selection(&bp->link_params)) {
 +              case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
 +              case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
 +              case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
 +                     sel_phy_idx = EXT_PHY1;
 +                     break;
 +              case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
 +              case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
 +                     sel_phy_idx = EXT_PHY2;
 +                     break;
 +              }
 +      }
 +
 +      return sel_phy_idx;
 +
 +}
 +int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
 +{
 +      u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
 +      /*
 +       * The selected active PHY is always the one after swapping (when PHY
 +       * swapping is enabled), so in that case we need to reverse the
 +       * configuration
 +       */
 +
 +      if (bp->link_params.multi_phy_config &
 +          PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
 +              if (sel_phy_idx == EXT_PHY1)
 +                      sel_phy_idx = EXT_PHY2;
 +              else if (sel_phy_idx == EXT_PHY2)
 +                      sel_phy_idx = EXT_PHY1;
 +      }
 +      return LINK_CONFIG_IDX(sel_phy_idx);
 +}
 +
 +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
 +int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
 +      switch (type) {
 +      case NETDEV_FCOE_WWNN:
 +              *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
 +                              cp->fcoe_wwn_node_name_lo);
 +              break;
 +      case NETDEV_FCOE_WWPN:
 +              *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
 +                              cp->fcoe_wwn_port_name_lo);
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +#endif
 +
 +/* called with rtnl_lock */
 +int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 +              pr_err("Handling parity error recovery. Try again later\n");
 +              return -EAGAIN;
 +      }
 +
 +      if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
 +          ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
 +              return -EINVAL;
 +
 +      /* This does not race with packet allocation
 +       * because the actual alloc size is
 +       * only updated as part of load
 +       */
 +      dev->mtu = new_mtu;
 +
 +      return bnx2x_reload_if_running(dev);
 +}
 +
 +u32 bnx2x_fix_features(struct net_device *dev, u32 features)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      /* TPA requires Rx CSUM offloading */
 +      if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
 +              features &= ~NETIF_F_LRO;
 +
 +      return features;
 +}
 +
 +int bnx2x_set_features(struct net_device *dev, u32 features)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      u32 flags = bp->flags;
 +      bool bnx2x_reload = false;
 +
 +      if (features & NETIF_F_LRO)
 +              flags |= TPA_ENABLE_FLAG;
 +      else
 +              flags &= ~TPA_ENABLE_FLAG;
 +
 +      if (features & NETIF_F_LOOPBACK) {
 +              if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
 +                      bp->link_params.loopback_mode = LOOPBACK_BMAC;
 +                      bnx2x_reload = true;
 +              }
 +      } else {
 +              if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
 +                      bp->link_params.loopback_mode = LOOPBACK_NONE;
 +                      bnx2x_reload = true;
 +              }
 +      }
 +
 +      if (flags ^ bp->flags) {
 +              bp->flags = flags;
 +              bnx2x_reload = true;
 +      }
 +
 +      if (bnx2x_reload) {
 +              if (bp->recovery_state == BNX2X_RECOVERY_DONE)
 +                      return bnx2x_reload_if_running(dev);
 +              /* else: bnx2x_nic_load() will be called at end of recovery */
 +      }
 +
 +      return 0;
 +}
 +
 +void bnx2x_tx_timeout(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (!bp->panic)
 +              bnx2x_panic();
 +#endif
 +
 +      smp_mb__before_clear_bit();
 +      set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
 +      smp_mb__after_clear_bit();
 +
 +      /* This allows the netif to be shut down gracefully before resetting */
 +      schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +}
 +
 +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct bnx2x *bp;
 +
 +      if (!dev) {
 +              dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
 +              return -ENODEV;
 +      }
 +      bp = netdev_priv(dev);
 +
 +      rtnl_lock();
 +
 +      pci_save_state(pdev);
 +
 +      if (!netif_running(dev)) {
 +              rtnl_unlock();
 +              return 0;
 +      }
 +
 +      netif_device_detach(dev);
 +
 +      bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 +
 +      bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
 +
 +      rtnl_unlock();
 +
 +      return 0;
 +}
 +
 +int bnx2x_resume(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct bnx2x *bp;
 +      int rc;
 +
 +      if (!dev) {
 +              dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
 +              return -ENODEV;
 +      }
 +      bp = netdev_priv(dev);
 +
 +      if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 +              pr_err("Handling parity error recovery. Try again later\n");
 +              return -EAGAIN;
 +      }
 +
 +      rtnl_lock();
 +
 +      pci_restore_state(pdev);
 +
 +      if (!netif_running(dev)) {
 +              rtnl_unlock();
 +              return 0;
 +      }
 +
 +      bnx2x_set_power_state(bp, PCI_D0);
 +      netif_device_attach(dev);
 +
 +      /* Since the chip was reset, clear the FW sequence number */
 +      bp->fw_seq = 0;
 +      rc = bnx2x_nic_load(bp, LOAD_OPEN);
 +
 +      rtnl_unlock();
 +
 +      return rc;
 +}
 +
 +
 +void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
 +                            u32 cid)
 +{
 +      /* ustorm cxt validation */
 +      cxt->ustorm_ag_context.cdu_usage =
 +              CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
 +                      CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
 +      /* xcontext validation */
 +      cxt->xstorm_ag_context.cdu_reserved =
 +              CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
 +                      CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
 +}
 +
 +static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
 +                                           u8 fw_sb_id, u8 sb_index,
 +                                           u8 ticks)
 +{
 +
 +      u32 addr = BAR_CSTRORM_INTMEM +
 +                 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
 +      REG_WR8(bp, addr, ticks);
 +      DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
 +                        port, fw_sb_id, sb_index, ticks);
 +}
 +
 +static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
 +                                           u16 fw_sb_id, u8 sb_index,
 +                                           u8 disable)
 +{
 +      u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
 +      u32 addr = BAR_CSTRORM_INTMEM +
 +                 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
 +      u16 flags = REG_RD16(bp, addr);
 +      /* clear and set */
 +      flags &= ~HC_INDEX_DATA_HC_ENABLED;
 +      flags |= enable_flag;
 +      REG_WR16(bp, addr, flags);
 +      DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
 +                        port, fw_sb_id, sb_index, disable);
 +}
 +
 +void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
 +                                  u8 sb_index, u8 disable, u16 usec)
 +{
 +      int port = BP_PORT(bp);
 +      u8 ticks = usec / BNX2X_BTR;
 +
 +      storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
 +
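 +      /* coalescing stays disabled when explicitly requested or when the
 +       * timeout is zero
 +       */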
 +      disable = disable ? 1 : (usec ? 0 : 1);
 +      storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
 +}
index ce14f11,0000000..a49f8cf
mode 100644,000000..100644
--- /dev/null
@@@ -1,2358 -1,0 +1,2392 @@@
-               cmd->advertising &= bp->port.supported[cfg_idx];
 +/* bnx2x_ethtool.c: Broadcom Everest network driver.
 + *
 + * Copyright (c) 2007-2011 Broadcom Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation.
 + *
 + * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 + * Written by: Eliezer Tamir
 + * Based on code from Michael Chan's bnx2 driver
 + * UDP CSUM errata workaround by Arik Gendelman
 + * Slowpath and fastpath rework by Vladislav Zolotarov
 + * Statistics and Link management by Yitchak Gertner
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/ethtool.h>
 +#include <linux/netdevice.h>
 +#include <linux/types.h>
 +#include <linux/sched.h>
 +#include <linux/crc32.h>
 +
 +
 +#include "bnx2x.h"
 +#include "bnx2x_cmn.h"
 +#include "bnx2x_dump.h"
 +#include "bnx2x_init.h"
 +#include "bnx2x_sp.h"
 +
 +/* Note: in the format strings below %s is replaced by the queue-name which is
 + * either its index or 'fcoe' for the fcoe queue. Make sure the format string
 + * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
 + */
 +#define MAX_QUEUE_NAME_LEN    4
 +static const struct {
 +      long offset;
 +      int size;
 +      char string[ETH_GSTRING_LEN];
 +} bnx2x_q_stats_arr[] = {
 +/* 1 */       { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
 +      { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
 +                                              8, "[%s]: rx_ucast_packets" },
 +      { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
 +                                              8, "[%s]: rx_mcast_packets" },
 +      { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
 +                                              8, "[%s]: rx_bcast_packets" },
 +      { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
 +      { Q_STATS_OFFSET32(rx_err_discard_pkt),
 +                                       4, "[%s]: rx_phy_ip_err_discards"},
 +      { Q_STATS_OFFSET32(rx_skb_alloc_failed),
 +                                       4, "[%s]: rx_skb_alloc_discard" },
 +      { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
 +
 +      { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
 +/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
 +                                              8, "[%s]: tx_ucast_packets" },
 +      { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
 +                                              8, "[%s]: tx_mcast_packets" },
 +      { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
 +                                              8, "[%s]: tx_bcast_packets" },
 +      { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
 +                                              8, "[%s]: tpa_aggregations" },
 +      { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
 +                                      8, "[%s]: tpa_aggregated_frames"},
 +      { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}
 +};
 +
 +#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
 +
 +static const struct {
 +      long offset;
 +      int size;
 +      u32 flags;
 +#define STATS_FLAGS_PORT              1
 +#define STATS_FLAGS_FUNC              2
 +#define STATS_FLAGS_BOTH              (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
 +      char string[ETH_GSTRING_LEN];
 +} bnx2x_stats_arr[] = {
 +/* 1 */       { STATS_OFFSET32(total_bytes_received_hi),
 +                              8, STATS_FLAGS_BOTH, "rx_bytes" },
 +      { STATS_OFFSET32(error_bytes_received_hi),
 +                              8, STATS_FLAGS_BOTH, "rx_error_bytes" },
 +      { STATS_OFFSET32(total_unicast_packets_received_hi),
 +                              8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
 +      { STATS_OFFSET32(total_multicast_packets_received_hi),
 +                              8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
 +      { STATS_OFFSET32(total_broadcast_packets_received_hi),
 +                              8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
 +      { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
 +                              8, STATS_FLAGS_PORT, "rx_crc_errors" },
 +      { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
 +                              8, STATS_FLAGS_PORT, "rx_align_errors" },
 +      { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
 +                              8, STATS_FLAGS_PORT, "rx_undersize_packets" },
 +      { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
 +                              8, STATS_FLAGS_PORT, "rx_oversize_packets" },
 +/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
 +                              8, STATS_FLAGS_PORT, "rx_fragments" },
 +      { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
 +                              8, STATS_FLAGS_PORT, "rx_jabbers" },
 +      { STATS_OFFSET32(no_buff_discard_hi),
 +                              8, STATS_FLAGS_BOTH, "rx_discards" },
 +      { STATS_OFFSET32(mac_filter_discard),
 +                              4, STATS_FLAGS_PORT, "rx_filtered_packets" },
 +      { STATS_OFFSET32(mf_tag_discard),
 +                              4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
 +      { STATS_OFFSET32(brb_drop_hi),
 +                              8, STATS_FLAGS_PORT, "rx_brb_discard" },
 +      { STATS_OFFSET32(brb_truncate_hi),
 +                              8, STATS_FLAGS_PORT, "rx_brb_truncate" },
 +      { STATS_OFFSET32(pause_frames_received_hi),
 +                              8, STATS_FLAGS_PORT, "rx_pause_frames" },
 +      { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
 +                              8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
 +      { STATS_OFFSET32(nig_timer_max),
 +                      4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
 +/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
 +                              4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
 +      { STATS_OFFSET32(rx_skb_alloc_failed),
 +                              4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
 +      { STATS_OFFSET32(hw_csum_err),
 +                              4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
 +
 +      { STATS_OFFSET32(total_bytes_transmitted_hi),
 +                              8, STATS_FLAGS_BOTH, "tx_bytes" },
 +      { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
 +                              8, STATS_FLAGS_PORT, "tx_error_bytes" },
 +      { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
 +                              8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
 +      { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
 +                              8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
 +      { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
 +                              8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
 +      { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
 +                              8, STATS_FLAGS_PORT, "tx_mac_errors" },
 +      { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
 +                              8, STATS_FLAGS_PORT, "tx_carrier_errors" },
 +/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
 +                              8, STATS_FLAGS_PORT, "tx_single_collisions" },
 +      { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
 +                              8, STATS_FLAGS_PORT, "tx_multi_collisions" },
 +      { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
 +                              8, STATS_FLAGS_PORT, "tx_deferred" },
 +      { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
 +                              8, STATS_FLAGS_PORT, "tx_excess_collisions" },
 +      { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
 +                              8, STATS_FLAGS_PORT, "tx_late_collisions" },
 +      { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
 +                              8, STATS_FLAGS_PORT, "tx_total_collisions" },
 +      { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
 +                              8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
 +      { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
 +                      8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
 +      { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
 +                      8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
 +      { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
 +                      8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
 +/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
 +                      8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
 +      { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
 +                      8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
 +      { STATS_OFFSET32(etherstatspktsover1522octets_hi),
 +                      8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
 +      { STATS_OFFSET32(pause_frames_sent_hi),
 +                              8, STATS_FLAGS_PORT, "tx_pause_frames" },
 +      { STATS_OFFSET32(total_tpa_aggregations_hi),
 +                      8, STATS_FLAGS_FUNC, "tpa_aggregations" },
 +      { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
 +                      8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
 +      { STATS_OFFSET32(total_tpa_bytes_hi),
 +                      8, STATS_FLAGS_FUNC, "tpa_bytes"}
 +};
 +
 +#define BNX2X_NUM_STATS               ARRAY_SIZE(bnx2x_stats_arr)
 +static int bnx2x_get_port_type(struct bnx2x *bp)
 +{
 +      int port_type;
 +      u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
 +      switch (bp->link_params.phy[phy_idx].media_type) {
 +      case ETH_PHY_SFP_FIBER:
 +      case ETH_PHY_XFP_FIBER:
 +      case ETH_PHY_KR:
 +      case ETH_PHY_CX4:
 +              port_type = PORT_FIBRE;
 +              break;
 +      case ETH_PHY_DA_TWINAX:
 +              port_type = PORT_DA;
 +              break;
 +      case ETH_PHY_BASE_T:
 +              port_type = PORT_TP;
 +              break;
 +      case ETH_PHY_NOT_PRESENT:
 +              port_type = PORT_NONE;
 +              break;
 +      case ETH_PHY_UNSPECIFIED:
 +      default:
 +              port_type = PORT_OTHER;
 +              break;
 +      }
 +      return port_type;
 +}
 +
 +static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int cfg_idx = bnx2x_get_link_cfg_idx(bp);
 +
 +      /* Dual Media boards present all available port types */
 +      cmd->supported = bp->port.supported[cfg_idx] |
 +              (bp->port.supported[cfg_idx ^ 1] &
 +               (SUPPORTED_TP | SUPPORTED_FIBRE));
 +      cmd->advertising = bp->port.advertising[cfg_idx];
 +
 +      if ((bp->state == BNX2X_STATE_OPEN) &&
 +          !(bp->flags & MF_FUNC_DIS) &&
 +          (bp->link_vars.link_up)) {
 +              ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
 +              cmd->duplex = bp->link_vars.duplex;
 +      } else {
 +              ethtool_cmd_speed_set(
 +                      cmd, bp->link_params.req_line_speed[cfg_idx]);
 +              cmd->duplex = bp->link_params.req_duplex[cfg_idx];
 +      }
 +
 +      if (IS_MF(bp))
 +              ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
 +
 +      cmd->port = bnx2x_get_port_type(bp);
 +
 +      cmd->phy_address = bp->mdio.prtad;
 +      cmd->transceiver = XCVR_INTERNAL;
 +
 +      if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
 +              cmd->autoneg = AUTONEG_ENABLE;
 +      else
 +              cmd->autoneg = AUTONEG_DISABLE;
 +
 +      cmd->maxtxpkt = 0;
 +      cmd->maxrxpkt = 0;
 +
 +      DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
 +         "  supported 0x%x  advertising 0x%x  speed %u\n"
 +         "  duplex %d  port %d  phy_address %d  transceiver %d\n"
 +         "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
 +         cmd->cmd, cmd->supported, cmd->advertising,
 +         ethtool_cmd_speed(cmd),
 +         cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
 +         cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
 +      u32 speed;
 +
 +      if (IS_MF_SD(bp))
 +              return 0;
 +
 +      DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
 +         "  supported 0x%x  advertising 0x%x  speed %u\n"
 +         "  duplex %d  port %d  phy_address %d  transceiver %d\n"
 +         "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
 +         cmd->cmd, cmd->supported, cmd->advertising,
 +         ethtool_cmd_speed(cmd),
 +         cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
 +         cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
 +
 +      speed = ethtool_cmd_speed(cmd);
 +
 +      if (IS_MF_SI(bp)) {
 +              u32 part;
 +              u32 line_speed = bp->link_vars.line_speed;
 +
 +              /* use 10G if no link detected */
 +              if (!line_speed)
 +                      line_speed = 10000;
 +
 +              if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
 +                      BNX2X_DEV_INFO("To set speed BC %X or higher "
 +                                     "is required, please upgrade BC\n",
 +                                     REQ_BC_VER_4_SET_MF_BW);
 +                      return -EINVAL;
 +              }
 +
 +              part = (speed * 100) / line_speed;
 +
 +              if (line_speed < speed || !part) {
 +                      BNX2X_DEV_INFO("Speed setting should be in a range "
 +                                     "from 1%% to 100%% "
 +                                     "of actual line speed\n");
 +                      return -EINVAL;
 +              }
 +
 +              if (bp->state != BNX2X_STATE_OPEN)
 +                      /* store value for following "load" */
 +                      bp->pending_max = part;
 +              else
 +                      bnx2x_update_max_mf_config(bp, part);
 +
 +              return 0;
 +      }
 +
 +      cfg_idx = bnx2x_get_link_cfg_idx(bp);
 +      old_multi_phy_config = bp->link_params.multi_phy_config;
 +      switch (cmd->port) {
 +      case PORT_TP:
 +              if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
 +                      break; /* no port change */
 +
 +              if (!(bp->port.supported[0] & SUPPORTED_TP ||
 +                    bp->port.supported[1] & SUPPORTED_TP)) {
 +                      DP(NETIF_MSG_LINK, "Unsupported port type\n");
 +                      return -EINVAL;
 +              }
 +              bp->link_params.multi_phy_config &=
 +                      ~PORT_HW_CFG_PHY_SELECTION_MASK;
 +              if (bp->link_params.multi_phy_config &
 +                  PORT_HW_CFG_PHY_SWAPPED_ENABLED)
 +                      bp->link_params.multi_phy_config |=
 +                      PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
 +              else
 +                      bp->link_params.multi_phy_config |=
 +                      PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
 +              break;
 +      case PORT_FIBRE:
 +              if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
 +                      break; /* no port change */
 +
 +              if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
 +                    bp->port.supported[1] & SUPPORTED_FIBRE)) {
 +                      DP(NETIF_MSG_LINK, "Unsupported port type\n");
 +                      return -EINVAL;
 +              }
 +              bp->link_params.multi_phy_config &=
 +                      ~PORT_HW_CFG_PHY_SELECTION_MASK;
 +              if (bp->link_params.multi_phy_config &
 +                  PORT_HW_CFG_PHY_SWAPPED_ENABLED)
 +                      bp->link_params.multi_phy_config |=
 +                      PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
 +              else
 +                      bp->link_params.multi_phy_config |=
 +                      PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
 +              break;
 +      default:
 +              DP(NETIF_MSG_LINK, "Unsupported port type\n");
 +              return -EINVAL;
 +      }
 +      /* Save the new config in case the command completes successfully */
 +      new_multi_phy_config = bp->link_params.multi_phy_config;
 +      /* Get the new cfg_idx */
 +      cfg_idx = bnx2x_get_link_cfg_idx(bp);
 +      /* Restore old config in case command failed */
 +      bp->link_params.multi_phy_config = old_multi_phy_config;
 +      DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
 +
 +      if (cmd->autoneg == AUTONEG_ENABLE) {
 +              if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
 +                      DP(NETIF_MSG_LINK, "Autoneg not supported\n");
 +                      return -EINVAL;
 +              }
 +
 +              /* advertise the requested speed and duplex if supported */
-               bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
-               bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
++              if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
++                      DP(NETIF_MSG_LINK, "Advertisement parameters "
++                                         "are not supported\n");
++                      return -EINVAL;
++              }
 +
 +              bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
-               if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
-                       ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
-               else
-                       ering->rx_pending = MAX_RX_AVAIL;
++              bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
++              bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
 +                                       cmd->advertising);
++              if (cmd->advertising) {
++
++                      bp->link_params.speed_cap_mask[cfg_idx] = 0;
++                      if (cmd->advertising & ADVERTISED_10baseT_Half) {
++                              bp->link_params.speed_cap_mask[cfg_idx] |=
++                              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
++                      }
++                      if (cmd->advertising & ADVERTISED_10baseT_Full)
++                              bp->link_params.speed_cap_mask[cfg_idx] |=
++                              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
 +
++                      if (cmd->advertising & ADVERTISED_100baseT_Full)
++                              bp->link_params.speed_cap_mask[cfg_idx] |=
++                              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
++
++                      if (cmd->advertising & ADVERTISED_100baseT_Half) {
++                              bp->link_params.speed_cap_mask[cfg_idx] |=
++                                   PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
++                      }
++                      if (cmd->advertising & ADVERTISED_1000baseT_Half) {
++                              bp->link_params.speed_cap_mask[cfg_idx] |=
++                                      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
++                      }
++                      if (cmd->advertising & (ADVERTISED_1000baseT_Full |
++                                              ADVERTISED_1000baseKX_Full))
++                              bp->link_params.speed_cap_mask[cfg_idx] |=
++                                      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
++
++                      if (cmd->advertising & (ADVERTISED_10000baseT_Full |
++                                              ADVERTISED_10000baseKX4_Full |
++                                              ADVERTISED_10000baseKR_Full))
++                              bp->link_params.speed_cap_mask[cfg_idx] |=
++                                      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
++              }
 +      } else { /* forced speed */
 +              /* advertise the requested speed and duplex if supported */
 +              switch (speed) {
 +              case SPEED_10:
 +                      if (cmd->duplex == DUPLEX_FULL) {
 +                              if (!(bp->port.supported[cfg_idx] &
 +                                    SUPPORTED_10baseT_Full)) {
 +                                      DP(NETIF_MSG_LINK,
 +                                         "10M full not supported\n");
 +                                      return -EINVAL;
 +                              }
 +
 +                              advertising = (ADVERTISED_10baseT_Full |
 +                                             ADVERTISED_TP);
 +                      } else {
 +                              if (!(bp->port.supported[cfg_idx] &
 +                                    SUPPORTED_10baseT_Half)) {
 +                                      DP(NETIF_MSG_LINK,
 +                                         "10M half not supported\n");
 +                                      return -EINVAL;
 +                              }
 +
 +                              advertising = (ADVERTISED_10baseT_Half |
 +                                             ADVERTISED_TP);
 +                      }
 +                      break;
 +
 +              case SPEED_100:
 +                      if (cmd->duplex == DUPLEX_FULL) {
 +                              if (!(bp->port.supported[cfg_idx] &
 +                                              SUPPORTED_100baseT_Full)) {
 +                                      DP(NETIF_MSG_LINK,
 +                                         "100M full not supported\n");
 +                                      return -EINVAL;
 +                              }
 +
 +                              advertising = (ADVERTISED_100baseT_Full |
 +                                             ADVERTISED_TP);
 +                      } else {
 +                              if (!(bp->port.supported[cfg_idx] &
 +                                              SUPPORTED_100baseT_Half)) {
 +                                      DP(NETIF_MSG_LINK,
 +                                         "100M half not supported\n");
 +                                      return -EINVAL;
 +                              }
 +
 +                              advertising = (ADVERTISED_100baseT_Half |
 +                                             ADVERTISED_TP);
 +                      }
 +                      break;
 +
 +              case SPEED_1000:
 +                      if (cmd->duplex != DUPLEX_FULL) {
 +                              DP(NETIF_MSG_LINK, "1G half not supported\n");
 +                              return -EINVAL;
 +                      }
 +
 +                      if (!(bp->port.supported[cfg_idx] &
 +                            SUPPORTED_1000baseT_Full)) {
 +                              DP(NETIF_MSG_LINK, "1G full not supported\n");
 +                              return -EINVAL;
 +                      }
 +
 +                      advertising = (ADVERTISED_1000baseT_Full |
 +                                     ADVERTISED_TP);
 +                      break;
 +
 +              case SPEED_2500:
 +                      if (cmd->duplex != DUPLEX_FULL) {
 +                              DP(NETIF_MSG_LINK,
 +                                 "2.5G half not supported\n");
 +                              return -EINVAL;
 +                      }
 +
 +                      if (!(bp->port.supported[cfg_idx]
 +                            & SUPPORTED_2500baseX_Full)) {
 +                              DP(NETIF_MSG_LINK,
 +                                 "2.5G full not supported\n");
 +                              return -EINVAL;
 +                      }
 +
 +                      advertising = (ADVERTISED_2500baseX_Full |
 +                                     ADVERTISED_TP);
 +                      break;
 +
 +              case SPEED_10000:
 +                      if (cmd->duplex != DUPLEX_FULL) {
 +                              DP(NETIF_MSG_LINK, "10G half not supported\n");
 +                              return -EINVAL;
 +                      }
 +
 +                      if (!(bp->port.supported[cfg_idx]
 +                            & SUPPORTED_10000baseT_Full)) {
 +                              DP(NETIF_MSG_LINK, "10G full not supported\n");
 +                              return -EINVAL;
 +                      }
 +
 +                      advertising = (ADVERTISED_10000baseT_Full |
 +                                     ADVERTISED_FIBRE);
 +                      break;
 +
 +              default:
 +                      DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
 +                      return -EINVAL;
 +              }
 +
 +              bp->link_params.req_line_speed[cfg_idx] = speed;
 +              bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
 +              bp->port.advertising[cfg_idx] = advertising;
 +      }
 +
 +      DP(NETIF_MSG_LINK, "req_line_speed %d\n"
 +         "  req_duplex %d  advertising 0x%x\n",
 +         bp->link_params.req_line_speed[cfg_idx],
 +         bp->link_params.req_duplex[cfg_idx],
 +         bp->port.advertising[cfg_idx]);
 +
 +      /* Set new config */
 +      bp->link_params.multi_phy_config = new_multi_phy_config;
 +      if (netif_running(dev)) {
 +              bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +              bnx2x_link_set(bp);
 +      }
 +
 +      return 0;
 +}
 +
 +#define IS_E1_ONLINE(info)    (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
 +#define IS_E1H_ONLINE(info)   (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
 +#define IS_E2_ONLINE(info)    (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
 +#define IS_E3_ONLINE(info)    (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
 +#define IS_E3B0_ONLINE(info)  (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
 +
 +static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
 +                                     const struct reg_addr *reg_info)
 +{
 +      if (CHIP_IS_E1(bp))
 +              return IS_E1_ONLINE(reg_info->info);
 +      else if (CHIP_IS_E1H(bp))
 +              return IS_E1H_ONLINE(reg_info->info);
 +      else if (CHIP_IS_E2(bp))
 +              return IS_E2_ONLINE(reg_info->info);
 +      else if (CHIP_IS_E3A0(bp))
 +              return IS_E3_ONLINE(reg_info->info);
 +      else if (CHIP_IS_E3B0(bp))
 +              return IS_E3B0_ONLINE(reg_info->info);
 +      else
 +              return false;
 +}
 +
 +/******* Paged registers info selectors ********/
 +static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
 +{
 +      if (CHIP_IS_E2(bp))
 +              return page_vals_e2;
 +      else if (CHIP_IS_E3(bp))
 +              return page_vals_e3;
 +      else
 +              return NULL;
 +}
 +
 +static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
 +{
 +      if (CHIP_IS_E2(bp))
 +              return PAGE_MODE_VALUES_E2;
 +      else if (CHIP_IS_E3(bp))
 +              return PAGE_MODE_VALUES_E3;
 +      else
 +              return 0;
 +}
 +
 +static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
 +{
 +      if (CHIP_IS_E2(bp))
 +              return page_write_regs_e2;
 +      else if (CHIP_IS_E3(bp))
 +              return page_write_regs_e3;
 +      else
 +              return NULL;
 +}
 +
 +static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
 +{
 +      if (CHIP_IS_E2(bp))
 +              return PAGE_WRITE_REGS_E2;
 +      else if (CHIP_IS_E3(bp))
 +              return PAGE_WRITE_REGS_E3;
 +      else
 +              return 0;
 +}
 +
 +static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
 +{
 +      if (CHIP_IS_E2(bp))
 +              return page_read_regs_e2;
 +      else if (CHIP_IS_E3(bp))
 +              return page_read_regs_e3;
 +      else
 +              return NULL;
 +}
 +
 +static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
 +{
 +      if (CHIP_IS_E2(bp))
 +              return PAGE_READ_REGS_E2;
 +      else if (CHIP_IS_E3(bp))
 +              return PAGE_READ_REGS_E3;
 +      else
 +              return 0;
 +}
 +
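 +/* Count the 32-bit words a register dump will contain for this chip: all
 + * online regular registers plus every online paged register for each
 + * page/write-address combination.
 + */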
 +static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
 +{
 +      int num_pages = __bnx2x_get_page_reg_num(bp);
 +      int page_write_num = __bnx2x_get_page_write_num(bp);
 +      const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
 +      int page_read_num = __bnx2x_get_page_read_num(bp);
 +      int regdump_len = 0;
 +      int i, j, k;
 +
 +      for (i = 0; i < REGS_COUNT; i++)
 +              if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
 +                      regdump_len += reg_addrs[i].size;
 +
 +      for (i = 0; i < num_pages; i++)
 +              for (j = 0; j < page_write_num; j++)
 +                      for (k = 0; k < page_read_num; k++)
 +                              if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
 +                                      regdump_len += page_read_addr[k].size;
 +
 +      return regdump_len;
 +}
 +
 +static int bnx2x_get_regs_len(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int regdump_len = 0;
 +
 +      regdump_len = __bnx2x_get_regs_len(bp);
 +      regdump_len *= 4;
 +      regdump_len += sizeof(struct dump_hdr);
 +
 +      return regdump_len;
 +}
 +
 +/**
 + * bnx2x_read_pages_regs - read "paged" registers
 + *
 + * @bp                device handle
 + * @p         output buffer
 + *
 + * Reads "paged" memories: memories that may only be read by first writing to a
 + * specific address ("write address") and then reading from a specific address
 + * ("read address"). There may be more than one write address per "page" and
 + * more than one read address per write address.
 + */
 +static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
 +{
 +      u32 i, j, k, n;
 +      /* addresses of the paged registers */
 +      const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
 +      /* number of paged registers */
 +      int num_pages = __bnx2x_get_page_reg_num(bp);
 +      /* write addresses */
 +      const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
 +      /* number of write addresses */
 +      int write_num = __bnx2x_get_page_write_num(bp);
 +      /* read addresses info */
 +      const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
 +      /* number of read addresses */
 +      int read_num = __bnx2x_get_page_read_num(bp);
 +
 +      for (i = 0; i < num_pages; i++) {
 +              for (j = 0; j < write_num; j++) {
 +                      REG_WR(bp, write_addr[j], page_addr[i]);
 +                      for (k = 0; k < read_num; k++)
 +                              if (bnx2x_is_reg_online(bp, &read_addr[k]))
 +                                      for (n = 0; n <
 +                                            read_addr[k].size; n++)
 +                                              *p++ = REG_RD(bp,
 +                                                     read_addr[k].addr + n*4);
 +              }
 +      }
 +}
 +
 +static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
 +{
 +      u32 i, j;
 +
 +      /* Read the regular registers */
 +      for (i = 0; i < REGS_COUNT; i++)
 +              if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
 +                      for (j = 0; j < reg_addrs[i].size; j++)
 +                              *p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
 +
 +      /* Read "paged" registers */
 +      bnx2x_read_pages_regs(bp, p);
 +}
 +
 +static void bnx2x_get_regs(struct net_device *dev,
 +                         struct ethtool_regs *regs, void *_p)
 +{
 +      u32 *p = _p;
 +      struct bnx2x *bp = netdev_priv(dev);
 +      struct dump_hdr dump_hdr = {0};
 +
 +      regs->version = 0;
 +      memset(p, 0, regs->len);
 +
 +      if (!netif_running(bp->dev))
 +              return;
 +
 +      /* Disable parity attentions, since the following dump may cause
 +       * false alarms by reading registers that were never written. We
 +       * will re-enable parity attentions right after the dump.
 +       */
 +      bnx2x_disable_blocks_parity(bp);
 +
 +      dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
 +      dump_hdr.dump_sign = dump_sign_all;
 +      dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
 +      dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
 +      dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
 +      dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
 +
 +      if (CHIP_IS_E1(bp))
 +              dump_hdr.info = RI_E1_ONLINE;
 +      else if (CHIP_IS_E1H(bp))
 +              dump_hdr.info = RI_E1H_ONLINE;
 +      else if (!CHIP_IS_E1x(bp))
 +              dump_hdr.info = RI_E2_ONLINE |
 +              (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
 +
 +      memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
 +      p += dump_hdr.hdr_size + 1;
 +
 +      /* Actually read the registers */
 +      __bnx2x_get_regs(bp, p);
 +
 +      /* Re-enable parity attentions */
 +      bnx2x_clear_blocks_parity(bp);
 +      bnx2x_enable_blocks_parity(bp);
 +}
 +
 +static void bnx2x_get_drvinfo(struct net_device *dev,
 +                            struct ethtool_drvinfo *info)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      u8 phy_fw_ver[PHY_FW_VER_LEN];
 +
 +      strcpy(info->driver, DRV_MODULE_NAME);
 +      strcpy(info->version, DRV_MODULE_VERSION);
 +
 +      phy_fw_ver[0] = '\0';
 +      if (bp->port.pmf) {
 +              bnx2x_acquire_phy_lock(bp);
 +              bnx2x_get_ext_phy_fw_version(&bp->link_params,
 +                                           (bp->state != BNX2X_STATE_CLOSED),
 +                                           phy_fw_ver, PHY_FW_VER_LEN);
 +              bnx2x_release_phy_lock(bp);
 +      }
 +
 +      strncpy(info->fw_version, bp->fw_ver, 32);
 +      snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
 +               "bc %d.%d.%d%s%s",
 +               (bp->common.bc_ver & 0xff0000) >> 16,
 +               (bp->common.bc_ver & 0xff00) >> 8,
 +               (bp->common.bc_ver & 0xff),
 +               ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
 +      strcpy(info->bus_info, pci_name(bp->pdev));
 +      info->n_stats = BNX2X_NUM_STATS;
 +      info->testinfo_len = BNX2X_NUM_TESTS;
 +      info->eedump_len = bp->common.flash_size;
 +      info->regdump_len = bnx2x_get_regs_len(dev);
 +}
 +
 +static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (bp->flags & NO_WOL_FLAG) {
 +              wol->supported = 0;
 +              wol->wolopts = 0;
 +      } else {
 +              wol->supported = WAKE_MAGIC;
 +              if (bp->wol)
 +                      wol->wolopts = WAKE_MAGIC;
 +              else
 +                      wol->wolopts = 0;
 +      }
 +      memset(&wol->sopass, 0, sizeof(wol->sopass));
 +}
 +
 +static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (wol->wolopts & ~WAKE_MAGIC)
 +              return -EINVAL;
 +
 +      if (wol->wolopts & WAKE_MAGIC) {
 +              if (bp->flags & NO_WOL_FLAG)
 +                      return -EINVAL;
 +
 +              bp->wol = 1;
 +      } else
 +              bp->wol = 0;
 +
 +      return 0;
 +}
 +
 +static u32 bnx2x_get_msglevel(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      return bp->msg_enable;
 +}
 +
 +static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (capable(CAP_NET_ADMIN)) {
 +              /* dump MCP trace */
 +              if (level & BNX2X_MSG_MCP)
 +                      bnx2x_fw_dump_lvl(bp, KERN_INFO);
 +              bp->msg_enable = level;
 +      }
 +}
 +
 +static int bnx2x_nway_reset(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (!bp->port.pmf)
 +              return 0;
 +
 +      if (netif_running(dev)) {
 +              bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +              bnx2x_link_set(bp);
 +      }
 +
 +      return 0;
 +}
 +
 +static u32 bnx2x_get_link(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
 +              return 0;
 +
 +      return bp->link_vars.link_up;
 +}
 +
 +static int bnx2x_get_eeprom_len(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      return bp->common.flash_size;
 +}
 +
 +static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      int count, i;
 +      u32 val = 0;
 +
 +      /* adjust timeout for emulation/FPGA */
 +      count = BNX2X_NVRAM_TIMEOUT_COUNT;
 +      if (CHIP_REV_IS_SLOW(bp))
 +              count *= 100;
 +
 +      /* request access to nvram interface */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
 +             (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
 +
 +      for (i = 0; i < count*10; i++) {
 +              val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
 +              if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
 +                      break;
 +
 +              udelay(5);
 +      }
 +
 +      if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
 +              DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
 +              return -EBUSY;
 +      }
 +
 +      return 0;
 +}
 +
 +static int bnx2x_release_nvram_lock(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      int count, i;
 +      u32 val = 0;
 +
 +      /* adjust timeout for emulation/FPGA */
 +      count = BNX2X_NVRAM_TIMEOUT_COUNT;
 +      if (CHIP_REV_IS_SLOW(bp))
 +              count *= 100;
 +
 +      /* relinquish nvram interface */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
 +             (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
 +
 +      for (i = 0; i < count*10; i++) {
 +              val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
 +              if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
 +                      break;
 +
 +              udelay(5);
 +      }
 +
 +      if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
 +              DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
 +              return -EBUSY;
 +      }
 +
 +      return 0;
 +}
 +
 +static void bnx2x_enable_nvram_access(struct bnx2x *bp)
 +{
 +      u32 val;
 +
 +      val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
 +
 +      /* enable both bits, even on read */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
 +             (val | MCPR_NVM_ACCESS_ENABLE_EN |
 +                    MCPR_NVM_ACCESS_ENABLE_WR_EN));
 +}
 +
 +static void bnx2x_disable_nvram_access(struct bnx2x *bp)
 +{
 +      u32 val;
 +
 +      val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
 +
 +      /* disable both bits, even after read */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
 +             (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
 +                      MCPR_NVM_ACCESS_ENABLE_WR_EN)));
 +}
 +
 +static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
 +                                u32 cmd_flags)
 +{
 +      int count, i, rc;
 +      u32 val;
 +
 +      /* build the command word */
 +      cmd_flags |= MCPR_NVM_COMMAND_DOIT;
 +
 +      /* need to clear DONE bit separately */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
 +
 +      /* address of the NVRAM to read from */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
 +             (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
 +
 +      /* issue a read command */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
 +
 +      /* adjust timeout for emulation/FPGA */
 +      count = BNX2X_NVRAM_TIMEOUT_COUNT;
 +      if (CHIP_REV_IS_SLOW(bp))
 +              count *= 100;
 +
 +      /* wait for completion */
 +      *ret_val = 0;
 +      rc = -EBUSY;
 +      for (i = 0; i < count; i++) {
 +              udelay(5);
 +              val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
 +
 +              if (val & MCPR_NVM_COMMAND_DONE) {
 +                      val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
  +                      /* we read nvram data in cpu order
  +                       * but ethtool sees it as an array of bytes;
  +                       * converting to big-endian gives the expected order */
 +                      *ret_val = cpu_to_be32(val);
 +                      rc = 0;
 +                      break;
 +              }
 +      }
 +
 +      return rc;
 +}
 +
 +static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
 +                          int buf_size)
 +{
 +      int rc;
 +      u32 cmd_flags;
 +      __be32 val;
 +
 +      if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
 +              DP(BNX2X_MSG_NVM,
 +                 "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
 +                 offset, buf_size);
 +              return -EINVAL;
 +      }
 +
 +      if (offset + buf_size > bp->common.flash_size) {
 +              DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
 +                                " buf_size (0x%x) > flash_size (0x%x)\n",
 +                 offset, buf_size, bp->common.flash_size);
 +              return -EINVAL;
 +      }
 +
 +      /* request access to nvram interface */
 +      rc = bnx2x_acquire_nvram_lock(bp);
 +      if (rc)
 +              return rc;
 +
 +      /* enable access to nvram interface */
 +      bnx2x_enable_nvram_access(bp);
 +
 +      /* read the first word(s) */
 +      cmd_flags = MCPR_NVM_COMMAND_FIRST;
 +      while ((buf_size > sizeof(u32)) && (rc == 0)) {
 +              rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
 +              memcpy(ret_buf, &val, 4);
 +
 +              /* advance to the next dword */
 +              offset += sizeof(u32);
 +              ret_buf += sizeof(u32);
 +              buf_size -= sizeof(u32);
 +              cmd_flags = 0;
 +      }
 +
 +      if (rc == 0) {
 +              cmd_flags |= MCPR_NVM_COMMAND_LAST;
 +              rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
 +              memcpy(ret_buf, &val, 4);
 +      }
 +
 +      /* disable access to nvram interface */
 +      bnx2x_disable_nvram_access(bp);
 +      bnx2x_release_nvram_lock(bp);
 +
 +      return rc;
 +}
 +
 +static int bnx2x_get_eeprom(struct net_device *dev,
 +                          struct ethtool_eeprom *eeprom, u8 *eebuf)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int rc;
 +
 +      if (!netif_running(dev))
 +              return -EAGAIN;
 +
 +      DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
 +         "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
 +         eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
 +         eeprom->len, eeprom->len);
 +
 +      /* parameters already validated in ethtool_get_eeprom */
 +
 +      rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
 +
 +      return rc;
 +}
 +
 +static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
 +                                 u32 cmd_flags)
 +{
 +      int count, i, rc;
 +
 +      /* build the command word */
 +      cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
 +
 +      /* need to clear DONE bit separately */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
 +
 +      /* write the data */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
 +
 +      /* address of the NVRAM to write to */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
 +             (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
 +
 +      /* issue the write command */
 +      REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
 +
 +      /* adjust timeout for emulation/FPGA */
 +      count = BNX2X_NVRAM_TIMEOUT_COUNT;
 +      if (CHIP_REV_IS_SLOW(bp))
 +              count *= 100;
 +
 +      /* wait for completion */
 +      rc = -EBUSY;
 +      for (i = 0; i < count; i++) {
 +              udelay(5);
 +              val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
 +              if (val & MCPR_NVM_COMMAND_DONE) {
 +                      rc = 0;
 +                      break;
 +              }
 +      }
 +
 +      return rc;
 +}
 +
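  +/* Bit offset of a byte within its containing dword; single-byte NVRAM
  + * writes below are done as a read-modify-write of the aligned dword.
  + */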
 +#define BYTE_OFFSET(offset)           (8 * (offset & 0x03))
 +
 +static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
 +                            int buf_size)
 +{
 +      int rc;
 +      u32 cmd_flags;
 +      u32 align_offset;
 +      __be32 val;
 +
 +      if (offset + buf_size > bp->common.flash_size) {
 +              DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
 +                                " buf_size (0x%x) > flash_size (0x%x)\n",
 +                 offset, buf_size, bp->common.flash_size);
 +              return -EINVAL;
 +      }
 +
 +      /* request access to nvram interface */
 +      rc = bnx2x_acquire_nvram_lock(bp);
 +      if (rc)
 +              return rc;
 +
 +      /* enable access to nvram interface */
 +      bnx2x_enable_nvram_access(bp);
 +
 +      cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
 +      align_offset = (offset & ~0x03);
 +      rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
 +
 +      if (rc == 0) {
 +              val &= ~(0xff << BYTE_OFFSET(offset));
 +              val |= (*data_buf << BYTE_OFFSET(offset));
 +
 +              /* nvram data is returned as an array of bytes
 +               * convert it back to cpu order */
 +              val = be32_to_cpu(val);
 +
 +              rc = bnx2x_nvram_write_dword(bp, align_offset, val,
 +                                           cmd_flags);
 +      }
 +
 +      /* disable access to nvram interface */
 +      bnx2x_disable_nvram_access(bp);
 +      bnx2x_release_nvram_lock(bp);
 +
 +      return rc;
 +}
 +
 +static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
 +                           int buf_size)
 +{
 +      int rc;
 +      u32 cmd_flags;
 +      u32 val;
 +      u32 written_so_far;
 +
 +      if (buf_size == 1)      /* ethtool */
 +              return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
 +
 +      if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
 +              DP(BNX2X_MSG_NVM,
 +                 "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
 +                 offset, buf_size);
 +              return -EINVAL;
 +      }
 +
 +      if (offset + buf_size > bp->common.flash_size) {
 +              DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
 +                                " buf_size (0x%x) > flash_size (0x%x)\n",
 +                 offset, buf_size, bp->common.flash_size);
 +              return -EINVAL;
 +      }
 +
 +      /* request access to nvram interface */
 +      rc = bnx2x_acquire_nvram_lock(bp);
 +      if (rc)
 +              return rc;
 +
 +      /* enable access to nvram interface */
 +      bnx2x_enable_nvram_access(bp);
 +
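  +      /* program one dword at a time: LAST is set on the final dword of the
  +       * buffer or of the current NVRAM page, FIRST again at each page start,
  +       * so a single command sequence never crosses a page boundary
  +       */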
 +      written_so_far = 0;
 +      cmd_flags = MCPR_NVM_COMMAND_FIRST;
 +      while ((written_so_far < buf_size) && (rc == 0)) {
 +              if (written_so_far == (buf_size - sizeof(u32)))
 +                      cmd_flags |= MCPR_NVM_COMMAND_LAST;
 +              else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
 +                      cmd_flags |= MCPR_NVM_COMMAND_LAST;
 +              else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
 +                      cmd_flags |= MCPR_NVM_COMMAND_FIRST;
 +
 +              memcpy(&val, data_buf, 4);
 +
 +              rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
 +
 +              /* advance to the next dword */
 +              offset += sizeof(u32);
 +              data_buf += sizeof(u32);
 +              written_so_far += sizeof(u32);
 +              cmd_flags = 0;
 +      }
 +
 +      /* disable access to nvram interface */
 +      bnx2x_disable_nvram_access(bp);
 +      bnx2x_release_nvram_lock(bp);
 +
 +      return rc;
 +}
 +
 +static int bnx2x_set_eeprom(struct net_device *dev,
 +                          struct ethtool_eeprom *eeprom, u8 *eebuf)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int port = BP_PORT(bp);
 +      int rc = 0;
 +      u32 ext_phy_config;
 +      if (!netif_running(dev))
 +              return -EAGAIN;
 +
 +      DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
 +         "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
 +         eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
 +         eeprom->len, eeprom->len);
 +
 +      /* parameters already validated in ethtool_set_eeprom */
 +
 +      /* PHY eeprom can be accessed only by the PMF */
 +      if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
 +          !bp->port.pmf)
 +              return -EINVAL;
 +
 +      ext_phy_config =
 +              SHMEM_RD(bp,
 +                       dev_info.port_hw_config[port].external_phy_config);
 +
 +      if (eeprom->magic == 0x50485950) {
 +              /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
 +              bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +
 +              bnx2x_acquire_phy_lock(bp);
 +              rc |= bnx2x_link_reset(&bp->link_params,
 +                                     &bp->link_vars, 0);
 +              if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
 +                                      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
 +                      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
 +                                     MISC_REGISTERS_GPIO_HIGH, port);
 +              bnx2x_release_phy_lock(bp);
 +              bnx2x_link_report(bp);
 +
 +      } else if (eeprom->magic == 0x50485952) {
 +              /* 'PHYR' (0x50485952): re-init link after FW upgrade */
 +              if (bp->state == BNX2X_STATE_OPEN) {
 +                      bnx2x_acquire_phy_lock(bp);
 +                      rc |= bnx2x_link_reset(&bp->link_params,
 +                                             &bp->link_vars, 1);
 +
 +                      rc |= bnx2x_phy_init(&bp->link_params,
 +                                           &bp->link_vars);
 +                      bnx2x_release_phy_lock(bp);
 +                      bnx2x_calc_fc_adv(bp);
 +              }
 +      } else if (eeprom->magic == 0x53985943) {
 +              /* 'PHYC' (0x53985943): PHY FW upgrade completed */
 +              if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
 +                                     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
 +
 +                      /* DSP Remove Download Mode */
 +                      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
 +                                     MISC_REGISTERS_GPIO_LOW, port);
 +
 +                      bnx2x_acquire_phy_lock(bp);
 +
 +                      bnx2x_sfx7101_sp_sw_reset(bp,
 +                                              &bp->link_params.phy[EXT_PHY1]);
 +
 +                      /* wait 0.5 sec to allow it to run */
 +                      msleep(500);
 +                      bnx2x_ext_phy_hw_reset(bp, port);
 +                      msleep(500);
 +                      bnx2x_release_phy_lock(bp);
 +              }
 +      } else
 +              rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
 +
 +      return rc;
 +}
 +
 +static int bnx2x_get_coalesce(struct net_device *dev,
 +                            struct ethtool_coalesce *coal)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      memset(coal, 0, sizeof(struct ethtool_coalesce));
 +
 +      coal->rx_coalesce_usecs = bp->rx_ticks;
 +      coal->tx_coalesce_usecs = bp->tx_ticks;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_set_coalesce(struct net_device *dev,
 +                            struct ethtool_coalesce *coal)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
 +      if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
 +              bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
 +
 +      bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
 +      if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
 +              bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
 +
 +      if (netif_running(dev))
 +              bnx2x_update_coalesce(bp);
 +
 +      return 0;
 +}
 +
 +static void bnx2x_get_ringparam(struct net_device *dev,
 +                              struct ethtool_ringparam *ering)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      ering->rx_max_pending = MAX_RX_AVAIL;
 +      ering->rx_mini_max_pending = 0;
 +      ering->rx_jumbo_max_pending = 0;
 +
 +      if (bp->rx_ring_size)
 +              ering->rx_pending = bp->rx_ring_size;
 +      else
++              ering->rx_pending = MAX_RX_AVAIL;
 +
 +      ering->rx_mini_pending = 0;
 +      ering->rx_jumbo_pending = 0;
 +
 +      ering->tx_max_pending = MAX_TX_AVAIL;
 +      ering->tx_pending = bp->tx_ring_size;
 +}
 +
 +static int bnx2x_set_ringparam(struct net_device *dev,
 +                             struct ethtool_ringparam *ering)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 +              pr_err("Handling parity error recovery. Try again later\n");
 +              return -EAGAIN;
 +      }
 +
 +      if ((ering->rx_pending > MAX_RX_AVAIL) ||
 +          (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
 +                                                  MIN_RX_SIZE_TPA)) ||
 +          (ering->tx_pending > MAX_TX_AVAIL) ||
 +          (ering->tx_pending <= MAX_SKB_FRAGS + 4))
 +              return -EINVAL;
 +
 +      bp->rx_ring_size = ering->rx_pending;
 +      bp->tx_ring_size = ering->tx_pending;
 +
 +      return bnx2x_reload_if_running(dev);
 +}
 +
 +static void bnx2x_get_pauseparam(struct net_device *dev,
 +                               struct ethtool_pauseparam *epause)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int cfg_idx = bnx2x_get_link_cfg_idx(bp);
 +      epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
 +                         BNX2X_FLOW_CTRL_AUTO);
 +
 +      epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
 +                          BNX2X_FLOW_CTRL_RX);
 +      epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
 +                          BNX2X_FLOW_CTRL_TX);
 +
 +      DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
 +         "  autoneg %d  rx_pause %d  tx_pause %d\n",
 +         epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
 +}
 +
 +static int bnx2x_set_pauseparam(struct net_device *dev,
 +                              struct ethtool_pauseparam *epause)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
 +      if (IS_MF(bp))
 +              return 0;
 +
 +      DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
 +         "  autoneg %d  rx_pause %d  tx_pause %d\n",
 +         epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
 +
 +      bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
 +
 +      if (epause->rx_pause)
 +              bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
 +
 +      if (epause->tx_pause)
 +              bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
 +
 +      if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
 +              bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
 +
 +      if (epause->autoneg) {
 +              if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
 +                      DP(NETIF_MSG_LINK, "autoneg not supported\n");
 +                      return -EINVAL;
 +              }
 +
 +              if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
 +                      bp->link_params.req_flow_ctrl[cfg_idx] =
 +                              BNX2X_FLOW_CTRL_AUTO;
 +              }
 +      }
 +
 +      DP(NETIF_MSG_LINK,
 +         "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
 +
 +      if (netif_running(dev)) {
 +              bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +              bnx2x_link_set(bp);
 +      }
 +
 +      return 0;
 +}
 +
 +static const struct {
 +      char string[ETH_GSTRING_LEN];
 +} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
 +      { "register_test (offline)" },
 +      { "memory_test (offline)" },
 +      { "loopback_test (offline)" },
 +      { "nvram_test (online)" },
 +      { "interrupt_test (online)" },
 +      { "link_test (online)" },
 +      { "idle check (online)" }
 +};
 +
 +enum {
 +      BNX2X_CHIP_E1_OFST = 0,
 +      BNX2X_CHIP_E1H_OFST,
 +      BNX2X_CHIP_E2_OFST,
 +      BNX2X_CHIP_E3_OFST,
 +      BNX2X_CHIP_E3B0_OFST,
 +      BNX2X_CHIP_MAX_OFST
 +};
 +
 +#define BNX2X_CHIP_MASK_E1    (1 << BNX2X_CHIP_E1_OFST)
 +#define BNX2X_CHIP_MASK_E1H   (1 << BNX2X_CHIP_E1H_OFST)
 +#define BNX2X_CHIP_MASK_E2    (1 << BNX2X_CHIP_E2_OFST)
 +#define BNX2X_CHIP_MASK_E3    (1 << BNX2X_CHIP_E3_OFST)
 +#define BNX2X_CHIP_MASK_E3B0  (1 << BNX2X_CHIP_E3B0_OFST)
 +
 +#define BNX2X_CHIP_MASK_ALL   ((1 << BNX2X_CHIP_MAX_OFST) - 1)
 +#define BNX2X_CHIP_MASK_E1X   (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
 +
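  +/* Walk reg_tbl twice, writing 0x00000000 and then 0xffffffff through each
  + * entry's mask, skipping entries whose hw mask does not cover the running
  + * chip; every register is restored to its original value after the check.
  + */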
 +static int bnx2x_test_registers(struct bnx2x *bp)
 +{
 +      int idx, i, rc = -ENODEV;
 +      u32 wr_val = 0, hw;
 +      int port = BP_PORT(bp);
 +      static const struct {
 +              u32 hw;
 +              u32 offset0;
 +              u32 offset1;
 +              u32 mask;
 +      } reg_tbl[] = {
 +/* 0 */               { BNX2X_CHIP_MASK_ALL,
 +                      BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      DORQ_REG_DB_ADDR0,              4, 0xffffffff },
 +              { BNX2X_CHIP_MASK_E1X,
 +                      HC_REG_AGG_INT_0,               4, 0x000003ff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      PBF_REG_MAC_IF0_ENABLE,         4, 0x00000001 },
 +              { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
 +                      PBF_REG_P0_INIT_CRD,            4, 0x000007ff },
 +              { BNX2X_CHIP_MASK_E3B0,
 +                      PBF_REG_INIT_CRD_Q0,            4, 0x000007ff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      PRS_REG_CID_PORT_0,             4, 0x00ffffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      PXP2_REG_PSWRQ_CDU0_L2P,        4, 0x000fffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      PXP2_REG_PSWRQ_TM0_L2P,         4, 0x000fffff },
 +/* 10 */      { BNX2X_CHIP_MASK_ALL,
 +                      PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      PXP2_REG_PSWRQ_TSDM0_L2P,       4, 0x000fffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      QM_REG_CONNNUM_0,               4, 0x000fffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      TM_REG_LIN0_MAX_ACTIVE_CID,     4, 0x0003ffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      SRC_REG_KEYRSS0_0,              40, 0xffffffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      SRC_REG_KEYRSS0_7,              40, 0xffffffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      XCM_REG_WU_DA_CNT_CMD00,        4, 0x00000003 },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      XCM_REG_GLB_DEL_ACK_MAX_CNT_0,  4, 0x000000ff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_T_BIT,             4, 0x00000001 },
 +/* 20 */      { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
 +                      NIG_REG_EMAC0_IN_EN,            4, 0x00000001 },
 +              { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
 +                      NIG_REG_BMAC0_IN_EN,            4, 0x00000001 },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_XCM0_OUT_EN,            4, 0x00000001 },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_BRB0_OUT_EN,            4, 0x00000001 },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_XCM_MASK,          4, 0x00000007 },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_ACPI_PAT_6_LEN,    68, 0x000000ff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_ACPI_PAT_0_CRC,    68, 0xffffffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_DEST_MAC_0_0,      160, 0xffffffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_DEST_IP_0_1,       160, 0xffffffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_IPV4_IPV6_0,       160, 0x00000001 },
 +/* 30 */      { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_DEST_UDP_0,        160, 0x0000ffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_DEST_TCP_0,        160, 0x0000ffff },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
 +              { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
 +                      NIG_REG_XGXS_SERDES0_MODE_SEL,  4, 0x00000001 },
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
 +              { BNX2X_CHIP_MASK_ALL,
 +                      NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
 +              { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
 +                      NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
 +              { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
 +                      NIG_REG_SERDES0_CTRL_PHY_ADDR,  16, 0x0000001f },
 +
 +              { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
 +      };
 +
 +      if (!netif_running(bp->dev))
 +              return rc;
 +
 +      if (CHIP_IS_E1(bp))
 +              hw = BNX2X_CHIP_MASK_E1;
 +      else if (CHIP_IS_E1H(bp))
 +              hw = BNX2X_CHIP_MASK_E1H;
 +      else if (CHIP_IS_E2(bp))
 +              hw = BNX2X_CHIP_MASK_E2;
 +      else if (CHIP_IS_E3B0(bp))
 +              hw = BNX2X_CHIP_MASK_E3B0;
 +      else /* e3 A0 */
 +              hw = BNX2X_CHIP_MASK_E3;
 +
  +      /* Repeat the test twice:
  +       * first by writing 0x00000000, then by writing 0xffffffff */
 +      for (idx = 0; idx < 2; idx++) {
 +
 +              switch (idx) {
 +              case 0:
 +                      wr_val = 0;
 +                      break;
 +              case 1:
 +                      wr_val = 0xffffffff;
 +                      break;
 +              }
 +
 +              for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
 +                      u32 offset, mask, save_val, val;
 +                      if (!(hw & reg_tbl[i].hw))
 +                              continue;
 +
 +                      offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
 +                      mask = reg_tbl[i].mask;
 +
 +                      save_val = REG_RD(bp, offset);
 +
 +                      REG_WR(bp, offset, wr_val & mask);
 +
 +                      val = REG_RD(bp, offset);
 +
 +                      /* Restore the original register's value */
 +                      REG_WR(bp, offset, save_val);
 +
 +                      /* verify value is as expected */
 +                      if ((val & mask) != (wr_val & mask)) {
 +                              DP(NETIF_MSG_HW,
 +                                 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
 +                                 offset, val, wr_val, mask);
 +                              goto test_reg_exit;
 +                      }
 +              }
 +      }
 +
 +      rc = 0;
 +
 +test_reg_exit:
 +      return rc;
 +}
 +
 +static int bnx2x_test_memory(struct bnx2x *bp)
 +{
 +      int i, j, rc = -ENODEV;
 +      u32 val, index;
 +      static const struct {
 +              u32 offset;
 +              int size;
 +      } mem_tbl[] = {
 +              { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
 +              { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
 +              { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
 +              { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
 +              { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
 +              { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
 +              { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
 +
 +              { 0xffffffff, 0 }
 +      };
 +
 +      static const struct {
 +              char *name;
 +              u32 offset;
 +              u32 hw_mask[BNX2X_CHIP_MAX_OFST];
 +      } prty_tbl[] = {
 +              { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,
 +                      {0x3ffc0, 0,   0, 0} },
 +              { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,
 +                      {0x2,     0x2, 0, 0} },
 +              { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
 +                      {0,       0,   0, 0} },
 +              { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,
 +                      {0x3ffc0, 0,   0, 0} },
 +              { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,
 +                      {0x3ffc0, 0,   0, 0} },
 +              { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,
 +                      {0x3ffc1, 0,   0, 0} },
 +
 +              { NULL, 0xffffffff, {0, 0, 0, 0} }
 +      };
 +
 +      if (!netif_running(bp->dev))
 +              return rc;
 +
 +      if (CHIP_IS_E1(bp))
 +              index = BNX2X_CHIP_E1_OFST;
 +      else if (CHIP_IS_E1H(bp))
 +              index = BNX2X_CHIP_E1H_OFST;
 +      else if (CHIP_IS_E2(bp))
 +              index = BNX2X_CHIP_E2_OFST;
 +      else /* e3 */
 +              index = BNX2X_CHIP_E3_OFST;
 +
  +      /* Pre-check the parity status */
 +      for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
 +              val = REG_RD(bp, prty_tbl[i].offset);
 +              if (val & ~(prty_tbl[i].hw_mask[index])) {
 +                      DP(NETIF_MSG_HW,
 +                         "%s is 0x%x\n", prty_tbl[i].name, val);
 +                      goto test_mem_exit;
 +              }
 +      }
 +
 +      /* Go through all the memories */
 +      for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
 +              for (j = 0; j < mem_tbl[i].size; j++)
 +                      REG_RD(bp, mem_tbl[i].offset + j*4);
 +
 +      /* Check the parity status */
 +      for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
 +              val = REG_RD(bp, prty_tbl[i].offset);
 +              if (val & ~(prty_tbl[i].hw_mask[index])) {
 +                      DP(NETIF_MSG_HW,
 +                         "%s is 0x%x\n", prty_tbl[i].name, val);
 +                      goto test_mem_exit;
 +              }
 +      }
 +
 +      rc = 0;
 +
 +test_mem_exit:
 +      return rc;
 +}
 +
 +static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
 +{
 +      int cnt = 1400;
 +
 +      if (link_up) {
 +              while (bnx2x_link_test(bp, is_serdes) && cnt--)
 +                      msleep(20);
 +
 +              if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
 +                      DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
 +      }
 +}
 +
 +static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 +{
 +      unsigned int pkt_size, num_pkts, i;
 +      struct sk_buff *skb;
 +      unsigned char *packet;
 +      struct bnx2x_fastpath *fp_rx = &bp->fp[0];
 +      struct bnx2x_fastpath *fp_tx = &bp->fp[0];
 +      struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
 +      u16 tx_start_idx, tx_idx;
 +      u16 rx_start_idx, rx_idx;
 +      u16 pkt_prod, bd_prod, rx_comp_cons;
 +      struct sw_tx_bd *tx_buf;
 +      struct eth_tx_start_bd *tx_start_bd;
 +      struct eth_tx_parse_bd_e1x  *pbd_e1x = NULL;
 +      struct eth_tx_parse_bd_e2  *pbd_e2 = NULL;
 +      dma_addr_t mapping;
 +      union eth_rx_cqe *cqe;
 +      u8 cqe_fp_flags, cqe_fp_type;
 +      struct sw_rx_bd *rx_buf;
 +      u16 len;
 +      int rc = -ENODEV;
 +
 +      /* check the loopback mode */
 +      switch (loopback_mode) {
 +      case BNX2X_PHY_LOOPBACK:
 +              if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
 +                      return -EINVAL;
 +              break;
 +      case BNX2X_MAC_LOOPBACK:
 +              bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
 +                                              LOOPBACK_XMAC : LOOPBACK_BMAC;
 +              bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      /* prepare the loopback packet */
 +      pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
 +                   bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
 +      skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
 +      if (!skb) {
 +              rc = -ENOMEM;
 +              goto test_loopback_exit;
 +      }
 +      packet = skb_put(skb, pkt_size);
 +      memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
 +      memset(packet + ETH_ALEN, 0, ETH_ALEN);
 +      memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
 +      for (i = ETH_HLEN; i < pkt_size; i++)
 +              packet[i] = (unsigned char) (i & 0xff);
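  +      /* the incrementing byte pattern above is verified on the RX side
  +       * after the loopback to confirm payload integrity
  +       */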
 +      mapping = dma_map_single(&bp->pdev->dev, skb->data,
 +                               skb_headlen(skb), DMA_TO_DEVICE);
 +      if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 +              rc = -ENOMEM;
 +              dev_kfree_skb(skb);
 +              BNX2X_ERR("Unable to map SKB\n");
 +              goto test_loopback_exit;
 +      }
 +
 +      /* send the loopback packet */
 +      num_pkts = 0;
 +      tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
 +      rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
 +
 +      pkt_prod = txdata->tx_pkt_prod++;
 +      tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
 +      tx_buf->first_bd = txdata->tx_bd_prod;
 +      tx_buf->skb = skb;
 +      tx_buf->flags = 0;
 +
 +      bd_prod = TX_BD(txdata->tx_bd_prod);
 +      tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
 +      tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +      tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +      tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
 +      tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 +      tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
 +      tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 +      SET_FLAG(tx_start_bd->general_data,
 +               ETH_TX_START_BD_ETH_ADDR_TYPE,
 +               UNICAST_ADDRESS);
 +      SET_FLAG(tx_start_bd->general_data,
 +               ETH_TX_START_BD_HDR_NBDS,
 +               1);
 +
 +      /* turn on parsing and get a BD */
 +      bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +
 +      pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
 +      pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
 +
 +      memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
 +      memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
 +
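  +      /* make sure the BD and parsing-BD writes above are visible before the
  +       * producer update and doorbell below
  +       */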
 +      wmb();
 +
 +      txdata->tx_db.data.prod += 2;
 +      barrier();
 +      DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
 +
 +      mmiowb();
 +      barrier();
 +
 +      num_pkts++;
 +      txdata->tx_bd_prod += 2; /* start + pbd */
 +
 +      udelay(100);
 +
 +      tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
 +      if (tx_idx != tx_start_idx + num_pkts)
 +              goto test_loopback_exit;
 +
 +      /* Unlike HC IGU won't generate an interrupt for status block
 +       * updates that have been performed while interrupts were
 +       * disabled.
 +       */
 +      if (bp->common.int_block == INT_BLOCK_IGU) {
  +              /* Disable local BHs to prevent a deadlock between
  +               * sch_direct_xmit() and bnx2x_run_loopback() (calling
  +               * bnx2x_tx_int()), as both take netif_tx_lock().
 +               */
 +              local_bh_disable();
 +              bnx2x_tx_int(bp, txdata);
 +              local_bh_enable();
 +      }
 +
 +      rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
 +      if (rx_idx != rx_start_idx + num_pkts)
 +              goto test_loopback_exit;
 +
 +      rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
 +      cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
 +      cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
 +      cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 +      if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
 +              goto test_loopback_rx_exit;
 +
 +      len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
 +      if (len != pkt_size)
 +              goto test_loopback_rx_exit;
 +
 +      rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
 +      dma_sync_single_for_cpu(&bp->pdev->dev,
 +                                 dma_unmap_addr(rx_buf, mapping),
 +                                 fp_rx->rx_buf_size, DMA_FROM_DEVICE);
 +      skb = rx_buf->skb;
 +      skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
 +      for (i = ETH_HLEN; i < pkt_size; i++)
 +              if (*(skb->data + i) != (unsigned char) (i & 0xff))
 +                      goto test_loopback_rx_exit;
 +
 +      rc = 0;
 +
 +test_loopback_rx_exit:
 +
 +      fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
 +      fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
 +      fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
 +      fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
 +
 +      /* Update producers */
 +      bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
 +                           fp_rx->rx_sge_prod);
 +
 +test_loopback_exit:
 +      bp->link_params.loopback_mode = LOOPBACK_NONE;
 +
 +      return rc;
 +}
 +
 +static int bnx2x_test_loopback(struct bnx2x *bp)
 +{
 +      int rc = 0, res;
 +
 +      if (BP_NOMCP(bp))
 +              return rc;
 +
 +      if (!netif_running(bp->dev))
 +              return BNX2X_LOOPBACK_FAILED;
 +
 +      bnx2x_netif_stop(bp, 1);
 +      bnx2x_acquire_phy_lock(bp);
 +
 +      res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
 +      if (res) {
 +              DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
 +              rc |= BNX2X_PHY_LOOPBACK_FAILED;
 +      }
 +
 +      res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
 +      if (res) {
 +              DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
 +              rc |= BNX2X_MAC_LOOPBACK_FAILED;
 +      }
 +
 +      bnx2x_release_phy_lock(bp);
 +      bnx2x_netif_start(bp);
 +
 +      return rc;
 +}
 +
 +#define CRC32_RESIDUAL                        0xdebb20e3
 +
 +static int bnx2x_test_nvram(struct bnx2x *bp)
 +{
 +      static const struct {
 +              int offset;
 +              int size;
 +      } nvram_tbl[] = {
 +              {     0,  0x14 }, /* bootstrap */
 +              {  0x14,  0xec }, /* dir */
 +              { 0x100, 0x350 }, /* manuf_info */
 +              { 0x450,  0xf0 }, /* feature_info */
 +              { 0x640,  0x64 }, /* upgrade_key_info */
 +              { 0x708,  0x70 }, /* manuf_key_info */
 +              {     0,     0 }
 +      };
 +      __be32 buf[0x350 / 4];
 +      u8 *data = (u8 *)buf;
 +      int i, rc;
 +      u32 magic, crc;
 +
 +      if (BP_NOMCP(bp))
 +              return 0;
 +
 +      rc = bnx2x_nvram_read(bp, 0, data, 4);
 +      if (rc) {
 +              DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
 +              goto test_nvram_exit;
 +      }
 +
 +      magic = be32_to_cpu(buf[0]);
 +      if (magic != 0x669955aa) {
 +              DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
 +              rc = -ENODEV;
 +              goto test_nvram_exit;
 +      }
 +
 +      for (i = 0; nvram_tbl[i].size; i++) {
 +
 +              rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
 +                                    nvram_tbl[i].size);
 +              if (rc) {
 +                      DP(NETIF_MSG_PROBE,
 +                         "nvram_tbl[%d] read data (rc %d)\n", i, rc);
 +                      goto test_nvram_exit;
 +              }
 +
 +              crc = ether_crc_le(nvram_tbl[i].size, data);
 +              if (crc != CRC32_RESIDUAL) {
 +                      DP(NETIF_MSG_PROBE,
 +                         "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
 +                      rc = -ENODEV;
 +                      goto test_nvram_exit;
 +              }
 +      }
 +
 +test_nvram_exit:
 +      return rc;
 +}
 +
 +/* Send an EMPTY ramrod on the first queue */
 +static int bnx2x_test_intr(struct bnx2x *bp)
 +{
 +      struct bnx2x_queue_state_params params = {0};
 +
 +      if (!netif_running(bp->dev))
 +              return -ENODEV;
 +
 +      params.q_obj = &bp->fp->q_obj;
 +      params.cmd = BNX2X_Q_CMD_EMPTY;
 +
 +      __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 +
 +      return bnx2x_queue_state_change(bp, &params);
 +}
 +
 +static void bnx2x_self_test(struct net_device *dev,
 +                          struct ethtool_test *etest, u64 *buf)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      u8 is_serdes;
 +      if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 +              pr_err("Handling parity error recovery. Try again later\n");
 +              etest->flags |= ETH_TEST_FL_FAILED;
 +              return;
 +      }
 +
 +      memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
 +
 +      if (!netif_running(dev))
 +              return;
 +
 +      /* offline tests are not supported in MF mode */
 +      if (IS_MF(bp))
 +              etest->flags &= ~ETH_TEST_FL_OFFLINE;
 +      is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
 +
 +      if (etest->flags & ETH_TEST_FL_OFFLINE) {
 +              int port = BP_PORT(bp);
 +              u32 val;
 +              u8 link_up;
 +
 +              /* save current value of input enable for TX port IF */
 +              val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
 +              /* disable input for TX port IF */
 +              REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
 +
 +              link_up = bp->link_vars.link_up;
 +
 +              bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 +              bnx2x_nic_load(bp, LOAD_DIAG);
 +              /* wait until link state is restored */
 +              bnx2x_wait_for_link(bp, 1, is_serdes);
 +
 +              if (bnx2x_test_registers(bp) != 0) {
 +                      buf[0] = 1;
 +                      etest->flags |= ETH_TEST_FL_FAILED;
 +              }
 +              if (bnx2x_test_memory(bp) != 0) {
 +                      buf[1] = 1;
 +                      etest->flags |= ETH_TEST_FL_FAILED;
 +              }
 +
 +              buf[2] = bnx2x_test_loopback(bp);
 +              if (buf[2] != 0)
 +                      etest->flags |= ETH_TEST_FL_FAILED;
 +
 +              bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 +
 +              /* restore input for TX port IF */
 +              REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
 +
 +              bnx2x_nic_load(bp, LOAD_NORMAL);
 +              /* wait until link state is restored */
 +              bnx2x_wait_for_link(bp, link_up, is_serdes);
 +      }
 +      if (bnx2x_test_nvram(bp) != 0) {
 +              buf[3] = 1;
 +              etest->flags |= ETH_TEST_FL_FAILED;
 +      }
 +      if (bnx2x_test_intr(bp) != 0) {
 +              buf[4] = 1;
 +              etest->flags |= ETH_TEST_FL_FAILED;
 +      }
 +
 +      if (bnx2x_link_test(bp, is_serdes) != 0) {
 +              buf[5] = 1;
 +              etest->flags |= ETH_TEST_FL_FAILED;
 +      }
 +
 +#ifdef BNX2X_EXTRA_DEBUG
 +      bnx2x_panic_dump(bp);
 +#endif
 +}
 +
 +#define IS_PORT_STAT(i) \
 +      ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
 +#define IS_FUNC_STAT(i)               (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
 +#define IS_MF_MODE_STAT(bp) \
 +                      (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
 +
  +/* ethtool per-queue statistics are reported only for the regular ethernet
  + * queues; the FCoE L2 queue is not included in this count
  + */
 +static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
 +{
 +      return BNX2X_NUM_ETH_QUEUES(bp);
 +}
 +
 +static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int i, num_stats;
 +
 +      switch (stringset) {
 +      case ETH_SS_STATS:
 +              if (is_multi(bp)) {
 +                      num_stats = bnx2x_num_stat_queues(bp) *
 +                              BNX2X_NUM_Q_STATS;
 +                      if (!IS_MF_MODE_STAT(bp))
 +                              num_stats += BNX2X_NUM_STATS;
 +              } else {
 +                      if (IS_MF_MODE_STAT(bp)) {
 +                              num_stats = 0;
 +                              for (i = 0; i < BNX2X_NUM_STATS; i++)
 +                                      if (IS_FUNC_STAT(i))
 +                                              num_stats++;
 +                      } else
 +                              num_stats = BNX2X_NUM_STATS;
 +              }
 +              return num_stats;
 +
 +      case ETH_SS_TEST:
 +              return BNX2X_NUM_TESTS;
 +
 +      default:
 +              return -EINVAL;
 +      }
 +}
 +
 +static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int i, j, k;
 +      char queue_name[MAX_QUEUE_NAME_LEN+1];
 +
 +      switch (stringset) {
 +      case ETH_SS_STATS:
 +              if (is_multi(bp)) {
 +                      k = 0;
 +                      for_each_eth_queue(bp, i) {
 +                              memset(queue_name, 0, sizeof(queue_name));
 +                              sprintf(queue_name, "%d", i);
 +                              for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
 +                                      snprintf(buf + (k + j)*ETH_GSTRING_LEN,
 +                                              ETH_GSTRING_LEN,
 +                                              bnx2x_q_stats_arr[j].string,
 +                                              queue_name);
 +                              k += BNX2X_NUM_Q_STATS;
 +                      }
 +                      if (IS_MF_MODE_STAT(bp))
 +                              break;
 +                      for (j = 0; j < BNX2X_NUM_STATS; j++)
 +                              strcpy(buf + (k + j)*ETH_GSTRING_LEN,
 +                                     bnx2x_stats_arr[j].string);
 +              } else {
 +                      for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
 +                              if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
 +                                      continue;
 +                              strcpy(buf + j*ETH_GSTRING_LEN,
 +                                     bnx2x_stats_arr[i].string);
 +                              j++;
 +                      }
 +              }
 +              break;
 +
 +      case ETH_SS_TEST:
 +              memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
 +              break;
 +      }
 +}
 +
 +static void bnx2x_get_ethtool_stats(struct net_device *dev,
 +                                  struct ethtool_stats *stats, u64 *buf)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      u32 *hw_stats, *offset;
 +      int i, j, k;
 +
 +      if (is_multi(bp)) {
 +              k = 0;
 +              for_each_eth_queue(bp, i) {
 +                      hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
 +                      for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
 +                              if (bnx2x_q_stats_arr[j].size == 0) {
 +                                      /* skip this counter */
 +                                      buf[k + j] = 0;
 +                                      continue;
 +                              }
 +                              offset = (hw_stats +
 +                                        bnx2x_q_stats_arr[j].offset);
 +                              if (bnx2x_q_stats_arr[j].size == 4) {
 +                                      /* 4-byte counter */
 +                                      buf[k + j] = (u64) *offset;
 +                                      continue;
 +                              }
 +                              /* 8-byte counter */
 +                              buf[k + j] = HILO_U64(*offset, *(offset + 1));
 +                      }
 +                      k += BNX2X_NUM_Q_STATS;
 +              }
 +              if (IS_MF_MODE_STAT(bp))
 +                      return;
 +              hw_stats = (u32 *)&bp->eth_stats;
 +              for (j = 0; j < BNX2X_NUM_STATS; j++) {
 +                      if (bnx2x_stats_arr[j].size == 0) {
 +                              /* skip this counter */
 +                              buf[k + j] = 0;
 +                              continue;
 +                      }
 +                      offset = (hw_stats + bnx2x_stats_arr[j].offset);
 +                      if (bnx2x_stats_arr[j].size == 4) {
 +                              /* 4-byte counter */
 +                              buf[k + j] = (u64) *offset;
 +                              continue;
 +                      }
 +                      /* 8-byte counter */
 +                      buf[k + j] = HILO_U64(*offset, *(offset + 1));
 +              }
 +      } else {
 +              hw_stats = (u32 *)&bp->eth_stats;
 +              for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
 +                      if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
 +                              continue;
 +                      if (bnx2x_stats_arr[i].size == 0) {
 +                              /* skip this counter */
 +                              buf[j] = 0;
 +                              j++;
 +                              continue;
 +                      }
 +                      offset = (hw_stats + bnx2x_stats_arr[i].offset);
 +                      if (bnx2x_stats_arr[i].size == 4) {
 +                              /* 4-byte counter */
 +                              buf[j] = (u64) *offset;
 +                              j++;
 +                              continue;
 +                      }
 +                      /* 8-byte counter */
 +                      buf[j] = HILO_U64(*offset, *(offset + 1));
 +                      j++;
 +              }
 +      }
 +}
 +
 +static int bnx2x_set_phys_id(struct net_device *dev,
 +                           enum ethtool_phys_id_state state)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (!netif_running(dev))
 +              return -EAGAIN;
 +
 +      if (!bp->port.pmf)
 +              return -EOPNOTSUPP;
 +
 +      switch (state) {
 +      case ETHTOOL_ID_ACTIVE:
 +              return 1;       /* cycle on/off once per second */
 +
 +      case ETHTOOL_ID_ON:
 +              bnx2x_set_led(&bp->link_params, &bp->link_vars,
 +                            LED_MODE_ON, SPEED_1000);
 +              break;
 +
 +      case ETHTOOL_ID_OFF:
 +              bnx2x_set_led(&bp->link_params, &bp->link_vars,
 +                            LED_MODE_FRONT_PANEL_OFF, 0);
 +
 +              break;
 +
 +      case ETHTOOL_ID_INACTIVE:
 +              bnx2x_set_led(&bp->link_params, &bp->link_vars,
 +                            LED_MODE_OPER,
 +                            bp->link_vars.line_speed);
 +      }
 +
 +      return 0;
 +}
 +
 +static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 +                         u32 *rules __always_unused)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      switch (info->cmd) {
 +      case ETHTOOL_GRXRINGS:
 +              info->data = BNX2X_NUM_ETH_QUEUES(bp);
 +              return 0;
 +
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
 +static int bnx2x_get_rxfh_indir(struct net_device *dev,
 +                              struct ethtool_rxfh_indir *indir)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      size_t copy_size =
 +              min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
 +      u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
 +      size_t i;
 +
 +      if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
 +              return -EOPNOTSUPP;
 +
 +      /* Get the current configuration of the RSS indirection table */
 +      bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
 +
 +      /*
 +       * We can't use a memcpy() as an internal storage of an
 +       * indirection table is a u8 array while indir->ring_index
 +       * points to an array of u32.
 +       *
 +       * Indirection table contains the FW Client IDs, so we need to
 +       * align the returned table to the Client ID of the leading RSS
 +       * queue.
 +       */
 +      for (i = 0; i < copy_size; i++)
 +              indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
 +
 +      indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_set_rxfh_indir(struct net_device *dev,
 +                              const struct ethtool_rxfh_indir *indir)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      size_t i;
 +      u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
 +      u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 +
 +      if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
 +              return -EOPNOTSUPP;
 +
 +      /* validate the size */
 +      if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
 +              return -EINVAL;
 +
 +      for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
 +              /* validate the indices */
 +              if (indir->ring_index[i] >= num_eth_queues)
 +                      return -EINVAL;
 +              /*
 +               * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
 +               * as an internal storage of an indirection table is a u8 array
 +               * while indir->ring_index points to an array of u32.
 +               *
 +               * Indirection table contains the FW Client IDs, so we need to
 +               * align the received table to the Client ID of the leading RSS
 +               * queue
 +               */
 +              ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
 +      }
 +
 +      return bnx2x_config_rss_pf(bp, ind_table, false);
 +}
 +
 +static const struct ethtool_ops bnx2x_ethtool_ops = {
 +      .get_settings           = bnx2x_get_settings,
 +      .set_settings           = bnx2x_set_settings,
 +      .get_drvinfo            = bnx2x_get_drvinfo,
 +      .get_regs_len           = bnx2x_get_regs_len,
 +      .get_regs               = bnx2x_get_regs,
 +      .get_wol                = bnx2x_get_wol,
 +      .set_wol                = bnx2x_set_wol,
 +      .get_msglevel           = bnx2x_get_msglevel,
 +      .set_msglevel           = bnx2x_set_msglevel,
 +      .nway_reset             = bnx2x_nway_reset,
 +      .get_link               = bnx2x_get_link,
 +      .get_eeprom_len         = bnx2x_get_eeprom_len,
 +      .get_eeprom             = bnx2x_get_eeprom,
 +      .set_eeprom             = bnx2x_set_eeprom,
 +      .get_coalesce           = bnx2x_get_coalesce,
 +      .set_coalesce           = bnx2x_set_coalesce,
 +      .get_ringparam          = bnx2x_get_ringparam,
 +      .set_ringparam          = bnx2x_set_ringparam,
 +      .get_pauseparam         = bnx2x_get_pauseparam,
 +      .set_pauseparam         = bnx2x_set_pauseparam,
 +      .self_test              = bnx2x_self_test,
 +      .get_sset_count         = bnx2x_get_sset_count,
 +      .get_strings            = bnx2x_get_strings,
 +      .set_phys_id            = bnx2x_set_phys_id,
 +      .get_ethtool_stats      = bnx2x_get_ethtool_stats,
 +      .get_rxnfc              = bnx2x_get_rxnfc,
 +      .get_rxfh_indir         = bnx2x_get_rxfh_indir,
 +      .set_rxfh_indir         = bnx2x_set_rxfh_indir,
 +};
 +
 +void bnx2x_set_ethtool_ops(struct net_device *netdev)
 +{
 +      SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
 +}
index 8e9b87b,0000000..818723c
mode 100644,000000..100644
--- /dev/null
@@@ -1,12480 -1,0 +1,12480 @@@
-       /* Calculate and set BW for this COS*/
-       const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
-       const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
 +/* Copyright 2008-2011 Broadcom Corporation
 + *
 + * Unless you and Broadcom execute a separate written software license
 + * agreement governing use of this software, this software is licensed to you
 + * under the terms of the GNU General Public License version 2, available
 + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 + *
 + * Notwithstanding the above, under no circumstances may you combine this
 + * software in any way with any other Broadcom software provided under a
 + * license other than the GPL, without Broadcom's express prior written
 + * consent.
 + *
 + * Written by Yaniv Rosner
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/kernel.h>
 +#include <linux/errno.h>
 +#include <linux/pci.h>
 +#include <linux/netdevice.h>
 +#include <linux/delay.h>
 +#include <linux/ethtool.h>
 +#include <linux/mutex.h>
 +
 +#include "bnx2x.h"
 +#include "bnx2x_cmn.h"
 +
 +
 +/********************************************************/
 +#define ETH_HLEN                      14
 +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 +#define ETH_OVREHEAD                  (ETH_HLEN + 8 + 8)
 +#define ETH_MIN_PACKET_SIZE           60
 +#define ETH_MAX_PACKET_SIZE           1500
 +#define ETH_MAX_JUMBO_PACKET_SIZE     9600
 +#define MDIO_ACCESS_TIMEOUT           1000
 +#define BMAC_CONTROL_RX_ENABLE                2
 +#define WC_LANE_MAX                   4
 +#define I2C_SWITCH_WIDTH              2
 +#define I2C_BSC0                      0
 +#define I2C_BSC1                      1
 +#define I2C_WA_RETRY_CNT              3
 +#define MCPR_IMC_COMMAND_READ_OP      1
 +#define MCPR_IMC_COMMAND_WRITE_OP     2
 +
 +/***********************************************************/
 +/*                    Shortcut definitions               */
 +/***********************************************************/
 +
 +#define NIG_LATCH_BC_ENABLE_MI_INT 0
 +
 +#define NIG_STATUS_EMAC0_MI_INT \
 +              NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
 +#define NIG_STATUS_XGXS0_LINK10G \
 +              NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
 +#define NIG_STATUS_XGXS0_LINK_STATUS \
 +              NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
 +#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
 +              NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
 +#define NIG_STATUS_SERDES0_LINK_STATUS \
 +              NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
 +#define NIG_MASK_MI_INT \
 +              NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
 +#define NIG_MASK_XGXS0_LINK10G \
 +              NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
 +#define NIG_MASK_XGXS0_LINK_STATUS \
 +              NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
 +#define NIG_MASK_SERDES0_LINK_STATUS \
 +              NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
 +
 +#define MDIO_AN_CL73_OR_37_COMPLETE \
 +              (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
 +               MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
 +
 +#define XGXS_RESET_BITS \
 +      (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW |   \
 +       MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ |      \
 +       MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN |    \
 +       MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
 +       MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
 +
 +#define SERDES_RESET_BITS \
 +      (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
 +       MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ |    \
 +       MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN |  \
 +       MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
 +
 +#define AUTONEG_CL37          SHARED_HW_CFG_AN_ENABLE_CL37
 +#define AUTONEG_CL73          SHARED_HW_CFG_AN_ENABLE_CL73
 +#define AUTONEG_BAM           SHARED_HW_CFG_AN_ENABLE_BAM
 +#define AUTONEG_PARALLEL \
 +                              SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
 +#define AUTONEG_SGMII_FIBER_AUTODET \
 +                              SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
 +#define AUTONEG_REMOTE_PHY    SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
 +
 +#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
 +                      MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
 +#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
 +                      MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
 +#define GP_STATUS_SPEED_MASK \
 +                      MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
 +#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
 +#define GP_STATUS_100M        MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
 +#define GP_STATUS_1G  MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
 +#define GP_STATUS_2_5G        MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
 +#define GP_STATUS_5G  MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
 +#define GP_STATUS_6G  MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
 +#define GP_STATUS_10G_HIG \
 +                      MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
 +#define GP_STATUS_10G_CX4 \
 +                      MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
 +#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
 +#define GP_STATUS_10G_KX4 \
 +                      MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
 +#define       GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
 +#define       GP_STATUS_10G_XFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
 +#define       GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
 +#define       GP_STATUS_10G_SFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
 +#define LINK_10THD            LINK_STATUS_SPEED_AND_DUPLEX_10THD
 +#define LINK_10TFD            LINK_STATUS_SPEED_AND_DUPLEX_10TFD
 +#define LINK_100TXHD          LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
 +#define LINK_100T4            LINK_STATUS_SPEED_AND_DUPLEX_100T4
 +#define LINK_100TXFD          LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
 +#define LINK_1000THD          LINK_STATUS_SPEED_AND_DUPLEX_1000THD
 +#define LINK_1000TFD          LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
 +#define LINK_1000XFD          LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
 +#define LINK_2500THD          LINK_STATUS_SPEED_AND_DUPLEX_2500THD
 +#define LINK_2500TFD          LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
 +#define LINK_2500XFD          LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
 +#define LINK_10GTFD           LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
 +#define LINK_10GXFD           LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
 +#define LINK_20GTFD           LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
 +#define LINK_20GXFD           LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
 +
 +
 +
 +/* */
 +#define SFP_EEPROM_CON_TYPE_ADDR              0x2
 +      #define SFP_EEPROM_CON_TYPE_VAL_LC      0x7
 +      #define SFP_EEPROM_CON_TYPE_VAL_COPPER  0x21
 +
 +
 +#define SFP_EEPROM_COMP_CODE_ADDR             0x3
 +      #define SFP_EEPROM_COMP_CODE_SR_MASK    (1<<4)
 +      #define SFP_EEPROM_COMP_CODE_LR_MASK    (1<<5)
 +      #define SFP_EEPROM_COMP_CODE_LRM_MASK   (1<<6)
 +
 +#define SFP_EEPROM_FC_TX_TECH_ADDR            0x8
 +      #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
 +      #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE  0x8
 +
 +#define SFP_EEPROM_OPTIONS_ADDR                       0x40
 +      #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
 +#define SFP_EEPROM_OPTIONS_SIZE                       2
 +
 +#define EDC_MODE_LINEAR                               0x0022
 +#define EDC_MODE_LIMITING                             0x0044
 +#define EDC_MODE_PASSIVE_DAC                  0x0055
 +
 +
 +/* BRB thresholds for E2*/
 +#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE           170
 +#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE               0
 +
 +#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE            250
 +#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE                0
 +
 +#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE            10
 +#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE                90
 +
 +#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE                     50
 +#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE         250
 +
 +/* BRB thresholds for E3A0 */
 +#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE         290
 +#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE             0
 +
 +#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE          410
 +#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE              0
 +
 +#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE          10
 +#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE              170
 +
 +#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE           50
 +#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE               410
 +
 +
 +/* BRB thresholds for E3B0 2 port mode*/
 +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE              1025
 +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE  0
 +
 +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE               1025
 +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE   0
 +
 +#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE               10
 +#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE   1025
 +
 +#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE                50
 +#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE    1025
 +
 +/* only for E3B0*/
 +#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR                      1025
 +#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR                       1025
 +
 +/* Lossy + Lossless GUARANTEED == GUART */
 +#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART                        284
 +/* Lossless +Lossless*/
 +#define PFC_E3B0_2P_PAUSE_LB_GUART                    236
 +/* Lossy +Lossy*/
 +#define PFC_E3B0_2P_NON_PAUSE_LB_GUART                        342
 +
 +/* Lossy +Lossless*/
 +#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART             284
 +/* Lossless +Lossless*/
 +#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART         236
 +/* Lossy +Lossy*/
 +#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART             336
 +#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST              80
 +
 +#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART           0
 +#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST              0
 +
 +/* BRB thresholds for E3B0 4 port mode */
 +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE              304
 +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE  0
 +
 +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE               384
 +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE   0
 +
 +#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE               10
 +#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE   304
 +
 +#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE                50
 +#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE    384
 +
 +
 +/* only for E3B0*/
 +#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR                      304
 +#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR                       384
 +#define PFC_E3B0_4P_LB_GUART                          120
 +
 +#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART           120
 +#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST              80
 +
 +#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART           80
 +#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST              120
 +
 +#define DCBX_INVALID_COS                                      (0xFF)
 +
 +#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND               (0x5000)
 +#define ETS_BW_LIMIT_CREDIT_WEIGHT            (0x5000)
 +#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS           (1360)
 +#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS                 (2720)
 +#define ETS_E3B0_PBF_MIN_W_VAL                                (10000)
 +
 +#define MAX_PACKET_SIZE                                       (9700)
 +#define WC_UC_TIMEOUT                                 100
 +
 +/**********************************************************/
 +/*                     INTERFACE                          */
 +/**********************************************************/
 +
 +#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
 +      bnx2x_cl45_write(_bp, _phy, \
 +              (_phy)->def_md_devad, \
 +              (_bank + (_addr & 0xf)), \
 +              _val)
 +
 +#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
 +      bnx2x_cl45_read(_bp, _phy, \
 +              (_phy)->def_md_devad, \
 +              (_bank + (_addr & 0xf)), \
 +              _val)
 +
 +static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
 +{
 +      u32 val = REG_RD(bp, reg);
 +
 +      val |= bits;
 +      REG_WR(bp, reg, val);
 +      return val;
 +}
 +
 +static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
 +{
 +      u32 val = REG_RD(bp, reg);
 +
 +      val &= ~bits;
 +      REG_WR(bp, reg, val);
 +      return val;
 +}
 +
 +/******************************************************************/
 +/*                    EPIO/GPIO section                         */
 +/******************************************************************/
 +static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
 +{
 +      u32 epio_mask, gp_oenable;
 +      *en = 0;
 +      /* Sanity check */
 +      if (epio_pin > 31) {
 +              DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
 +              return;
 +      }
 +
 +      epio_mask = 1 << epio_pin;
 +      /* Set this EPIO to output */
 +      gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
 +      REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
 +
 +      *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
 +}
 +static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
 +{
 +      u32 epio_mask, gp_output, gp_oenable;
 +
 +      /* Sanity check */
 +      if (epio_pin > 31) {
 +              DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
 +              return;
 +      }
 +      DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
 +      epio_mask = 1 << epio_pin;
 +      /* Set this EPIO to output */
 +      gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
 +      if (en)
 +              gp_output |= epio_mask;
 +      else
 +              gp_output &= ~epio_mask;
 +
 +      REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
 +
 +      /* Set the value for this EPIO */
 +      gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
 +      REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
 +}
 +
 +static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
 +{
 +      if (pin_cfg == PIN_CFG_NA)
 +              return;
 +      if (pin_cfg >= PIN_CFG_EPIO0) {
 +              bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
 +      } else {
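 +              /*
 +               * Illustrative decoding: a pin_cfg 6 above PIN_CFG_GPIO0_P0
 +               * gives gpio_num = 6 & 0x3 = 2 and gpio_port = 6 >> 2 = 1.
 +               */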
 +              u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
 +              u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
 +              bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
 +      }
 +}
 +
 +static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
 +{
 +      if (pin_cfg == PIN_CFG_NA)
 +              return -EINVAL;
 +      if (pin_cfg >= PIN_CFG_EPIO0) {
 +              bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
 +      } else {
 +              u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
 +              u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
 +              *val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
 +      }
 +      return 0;
 +
 +}
 +/******************************************************************/
 +/*                            ETS section                       */
 +/******************************************************************/
 +static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
 +{
 +      /* ETS disabled configuration*/
 +      struct bnx2x *bp = params->bp;
 +
 +      DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
 +
 +      /*
 +       * mapping between entry priority and client number (0,1,2 - debug and
 +       * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
 +       * 3bits client num.
 +       *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
 +       * cos1-100     cos0-011     dbg1-010     dbg0-001     MCP-000
 +       */
 +
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
 +      /*
 +       * Bitmap of 5bits length. Each bit specifies whether the entry behaves
 +       * as strict.  Bits 0,1,2 - debug and management entries, 3 -
 +       * COS0 entry, 4 - COS1 entry.
 +       * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
 +       * bit4   bit3    bit2   bit1     bit0
 +       * MCP and debug are strict
 +       */
 +
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
 +      /* defines which entries (clients) are subjected to WFQ arbitration */
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
 +      /*
 +       * For strict priority entries defines the number of consecutive
 +       * slots for the highest priority.
 +       */
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
 +      /*
 +       * mapping between the CREDIT_WEIGHT registers and actual client
 +       * numbers
 +       */
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
 +
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
 +      REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
 +      /* ETS mode disable */
 +      REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
 +      /*
 +       * If ETS mode is enabled (there is no strict priority) defines a WFQ
 +       * weight for COS0/COS1.
 +       */
 +      REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
 +      REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
 +      /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
 +      REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
 +      REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
 +      /* Defines the number of consecutive slots for the strict priority */
 +      REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
 +}
 +/******************************************************************************
 +* Description:
 +*     Get the min_w_val, which is set according to the line speed.
 +*.
 +******************************************************************************/
 +static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
 +{
 +      u32 min_w_val = 0;
 +      /* Calculate min_w_val.*/
 +      if (vars->link_up) {
 +              if (SPEED_20000 == vars->line_speed)
 +                      min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
 +              else
 +                      min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
 +      } else
 +              min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
 +      /**
 +       *  If the link isn't up (static configuration, for example),
 +       *  min_w_val is set as for 20Gbps.
 +      */
 +      return min_w_val;
 +}
 +/******************************************************************************
 +* Description:
 +*     Get the credit upper bound from min_w_val.
 +*.
 +******************************************************************************/
 +static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
 +{
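 +      /*
 +       * Illustrative arithmetic: with min_w_val = 1360
 +       * (ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS), 150 * 1360 = 204000,
 +       * which already exceeds MAX_PACKET_SIZE (9700), so 204000 is used.
 +       */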
 +      const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
 +                                              MAX_PACKET_SIZE);
 +      return credit_upper_bound;
 +}
 +/******************************************************************************
 +* Description:
 +*     Set credit upper bound for NIG.
 +*.
 +******************************************************************************/
 +static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
 +      const struct link_params *params,
 +      const u32 min_w_val)
 +{
 +      struct bnx2x *bp = params->bp;
 +      const u8 port = params->port;
 +      const u32 credit_upper_bound =
 +          bnx2x_ets_get_credit_upper_bound(min_w_val);
 +
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
 +              NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
 +
 +      if (0 == port) {
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
 +                      credit_upper_bound);
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
 +                      credit_upper_bound);
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
 +                      credit_upper_bound);
 +      }
 +}
 +/******************************************************************************
 +* Description:
 +*     Return the NIG ETS registers to their init values, except
 +*     credit_upper_bound.
 +*     That isn't used in this configuration (no WFQ is enabled) and will be
 +*     configured according to the spec.
 +*.
 +******************************************************************************/
 +static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
 +                                      const struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      const u8 port = params->port;
 +      const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
 +      /**
 +       * mapping between entry priority and client number (0,1,2 - debug and
 +       * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
 +       * COS5) (HIGHEST), 4-bit client num.
 +       * TODO_ETS - Should be done by reset value or init tool
 +       */
 +      if (port) {
 +              REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
 +              REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
 +      } else {
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
 +      }
 +      /**
 +      * For strict priority entries defines the number of consecutive
 +      * slots for the highest priority.
 +      */
 +      /* TODO_ETS - Should be done by reset value or init tool */
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
 +                 NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
 +      /**
 +       * mapping between the CREDIT_WEIGHT registers and actual client
 +       * numbers
 +       */
 +      /* TODO_ETS - Should be done by reset value or init tool */
 +      if (port) {
 +              /*Port 1 has 6 COS*/
 +              REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
 +              REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
 +      } else {
 +              /*Port 0 has 9 COS*/
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
 +                     0x43210876);
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
 +      }
 +
 +      /**
 +       * Bitmap of 5bits length. Each bit specifies whether the entry behaves
 +       * as strict.  Bits 0,1,2 - debug and management entries, 3 -
 +       * COS0 entry, 4 - COS1 entry.
 +       * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
 +       * bit4   bit3    bit2   bit1     bit0
 +       * MCP and debug are strict
 +       */
 +      if (port)
 +              REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
 +      else
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
 +      /* defines which entries (clients) are subjected to WFQ arbitration */
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
 +                 NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
 +
 +      /**
 +      * Please notice the register addresses are not contiguous, so a
 +      * for loop here is not appropriate. In 2 port mode port0 can only
 +      * use COS0-5; DEBUG0, DEBUG1 and MGMT are never used for WFQ.
 +      * In 4 port mode port1 can only use COS0-2; DEBUG0, DEBUG1 and MGMT
 +      * are never used for WFQ.
 +      */
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
 +                 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
 +      if (0 == port) {
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
 +      }
 +
 +      bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
 +}
 +/******************************************************************************
 +* Description:
 +*     Set credit upper bound for PBF.
 +*.
 +******************************************************************************/
 +static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
 +      const struct link_params *params,
 +      const u32 min_w_val)
 +{
 +      struct bnx2x *bp = params->bp;
 +      const u32 credit_upper_bound =
 +          bnx2x_ets_get_credit_upper_bound(min_w_val);
 +      const u8 port = params->port;
 +      u32 base_upper_bound = 0;
 +      u8 max_cos = 0;
 +      u8 i = 0;
 +      /**
 +      * In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
 +      * port mode port1 has COS0-2 that can be used for WFQ.
 +      */
 +      if (0 == port) {
 +              base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
 +              max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
 +      } else {
 +              base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
 +              max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
 +      }
 +
 +      for (i = 0; i < max_cos; i++)
 +              REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
 +}
 +
 +/******************************************************************************
 +* Description:
 +*     Return the PBF ETS registers to their init values, except
 +*     credit_upper_bound.
 +*     That isn't used in this configuration (no WFQ is enabled) and will be
 +*     configured according to the spec.
 +*.
 +******************************************************************************/
 +static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      const u8 port = params->port;
 +      const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
 +      u8 i = 0;
 +      u32 base_weight = 0;
 +      u8 max_cos = 0;
 +
 +      /**
 +       * mapping between entry priority and client number (0 - COS0
 +       * client, 1 - COS1, ... 5 - COS5) (HIGHEST), 4-bit client num.
 +       * TODO_ETS - Should be done by reset value or init tool
 +       */
 +      if (port)
 +              /*  0x688 (|011|0 10|00 1|000) */
 +              REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , 0x688);
 +      else
 +              /*  (10 1|100 |011|0 10|00 1|000) */
 +              REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , 0x2C688);
 +
 +      /* TODO_ETS - Should be done by reset value or init tool */
 +      if (port)
 +              /* 0x688 (|011|0 10|00 1|000)*/
 +              REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
 +      else
 +      /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
 +      REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
 +
 +      REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
 +                 PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 , 0x100);
 +
 +
 +      REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
 +                 PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , 0);
 +
 +      REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
 +                 PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
 +      /**
 +      * In 2 port mode port0 has COS0-5 that can be used for WFQ.
 +      * In 4 port mode port1 has COS0-2 that can be used for WFQ.
 +      */
 +      if (0 == port) {
 +              base_weight = PBF_REG_COS0_WEIGHT_P0;
 +              max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
 +      } else {
 +              base_weight = PBF_REG_COS0_WEIGHT_P1;
 +              max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
 +      }
 +
 +      for (i = 0; i < max_cos; i++)
 +              REG_WR(bp, base_weight + (0x4 * i), 0);
 +
 +      bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
 +}
 +/******************************************************************************
 +* Description:
 +*     E3B0 disable will basically return the registers to their init values.
 +*.
 +******************************************************************************/
 +static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
 +                                 const struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +
 +      if (!CHIP_IS_E3B0(bp)) {
 +              DP(NETIF_MSG_LINK,
 +                 "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
 +              return -EINVAL;
 +      }
 +
 +      bnx2x_ets_e3b0_nig_disabled(params, vars);
 +
 +      bnx2x_ets_e3b0_pbf_disabled(params);
 +
 +      return 0;
 +}
 +
 +/******************************************************************************
 +* Description:
 +*     Disable will basically return the registers to their init values.
 +*.
 +******************************************************************************/
 +int bnx2x_ets_disabled(struct link_params *params,
 +                    struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      int bnx2x_status = 0;
 +
 +      if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
 +              bnx2x_ets_e2e3a0_disabled(params);
 +      else if (CHIP_IS_E3B0(bp))
 +              bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
 +      else {
 +              DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
 +              return -EINVAL;
 +      }
 +
 +      return bnx2x_status;
 +}
 +
 +/******************************************************************************
 +* Description
 +*     Set the COS mapping to SP and BW; up to this point none of the COS are
 +*     set as SP or BW.
 +******************************************************************************/
 +static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
 +                                const struct bnx2x_ets_params *ets_params,
 +                                const u8 cos_sp_bitmap,
 +                                const u8 cos_bw_bitmap)
 +{
 +      struct bnx2x *bp = params->bp;
 +      const u8 port = params->port;
 +      const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
 +      const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
 +      const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
 +      const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
 +
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
 +             NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
 +
 +      REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
 +             PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , pbf_cli_sp_bitmap);
 +
 +      REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
 +             NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
 +             nig_cli_subject2wfq_bitmap);
 +
 +      REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
 +             PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
 +             pbf_cli_subject2wfq_bitmap);
 +
 +      return 0;
 +}
 +
 +/******************************************************************************
 +* Description:
 +*     This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
 +*     are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
 +******************************************************************************/
 +static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
 +                                   const u8 cos_entry,
 +                                   const u32 min_w_val_nig,
 +                                   const u32 min_w_val_pbf,
 +                                   const u16 total_bw,
 +                                   const u8 bw,
 +                                   const u8 port)
 +{
 +      u32 nig_reg_adress_crd_weight = 0;
 +      u32 pbf_reg_adress_crd_weight = 0;
-                       if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
-                               DP(NETIF_MSG_LINK,
-                                  "bnx2x_ets_E3B0_config BW was set to 0\n");
-                       return -EINVAL;
++      /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
++      const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
++      const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
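 +      /*
 +       * Illustrative example of the calculation above: bw = 50,
 +       * total_bw = 100 and min_w_val_nig = 1360 give
 +       * cos_bw_nig = (50 * 1360) / 100 = 680.
 +       */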
 +
 +      switch (cos_entry) {
 +      case 0:
 +          nig_reg_adress_crd_weight =
 +               (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
 +                   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
 +           pbf_reg_adress_crd_weight = (port) ?
 +               PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
 +           break;
 +      case 1:
 +           nig_reg_adress_crd_weight = (port) ?
 +               NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
 +               NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
 +           pbf_reg_adress_crd_weight = (port) ?
 +               PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
 +           break;
 +      case 2:
 +           nig_reg_adress_crd_weight = (port) ?
 +               NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
 +               NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
 +
 +               pbf_reg_adress_crd_weight = (port) ?
 +                   PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
 +           break;
 +      case 3:
 +          if (port)
 +                      return -EINVAL;
 +           nig_reg_adress_crd_weight =
 +               NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
 +           pbf_reg_adress_crd_weight =
 +               PBF_REG_COS3_WEIGHT_P0;
 +           break;
 +      case 4:
 +          if (port)
 +              return -EINVAL;
 +           nig_reg_adress_crd_weight =
 +               NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
 +           pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
 +           break;
 +      case 5:
 +          if (port)
 +              return -EINVAL;
 +           nig_reg_adress_crd_weight =
 +               NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
 +           pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
 +           break;
 +      }
 +
 +      REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
 +
 +      REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
 +
 +      return 0;
 +}
 +/******************************************************************************
 +* Description:
 +*     Calculate the total BW. A value of 0 isn't legal.
 +*.
 +******************************************************************************/
 +static int bnx2x_ets_e3b0_get_total_bw(
 +      const struct link_params *params,
 +      const struct bnx2x_ets_params *ets_params,
 +      u16 *total_bw)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 cos_idx = 0;
 +
 +      *total_bw = 0 ;
 +      /* Calculate total BW requested */
 +      for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
 +              if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-               *total_bw +=
-                   ets_params->cos[cos_idx].params.bw_params.bw;
-           }
++                      *total_bw +=
++                              ets_params->cos[cos_idx].params.bw_params.bw;
 +              }
-       /*Check taotl BW is valid */
 +      }
 +
-               val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
++      /* Check total BW is valid */
 +      if ((100 != *total_bw) || (0 == *total_bw)) {
 +              if (0 == *total_bw) {
 +                      DP(NETIF_MSG_LINK,
 +                         "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
 +                      return -EINVAL;
 +              }
 +              DP(NETIF_MSG_LINK,
 +                 "bnx2x_ets_E3B0_config total BW should be 100\n");
 +              /**
 +              *   We can handle a case where the BW isn't 100; this can
 +              *   happen if the TCs are joined.
 +              */
 +      }
 +      return 0;
 +}
 +
 +/******************************************************************************
 +* Description:
 +*     Invalidate all the sp_pri_to_cos.
 +*.
 +******************************************************************************/
 +static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
 +{
 +      u8 pri = 0;
 +      for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
 +              sp_pri_to_cos[pri] = DCBX_INVALID_COS;
 +}
 +/******************************************************************************
 +* Description:
 +*     Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 +*     according to sp_pri_to_cos.
 +*.
 +******************************************************************************/
 +static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
 +                                          u8 *sp_pri_to_cos, const u8 pri,
 +                                          const u8 cos_entry)
 +{
 +      struct bnx2x *bp = params->bp;
 +      const u8 port = params->port;
 +      const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
 +              DCBX_E3B0_MAX_NUM_COS_PORT0;
 +
 +      if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
 +              DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
 +                                 "parameter There can't be two COS's with "
 +                                 "the same strict pri\n");
 +              return -EINVAL;
 +      }
 +
 +      if (pri > max_num_of_cos) {
 +              DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
 +                             "parameter Illegal strict priority\n");
 +          return -EINVAL;
 +      }
 +
 +      sp_pri_to_cos[pri] = cos_entry;
 +      return 0;
 +
 +}
 +
 +/******************************************************************************
 +* Description:
 +*     Returns the correct value according to COS and priority in
 +*     the sp_pri_cli register.
 +*.
 +******************************************************************************/
 +static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
 +                                       const u8 pri_set,
 +                                       const u8 pri_offset,
 +                                       const u8 entry_size)
 +{
 +      u64 pri_cli_nig = 0;
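 +      /*
 +       * For example, with the NIG offsets (cos_offset = 3, pri_offset = 3,
 +       * entry_size = 4), cos = 1 and pri_set = 0 give
 +       * (1 + 3) << (4 * 3) = 0x4000.
 +       */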
 +      pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
 +                                                  (pri_set + pri_offset));
 +
 +      return pri_cli_nig;
 +}
 +/******************************************************************************
 +* Description:
 +*     Returns the correct value according to COS and priority in the
 +*     sp_pri_cli register for NIG.
 +*.
 +******************************************************************************/
 +static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
 +{
 +      /* MCP Dbg0 and dbg1 are always with higher strict pri*/
 +      const u8 nig_cos_offset = 3;
 +      const u8 nig_pri_offset = 3;
 +
 +      return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
 +              nig_pri_offset, 4);
 +
 +}
 +/******************************************************************************
 +* Description:
 +*     Returns the correct value according to COS and priority in the
 +*     sp_pri_cli register for PBF.
 +*.
 +******************************************************************************/
 +static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
 +{
 +      const u8 pbf_cos_offset = 0;
 +      const u8 pbf_pri_offset = 0;
 +
 +      return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
 +              pbf_pri_offset, 3);
 +
 +}
 +
 +/******************************************************************************
 +* Description:
 +*     Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 +*     according to sp_pri_to_cos (which COS has higher priority).
 +*.
 +******************************************************************************/
 +static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
 +                                           u8 *sp_pri_to_cos)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 i = 0;
 +      const u8 port = params->port;
 +      /* MCP Dbg0 and dbg1 are always with higher strict pri*/
 +      u64 pri_cli_nig = 0x210;
 +      u32 pri_cli_pbf = 0x0;
 +      u8 pri_set = 0;
 +      u8 pri_bitmask = 0;
 +      const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
 +              DCBX_E3B0_MAX_NUM_COS_PORT0;
 +
 +      u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
 +
 +      /* Set all the strict priority first */
 +      for (i = 0; i < max_num_of_cos; i++) {
 +              if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
 +                      if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
 +                              DP(NETIF_MSG_LINK,
 +                                         "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
 +                                         "invalid cos entry\n");
 +                              return -EINVAL;
 +                      }
 +
 +                      pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
 +                          sp_pri_to_cos[i], pri_set);
 +
 +                      pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
 +                          sp_pri_to_cos[i], pri_set);
 +                      pri_bitmask = 1 << sp_pri_to_cos[i];
 +                      /* COS is used, remove it from the bitmap. */
 +                      if (0 == (pri_bitmask & cos_bit_to_set)) {
 +                              DP(NETIF_MSG_LINK,
 +                                      "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
 +                                      "invalid There can't be two COS's with"
 +                                      " the same strict pri\n");
 +                              return -EINVAL;
 +                      }
 +                      cos_bit_to_set &= ~pri_bitmask;
 +                      pri_set++;
 +              }
 +      }
 +
 +      /* Set all the non-strict priority entries; i = COS */
 +      for (i = 0; i < max_num_of_cos; i++) {
 +              pri_bitmask = 1 << i;
 +              /* Check if COS was already used for SP */
 +              if (pri_bitmask & cos_bit_to_set) {
 +                      /* COS wasn't used for SP */
 +                      pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
 +                          i, pri_set);
 +
 +                      pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
 +                          i, pri_set);
 +                      /* COS is used, remove it from the bitmap. */
 +                      cos_bit_to_set &= ~pri_bitmask;
 +                      pri_set++;
 +              }
 +      }
 +
 +      if (pri_set != max_num_of_cos) {
 +              DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
 +                                 "entries were set\n");
 +              return -EINVAL;
 +      }
 +
 +      if (port) {
 +              /* Only 6 usable clients*/
 +              REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
 +                     (u32)pri_cli_nig);
 +
 +              REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf);
 +      } else {
 +              /* Only 9 usable clients*/
 +              const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
 +              const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
 +
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
 +                     pri_cli_nig_lsb);
 +              REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
 +                     pri_cli_nig_msb);
 +
 +              REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf);
 +      }
 +      return 0;
 +}
 +
 +/******************************************************************************
 +* Description:
 +*     Configure the COS to ETS according to BW and SP settings.
 +******************************************************************************/
 +int bnx2x_ets_e3b0_config(const struct link_params *params,
 +                       const struct link_vars *vars,
 +                       const struct bnx2x_ets_params *ets_params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      int bnx2x_status = 0;
 +      const u8 port = params->port;
 +      u16 total_bw = 0;
 +      const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
 +      const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
 +      u8 cos_bw_bitmap = 0;
 +      u8 cos_sp_bitmap = 0;
 +      u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
 +      const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
 +              DCBX_E3B0_MAX_NUM_COS_PORT0;
 +      u8 cos_entry = 0;
 +
 +      if (!CHIP_IS_E3B0(bp)) {
 +              DP(NETIF_MSG_LINK,
 +                 "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
 +                 "bnx2x_ets_e3b0_config the chip isn't E3B0\n");
 +      }
 +
 +      if ((ets_params->num_of_cos > max_num_of_cos)) {
 +              DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
 +                                 "isn't supported\n");
 +              return -EINVAL;
 +      }
 +
 +      /* Prepare sp strict priority parameters*/
 +      bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
 +
 +      /* Prepare BW parameters*/
 +      bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
 +                                                 &total_bw);
 +      if (0 != bnx2x_status) {
 +              DP(NETIF_MSG_LINK,
 +                 "bnx2x_ets_E3B0_config get_total_bw failed\n");
 +              return -EINVAL;
 +      }
 +
 +      /**
 +       *  Upper bound is set according to current link speed (min_w_val
 +       *  should be the same for upper bound and COS credit val).
 +       */
 +      bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
 +      bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
 +
 +
 +      for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
 +              if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
 +                      cos_bw_bitmap |= (1 << cos_entry);
 +                      /**
 +                       * The function also sets the BW in HW (not the mapping
 +                       * yet).
 +                       */
 +                      bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
 +                              bp, cos_entry, min_w_val_nig, min_w_val_pbf,
 +                              total_bw,
 +                              ets_params->cos[cos_entry].params.bw_params.bw,
 +                               port);
 +              } else if (bnx2x_cos_state_strict ==
 +                      ets_params->cos[cos_entry].state){
 +                      cos_sp_bitmap |= (1 << cos_entry);
 +
 +                      bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
 +                              params,
 +                              sp_pri_to_cos,
 +                              ets_params->cos[cos_entry].params.sp_params.pri,
 +                              cos_entry);
 +
 +              } else {
 +                      DP(NETIF_MSG_LINK,
 +                         "bnx2x_ets_e3b0_config cos state not valid\n");
 +                      return -EINVAL;
 +              }
 +              if (0 != bnx2x_status) {
 +                      DP(NETIF_MSG_LINK,
 +                         "bnx2x_ets_e3b0_config set cos bw failed\n");
 +                      return bnx2x_status;
 +              }
 +      }
 +
 +      /* Set SP register (which COS has higher priority) */
 +      bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
 +                                                       sp_pri_to_cos);
 +
 +      if (0 != bnx2x_status) {
 +              DP(NETIF_MSG_LINK,
 +                 "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
 +              return bnx2x_status;
 +      }
 +
 +      /* Set client mapping of BW and strict */
 +      bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
 +                                            cos_sp_bitmap,
 +                                            cos_bw_bitmap);
 +
 +      if (0 != bnx2x_status) {
 +              DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
 +              return bnx2x_status;
 +      }
 +      return 0;
 +}
 +static void bnx2x_ets_bw_limit_common(const struct link_params *params)
 +{
 +      /* ETS enabled BW limit configuration */
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
 +      /*
 +       * defines which entries (clients) are subjected to WFQ arbitration
 +       * COS0 0x8
 +       * COS1 0x10
 +       */
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
 +      /*
 +       * mapping between the ARB_CREDIT_WEIGHT registers and actual
 +       * client numbers (WEIGHT_0 does not actually have to represent
 +       * client 0)
 +       *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
 +       *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
 +       */
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
 +
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
 +             ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
 +             ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
 +
 +      /* ETS mode enabled*/
 +      REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
 +
 +      /* Defines the number of consecutive slots for the strict priority */
 +      REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
 +      /*
 +       * Bitmap of 5bits length. Each bit specifies whether the entry behaves
 +       * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
 +       * entry, 4 - COS1 entry.
 +       * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
 +       * bit4   bit3    bit2     bit1    bit0
 +       * MCP and debug are strict
 +       */
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
 +
 +      /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
 +      REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
 +             ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
 +      REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
 +             ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
 +}
 +
 +void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
 +                      const u32 cos1_bw)
 +{
 +      /* ETS enabled BW limit configuration */
 +      struct bnx2x *bp = params->bp;
 +      const u32 total_bw = cos0_bw + cos1_bw;
 +      u32 cos0_credit_weight = 0;
 +      u32 cos1_credit_weight = 0;
 +
 +      DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
 +
 +      if ((0 == total_bw) ||
 +          (0 == cos0_bw) ||
 +          (0 == cos1_bw)) {
 +              DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
 +              return;
 +      }
 +
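 +      /*
 +       * Example (illustrative): cos0_bw = 60 and cos1_bw = 40 yield
 +       * cos0_credit_weight = (60 * 0x5000) / 100 = 0x3000 and
 +       * cos1_credit_weight = 0x2000.
 +       */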
 +      cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
 +              total_bw;
 +      cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
 +              total_bw;
 +
 +      bnx2x_ets_bw_limit_common(params);
 +
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
 +
 +      REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
 +      REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
 +}
 +
 +int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 +{
 +      /* ETS disabled configuration*/
 +      struct bnx2x *bp = params->bp;
 +      u32 val = 0;
 +
 +      DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
 +      /*
 +       * Bitmap of 5bits length. Each bit specifies whether the entry behaves
 +       * as strict.  Bits 0,1,2 - debug and management entries,
 +       * 3 - COS0 entry, 4 - COS1 entry.
 +       *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
 +       *  bit4   bit3   bit2      bit1     bit0
 +       * MCP and debug are strict
 +       */
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
 +      /*
 +       * For strict priority entries defines the number of consecutive slots
 +       * for the highest priority.
 +       */
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
 +      /* ETS mode disable */
 +      REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
 +      /* Defines the number of consecutive slots for the strict priority */
 +      REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
 +
 +      /* Defines the number of consecutive slots for the strict priority */
 +      REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
 +
 +      /*
 +       * mapping between entry priority and client number (0,1,2 - debug and
 +       * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
 +       * 3bits client num.
 +       *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
 +       * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
 +       * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
 +       */
 +      val = (0 == strict_cos) ? 0x2318 : 0x22E0;
 +      REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
 +
 +      return 0;
 +}
 +/******************************************************************/
 +/*                    PFC section                               */
 +/******************************************************************/
 +
 +static void bnx2x_update_pfc_xmac(struct link_params *params,
 +                                struct link_vars *vars,
 +                                u8 is_lb)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u32 xmac_base;
 +      u32 pause_val, pfc0_val, pfc1_val;
 +
 +      /* XMAC base addr */
 +      xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
 +
 +      /* Initialize pause and pfc registers */
 +      pause_val = 0x18000;
 +      pfc0_val = 0xFFFF8000;
 +      pfc1_val = 0x2;
 +
 +      /* No PFC support */
 +      if (!(params->feature_config_flags &
 +            FEATURE_CONFIG_PFC_ENABLED)) {
 +
 +              /*
 +               * RX flow control - Process pause frame in receive direction
 +               */
 +              if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
 +                      pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
 +
 +              /*
 +               * TX flow control - Send pause packet when buffer is full
 +               */
 +              if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
 +                      pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
 +      } else {/* PFC support */
 +              pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
 +                      XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
 +                      XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
 +                      XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
 +      }
 +
 +      /* Write pause and PFC registers */
 +      REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
 +      REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
 +      REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
 +
 +
 +      /* Set MAC address for source TX Pause/PFC frames */
 +      REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
 +             ((params->mac_addr[2] << 24) |
 +              (params->mac_addr[3] << 16) |
 +              (params->mac_addr[4] << 8) |
 +              (params->mac_addr[5])));
 +      REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
 +             ((params->mac_addr[0] << 8) |
 +              (params->mac_addr[1])));
 +
 +      udelay(30);
 +}
 +
 +
 +static void bnx2x_emac_get_pfc_stat(struct link_params *params,
 +                                  u32 pfc_frames_sent[2],
 +                                  u32 pfc_frames_received[2])
 +{
 +      /* Read pfc statistic */
 +      struct bnx2x *bp = params->bp;
 +      u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 +      u32 val_xon = 0;
 +      u32 val_xoff = 0;
 +
 +      DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
 +
 +      /* PFC received frames */
 +      val_xoff = REG_RD(bp, emac_base +
 +                              EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
 +      val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
 +      val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
 +      val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
 +
 +      pfc_frames_received[0] = val_xon + val_xoff;
 +
 +      /* PFC frames sent */
 +      val_xoff = REG_RD(bp, emac_base +
 +                              EMAC_REG_RX_PFC_STATS_XOFF_SENT);
 +      val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
 +      val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
 +      val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
 +
 +      pfc_frames_sent[0] = val_xon + val_xoff;
 +}
 +
 +/* Read pfc statistic*/
 +void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
 +                       u32 pfc_frames_sent[2],
 +                       u32 pfc_frames_received[2])
 +{
 +      /* Read pfc statistic */
 +      struct bnx2x *bp = params->bp;
 +
 +      DP(NETIF_MSG_LINK, "pfc statistic\n");
 +
 +      if (!vars->link_up)
 +              return;
 +
 +      if (MAC_TYPE_EMAC == vars->mac_type) {
 +              DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
 +              bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
 +                                      pfc_frames_received);
 +      }
 +}
 +/******************************************************************/
 +/*                    MAC/PBF section                           */
 +/******************************************************************/
 +static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
 +{
 +      u32 mode, emac_base;
 +      /*
 +       * Set clause 45 mode, slow down the MDIO clock to 2.5 MHz
 +       * (a value of 49 == 0x31) and make sure that the AUTO poll is off.
 +       */
 +
 +      if (CHIP_IS_E2(bp))
 +              emac_base = GRCBASE_EMAC0;
 +      else
 +              emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 +      mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
 +      mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
 +                EMAC_MDIO_MODE_CLOCK_CNT);
 +      if (USES_WARPCORE(bp))
 +              mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
 +      else
 +              mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
 +
 +      mode |= (EMAC_MDIO_MODE_CLAUSE_45);
 +      REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
 +
 +      udelay(40);
 +}
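Editorial note: the routine above only reprograms two things, forcing clause-45 addressing and reloading the EMAC_MDIO_MODE_CLOCK_CNT field with 49 (0x31), or 74 when the Warp Core is used, to slow the MDIO clock to about 2.5 MHz. A minimal sketch of that register-value computation follows, assuming only kernel types and the field names already used above; the helper name is made up for illustration and is not part of this patch.

        /* Illustrative only: mirrors the field manipulation done in
         * bnx2x_set_mdio_clk() above. */
        static u32 mdio_mode_for_clause45(u32 mode, bool uses_warpcore)
        {
                mode &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
                mode |= (uses_warpcore ? 74UL : 49UL) <<
                        EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
                return mode | EMAC_MDIO_MODE_CLAUSE_45;
        }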
 +
 +static void bnx2x_emac_init(struct link_params *params,
 +                          struct link_vars *vars)
 +{
 +      /* reset and unreset the emac core */
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +      u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 +      u32 val;
 +      u16 timeout;
 +
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +             (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
 +      udelay(5);
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 +             (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
 +
 +      /* init emac - use read-modify-write */
 +      /* self clear reset */
 +      val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
 +      EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
 +
 +      timeout = 200;
 +      do {
 +              val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
 +              DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
 +              if (!timeout) {
 +                      DP(NETIF_MSG_LINK, "EMAC timeout!\n");
 +                      return;
 +              }
 +              timeout--;
 +      } while (val & EMAC_MODE_RESET);
 +      bnx2x_set_mdio_clk(bp, params->chip_id, port);
 +      /* Set mac address */
 +      val = ((params->mac_addr[0] << 8) |
 +              params->mac_addr[1]);
 +      EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
 +
 +      val = ((params->mac_addr[2] << 24) |
 +             (params->mac_addr[3] << 16) |
 +             (params->mac_addr[4] << 8) |
 +              params->mac_addr[5]);
 +      EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
 +}
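Editorial note: bnx2x_emac_init() packs the six-byte station address into the two EMAC_REG_EMAC_MAC_MATCH words, bytes 0-1 into the first word and bytes 2-5 into the second. A small sketch of the same packing, assuming kernel types; the helper name is illustrative only and not part of this patch.

        /* Illustrative only: mirrors the MAC_MATCH packing done above. */
        static void pack_mac_match(const u8 *mac, u32 *match_hi, u32 *match_lo)
        {
                *match_hi = (mac[0] << 8) | mac[1];             /* MAC_MATCH     */
                *match_lo = (mac[2] << 24) | (mac[3] << 16) |
                            (mac[4] << 8) | mac[5];             /* MAC_MATCH + 4 */
        }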
 +
 +static void bnx2x_set_xumac_nig(struct link_params *params,
 +                              u16 tx_pause_en,
 +                              u8 enable)
 +{
 +      struct bnx2x *bp = params->bp;
 +
 +      REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
 +             enable);
 +      REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
 +             enable);
 +      REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
 +             NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
 +}
 +
 +static void bnx2x_umac_enable(struct link_params *params,
 +                          struct link_vars *vars, u8 lb)
 +{
 +      u32 val;
 +      u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
 +      struct bnx2x *bp = params->bp;
 +      /* Reset UMAC */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +             (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
 +      usleep_range(1000, 1000);
 +
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 +             (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
 +
 +      DP(NETIF_MSG_LINK, "enabling UMAC\n");
 +
 +      /**
 +       * This register determines on which events the MAC will assert
 +       * error on the i/f to the NIG along w/ EOP.
 +       */
 +
 +      /**
 +       * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
 +       * params->port*0x14,      0xfffff.
 +       */
 +      /* This register opens the gate for the UMAC despite its name */
 +      REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
 +
 +      val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
 +              UMAC_COMMAND_CONFIG_REG_PAD_EN |
 +              UMAC_COMMAND_CONFIG_REG_SW_RESET |
 +              UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
 +      switch (vars->line_speed) {
 +      case SPEED_10:
 +              val |= (0<<2);
 +              break;
 +      case SPEED_100:
 +              val |= (1<<2);
 +              break;
 +      case SPEED_1000:
 +              val |= (2<<2);
 +              break;
 +      case SPEED_2500:
 +              val |= (3<<2);
 +              break;
 +      default:
 +              DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
 +                             vars->line_speed);
 +              break;
 +      }
 +      if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
 +              val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
 +
 +      if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
 +              val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
 +
 +      REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
 +      udelay(50);
 +
 +      /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
 +      REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
 +             ((params->mac_addr[2] << 24) |
 +              (params->mac_addr[3] << 16) |
 +              (params->mac_addr[4] << 8) |
 +              (params->mac_addr[5])));
 +      REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
 +             ((params->mac_addr[0] << 8) |
 +              (params->mac_addr[1])));
 +
 +      /* Enable RX and TX */
 +      val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
 +      val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
 +              UMAC_COMMAND_CONFIG_REG_RX_ENA;
 +      REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
 +      udelay(50);
 +
 +      /* Remove SW Reset */
 +      val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
 +
 +      /* Check loopback mode */
 +      if (lb)
 +              val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
 +      REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
 +
 +      /*
 +       * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
 +       * length used by the MAC receive logic to check frames.
 +       */
 +      REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
 +      bnx2x_set_xumac_nig(params,
 +                          ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
 +      vars->mac_type = MAC_TYPE_UMAC;
 +
 +}
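Editorial note: for reference, the maximum-frame value programmed into UMAC_REG_MAXFR above is 0x2710 = 10000 bytes; the same 0x2710 value is written to XMAC_REG_RX_MAX_SIZE later in this patch.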
 +
 +static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
 +{
 +      u32 port4mode_ovwr_val;
 +      /* Check 4-port override enabled */
 +      port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
 +      if (port4mode_ovwr_val & (1<<0)) {
 +              /* Return 4-port mode override value */
 +              return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
 +      }
 +      /* Return 4-port mode from input pin */
 +      return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
 +}
 +
 +/* Define the XMAC mode */
 +static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
 +{
 +      u32 is_port4mode = bnx2x_is_4_port_mode(bp);
 +
 +      /*
 +       * In 4-port mode, the mode needs to be set only once, so if the
 +       * XMAC is already out of reset, the mode has already been set and
 +       * the XMAC must not be reset again, since it controls both ports
 +       * of the path.
 +       */
 +
 +      if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
 +           MISC_REGISTERS_RESET_REG_2_XMAC)) {
 +              DP(NETIF_MSG_LINK,
 +                 "XMAC already out of reset in 4-port mode\n");
 +              return;
 +      }
 +
 +      /* Hard reset */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +             MISC_REGISTERS_RESET_REG_2_XMAC);
 +      usleep_range(1000, 1000);
 +
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 +             MISC_REGISTERS_RESET_REG_2_XMAC);
 +      if (is_port4mode) {
 +              DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
 +
 +              /*  Set the number of ports on the system side to up to 2 */
 +              REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
 +
 +              /* Set the number of ports on the Warp Core to 10G */
 +              REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
 +      } else {
 +              /*  Set the number of ports on the system side to 1 */
 +              REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
 +              if (max_speed == SPEED_10000) {
 +                      DP(NETIF_MSG_LINK,
 +                         "Init XMAC to 10G x 1 port per path\n");
 +                      /* Set the number of ports on the Warp Core to 10G */
 +                      REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
 +              } else {
 +                      DP(NETIF_MSG_LINK,
 +                         "Init XMAC to 20G x 2 ports per path\n");
 +                      /* Set the number of ports on the Warp Core to 20G */
 +                      REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
 +              }
 +      }
 +      /* Soft reset */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +             MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
 +      usleep_range(1000, 1000);
 +
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 +             MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
 +
 +}
 +
 +static void bnx2x_xmac_disable(struct link_params *params)
 +{
 +      u8 port = params->port;
 +      struct bnx2x *bp = params->bp;
 +      u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
 +
 +      if (REG_RD(bp, MISC_REG_RESET_REG_2) &
 +          MISC_REGISTERS_RESET_REG_2_XMAC) {
 +              /*
 +               * Send an indication to change the state in the NIG back to
 +               * XON. Clearing this bit allows the next setting of the bit
 +               * to generate a rising edge.
 +               */
 +              pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
 +              REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
 +                     (pfc_ctrl & ~(1<<1)));
 +              REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
 +                     (pfc_ctrl | (1<<1)));
 +              DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
 +              REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
 +              usleep_range(1000, 1000);
 +              bnx2x_set_xumac_nig(params, 0, 0);
 +              REG_WR(bp, xmac_base + XMAC_REG_CTRL,
 +                     XMAC_CTRL_REG_SOFT_RESET);
 +      }
 +}
 +
 +static int bnx2x_xmac_enable(struct link_params *params,
 +                           struct link_vars *vars, u8 lb)
 +{
 +      u32 val, xmac_base;
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "enabling XMAC\n");
 +
 +      xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
 +
 +      bnx2x_xmac_init(bp, vars->line_speed);
 +
 +      /*
 +       * This register determines on which events the MAC will assert
 +       * error on the i/f to the NIG along w/ EOP.
 +       */
 +
 +      /*
 +       * This register tells the NIG whether to send traffic to UMAC
 +       * or XMAC
 +       */
 +      REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
 +
 +      /* Set Max packet size */
 +      REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
 +
 +      /* CRC append for Tx packets */
 +      REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
 +
 +      /* update PFC */
 +      bnx2x_update_pfc_xmac(params, vars, 0);
 +
 +      /* Enable TX and RX */
 +      val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
 +
 +      /* Check loopback mode */
 +      if (lb)
 -                                * LED blink and setting rate in ON mode.
++              val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
 +      REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
 +      bnx2x_set_xumac_nig(params,
 +                          ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
 +
 +      vars->mac_type = MAC_TYPE_XMAC;
 +
 +      return 0;
 +}
 +static int bnx2x_emac_enable(struct link_params *params,
 +                           struct link_vars *vars, u8 lb)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +      u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 +      u32 val;
 +
 +      DP(NETIF_MSG_LINK, "enabling EMAC\n");
 +
 +      /* Disable BMAC */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +             (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 +
 +      /* enable emac and not bmac */
 +      REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
 +
 +      /* ASIC */
 +      if (vars->phy_flags & PHY_XGXS_FLAG) {
 +              u32 ser_lane = ((params->lane_config &
 +                               PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
 +                              PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 +
 +              DP(NETIF_MSG_LINK, "XGXS\n");
 +              /* select the master lanes (out of 0-3) */
 +              REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
 +              /* select XGXS */
 +              REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
 +
 +      } else { /* SerDes */
 +              DP(NETIF_MSG_LINK, "SerDes\n");
 +              /* select SerDes */
 +              REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
 +      }
 +
 +      bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
 +                    EMAC_RX_MODE_RESET);
 +      bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
 +                    EMAC_TX_MODE_RESET);
 +
 +      if (CHIP_REV_IS_SLOW(bp)) {
 +              /* config GMII mode */
 +              val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
 +              EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
 +      } else { /* ASIC */
 +              /* pause enable/disable */
 +              bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
 +                             EMAC_RX_MODE_FLOW_EN);
 +
 +              bnx2x_bits_dis(bp,  emac_base + EMAC_REG_EMAC_TX_MODE,
 +                             (EMAC_TX_MODE_EXT_PAUSE_EN |
 +                              EMAC_TX_MODE_FLOW_EN));
 +              if (!(params->feature_config_flags &
 +                    FEATURE_CONFIG_PFC_ENABLED)) {
 +                      if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
 +                              bnx2x_bits_en(bp, emac_base +
 +                                            EMAC_REG_EMAC_RX_MODE,
 +                                            EMAC_RX_MODE_FLOW_EN);
 +
 +                      if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
 +                              bnx2x_bits_en(bp, emac_base +
 +                                            EMAC_REG_EMAC_TX_MODE,
 +                                            (EMAC_TX_MODE_EXT_PAUSE_EN |
 +                                             EMAC_TX_MODE_FLOW_EN));
 +              } else
 +                      bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
 +                                    EMAC_TX_MODE_FLOW_EN);
 +      }
 +
 +      /* KEEP_VLAN_TAG, promiscuous */
 +      val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
 +      val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
 +
 +      /*
 +       * Setting this bit causes MAC control frames (except for pause
 +       * frames) to be passed on for processing. This setting has no
 +       * effect on the operation of pause frames. This bit affects
 +       * all packets regardless of the RX parser packet sorting logic.
 +       * Turn the PFC off to make sure we are in Xon state before
 +       * enabling it.
 +       */
 +      EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
 +      if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
 +              DP(NETIF_MSG_LINK, "PFC is enabled\n");
 +              /* Enable PFC again */
 +              EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
 +                      EMAC_REG_RX_PFC_MODE_RX_EN |
 +                      EMAC_REG_RX_PFC_MODE_TX_EN |
 +                      EMAC_REG_RX_PFC_MODE_PRIORITIES);
 +
 +              EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
 +                      ((0x0101 <<
 +                        EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
 +                       (0x00ff <<
 +                        EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
 +              val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
 +      }
 +      EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
 +
 +      /* Set Loopback */
 +      val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
 +      if (lb)
 +              val |= 0x810;
 +      else
 +              val &= ~0x810;
 +      EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
 +
 +      /* enable emac */
 +      REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
 +
 +      /* enable emac for jumbo packets */
 +      EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
 +              (EMAC_RX_MTU_SIZE_JUMBO_ENA |
 +               (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
 +
 +      /* strip CRC */
 +      REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
 +
 +      /* disable the NIG in/out to the bmac */
 +      REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
 +      REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
 +      REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
 +
 +      /* enable the NIG in/out to the emac */
 +      REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
 +      val = 0;
 +      if ((params->feature_config_flags &
 +            FEATURE_CONFIG_PFC_ENABLED) ||
 +          (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
 +              val = 1;
 +
 +      REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
 +      REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
 +
 +      REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
 +
 +      vars->mac_type = MAC_TYPE_EMAC;
 +      return 0;
 +}
 +
 +static void bnx2x_update_pfc_bmac1(struct link_params *params,
 +                                 struct link_vars *vars)
 +{
 +      u32 wb_data[2];
 +      struct bnx2x *bp = params->bp;
 +      u32 bmac_addr =  params->port ? NIG_REG_INGRESS_BMAC1_MEM :
 +              NIG_REG_INGRESS_BMAC0_MEM;
 +
 +      u32 val = 0x14;
 +      if ((!(params->feature_config_flags &
 +            FEATURE_CONFIG_PFC_ENABLED)) &&
 +              (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
 +              /* Enable BigMAC to react to received Pause packets */
 +              val |= (1<<5);
 +      wb_data[0] = val;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
 +
 +      /* tx control */
 +      val = 0xc0;
 +      if (!(params->feature_config_flags &
 +            FEATURE_CONFIG_PFC_ENABLED) &&
 +              (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
 +              val |= 0x800000;
 +      wb_data[0] = val;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
 +}
 +
 +static void bnx2x_update_pfc_bmac2(struct link_params *params,
 +                                 struct link_vars *vars,
 +                                 u8 is_lb)
 +{
 +      /*
 +       * Set rx control: Strip CRC and enable BigMAC to relay
 +       * control packets to the system as well
 +       */
 +      u32 wb_data[2];
 +      struct bnx2x *bp = params->bp;
 +      u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
 +              NIG_REG_INGRESS_BMAC0_MEM;
 +      u32 val = 0x14;
 +
 +      if ((!(params->feature_config_flags &
 +            FEATURE_CONFIG_PFC_ENABLED)) &&
 +              (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
 +              /* Enable BigMAC to react to received Pause packets */
 +              val |= (1<<5);
 +      wb_data[0] = val;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
 +      udelay(30);
 +
 +      /* Tx control */
 +      val = 0xc0;
 +      if (!(params->feature_config_flags &
 +                              FEATURE_CONFIG_PFC_ENABLED) &&
 +          (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
 +              val |= 0x800000;
 +      wb_data[0] = val;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
 +
 +      if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
 +              DP(NETIF_MSG_LINK, "PFC is enabled\n");
 +              /* Enable PFC RX & TX & STATS and set 8 COS  */
 +              wb_data[0] = 0x0;
 +              wb_data[0] |= (1<<0);  /* RX */
 +              wb_data[0] |= (1<<1);  /* TX */
 +              wb_data[0] |= (1<<2);  /* Force initial Xon */
 +              wb_data[0] |= (1<<3);  /* 8 cos */
 +              wb_data[0] |= (1<<5);  /* STATS */
 +              wb_data[1] = 0;
 +              REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
 +                          wb_data, 2);
 +              /* Clear the force Xon */
 +              wb_data[0] &= ~(1<<2);
 +      } else {
 +              DP(NETIF_MSG_LINK, "PFC is disabled\n");
 +              /* disable PFC RX & TX & STATS and set 8 COS */
 +              wb_data[0] = 0x8;
 +              wb_data[1] = 0;
 +      }
 +
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
 +
 +      /*
 +       * Set the time (base unit is 512 bit times) between automatic
 +       * re-sending of PP packets, and enable automatic re-send of
 +       * Per-Priority Packets as long as pp_gen is asserted and
 +       * pp_disable is low.
 +       */
 +      val = 0x8000;
 +      if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
 +              val |= (1<<16); /* enable automatic re-send */
 +
 +      wb_data[0] = val;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
 +                  wb_data, 2);
 +
 +      /* mac control */
 +      val = 0x3; /* Enable RX and TX */
 +      if (is_lb) {
 +              val |= 0x4; /* Local loopback */
 +              DP(NETIF_MSG_LINK, "enable bmac loopback\n");
 +      }
 +      /* When PFC enabled, Pass pause frames towards the NIG. */
 +      if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
 +              val |= ((1<<6)|(1<<5));
 +
 +      wb_data[0] = val;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 +}
 +
 +
 +/* PFC BRB internal port configuration params */
 +struct bnx2x_pfc_brb_threshold_val {
 +      u32 pause_xoff;
 +      u32 pause_xon;
 +      u32 full_xoff;
 +      u32 full_xon;
 +};
 +
 +struct bnx2x_pfc_brb_e3b0_val {
 +      u32 full_lb_xoff_th;
 +      u32 full_lb_xon_threshold;
 +      u32 lb_guarantied;
 +      u32 mac_0_class_t_guarantied;
 +      u32 mac_0_class_t_guarantied_hyst;
 +      u32 mac_1_class_t_guarantied;
 +      u32 mac_1_class_t_guarantied_hyst;
 +};
 +
 +struct bnx2x_pfc_brb_th_val {
 +      struct bnx2x_pfc_brb_threshold_val pauseable_th;
 +      struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
 +};
 +static int bnx2x_pfc_brb_get_config_params(
 +                              struct link_params *params,
 +                              struct bnx2x_pfc_brb_th_val *config_val)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
 +      if (CHIP_IS_E2(bp)) {
 +              config_val->pauseable_th.pause_xoff =
 +                  PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 +              config_val->pauseable_th.pause_xon =
 +                  PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
 +              config_val->pauseable_th.full_xoff =
 +                  PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
 +              config_val->pauseable_th.full_xon =
 +                  PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
 +              /* non pauseable */
 +              config_val->non_pauseable_th.pause_xoff =
 +                  PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
 +              config_val->non_pauseable_th.pause_xon =
 +                  PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
 +              config_val->non_pauseable_th.full_xoff =
 +                  PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
 +              config_val->non_pauseable_th.full_xon =
 +                  PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 +      } else if (CHIP_IS_E3A0(bp)) {
 +              config_val->pauseable_th.pause_xoff =
 +                  PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 +              config_val->pauseable_th.pause_xon =
 +                  PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
 +              config_val->pauseable_th.full_xoff =
 +                  PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
 +              config_val->pauseable_th.full_xon =
 +                  PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
 +              /* non pauseable */
 +              config_val->non_pauseable_th.pause_xoff =
 +                  PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
 +              config_val->non_pauseable_th.pause_xon =
 +                  PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
 +              config_val->non_pauseable_th.full_xoff =
 +                  PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
 +              config_val->non_pauseable_th.full_xon =
 +                  PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 +      } else if (CHIP_IS_E3B0(bp)) {
 +              if (params->phy[INT_PHY].flags &
 +                  FLAGS_4_PORT_MODE) {
 +                      config_val->pauseable_th.pause_xoff =
 +                          PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 +                      config_val->pauseable_th.pause_xon =
 +                          PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
 +                      config_val->pauseable_th.full_xoff =
 +                          PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
 +                      config_val->pauseable_th.full_xon =
 +                          PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
 +                      /* non pauseable */
 +                      config_val->non_pauseable_th.pause_xoff =
 +                          PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
 +                      config_val->non_pauseable_th.pause_xon =
 +                          PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
 +                      config_val->non_pauseable_th.full_xoff =
 +                          PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
 +                      config_val->non_pauseable_th.full_xon =
 +                          PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 +              } else {
 +                      config_val->pauseable_th.pause_xoff =
 +                          PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 +                      config_val->pauseable_th.pause_xon =
 +                          PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
 +                      config_val->pauseable_th.full_xoff =
 +                          PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
 +                      config_val->pauseable_th.full_xon =
 +                          PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
 +                      /* non pauseable */
 +                      config_val->non_pauseable_th.pause_xoff =
 +                          PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
 +                      config_val->non_pauseable_th.pause_xon =
 +                          PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
 +                      config_val->non_pauseable_th.full_xoff =
 +                          PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
 +                      config_val->non_pauseable_th.full_xon =
 +                          PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 +              }
 +      } else
 +              return -EINVAL;
 +
 +      return 0;
 +}
 +
 +
 +static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
 +                                               struct bnx2x_pfc_brb_e3b0_val
 +                                               *e3b0_val,
 +                                               u32 cos0_pauseable,
 +                                               u32 cos1_pauseable)
 +{
 +      if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
 +              e3b0_val->full_lb_xoff_th =
 +                  PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
 +              e3b0_val->full_lb_xon_threshold =
 +                  PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
 +              e3b0_val->lb_guarantied =
 +                  PFC_E3B0_4P_LB_GUART;
 +              e3b0_val->mac_0_class_t_guarantied =
 +                  PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
 +              e3b0_val->mac_0_class_t_guarantied_hyst =
 +                  PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
 +              e3b0_val->mac_1_class_t_guarantied =
 +                  PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
 +              e3b0_val->mac_1_class_t_guarantied_hyst =
 +                  PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
 +      } else {
 +              e3b0_val->full_lb_xoff_th =
 +                  PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
 +              e3b0_val->full_lb_xon_threshold =
 +                  PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
 +              e3b0_val->mac_0_class_t_guarantied_hyst =
 +                  PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
 +              e3b0_val->mac_1_class_t_guarantied =
 +                  PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
 +              e3b0_val->mac_1_class_t_guarantied_hyst =
 +                  PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
 +
 +              if (cos0_pauseable != cos1_pauseable) {
 +                      /* non-pauseable = Lossy, pauseable = Lossless */
 +                      e3b0_val->lb_guarantied =
 +                          PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
 +                      e3b0_val->mac_0_class_t_guarantied =
 +                          PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
 +              } else if (cos0_pauseable) {
 +                      /* Lossless + Lossless */
 +                      e3b0_val->lb_guarantied =
 +                          PFC_E3B0_2P_PAUSE_LB_GUART;
 +                      e3b0_val->mac_0_class_t_guarantied =
 +                          PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
 +              } else {
 +                      /* Lossy + Lossy */
 +                      e3b0_val->lb_guarantied =
 +                          PFC_E3B0_2P_NON_PAUSE_LB_GUART;
 +                      e3b0_val->mac_0_class_t_guarantied =
 +                          PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
 +              }
 +      }
 +}
 +static int bnx2x_update_pfc_brb(struct link_params *params,
 +                              struct link_vars *vars,
 +                              struct bnx2x_nig_brb_pfc_port_params
 +                              *pfc_params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      struct bnx2x_pfc_brb_th_val config_val = { {0} };
 +      struct bnx2x_pfc_brb_threshold_val *reg_th_config =
 +          &config_val.pauseable_th;
 +      struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
 +      int set_pfc = params->feature_config_flags &
 +              FEATURE_CONFIG_PFC_ENABLED;
 +      int bnx2x_status = 0;
 +      u8 port = params->port;
 +
 +      /* default - pause configuration */
 +      reg_th_config = &config_val.pauseable_th;
 +      bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
 +      if (0 != bnx2x_status)
 +              return bnx2x_status;
 +
 +      if (set_pfc && pfc_params)
 +              /* First COS */
 +              if (!pfc_params->cos0_pauseable)
 +                      reg_th_config = &config_val.non_pauseable_th;
 +      /*
 +       * The number of free blocks below which the pause signal to class 0
 +       * of MAC #n is asserted. n=0,1
 +       */
 +      REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
 +             BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
 +             reg_th_config->pause_xoff);
 +      /*
 +       * The number of free blocks above which the pause signal to class 0
 +       * of MAC #n is de-asserted. n=0,1
 +       */
 +      REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
 +             BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
 +      /*
 +       * The number of free blocks below which the full signal to class 0
 +       * of MAC #n is asserted. n=0,1
 +       */
 +      REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
 +             BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
 +      /*
 +       * The number of free blocks above which the full signal to class 0
 +       * of MAC #n is de-asserted. n=0,1
 +       */
 +      REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
 +             BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
 +
 +      if (set_pfc && pfc_params) {
 +              /* Second COS */
 +              if (pfc_params->cos1_pauseable)
 +                      reg_th_config = &config_val.pauseable_th;
 +              else
 +                      reg_th_config = &config_val.non_pauseable_th;
 +              /*
 +               * The number of free blocks below which the pause signal to
 +               * class 1 of MAC #n is asserted. n=0,1
 +               */
 +              REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
 +                     BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
 +                     reg_th_config->pause_xoff);
 +              /*
 +               * The number of free blocks above which the pause signal to
 +               * class 1 of MAC #n is de-asserted. n=0,1
 +               */
 +              REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
 +                     BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
 +                     reg_th_config->pause_xon);
 +              /*
 +               * The number of free blocks below which the full signal to
 +               * class 1 of MAC #n is asserted. n=0,1
 +               */
 +              REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
 +                     BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
 +                     reg_th_config->full_xoff);
 +              /*
 +               * The number of free blocks above which the full signal to
 +               * class 1 of MAC #n is de-asserted. n=0,1
 +               */
 +              REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
 +                     BRB1_REG_FULL_1_XON_THRESHOLD_0,
 +                     reg_th_config->full_xon);
 +
 +
 +              if (CHIP_IS_E3B0(bp)) {
 +                      /*
 +                       * Should be done by the init tool:
 +                       * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
 +                       * (reset value 944)
 +                       */
 +
 +                      /*
 +                       * The hysteresis on the guarantied buffer space for
 +                       * the LB port before signaling XON.
 +                       */
 +                      REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
 +
 +                      bnx2x_pfc_brb_get_e3b0_config_params(
 +                          params,
 +                          &e3b0_val,
 +                          pfc_params->cos0_pauseable,
 +                          pfc_params->cos1_pauseable);
 +                      /**
 +                       * The number of free blocks below which the full signal to the
 +                       * LB port is asserted.
 +                      */
 +                      REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
 +                                 e3b0_val.full_lb_xoff_th);
 +                      /**
 +                       * The number of free blocks above which the full signal to the
 +                       * LB port is de-asserted.
 +                      */
 +                      REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
 +                                 e3b0_val.full_lb_xon_threshold);
 +                      /**
 +                      * The number of blocks guarantied for the MAC #n port. n=0,1
 +                      */
 +
 +                      /*The number of blocks guarantied for the LB port.*/
 +                      REG_WR(bp, BRB1_REG_LB_GUARANTIED,
 +                             e3b0_val.lb_guarantied);
 +
 +                      /**
 +                       * The number of blocks guarantied for the MAC #n port.
 +                      */
 +                      REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
 +                                 2 * e3b0_val.mac_0_class_t_guarantied);
 +                      REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
 +                                 2 * e3b0_val.mac_1_class_t_guarantied);
 +                      /**
 +                       * The number of blocks guarantied for class #t in MAC0. t=0,1
 +                      */
 +                      REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
 +                             e3b0_val.mac_0_class_t_guarantied);
 +                      REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
 +                             e3b0_val.mac_0_class_t_guarantied);
 +                      /*
 +                       * The hysteresis on the guarantied buffer space for
 +                       * class #t in MAC0. t=0,1
 +                       */
 +                      REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
 +                             e3b0_val.mac_0_class_t_guarantied_hyst);
 +                      REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
 +                             e3b0_val.mac_0_class_t_guarantied_hyst);
 +
 +                      /**
 +                       * The number of blocks guarantied for class #t in MAC1.t=0,1
 +                      */
 +                      REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
 +                             e3b0_val.mac_1_class_t_guarantied);
 +                      REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
 +                             e3b0_val.mac_1_class_t_guarantied);
 +                      /**
 +                       * The hysteresis on the guarantied buffer space for class #t
 +                      * in MAC1.  t=0,1
 +                      */
 +                      REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
 +                             e3b0_val.mac_1_class_t_guarantied_hyst);
 +                      REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
 +                             e3b0_val.mac_1_class_t_guarantied_hyst);
 +
 +              }
 +
 +      }
 +
 +      return bnx2x_status;
 +}
 +
 +/******************************************************************************
 +* Description:
 +*  This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
 +*  are not at consecutive addresses, so ARB_CREDIT_WEIGHT_0 + offset is
 +*  not suitable.
 +******************************************************************************/
 +int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
 +                                            u8 cos_entry,
 +                                            u32 priority_mask, u8 port)
 +{
 +      u32 nig_reg_rx_priority_mask_add = 0;
 +
 +      switch (cos_entry) {
 +      case 0:
 +              nig_reg_rx_priority_mask_add = (port) ?
 +                      NIG_REG_P1_RX_COS0_PRIORITY_MASK :
 +                      NIG_REG_P0_RX_COS0_PRIORITY_MASK;
 +              break;
 +      case 1:
 +              nig_reg_rx_priority_mask_add = (port) ?
 +                      NIG_REG_P1_RX_COS1_PRIORITY_MASK :
 +                      NIG_REG_P0_RX_COS1_PRIORITY_MASK;
 +              break;
 +      case 2:
 +              nig_reg_rx_priority_mask_add = (port) ?
 +                      NIG_REG_P1_RX_COS2_PRIORITY_MASK :
 +                      NIG_REG_P0_RX_COS2_PRIORITY_MASK;
 +              break;
 +      case 3:
 +              if (port)
 +                      return -EINVAL;
 +              nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
 +              break;
 +      case 4:
 +              if (port)
 +                      return -EINVAL;
 +              nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
 +              break;
 +      case 5:
 +              if (port)
 +                      return -EINVAL;
 +              nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
 +              break;
 +      default:
 +              /* Unknown COS entry: fail instead of writing to address 0 */
 +              return -EINVAL;
 +      }
 +
 +      REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
 +
 +      return 0;
 +}
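Editorial note: since the per-COS priority-mask registers do not sit at consecutive addresses, the switch above cannot be replaced by simple base + offset arithmetic. A table-driven equivalent is sketched below purely as an illustration; every register name is one already used in this function, and port 1 only exposes COS0-COS2, which is why the higher entries return -EINVAL above.

        /* Illustrative alternative to the switch in
         * bnx2x_pfc_nig_rx_priority_mask(); not part of the patch. */
        static const u32 p0_rx_cos_mask[] = {
                NIG_REG_P0_RX_COS0_PRIORITY_MASK,
                NIG_REG_P0_RX_COS1_PRIORITY_MASK,
                NIG_REG_P0_RX_COS2_PRIORITY_MASK,
                NIG_REG_P0_RX_COS3_PRIORITY_MASK,
                NIG_REG_P0_RX_COS4_PRIORITY_MASK,
                NIG_REG_P0_RX_COS5_PRIORITY_MASK,
        };
        static const u32 p1_rx_cos_mask[] = {
                NIG_REG_P1_RX_COS0_PRIORITY_MASK,
                NIG_REG_P1_RX_COS1_PRIORITY_MASK,
                NIG_REG_P1_RX_COS2_PRIORITY_MASK,
        };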
 +static void bnx2x_update_mng(struct link_params *params, u32 link_status)
 +{
 +      struct bnx2x *bp = params->bp;
 +
 +      REG_WR(bp, params->shmem_base +
 +             offsetof(struct shmem_region,
 +                      port_mb[params->port].link_status), link_status);
 +}
 +
 +static void bnx2x_update_pfc_nig(struct link_params *params,
 +              struct link_vars *vars,
 +              struct bnx2x_nig_brb_pfc_port_params *nig_params)
 +{
 +      u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
 +      u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
 +      u32 pkt_priority_to_cos = 0;
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +
 +      int set_pfc = params->feature_config_flags &
 +              FEATURE_CONFIG_PFC_ENABLED;
 +      DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
 +
 +      /*
 +       * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
 +       * MAC control frames (that are not pause packets)
 +       * will be forwarded to the XCM.
 +       */
 +      xcm_mask = REG_RD(bp,
 +                              port ? NIG_REG_LLH1_XCM_MASK :
 +                              NIG_REG_LLH0_XCM_MASK);
 +      /*
 +       * nig params will override the non-PFC params, since it is
 +       * possible to transition from PFC to SAFC
 +       */
 +      if (set_pfc) {
 +              pause_enable = 0;
 +              llfc_out_en = 0;
 +              llfc_enable = 0;
 +              if (CHIP_IS_E3(bp))
 +                      ppp_enable = 0;
 +              else
 +                      ppp_enable = 1;
 +              xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
 +                                   NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
 +              xcm0_out_en = 0;
 +              p0_hwpfc_enable = 1;
 +      } else  {
 +              if (nig_params) {
 +                      llfc_out_en = nig_params->llfc_out_en;
 +                      llfc_enable = nig_params->llfc_enable;
 +                      pause_enable = nig_params->pause_enable;
 +              } else /* default non-PFC mode - PAUSE */
 +                      pause_enable = 1;
 +
 +              xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
 +                      NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
 +              xcm0_out_en = 1;
 +      }
 +
 +      if (CHIP_IS_E3(bp))
 +              REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
 +                     NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
 +      REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
 +             NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
 +      REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
 +             NIG_REG_LLFC_ENABLE_0, llfc_enable);
 +      REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
 +             NIG_REG_PAUSE_ENABLE_0, pause_enable);
 +
 +      REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
 +             NIG_REG_PPP_ENABLE_0, ppp_enable);
 +
 +      REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
 +             NIG_REG_LLH0_XCM_MASK, xcm_mask);
 +
 +      REG_WR(bp,  NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
 +
 +      /* output enable for RX_XCM # IF */
 +      REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
 +
 +      /* HW PFC TX enable */
 +      REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
 +
 +      if (nig_params) {
 +              u8 i = 0;
 +              pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
 +
 +              for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
 +                      bnx2x_pfc_nig_rx_priority_mask(bp, i,
 +              nig_params->rx_cos_priority_mask[i], port);
 +
 +              REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
 +                     NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
 +                     nig_params->llfc_high_priority_classes);
 +
 +              REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
 +                     NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
 +                     nig_params->llfc_low_priority_classes);
 +      }
 +      REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
 +             NIG_REG_P0_PKT_PRIORITY_TO_COS,
 +             pkt_priority_to_cos);
 +}
 +
 +int bnx2x_update_pfc(struct link_params *params,
 +                    struct link_vars *vars,
 +                    struct bnx2x_nig_brb_pfc_port_params *pfc_params)
 +{
 +      /*
 +       * PFC and pause are orthogonal to one another: when PFC is
 +       * enabled, pause is disabled, and when PFC is disabled, pause is
 +       * set according to the pause negotiation result.
 +       */
 +      u32 val;
 +      struct bnx2x *bp = params->bp;
 +      int bnx2x_status = 0;
 +      u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
 +
 +      if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
 +              vars->link_status |= LINK_STATUS_PFC_ENABLED;
 +      else
 +              vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
 +
 +      bnx2x_update_mng(params, vars->link_status);
 +
 +      /* update NIG params */
 +      bnx2x_update_pfc_nig(params, vars, pfc_params);
 +
 +      /* update BRB params */
 +      bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
 +      if (0 != bnx2x_status)
 +              return bnx2x_status;
 +
 +      if (!vars->link_up)
 +              return bnx2x_status;
 +
 +      DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
 +      if (CHIP_IS_E3(bp))
 +              bnx2x_update_pfc_xmac(params, vars, 0);
 +      else {
 +              val = REG_RD(bp, MISC_REG_RESET_REG_2);
 +              if ((val &
 +                   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
 +                  == 0) {
 +                      DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
 +                      bnx2x_emac_enable(params, vars, 0);
 +                      return bnx2x_status;
 +              }
 +
 +              if (CHIP_IS_E2(bp))
 +                      bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
 +              else
 +                      bnx2x_update_pfc_bmac1(params, vars);
 +
 +              val = 0;
 +              if ((params->feature_config_flags &
 +                   FEATURE_CONFIG_PFC_ENABLED) ||
 +                  (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
 +                      val = 1;
 +              REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
 +      }
 +      return bnx2x_status;
 +}
 +
 +
 +static int bnx2x_bmac1_enable(struct link_params *params,
 +                            struct link_vars *vars,
 +                            u8 is_lb)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +      u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
 +                             NIG_REG_INGRESS_BMAC0_MEM;
 +      u32 wb_data[2];
 +      u32 val;
 +
 +      DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
 +
 +      /* XGXS control */
 +      wb_data[0] = 0x3c;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
 +                  wb_data, 2);
 +
 +      /* tx MAC SA */
 +      wb_data[0] = ((params->mac_addr[2] << 24) |
 +                     (params->mac_addr[3] << 16) |
 +                     (params->mac_addr[4] << 8) |
 +                      params->mac_addr[5]);
 +      wb_data[1] = ((params->mac_addr[0] << 8) |
 +                      params->mac_addr[1]);
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
 +
 +      /* mac control */
 +      val = 0x3;
 +      if (is_lb) {
 +              val |= 0x4;
 +              DP(NETIF_MSG_LINK, "enable bmac loopback\n");
 +      }
 +      wb_data[0] = val;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
 +
 +      /* set rx mtu */
 +      wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
 +
 +      bnx2x_update_pfc_bmac1(params, vars);
 +
 +      /* set tx mtu */
 +      wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
 +
 +      /* set cnt max size */
 +      wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
 +
 +      /* configure safc */
 +      wb_data[0] = 0x1000200;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
 +                  wb_data, 2);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_bmac2_enable(struct link_params *params,
 +                            struct link_vars *vars,
 +                            u8 is_lb)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +      u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
 +                             NIG_REG_INGRESS_BMAC0_MEM;
 +      u32 wb_data[2];
 +
 +      DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
 +
 +      wb_data[0] = 0;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 +      udelay(30);
 +
 +      /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
 +      wb_data[0] = 0x3c;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
 +                  wb_data, 2);
 +
 +      udelay(30);
 +
 +      /* tx MAC SA */
 +      wb_data[0] = ((params->mac_addr[2] << 24) |
 +                     (params->mac_addr[3] << 16) |
 +                     (params->mac_addr[4] << 8) |
 +                      params->mac_addr[5]);
 +      wb_data[1] = ((params->mac_addr[0] << 8) |
 +                      params->mac_addr[1]);
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
 +                  wb_data, 2);
 +
 +      udelay(30);
 +
 +      /* Configure SAFC */
 +      wb_data[0] = 0x1000200;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
 +                  wb_data, 2);
 +      udelay(30);
 +
 +      /* set rx mtu */
 +      wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
 +      udelay(30);
 +
 +      /* set tx mtu */
 +      wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
 +      udelay(30);
 +      /* set cnt max size */
 +      wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
 +      udelay(30);
 +      bnx2x_update_pfc_bmac2(params, vars, is_lb);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_bmac_enable(struct link_params *params,
 +                           struct link_vars *vars,
 +                           u8 is_lb)
 +{
 +      int rc = 0;
 +      u8 port = params->port;
 +      struct bnx2x *bp = params->bp;
 +      u32 val;
 +      /* reset and unreset the BigMac */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +             (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 +      msleep(1);
 +
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 +             (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 +
 +      /* enable access for bmac registers */
 +      REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
 +
 +      /* Enable BMAC according to BMAC type*/
 +      if (CHIP_IS_E2(bp))
 +              rc = bnx2x_bmac2_enable(params, vars, is_lb);
 +      else
 +              rc = bnx2x_bmac1_enable(params, vars, is_lb);
 +      REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
 +      REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
 +      REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
 +      val = 0;
 +      if ((params->feature_config_flags &
 +            FEATURE_CONFIG_PFC_ENABLED) ||
 +          (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
 +              val = 1;
 +      REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
 +      REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
 +      REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
 +      REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
 +      REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
 +      REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
 +
 +      vars->mac_type = MAC_TYPE_BMAC;
 +      return rc;
 +}
 +
 +static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
 +{
 +      u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
 +                      NIG_REG_INGRESS_BMAC0_MEM;
 +      u32 wb_data[2];
 +      u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
 +
 +      /* Only if the bmac is out of reset */
 +      if (REG_RD(bp, MISC_REG_RESET_REG_2) &
 +                      (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
 +          nig_bmac_enable) {
 +
 +              if (CHIP_IS_E2(bp)) {
 +                      /* Clear Rx Enable bit in BMAC_CONTROL register */
 +                      REG_RD_DMAE(bp, bmac_addr +
 +                                  BIGMAC2_REGISTER_BMAC_CONTROL,
 +                                  wb_data, 2);
 +                      wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
 +                      REG_WR_DMAE(bp, bmac_addr +
 +                                  BIGMAC2_REGISTER_BMAC_CONTROL,
 +                                  wb_data, 2);
 +              } else {
 +                      /* Clear Rx Enable bit in BMAC_CONTROL register */
 +                      REG_RD_DMAE(bp, bmac_addr +
 +                                      BIGMAC_REGISTER_BMAC_CONTROL,
 +                                      wb_data, 2);
 +                      wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
 +                      REG_WR_DMAE(bp, bmac_addr +
 +                                      BIGMAC_REGISTER_BMAC_CONTROL,
 +                                      wb_data, 2);
 +              }
 +              msleep(1);
 +      }
 +}
 +
 +static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
 +                          u32 line_speed)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +      u32 init_crd, crd;
 +      u32 count = 1000;
 +
 +      /* disable port */
 +      REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
 +
 +      /* wait for init credit */
 +      init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
 +      crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
 +      DP(NETIF_MSG_LINK, "init_crd 0x%x  crd 0x%x\n", init_crd, crd);
 +
 +      while ((init_crd != crd) && count) {
 +              msleep(5);
 +
 +              crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
 +              count--;
 +      }
 +      crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
 +      if (init_crd != crd) {
 +              DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
 +                        init_crd, crd);
 +              return -EINVAL;
 +      }
 +
 +      if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
 +          line_speed == SPEED_10 ||
 +          line_speed == SPEED_100 ||
 +          line_speed == SPEED_1000 ||
 +          line_speed == SPEED_2500) {
 +              REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
 +              /* update threshold */
 +              REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
 +              /* update init credit */
 +              init_crd = 778;         /* (800-18-4) */
 +
 +      } else {
 +              u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
 +                            ETH_OVREHEAD)/16;
 +              REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
 +              /* update threshold */
 +              REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
 +              /* update init credit */
 +              switch (line_speed) {
 +              case SPEED_10000:
 +                      init_crd = thresh + 553 - 22;
 +                      break;
 +              default:
 +                      DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
 +                                line_speed);
 +                      return -EINVAL;
 +              }
 +      }
 +      REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
 +      DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
 +               line_speed, init_crd);
 +
 +      /* probe the credit changes */
 +      REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
 +      msleep(5);
 +      REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
 +
 +      /* enable port */
 +      REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
 +      return 0;
 +}
 +
 +/**
 + * bnx2x_get_emac_base - retrieve emac base address
 + *
 + * @bp:                       driver handle
 + * @mdc_mdio_access:  access type
 + * @port:             port id
 + *
 + * This function selects the MDC/MDIO access (through emac0 or
 + * emac1) depending on the mdc_mdio_access, port and port-swap
 + * settings. Each phy has a default access mode, which can also be
 + * overridden by the nvram configuration. That setting, whether it
 + * is the default phy configuration or the nvram override, is
 + * passed in here as mdc_mdio_access and selects the emac_base for
 + * the CL45 read/write operations.
 + */
 +static u32 bnx2x_get_emac_base(struct bnx2x *bp,
 +                             u32 mdc_mdio_access, u8 port)
 +{
 +      u32 emac_base = 0;
 +      switch (mdc_mdio_access) {
 +      case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
 +              break;
 +      case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
 +              if (REG_RD(bp, NIG_REG_PORT_SWAP))
 +                      emac_base = GRCBASE_EMAC1;
 +              else
 +                      emac_base = GRCBASE_EMAC0;
 +              break;
 +      case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
 +              if (REG_RD(bp, NIG_REG_PORT_SWAP))
 +                      emac_base = GRCBASE_EMAC0;
 +              else
 +                      emac_base = GRCBASE_EMAC1;
 +              break;
 +      case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
 +              emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 +              break;
 +      case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
 +              emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
 +              break;
 +      default:
 +              break;
 +      }
 +      return emac_base;
 +
 +}
 +
 +/******************************************************************/
 +/*                    CL22 access functions                     */
 +/******************************************************************/
 +static int bnx2x_cl22_write(struct bnx2x *bp,
 +                                     struct bnx2x_phy *phy,
 +                                     u16 reg, u16 val)
 +{
 +      u32 tmp, mode;
 +      u8 i;
 +      int rc = 0;
 +      /* Switch to CL22 */
 +      mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
 +      REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
 +             mode & ~EMAC_MDIO_MODE_CLAUSE_45);
 +
 +      /* address */
 +      tmp = ((phy->addr << 21) | (reg << 16) | val |
 +             EMAC_MDIO_COMM_COMMAND_WRITE_22 |
 +             EMAC_MDIO_COMM_START_BUSY);
 +      REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
 +
 +      for (i = 0; i < 50; i++) {
 +              udelay(10);
 +
 +              tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
 +              if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
 +                      udelay(5);
 +                      break;
 +              }
 +      }
 +      if (tmp & EMAC_MDIO_COMM_START_BUSY) {
 +              DP(NETIF_MSG_LINK, "write phy register failed\n");
 +              rc = -EFAULT;
 +      }
 +      REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
 +      return rc;
 +}
 +
 +static int bnx2x_cl22_read(struct bnx2x *bp,
 +                                    struct bnx2x_phy *phy,
 +                                    u16 reg, u16 *ret_val)
 +{
 +      u32 val, mode;
 +      u16 i;
 +      int rc = 0;
 +
 +      /* Switch to CL22 */
 +      mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
 +      REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
 +             mode & ~EMAC_MDIO_MODE_CLAUSE_45);
 +
 +      /* address */
 +      val = ((phy->addr << 21) | (reg << 16) |
 +             EMAC_MDIO_COMM_COMMAND_READ_22 |
 +             EMAC_MDIO_COMM_START_BUSY);
 +      REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
 +
 +      for (i = 0; i < 50; i++) {
 +              udelay(10);
 +
 +              val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
 +              if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
 +                      *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
 +                      udelay(5);
 +                      break;
 +              }
 +      }
 +      if (val & EMAC_MDIO_COMM_START_BUSY) {
 +              DP(NETIF_MSG_LINK, "read phy register failed\n");
 +
 +              *ret_val = 0;
 +              rc = -EFAULT;
 +      }
 +      REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
 +      return rc;
 +}
 +
 +/******************************************************************/
 +/*                    CL45 access functions                     */
 +/******************************************************************/
 +static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
 +                         u8 devad, u16 reg, u16 *ret_val)
 +{
 +      u32 val;
 +      u16 i;
 +      int rc = 0;
 +      if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
 +              bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
 +                            EMAC_MDIO_STATUS_10MB);
 +      /* address */
 +      val = ((phy->addr << 21) | (devad << 16) | reg |
 +             EMAC_MDIO_COMM_COMMAND_ADDRESS |
 +             EMAC_MDIO_COMM_START_BUSY);
 +      REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
 +
 +      for (i = 0; i < 50; i++) {
 +              udelay(10);
 +
 +              val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
 +              if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
 +                      udelay(5);
 +                      break;
 +              }
 +      }
 +      if (val & EMAC_MDIO_COMM_START_BUSY) {
 +              DP(NETIF_MSG_LINK, "read phy register failed\n");
 +              netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
 +              *ret_val = 0;
 +              rc = -EFAULT;
 +      } else {
 +              /* data */
 +              val = ((phy->addr << 21) | (devad << 16) |
 +                     EMAC_MDIO_COMM_COMMAND_READ_45 |
 +                     EMAC_MDIO_COMM_START_BUSY);
 +              REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
 +
 +              for (i = 0; i < 50; i++) {
 +                      udelay(10);
 +
 +                      val = REG_RD(bp, phy->mdio_ctrl +
 +                                   EMAC_REG_EMAC_MDIO_COMM);
 +                      if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
 +                              *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
 +                              break;
 +                      }
 +              }
 +              if (val & EMAC_MDIO_COMM_START_BUSY) {
 +                      DP(NETIF_MSG_LINK, "read phy register failed\n");
 +                      netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
 +                      *ret_val = 0;
 +                      rc = -EFAULT;
 +              }
 +      }
 +      /* Workaround for E3 A0 */
 +      if (phy->flags & FLAGS_MDC_MDIO_WA) {
 +              phy->flags ^= FLAGS_DUMMY_READ;
 +              if (phy->flags & FLAGS_DUMMY_READ) {
 +                      u16 temp_val;
 +                      bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
 +              }
 +      }
 +
 +      if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
 +              bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
 +                             EMAC_MDIO_STATUS_10MB);
 +      return rc;
 +}
 +
 +static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
 +                          u8 devad, u16 reg, u16 val)
 +{
 +      u32 tmp;
 +      u8 i;
 +      int rc = 0;
 +      if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
 +              bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
 +                            EMAC_MDIO_STATUS_10MB);
 +
 +      /* address */
 +
 +      tmp = ((phy->addr << 21) | (devad << 16) | reg |
 +             EMAC_MDIO_COMM_COMMAND_ADDRESS |
 +             EMAC_MDIO_COMM_START_BUSY);
 +      REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
 +
 +      for (i = 0; i < 50; i++) {
 +              udelay(10);
 +
 +              tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
 +              if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
 +                      udelay(5);
 +                      break;
 +              }
 +      }
 +      if (tmp & EMAC_MDIO_COMM_START_BUSY) {
 +              DP(NETIF_MSG_LINK, "write phy register failed\n");
 +              netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
 +              rc = -EFAULT;
 +
 +      } else {
 +              /* data */
 +              tmp = ((phy->addr << 21) | (devad << 16) | val |
 +                     EMAC_MDIO_COMM_COMMAND_WRITE_45 |
 +                     EMAC_MDIO_COMM_START_BUSY);
 +              REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
 +
 +              for (i = 0; i < 50; i++) {
 +                      udelay(10);
 +
 +                      tmp = REG_RD(bp, phy->mdio_ctrl +
 +                                   EMAC_REG_EMAC_MDIO_COMM);
 +                      if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
 +                              udelay(5);
 +                              break;
 +                      }
 +              }
 +              if (tmp & EMAC_MDIO_COMM_START_BUSY) {
 +                      DP(NETIF_MSG_LINK, "write phy register failed\n");
 +                      netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
 +                      rc = -EFAULT;
 +              }
 +      }
 +      /* Workaround for E3 A0 */
 +      if (phy->flags & FLAGS_MDC_MDIO_WA) {
 +              phy->flags ^= FLAGS_DUMMY_READ;
 +              if (phy->flags & FLAGS_DUMMY_READ) {
 +                      u16 temp_val;
 +                      bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
 +              }
 +      }
 +      if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
 +              bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
 +                             EMAC_MDIO_STATUS_10MB);
 +      return rc;
 +}
 +
 +
 +/******************************************************************/
 +/*                    BSC access functions from E3              */
 +/******************************************************************/
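 +/* Route the board I2C mux to this port's SFP module, using the pin
 + * assignments and values taken from the shared memory configuration.
 + */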
 +static void bnx2x_bsc_module_sel(struct link_params *params)
 +{
 +      int idx;
 +      u32 board_cfg, sfp_ctrl;
 +      u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +      /* Read I2C output PINs */
 +      board_cfg = REG_RD(bp, params->shmem_base +
 +                         offsetof(struct shmem_region,
 +                                  dev_info.shared_hw_config.board));
 +      i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
 +      i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
 +                      SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
 +
 +      /* Read I2C output value */
 +      sfp_ctrl = REG_RD(bp, params->shmem_base +
 +                        offsetof(struct shmem_region,
 +                               dev_info.port_hw_config[port].e3_cmn_pin_cfg));
 +      i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
 +      i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
 +      DP(NETIF_MSG_LINK, "Setting BSC switch\n");
 +      for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
 +              bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
 +}
 +
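 +/* Read up to 16 bytes from an SFP module EEPROM (slave 0xa0/0xa2) into
 + * data_array, using the MCP I2C (IMC) engine.
 + */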
 +static int bnx2x_bsc_read(struct link_params *params,
 +                        struct bnx2x_phy *phy,
 +                        u8 sl_devid,
 +                        u16 sl_addr,
 +                        u8 lc_addr,
 +                        u8 xfer_cnt,
 +                        u32 *data_array)
 +{
 +      u32 val, i;
 +      int rc = 0;
 +      struct bnx2x *bp = params->bp;
 +
 +      if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
 +              DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
 +              return -EINVAL;
 +      }
 +
 +      if (xfer_cnt > 16) {
 +              DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
 +                                      xfer_cnt);
 +              return -EINVAL;
 +      }
 +      bnx2x_bsc_module_sel(params);
 +
 +      xfer_cnt = 16 - lc_addr;
 +
 +      /* enable the engine */
 +      val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
 +      val |= MCPR_IMC_COMMAND_ENABLE;
 +      REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
 +
 +      /* program slave device ID */
 +      val = (sl_devid << 16) | sl_addr;
 +      REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
 +
 +      /* Start a 0-byte write to update the slave's address pointer */
 +      val = (MCPR_IMC_COMMAND_ENABLE) |
 +            (MCPR_IMC_COMMAND_WRITE_OP <<
 +              MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
 +              (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
 +      REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
 +
 +      /* poll for completion */
 +      i = 0;
 +      val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
 +      while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
 +              udelay(10);
 +              val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
 +              if (i++ > 1000) {
 +                      DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n",
 +                                                              i);
 +                      rc = -EFAULT;
 +                      break;
 +              }
 +      }
 +      if (rc == -EFAULT)
 +              return rc;
 +
 +      /* start xfer with read op */
 +      val = (MCPR_IMC_COMMAND_ENABLE) |
 +              (MCPR_IMC_COMMAND_READ_OP <<
 +              MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
 +              (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
 +                (xfer_cnt);
 +      REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
 +
 +      /* poll for completion */
 +      i = 0;
 +      val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
 +      while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
 +              udelay(10);
 +              val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
 +              if (i++ > 1000) {
 +                      DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i);
 +                      rc = -EFAULT;
 +                      break;
 +              }
 +      }
 +      if (rc == -EFAULT)
 +              return rc;
 +
 +      for (i = (lc_addr >> 2); i < 4; i++) {
 +              data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
 +#ifdef __BIG_ENDIAN
 +              data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
 +                              ((data_array[i] & 0x0000ff00) << 8) |
 +                              ((data_array[i] & 0x00ff0000) >> 8) |
 +                              ((data_array[i] & 0xff000000) >> 24);
 +#endif
 +      }
 +      return rc;
 +}
 +
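 +/* Read-modify-write helper: OR or_val into the given CL45 register */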
 +static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
 +                                   u8 devad, u16 reg, u16 or_val)
 +{
 +      u16 val;
 +      bnx2x_cl45_read(bp, phy, devad, reg, &val);
 +      bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
 +}
 +
 +int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
 +                 u8 devad, u16 reg, u16 *ret_val)
 +{
 +      u8 phy_index;
 +      /*
 +       * Probe for the phy according to the given phy_addr, and execute
 +       * the read request on it
 +       */
 +      for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
 +              if (params->phy[phy_index].addr == phy_addr) {
 +                      return bnx2x_cl45_read(params->bp,
 +                                             &params->phy[phy_index], devad,
 +                                             reg, ret_val);
 +              }
 +      }
 +      return -EINVAL;
 +}
 +
 +int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
 +                  u8 devad, u16 reg, u16 val)
 +{
 +      u8 phy_index;
 +      /*
 +       * Probe for the phy according to the given phy_addr, and execute
 +       * the write request on it
 +       */
 +      for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
 +              if (params->phy[phy_index].addr == phy_addr) {
 +                      return bnx2x_cl45_write(params->bp,
 +                                              &params->phy[phy_index], devad,
 +                                              reg, val);
 +              }
 +      }
 +      return -EINVAL;
 +}
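 +
 +/* Map the current path/port to a Warpcore lane number, taking the
 + * path-swap and port-swap straps into account.
 + */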
 +static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
 +                                struct link_params *params)
 +{
 +      u8 lane = 0;
 +      struct bnx2x *bp = params->bp;
 +      u32 path_swap, path_swap_ovr;
 +      u8 path, port;
 +
 +      path = BP_PATH(bp);
 +      port = params->port;
 +
 +      if (bnx2x_is_4_port_mode(bp)) {
 +              u32 port_swap, port_swap_ovr;
 +
 +              /* Figure out path swap value */
 +              path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
 +              if (path_swap_ovr & 0x1)
 +                      path_swap = (path_swap_ovr & 0x2);
 +              else
 +                      path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
 +
 +              if (path_swap)
 +                      path = path ^ 1;
 +
 +              /* Figure out port swap value */
 +              port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
 +              if (port_swap_ovr & 0x1)
 +                      port_swap = (port_swap_ovr & 0x2);
 +              else
 +                      port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
 +
 +              if (port_swap)
 +                      port = port ^ 1;
 +
 +              lane = (port<<1) + path;
 +      } else { /* two port mode - no port swap */
 +
 +              /* Figure out path swap value */
 +              path_swap_ovr =
 +                      REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
 +              if (path_swap_ovr & 0x1) {
 +                      path_swap = (path_swap_ovr & 0x2);
 +              } else {
 +                      path_swap =
 +                              REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
 +              }
 +              if (path_swap)
 +                      path = path ^ 1;
 +
 +              lane = path << 1;
 +      }
 +      return lane;
 +}
 +
 +static void bnx2x_set_aer_mmd(struct link_params *params,
 +                            struct bnx2x_phy *phy)
 +{
 +      u32 ser_lane;
 +      u16 offset, aer_val;
 +      struct bnx2x *bp = params->bp;
 +      ser_lane = ((params->lane_config &
 +                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
 +                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 +
 +      offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
 +              (phy->addr + ser_lane) : 0;
 +
 +      if (USES_WARPCORE(bp)) {
 +              aer_val = bnx2x_get_warpcore_lane(phy, params);
 +              /*
 +               * In Dual-lane mode, two lanes are joined together,
 +               * so in order to configure them, the AER broadcast method is
 +               * used here.
 +               * 0x200 is the broadcast address for lanes 0,1
 +               * 0x201 is the broadcast address for lanes 2,3
 +               */
 +              if (phy->flags & FLAGS_WC_DUAL_MODE)
 +                      aer_val = (aer_val >> 1) | 0x200;
 +      } else if (CHIP_IS_E2(bp))
 +              aer_val = 0x3800 + offset - 1;
 +      else
 +              aer_val = 0x3800 + offset;
 +      DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
 +      CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 +                        MDIO_AER_BLOCK_AER_REG, aer_val);
 +
 +}
 +
 +/******************************************************************/
 +/*                    Internal phy section                      */
 +/******************************************************************/
 +
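 +/* Kick the SerDes MDIO interface: issue the Clause 22 init writes, then
 + * switch the port back to Clause 45 access.
 + */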
 +static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
 +{
 +      u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 +
 +      /* Set Clause 22 */
 +      REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
 +      REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
 +      udelay(500);
 +      REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
 +      udelay(500);
 +       /* Set Clause 45 */
 +      REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
 +}
 +
 +static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
 +{
 +      u32 val;
 +
 +      DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
 +
 +      val = SERDES_RESET_BITS << (port*16);
 +
 +      /* reset and unreset the SerDes/XGXS */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
 +      udelay(500);
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
 +
 +      bnx2x_set_serdes_access(bp, port);
 +
 +      REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
 +             DEFAULT_PHY_DEV_ADDR);
 +}
 +
 +static void bnx2x_xgxs_deassert(struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port;
 +      u32 val;
 +      DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
 +      port = params->port;
 +
 +      val = XGXS_RESET_BITS << (port*16);
 +
 +      /* reset and unreset the SerDes/XGXS */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
 +      udelay(500);
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
 +
 +      REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
 +      REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
 +             params->phy[INT_PHY].def_md_devad);
 +}
 +
 +static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
 +                                   struct link_params *params, u16 *ieee_fc)
 +{
 +      struct bnx2x *bp = params->bp;
 +      *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
 +      /*
 +       * Resolve pause mode and advertisement.
 +       * Please refer to Table 28B-3 of the 802.3ab-1999 spec.
 +       */
 +
 +      switch (phy->req_flow_ctrl) {
 +      case BNX2X_FLOW_CTRL_AUTO:
 +              if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
 +                      *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
 +              else
 +                      *ieee_fc |=
 +                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
 +              break;
 +
 +      case BNX2X_FLOW_CTRL_TX:
 +              *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
 +              break;
 +
 +      case BNX2X_FLOW_CTRL_RX:
 +      case BNX2X_FLOW_CTRL_BOTH:
 +              *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
 +              break;
 +
 +      case BNX2X_FLOW_CTRL_NONE:
 +      default:
 +              *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
 +              break;
 +      }
 +      DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
 +}
 +
 +static void set_phy_vars(struct link_params *params,
 +                       struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 actual_phy_idx, phy_index, link_cfg_idx;
 +      u8 phy_config_swapped = params->multi_phy_config &
 +                      PORT_HW_CFG_PHY_SWAPPED_ENABLED;
 +      for (phy_index = INT_PHY; phy_index < params->num_phys;
 +            phy_index++) {
 +              link_cfg_idx = LINK_CONFIG_IDX(phy_index);
 +              actual_phy_idx = phy_index;
 +              if (phy_config_swapped) {
 +                      if (phy_index == EXT_PHY1)
 +                              actual_phy_idx = EXT_PHY2;
 +                      else if (phy_index == EXT_PHY2)
 +                              actual_phy_idx = EXT_PHY1;
 +              }
 +              params->phy[actual_phy_idx].req_flow_ctrl =
 +                      params->req_flow_ctrl[link_cfg_idx];
 +
 +              params->phy[actual_phy_idx].req_line_speed =
 +                      params->req_line_speed[link_cfg_idx];
 +
 +              params->phy[actual_phy_idx].speed_cap_mask =
 +                      params->speed_cap_mask[link_cfg_idx];
 +
 +              params->phy[actual_phy_idx].req_duplex =
 +                      params->req_duplex[link_cfg_idx];
 +
 +              if (params->req_line_speed[link_cfg_idx] ==
 +                  SPEED_AUTO_NEG)
 +                      vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
 +
 +              DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
 +                         " speed_cap_mask %x\n",
 +                         params->phy[actual_phy_idx].req_flow_ctrl,
 +                         params->phy[actual_phy_idx].req_line_speed,
 +                         params->phy[actual_phy_idx].speed_cap_mask);
 +      }
 +}
 +
 +static void bnx2x_ext_phy_set_pause(struct link_params *params,
 +                                  struct bnx2x_phy *phy,
 +                                  struct link_vars *vars)
 +{
 +      u16 val;
 +      struct bnx2x *bp = params->bp;
 +      /* Read-modify-write the pause advertisement */
 +      bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
 +
 +      val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
 +
 +      /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
 +      bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
 +      if ((vars->ieee_fc &
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
 +              val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
 +      }
 +      if ((vars->ieee_fc &
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
 +              val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
 +      }
 +      DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
 +}
 +
 +static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
 +{                                             /*  LD      LP   */
 +      switch (pause_result) {                 /* ASYM P ASYM P */
 +      case 0xb:                               /*   1  0   1  1 */
 +              vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
 +              break;
 +
 +      case 0xe:                               /*   1  1   1  0 */
 +              vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
 +              break;
 +
 +      case 0x5:                               /*   0  1   0  1 */
 +      case 0x7:                               /*   0  1   1  1 */
 +      case 0xd:                               /*   1  1   0  1 */
 +      case 0xf:                               /*   1  1   1  1 */
 +              vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
 +              break;
 +
 +      default:
 +              break;
 +      }
 +      if (pause_result & (1<<0))
 +              vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
 +      if (pause_result & (1<<1))
 +              vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
 +}
 +
 +static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
 +                                 struct link_params *params,
 +                                 struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 ld_pause;           /* local */
 +      u16 lp_pause;           /* link partner */
 +      u16 pause_result;
 +      u8 ret = 0;
 +      /* read twice */
 +
 +      vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +
 +      if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
 +              vars->flow_ctrl = phy->req_flow_ctrl;
 +      else if (phy->req_line_speed != SPEED_AUTO_NEG)
 +              vars->flow_ctrl = params->req_fc_auto_adv;
 +      else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
 +              ret = 1;
 +              if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
 +                      bnx2x_cl22_read(bp, phy,
 +                                      0x4, &ld_pause);
 +                      bnx2x_cl22_read(bp, phy,
 +                                      0x5, &lp_pause);
 +              } else {
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_AN_DEVAD,
 +                                      MDIO_AN_REG_ADV_PAUSE, &ld_pause);
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_AN_DEVAD,
 +                                      MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
 +              }
 +              pause_result = (ld_pause &
 +                              MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
 +              pause_result |= (lp_pause &
 +                               MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
 +              DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
 +                 pause_result);
 +              bnx2x_pause_resolve(vars, pause_result);
 +      }
 +      return ret;
 +}
 +/******************************************************************/
 +/*                    Warpcore section                          */
 +/******************************************************************/
 +/*
 + * The internal Warpcore init should mirror the xgxs one, i.e. reset the
 + * lane (if needed), set the AER for the init configuration, and set or
 + * clear the SGMII flag. Internal phy init is done purely in the
 + * phy_init stage.
 + */
 +static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 +                                      struct link_params *params,
 +                                      struct link_vars *vars)
 +{
 +      u16 val16 = 0, lane, bam37 = 0;
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
 +      /* Check adding advertisement for 1G KX */
 +      if (((vars->line_speed == SPEED_AUTO_NEG) &&
 +           (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
 +          (vars->line_speed == SPEED_1000)) {
 +              u16 sd_digital;
 +              val16 |= (1<<5);
 +
 +              /* Enable CL37 1G Parallel Detect */
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
 +              bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
 +                               (sd_digital | 0x1));
 +
 +              DP(NETIF_MSG_LINK, "Advertize 1G\n");
 +      }
 +      if (((vars->line_speed == SPEED_AUTO_NEG) &&
 +           (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
 +          (vars->line_speed ==  SPEED_10000)) {
 +              /* Check adding advertisement for 10G KR */
 +              val16 |= (1<<7);
 +              /* Enable 10G Parallel Detect */
 +              bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 +                              MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
 +
 +              DP(NETIF_MSG_LINK, "Advertize 10G\n");
 +      }
 +
 +      /* Set Transmit PMD settings */
 +      lane = bnx2x_get_warpcore_lane(phy, params);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                    MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
 +                   ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 +                    (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
 +                    (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
 +                       0x03f0);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
 +                       0x03f0);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
 +                       0x383f);
 +
 +      /* Advertised speeds */
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 +                       MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
 +
++      /* Advertise and set FEC (Forward Error Correction) */
++      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
++                       MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
++                       (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
++                        MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
++
 +      /* Enable CL37 BAM */
 +      if (REG_RD(bp, params->shmem_base +
 +                 offsetof(struct shmem_region, dev_info.
 +                          port_hw_config[params->port].default_cfg)) &
 +          PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
 +              bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
 +              DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
 +      }
 +
 +      /* Advertise pause */
 +      bnx2x_ext_phy_set_pause(params, phy, vars);
 +
 +      /* Enable Autoneg */
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 +                       MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
 +
 +      /* Over 1G - AN local device user page 1 */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
 +
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_DIGITAL5_MISC7, &val16);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
 +}
 +
 +static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
 +                                    struct link_params *params,
 +                                    struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val;
 +
 +      /* Disable Autoneg */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 +                       MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 +                       MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 +                       MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_DIGITAL3_UP1, 0x1);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
 +
 +      /* Disable CL36 PCS Tx */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
 +
 +      /* Double Wide Single Data Rate @ pll rate */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
 +
 +      /* Leave cl72 training enable, needed for KR */
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
 +              MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
 +              0x2);
 +
 +      /* Leave CL72 enabled */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
 +                       &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
 +                       val | 0x3800);
 +
 +      /* Set speed via PMA/PMD register */
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
 +                       MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
 +                       MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
 +
 +      /* Enable encoded forced speed */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
 +
 +      /* Turn on TX scrambling of the payload only (64/66 scrambler) */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_TX66_CONTROL, 0x9);
 +
 +      /* Turn on RX scrambling of the payload only (64/66 scrambler) */
 +      bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 +                               MDIO_WC_REG_RX66_CONTROL, 0xF9);
 +
 +      /* Set and clear loopback to force a reset of the 64/66 decoder */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
 +
 +}
 +
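 +/* Force the Warpcore lane to 10G XFI (is_xfi = 1) or SFI (is_xfi = 0):
 + * autoneg is disabled and the TX FIR taps / driver values are programmed
 + * for the selected interface.
 + */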
 +static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
 +                                     struct link_params *params,
 +                                     u8 is_xfi)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 misc1_val, tap_val, tx_driver_val, lane, val;
 +      /* Hold rxSeqStart */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
 +
 +      /* Hold tx_fifo_reset */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
 +
 +      /* Disable CL73 AN */
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
 +
 +      /* Disable 100FX Enable and Auto-Detect */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_FX100_CTRL1, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
 +
 +      /* Disable 100FX Idle detect */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_FX100_CTRL3, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
 +
 +      /* Set Block address to Remote PHY & Clear forced_speed[5] */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_DIGITAL4_MISC3, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
 +
 +      /* Turn off auto-detect & fiber mode */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
 +                       (val & 0xFFEE));
 +
 +      /* Set filter_force_link, disable_false_link and parallel_detect */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
 +                       ((val | 0x0006) & 0xFFFE));
 +
 +      /* Set XFI / SFI */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
 +
 +      misc1_val &= ~(0x1f);
 +
 +      if (is_xfi) {
 +              misc1_val |= 0x5;
 +              tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
 +                         (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
 +                         (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
 +              tx_driver_val =
 +                    ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 +                     (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
 +                     (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
 +
 +      } else {
 +              misc1_val |= 0x9;
 +              tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
 +                         (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
 +                         (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
 +              tx_driver_val =
 +                    ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 +                     (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
 +                     (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
 +      }
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
 +
 +      /* Set Transmit PMD settings */
 +      lane = bnx2x_get_warpcore_lane(phy, params);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_TX_FIR_TAP,
 +                       tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
 +                       tx_driver_val);
 +
 +      /* Enable fiber mode, enable and invert sig_det */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
 +
 +      /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_DIGITAL4_MISC3, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
 +
 +      /* 10G XFI Full Duplex */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
 +
 +      /* Release tx_fifo_reset */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
 +
 +      /* Release rxSeqStart */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
 +}
 +
 +static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
 +                                     struct bnx2x_phy *phy)
 +{
 +      DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n");
 +}
 +
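 +/* Program the Warpcore RX/TX blocks for forced 20G dual-XGXS (DXGXS) mode */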
 +static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
 +                                       struct bnx2x_phy *phy,
 +                                       u16 lane)
 +{
 +      /* Rx0 anaRxControl1G */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
 +
 +      /* Rx2 anaRxControl1G */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_SCW0, 0xE070);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_SCW1, 0xC0D0);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_SCW2, 0xA0B0);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_SCW3, 0x8090);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
 +
 +      /* Serdes Digital Misc1 */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
 +
 +      /* Serdes Digital4 Misc3 */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
 +
 +      /* Set Transmit PMD settings */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_TX_FIR_TAP,
 +                      ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
 +                       (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
 +                       (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
 +                       MDIO_WC_REG_TX_FIR_TAP_ENABLE));
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                    MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
 +                   ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 +                    (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
 +                    (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
 +}
 +
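 +/* Put the Warpcore lane into SGMII mode, either autonegotiated or forced
 + * to 10/100/1000, optionally operating as 1G fiber.
 + */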
 +static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
 +                                         struct link_params *params,
 +                                         u8 fiber_mode)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val16, digctrl_kx1, digctrl_kx2;
 +      u8 lane;
 +
 +      lane = bnx2x_get_warpcore_lane(phy, params);
 +
 +      /* Clear XFI clock comp in non-10G single lane mode. */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_RX66_CONTROL, &val16);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
 +
 +      if (phy->req_line_speed == SPEED_AUTO_NEG) {
 +              /* SGMII Autoneg */
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
 +              bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                               MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
 +                               val16 | 0x1000);
 +              DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
 +      } else {
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
 +              val16 &= 0xcfbf;
 +              switch (phy->req_line_speed) {
 +              case SPEED_10:
 +                      break;
 +              case SPEED_100:
 +                      val16 |= 0x2000;
 +                      break;
 +              case SPEED_1000:
 +                      val16 |= 0x0040;
 +                      break;
 +              default:
 +                      DP(NETIF_MSG_LINK,
 +                         "Speed not supported: 0x%x\n", phy->req_line_speed);
 +                      return;
 +              }
 +
 +              if (phy->req_duplex == DUPLEX_FULL)
 +                      val16 |= 0x0100;
 +
 +              bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
 +
 +              DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
 +                             phy->req_line_speed);
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
 +              DP(NETIF_MSG_LINK, "  (readback) %x\n", val16);
 +      }
 +
 +      /* SGMII Slave mode and disable signal detect */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
 +      if (fiber_mode)
 +              digctrl_kx1 = 1;
 +      else
 +              digctrl_kx1 &= 0xff4a;
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
 +                      digctrl_kx1);
 +
 +      /* Turn off parallel detect */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
 +                      (digctrl_kx2 & ~(1<<2)));
 +
 +      /* Re-enable parallel detect */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
 +                      (digctrl_kx2 | (1<<2)));
 +
 +      /* Enable autodet */
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
 +                      (digctrl_kx1 | 0x10));
 +}
 +
 +static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
 +                                    struct bnx2x_phy *phy,
 +                                    u8 reset)
 +{
 +      u16 val;
 +      /* Assert or release the per-lane reset, depending on the reset argument */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_DIGITAL5_MISC6, &val);
 +      if (reset)
 +              val |= 0xC000;
 +      else
 +              val &= 0x3FFF;
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DIGITAL5_MISC6, val);
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DIGITAL5_MISC6, &val);
 +}
 +
 +
 +/* Clear SFI/XFI link settings registers */
 +static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
 +                                    struct link_params *params,
 +                                    u16 lane)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val16;
 +
 +      /* Set XFI clock comp as default. */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_RX66_CONTROL, &val16);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
 +
 +      bnx2x_warpcore_reset_lane(bp, phy, 1);
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_FX100_CTRL1, 0x014a);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_FX100_CTRL3, 0x0800);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
 +      lane = bnx2x_get_warpcore_lane(phy, params);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_TX_FIR_TAP, 0x0000);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
 +      bnx2x_warpcore_reset_lane(bp, phy, 0);
 +}
 +
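 +/* Work out which GPIO, and on which port, carries the SFP MOD_ABS
 + * (module absent) indication.
 + */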
 +static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
 +                                              u32 chip_id,
 +                                              u32 shmem_base, u8 port,
 +                                              u8 *gpio_num, u8 *gpio_port)
 +{
 +      u32 cfg_pin;
 +      *gpio_num = 0;
 +      *gpio_port = 0;
 +      if (CHIP_IS_E3(bp)) {
 +              cfg_pin = (REG_RD(bp, shmem_base +
 +                              offsetof(struct shmem_region,
 +                              dev_info.port_hw_config[port].e3_sfp_ctrl)) &
 +                              PORT_HW_CFG_E3_MOD_ABS_MASK) >>
 +                              PORT_HW_CFG_E3_MOD_ABS_SHIFT;
 +
 +              /*
 +               * Should not happen. This function is called on an interrupt
 +               * triggered by a GPIO (since EPIO can only generate
 +               * interrupts to the MCP). So if this function was called and
 +               * none of the GPIOs is set, something has gone badly wrong.
 +               */
 +              if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
 +                  (cfg_pin > PIN_CFG_GPIO3_P1)) {
 +                      DP(NETIF_MSG_LINK,
 +                         "ERROR: Invalid cfg pin %x for module detect indication\n",
 +                         cfg_pin);
 +                      return -EINVAL;
 +              }
 +
 +              *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
 +              *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
 +      } else {
 +              *gpio_num = MISC_REGISTERS_GPIO_3;
 +              *gpio_port = port;
 +      }
 +      DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
 +      return 0;
 +}
 +
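 +/* Return 1 if the MOD_ABS GPIO indicates an SFP module is plugged in */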
 +static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
 +                                     struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 gpio_num, gpio_port;
 +      u32 gpio_val;
 +      if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
 +                                    params->shmem_base, params->port,
 +                                    &gpio_num, &gpio_port) != 0)
 +              return 0;
 +      gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
 +
 +      /* The MOD_ABS pin is driven low when a module is plugged in */
 +      if (gpio_val == 0)
 +              return 1;
 +      else
 +              return 0;
 +}
 +
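 +/* Main Warpcore init: pick the SGMII/KR/XFI/SFI/DXGXS/KR2 configuration
 + * according to the NVRAM serdes_net_if setting and the requested speed.
 + */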
 +static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
 +                                     struct link_params *params,
 +                                     struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u32 serdes_net_if;
 +      u8 fiber_mode;
 +      u16 lane = bnx2x_get_warpcore_lane(phy, params);
 +      serdes_net_if = (REG_RD(bp, params->shmem_base +
 +                       offsetof(struct shmem_region, dev_info.
 +                                port_hw_config[params->port].default_cfg)) &
 +                       PORT_HW_CFG_NET_SERDES_IF_MASK);
 +      DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
 +                         "serdes_net_if = 0x%x\n",
 +                     vars->line_speed, serdes_net_if);
 +      bnx2x_set_aer_mmd(params, phy);
 +
 +      vars->phy_flags |= PHY_XGXS_FLAG;
 +      if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
 +          (phy->req_line_speed &&
 +           ((phy->req_line_speed == SPEED_100) ||
 +            (phy->req_line_speed == SPEED_10)))) {
 +              vars->phy_flags |= PHY_SGMII_FLAG;
 +              DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
 +              bnx2x_warpcore_clear_regs(phy, params, lane);
 +              bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
 +      } else {
 +              switch (serdes_net_if) {
 +              case PORT_HW_CFG_NET_SERDES_IF_KR:
 +                      /* Enable KR Auto Neg */
 +                      if (params->loopback_mode == LOOPBACK_NONE)
 +                              bnx2x_warpcore_enable_AN_KR(phy, params, vars);
 +                      else {
 +                              DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
 +                              bnx2x_warpcore_set_10G_KR(phy, params, vars);
 +                      }
 +                      break;
 +
 +              case PORT_HW_CFG_NET_SERDES_IF_XFI:
 +                      bnx2x_warpcore_clear_regs(phy, params, lane);
 +                      if (vars->line_speed == SPEED_10000) {
 +                              DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
 +                              bnx2x_warpcore_set_10G_XFI(phy, params, 1);
 +                      } else {
 +                              if (SINGLE_MEDIA_DIRECT(params)) {
 +                                      DP(NETIF_MSG_LINK, "1G Fiber\n");
 +                                      fiber_mode = 1;
 +                              } else {
 +                                      DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
 +                                      fiber_mode = 0;
 +                              }
 +                              bnx2x_warpcore_set_sgmii_speed(phy,
 +                                                              params,
 +                                                              fiber_mode);
 +                      }
 +
 +                      break;
 +
 +              case PORT_HW_CFG_NET_SERDES_IF_SFI:
 +
 +                      bnx2x_warpcore_clear_regs(phy, params, lane);
 +                      if (vars->line_speed == SPEED_10000) {
 +                              DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
 +                              bnx2x_warpcore_set_10G_XFI(phy, params, 0);
 +                      } else if (vars->line_speed == SPEED_1000) {
 +                              DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
 +                              bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
 +                      }
 +                      /* Issue Module detection */
 +                      if (bnx2x_is_sfp_module_plugged(phy, params))
 +                              bnx2x_sfp_module_detection(phy, params);
 +                      break;
 +
 +              case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
 +                      if (vars->line_speed != SPEED_20000) {
 +                              DP(NETIF_MSG_LINK, "Speed not supported yet\n");
 +                              return;
 +                      }
 +                      DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
 +                      bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
 +                      /* Issue Module detection */
 +
 +                      bnx2x_sfp_module_detection(phy, params);
 +                      break;
 +
 +              case PORT_HW_CFG_NET_SERDES_IF_KR2:
 +                      if (vars->line_speed != SPEED_20000) {
 +                              DP(NETIF_MSG_LINK, "Speed not supported yet\n");
 +                              return;
 +                      }
 +                      DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
 +                      bnx2x_warpcore_set_20G_KR2(bp, phy);
 +                      break;
 +
 +              default:
 +                      DP(NETIF_MSG_LINK,
 +                         "Unsupported Serdes Net Interface 0x%x\n",
 +                         serdes_net_if);
 +                      return;
 +              }
 +      }
 +
 +      /* Take lane out of reset after configuration is finished */
 +      bnx2x_warpcore_reset_lane(bp, phy, 0);
 +      DP(NETIF_MSG_LINK, "Exit config init\n");
 +}
 +
 +static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
 +                                       struct bnx2x_phy *phy,
 +                                       u8 tx_en)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u32 cfg_pin;
 +      u8 port = params->port;
 +
 +      cfg_pin = REG_RD(bp, params->shmem_base +
 +                              offsetof(struct shmem_region,
 +                              dev_info.port_hw_config[port].e3_sfp_ctrl)) &
 +                              PORT_HW_CFG_TX_LASER_MASK;
 +      /* Set the !tx_en since this pin is DISABLE_TX_LASER */
 +      DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
 +      /* For 20G, the expected pin to be used is 3 pins after the current */
 +
 +      bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
 +      if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
 +              bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
 +}
 +
 +static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
 +                                    struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val16;
 +      bnx2x_sfp_e3_set_transmitter(params, phy, 0);
 +      bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
 +      bnx2x_set_aer_mmd(params, phy);
 +      /* Global register */
 +      bnx2x_warpcore_reset_lane(bp, phy, 1);
 +
 +      /* Clear loopback settings (if any) */
 +      /* 10G & 20G */
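 +      /* Clear the loopback enable bits that bnx2x_set_warpcore_loopback()
 +       * sets: bit 14 of COMBO_IEEE0_MIICTRL and bit 0 of IEEE0BLK_MIICNTL.
 +       */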
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
 +                       0xBFFF);
 +
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
 +
 +      /* Update those 1-copy registers */
 +      CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 +                        MDIO_AER_BLOCK_AER_REG, 0);
 +      /* Enable 1G MDIO (1-copy) */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
 +                      &val16);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
 +                       val16 & ~0x10);
 +
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_XGXSBLK1_LANECTRL2,
 +                       val16 & 0xff00);
 +
 +}
 +
 +static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
 +                                      struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val16;
 +      u32 lane;
 +      DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
 +                     params->loopback_mode, phy->req_line_speed);
 +
 +      if (phy->req_line_speed < SPEED_10000) {
 +              /* 10/100/1000 */
 +
 +              /* Update those 1-copy registers */
 +              CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 +                                MDIO_AER_BLOCK_AER_REG, 0);
 +              /* Enable 1G MDIO (1-copy) */
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
 +                              &val16);
 +              bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
 +                              val16 | 0x10);
 +              /* Set 1G loopback based on lane (1-copy) */
 +              lane = bnx2x_get_warpcore_lane(phy, params);
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
 +              bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_XGXSBLK1_LANECTRL2,
 +                              val16 | (1<<lane));
 +
 +              /* Switch back to 4-copy registers */
 +              bnx2x_set_aer_mmd(params, phy);
 +              /* Global loopback, not recommended. */
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
 +              bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
 +                              0x4000);
 +      } else {
 +              /* 10G & 20G */
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
 +              bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
 +                               0x4000);
 +
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
 +              bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
 +      }
 +}
 +
 +
 +void bnx2x_link_status_update(struct link_params *params,
 +                            struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 link_10g_plus;
 +      u8 port = params->port;
 +      u32 sync_offset, media_types;
 +      /* Update PHY configuration */
 +      set_phy_vars(params, vars);
 +
 +      vars->link_status = REG_RD(bp, params->shmem_base +
 +                                 offsetof(struct shmem_region,
 +                                          port_mb[port].link_status));
 +
 +      vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
 +      vars->phy_flags = PHY_XGXS_FLAG;
 +      if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
 +              vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
 +
 +      if (vars->link_up) {
 +              DP(NETIF_MSG_LINK, "phy link up\n");
 +
 +              vars->phy_link_up = 1;
 +              vars->duplex = DUPLEX_FULL;
 +              switch (vars->link_status &
 +                      LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
 +                      case LINK_10THD:
 +                              vars->duplex = DUPLEX_HALF;
 +                              /* fall thru */
 +                      case LINK_10TFD:
 +                              vars->line_speed = SPEED_10;
 +                              break;
 +
 +                      case LINK_100TXHD:
 +                              vars->duplex = DUPLEX_HALF;
 +                              /* fall thru */
 +                      case LINK_100T4:
 +                      case LINK_100TXFD:
 +                              vars->line_speed = SPEED_100;
 +                              break;
 +
 +                      case LINK_1000THD:
 +                              vars->duplex = DUPLEX_HALF;
 +                              /* fall thru */
 +                      case LINK_1000TFD:
 +                              vars->line_speed = SPEED_1000;
 +                              break;
 +
 +                      case LINK_2500THD:
 +                              vars->duplex = DUPLEX_HALF;
 +                              /* fall thru */
 +                      case LINK_2500TFD:
 +                              vars->line_speed = SPEED_2500;
 +                              break;
 +
 +                      case LINK_10GTFD:
 +                              vars->line_speed = SPEED_10000;
 +                              break;
 +                      case LINK_20GTFD:
 +                              vars->line_speed = SPEED_20000;
 +                              break;
 +                      default:
 +                              break;
 +              }
 +              vars->flow_ctrl = 0;
 +              if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
 +                      vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
 +
 +              if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
 +                      vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
 +
 +              if (!vars->flow_ctrl)
 +                      vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +
 +              if (vars->line_speed &&
 +                  ((vars->line_speed == SPEED_10) ||
 +                   (vars->line_speed == SPEED_100))) {
 +                      vars->phy_flags |= PHY_SGMII_FLAG;
 +              } else {
 +                      vars->phy_flags &= ~PHY_SGMII_FLAG;
 +              }
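 +              /* On Warpcore (E3) devices a 1G link also uses the SGMII path */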
 +              if (vars->line_speed &&
 +                  USES_WARPCORE(bp) &&
 +                  (vars->line_speed == SPEED_1000))
 +                      vars->phy_flags |= PHY_SGMII_FLAG;
 +              /* anything 10G and over uses the bmac */
 +              link_10g_plus = (vars->line_speed >= SPEED_10000);
 +
 +              if (link_10g_plus) {
 +                      if (USES_WARPCORE(bp))
 +                              vars->mac_type = MAC_TYPE_XMAC;
 +                      else
 +                              vars->mac_type = MAC_TYPE_BMAC;
 +              } else {
 +                      if (USES_WARPCORE(bp))
 +                              vars->mac_type = MAC_TYPE_UMAC;
 +                      else
 +                              vars->mac_type = MAC_TYPE_EMAC;
 +              }
 +      } else { /* link down */
 +              DP(NETIF_MSG_LINK, "phy link down\n");
 +
 +              vars->phy_link_up = 0;
 +
 +              vars->line_speed = 0;
 +              vars->duplex = DUPLEX_FULL;
 +              vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +
 +              /* indicate no mac active */
 +              vars->mac_type = MAC_TYPE_NONE;
 +              if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
 +                      vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
 +      }
 +
 +      /* Sync media type */
 +      sync_offset = params->shmem_base +
 +                      offsetof(struct shmem_region,
 +                               dev_info.port_hw_config[port].media_type);
 +      media_types = REG_RD(bp, sync_offset);
 +
 +      params->phy[INT_PHY].media_type =
 +              (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
 +              PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
 +      params->phy[EXT_PHY1].media_type =
 +              (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
 +              PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
 +      params->phy[EXT_PHY2].media_type =
 +              (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
 +              PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
 +      DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
 +
 +      /* Sync AEU offset */
 +      sync_offset = params->shmem_base +
 +                      offsetof(struct shmem_region,
 +                               dev_info.port_hw_config[port].aeu_int_mask);
 +
 +      vars->aeu_int_mask = REG_RD(bp, sync_offset);
 +
 +      /* Sync PFC status */
 +      if (vars->link_status & LINK_STATUS_PFC_ENABLED)
 +              params->feature_config_flags |=
 +                                      FEATURE_CONFIG_PFC_ENABLED;
 +      else
 +              params->feature_config_flags &=
 +                                      ~FEATURE_CONFIG_PFC_ENABLED;
 +
 +      DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
 +               vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
 +      DP(NETIF_MSG_LINK, "line_speed %x  duplex %x  flow_ctrl 0x%x\n",
 +               vars->line_speed, vars->duplex, vars->flow_ctrl);
 +}
 +
 +
 +static void bnx2x_set_master_ln(struct link_params *params,
 +                              struct bnx2x_phy *phy)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 new_master_ln, ser_lane;
 +      ser_lane = ((params->lane_config &
 +                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
 +                  PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 +
 +      /* set the master_ln for AN */
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_XGXS_BLOCK2,
 +                        MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
 +                        &new_master_ln);
 +
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_XGXS_BLOCK2 ,
 +                        MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
 +                        (new_master_ln | ser_lane));
 +}
 +
 +static int bnx2x_reset_unicore(struct link_params *params,
 +                             struct bnx2x_phy *phy,
 +                             u8 set_serdes)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 mii_control;
 +      u16 i;
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_COMBO_IEEE0,
 +                        MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
 +
 +      /* reset the unicore */
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_COMBO_IEEE0,
 +                        MDIO_COMBO_IEEE0_MII_CONTROL,
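 +      /* SGMII is used when 10/100 is forced, when the speed capability
 +       * mask is limited to 10/100, or when the port is configured as a
 +       * direct SerDes (SD) PHY.
 +       */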
 +                        (mii_control |
 +                         MDIO_COMBO_IEEO_MII_CONTROL_RESET));
 +      if (set_serdes)
 +              bnx2x_set_serdes_access(bp, params->port);
 +
 +      /* wait for the reset to self clear */
 +      for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
 +              udelay(5);
 +
 +              /* the reset erased the previous bank value */
 +              CL22_RD_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_COMBO_IEEE0,
 +                                MDIO_COMBO_IEEE0_MII_CONTROL,
 +                                &mii_control);
 +
 +              if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
 +                      udelay(5);
 +                      return 0;
 +              }
 +      }
 +
 +      netdev_err(bp->dev,  "Warning: PHY was not initialized,"
 +                            " Port %d\n",
 +                       params->port);
 +      DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
 +      return -EINVAL;
 +
 +}
 +
 +static void bnx2x_set_swap_lanes(struct link_params *params,
 +                               struct bnx2x_phy *phy)
 +{
 +      struct bnx2x *bp = params->bp;
 +      /*
 +       *  Each pair of bits represents a lane number:
 +       *  no swap is 0123 => 0x1b, so there is no need to enable the swap
 +       */
 +      u16 ser_lane, rx_lane_swap, tx_lane_swap;
 +
 +      ser_lane = ((params->lane_config &
 +                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
 +                  PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 +      rx_lane_swap = ((params->lane_config &
 +                       PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
 +                      PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
 +      tx_lane_swap = ((params->lane_config &
 +                       PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
 +                      PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
 +
 +      if (rx_lane_swap != 0x1b) {
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_XGXS_BLOCK2,
 +                                MDIO_XGXS_BLOCK2_RX_LN_SWAP,
 +                                (rx_lane_swap |
 +                                 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
 +                                 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
 +      } else {
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_XGXS_BLOCK2,
 +                                MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
 +      }
 +
 +      if (tx_lane_swap != 0x1b) {
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_XGXS_BLOCK2,
 +                                MDIO_XGXS_BLOCK2_TX_LN_SWAP,
 +                                (tx_lane_swap |
 +                                 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
 +      } else {
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_XGXS_BLOCK2,
 +                                MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
 +      }
 +}
 +
 +static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
 +                                       struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 control2;
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
 +                        &control2);
 +      if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
 +              control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
 +      else
 +              control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
 +      DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
 +              phy->speed_cap_mask, control2);
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
 +                        control2);
 +
 +      if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
 +           (phy->speed_cap_mask &
 +                  PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
 +              DP(NETIF_MSG_LINK, "XGXS\n");
 +
 +              CL22_WR_OVER_CL45(bp, phy,
 +                               MDIO_REG_BANK_10G_PARALLEL_DETECT,
 +                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
 +                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
 +
 +              CL22_RD_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_10G_PARALLEL_DETECT,
 +                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
 +                                &control2);
 +
 +
 +              control2 |=
 +                  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
 +
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_10G_PARALLEL_DETECT,
 +                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
 +                                control2);
 +
 +              /* Disable parallel detection of HiG */
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_XGXS_BLOCK2,
 +                                MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
 +                                MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
 +                                MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
 +      }
 +}
 +
 +static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
 +                            struct link_params *params,
 +                            struct link_vars *vars,
 +                            u8 enable_cl73)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 reg_val;
 +
 +      /* CL37 Autoneg */
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_COMBO_IEEE0,
 +                        MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
 +
 +      /* CL37 Autoneg Enabled */
 +      if (vars->line_speed == SPEED_AUTO_NEG)
 +              reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
 +      else /* CL37 Autoneg Disabled */
 +              reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
 +                           MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
 +
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_COMBO_IEEE0,
 +                        MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
 +
 +      /* Enable/Disable Autodetection */
 +
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
 +      reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
 +                  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
 +      reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
 +      if (vars->line_speed == SPEED_AUTO_NEG)
 +              reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
 +      else
 +              reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
 +
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
 +
 +      /* Enable TetonII and BAM autoneg */
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_BAM_NEXT_PAGE,
 +                        MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
 +                        &reg_val);
 +      if (vars->line_speed == SPEED_AUTO_NEG) {
 +              /* Enable BAM aneg Mode and TetonII aneg Mode */
 +              reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
 +                          MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
 +      } else {
 +              /* TetonII and BAM Autoneg Disabled */
 +              reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
 +                           MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
 +      }
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_BAM_NEXT_PAGE,
 +                        MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
 +                        reg_val);
 +
 +      if (enable_cl73) {
 +              /* Enable Cl73 FSM status bits */
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_CL73_USERB0,
 +                                MDIO_CL73_USERB0_CL73_UCTRL,
 +                                0xe);
 +
 +              /* Enable BAM Station Manager*/
 +              CL22_WR_OVER_CL45(bp, phy,
 +                      MDIO_REG_BANK_CL73_USERB0,
 +                      MDIO_CL73_USERB0_CL73_BAM_CTRL1,
 +                      MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
 +                      MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
 +                      MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
 +
 +              /* Advertise CL73 link speeds */
 +              CL22_RD_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_CL73_IEEEB1,
 +                                MDIO_CL73_IEEEB1_AN_ADV2,
 +                                &reg_val);
 +              if (phy->speed_cap_mask &
 +                  PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 +                      reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
 +              if (phy->speed_cap_mask &
 +                  PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
 +                      reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
 +
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_CL73_IEEEB1,
 +                                MDIO_CL73_IEEEB1_AN_ADV2,
 +                                reg_val);
 +
 +              /* CL73 Autoneg Enabled */
 +              reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
 +
 +      } else /* CL73 Autoneg Disabled */
 +              reg_val = 0;
 +
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_CL73_IEEEB0,
 +                        MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
 +}
 +
 +/* program SerDes, forced speed */
 +static void bnx2x_program_serdes(struct bnx2x_phy *phy,
 +                               struct link_params *params,
 +                               struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 reg_val;
 +
 +      /* program duplex, disable autoneg and sgmii*/
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_COMBO_IEEE0,
 +                        MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
 +      reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
 +                   MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
 +                   MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
 +      if (phy->req_duplex == DUPLEX_FULL)
 +              reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_COMBO_IEEE0,
 +                        MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
 +
 +      /*
 +       * program speed
 +       *  - needed only if the speed is greater than 1G (2.5G or 10G)
 +       */
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_MISC1, &reg_val);
 +      /* clearing the speed value before setting the right speed */
 +      DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
 +
 +      reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
 +                   MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
 +
 +      if (!((vars->line_speed == SPEED_1000) ||
 +            (vars->line_speed == SPEED_100) ||
 +            (vars->line_speed == SPEED_10))) {
 +
 +              reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
 +                          MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
 +              if (vars->line_speed == SPEED_10000)
 +                      reg_val |=
 +                              MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
 +      }
 +
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_MISC1, reg_val);
 +
 +}
 +
 +static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
 +                                            struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val = 0;
 +
 +      /* configure the 48 bits for BAM AN */
 +
 +      /* set extended capabilities */
 +      if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
 +              val |= MDIO_OVER_1G_UP1_2_5G;
 +      if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 +              val |= MDIO_OVER_1G_UP1_10G;
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_OVER_1G,
 +                        MDIO_OVER_1G_UP1, val);
 +
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_OVER_1G,
 +                        MDIO_OVER_1G_UP3, 0x400);
 +}
 +
 +static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
 +                                            struct link_params *params,
 +                                            u16 ieee_fc)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val;
 +      /* for AN, we are always publishing full duplex */
 +
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_COMBO_IEEE0,
 +                        MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_CL73_IEEEB1,
 +                        MDIO_CL73_IEEEB1_AN_ADV1, &val);
 +      val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
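 +      /* Line the CL37 pause bits up with the CL73 pause field (three bit
 +       * positions higher) before masking them into the CL73 advertisement.
 +       */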
 +      val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_CL73_IEEEB1,
 +                        MDIO_CL73_IEEEB1_AN_ADV1, val);
 +}
 +
 +static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                u8 enable_cl73)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 mii_control;
 +
 +      DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
 +      /* Enable and restart BAM/CL37 aneg */
 +
 +      if (enable_cl73) {
 +              CL22_RD_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_CL73_IEEEB0,
 +                                MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
 +                                &mii_control);
 +
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_CL73_IEEEB0,
 +                                MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
 +                                (mii_control |
 +                                MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
 +                                MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
 +      } else {
 +
 +              CL22_RD_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_COMBO_IEEE0,
 +                                MDIO_COMBO_IEEE0_MII_CONTROL,
 +                                &mii_control);
 +              DP(NETIF_MSG_LINK,
 +                       "bnx2x_restart_autoneg mii_control before = 0x%x\n",
 +                       mii_control);
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_COMBO_IEEE0,
 +                                MDIO_COMBO_IEEE0_MII_CONTROL,
 +                                (mii_control |
 +                                 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
 +                                 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
 +      }
 +}
 +
 +static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
 +                                         struct link_params *params,
 +                                         struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 control1;
 +
 +      /* in SGMII mode, the unicore is always slave */
 +
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
 +                        &control1);
 +      control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
 +      /* set sgmii mode (and not fiber) */
 +      control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
 +                    MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
 +                    MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
 +                        control1);
 +
 +      /* if forced speed */
 +      if (!(vars->line_speed == SPEED_AUTO_NEG)) {
 +              /* set speed, disable autoneg */
 +              u16 mii_control;
 +
 +              CL22_RD_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_COMBO_IEEE0,
 +                                MDIO_COMBO_IEEE0_MII_CONTROL,
 +                                &mii_control);
 +              mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
 +                               MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
 +                               MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
 +
 +              switch (vars->line_speed) {
 +              case SPEED_100:
 +                      mii_control |=
 +                              MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
 +                      break;
 +              case SPEED_1000:
 +                      mii_control |=
 +                              MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
 +                      break;
 +              case SPEED_10:
 +                      /* there is nothing to set for 10M */
 +                      break;
 +              default:
 +                      /* invalid speed for SGMII */
 +                      DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
 +                                vars->line_speed);
 +                      break;
 +              }
 +
 +              /* setting the full duplex */
 +              if (phy->req_duplex == DUPLEX_FULL)
 +                      mii_control |=
 +                              MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_COMBO_IEEE0,
 +                                MDIO_COMBO_IEEE0_MII_CONTROL,
 +                                mii_control);
 +
 +      } else { /* AN mode */
 +              /* enable and restart AN */
 +              bnx2x_restart_autoneg(phy, params, 0);
 +      }
 +}
 +
 +
 +/*
 + * link management
 + */
 +
 +static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
 +                                           struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 pd_10g, status2_1000x;
 +      if (phy->req_line_speed != SPEED_AUTO_NEG)
 +              return 0;
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
 +                        &status2_1000x);
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_SERDES_DIGITAL,
 +                        MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
 +                        &status2_1000x);
 +      if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
 +              DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
 +                       params->port);
 +              return 1;
 +      }
 +
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_10G_PARALLEL_DETECT,
 +                        MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
 +                        &pd_10g);
 +
 +      if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
 +              DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
 +                       params->port);
 +              return 1;
 +      }
 +      return 0;
 +}
 +
 +static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
 +                                  struct link_params *params,
 +                                  struct link_vars *vars,
 +                                  u32 gp_status)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 ld_pause;   /* local driver */
 +      u16 lp_pause;   /* link partner */
 +      u16 pause_result;
 +
 +      vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +
 +      /* resolve from gp_status in case of AN complete and not sgmii */
 +      if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
 +              vars->flow_ctrl = phy->req_flow_ctrl;
 +      else if (phy->req_line_speed != SPEED_AUTO_NEG)
 +              vars->flow_ctrl = params->req_fc_auto_adv;
 +      else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
 +               (!(vars->phy_flags & PHY_SGMII_FLAG))) {
 +              if (bnx2x_direct_parallel_detect_used(phy, params)) {
 +                      vars->flow_ctrl = params->req_fc_auto_adv;
 +                      return;
 +              }
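 +              /* Resolve pause from the CL73 advertisement registers when
 +               * CL73 AN completed against a next-page capable partner;
 +               * otherwise use the CL37 (combo IEEE0) registers.
 +               */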
 +              if ((gp_status &
 +                  (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
 +                   MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
 +                  (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
 +                   MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
 +
 +                      CL22_RD_OVER_CL45(bp, phy,
 +                                        MDIO_REG_BANK_CL73_IEEEB1,
 +                                        MDIO_CL73_IEEEB1_AN_ADV1,
 +                                        &ld_pause);
 +                      CL22_RD_OVER_CL45(bp, phy,
 +                                        MDIO_REG_BANK_CL73_IEEEB1,
 +                                        MDIO_CL73_IEEEB1_AN_LP_ADV1,
 +                                        &lp_pause);
 +                      pause_result = (ld_pause &
 +                                      MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
 +                                      >> 8;
 +                      pause_result |= (lp_pause &
 +                                      MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
 +                                      >> 10;
 +                      DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
 +                               pause_result);
 +              } else {
 +                      CL22_RD_OVER_CL45(bp, phy,
 +                                        MDIO_REG_BANK_COMBO_IEEE0,
 +                                        MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
 +                                        &ld_pause);
 +                      CL22_RD_OVER_CL45(bp, phy,
 +                              MDIO_REG_BANK_COMBO_IEEE0,
 +                              MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
 +                              &lp_pause);
 +                      pause_result = (ld_pause &
 +                              MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
 +                      pause_result |= (lp_pause &
 +                              MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
 +                      DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
 +                               pause_result);
 +              }
 +              bnx2x_pause_resolve(vars, pause_result);
 +      }
 +      DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
 +}
 +
 +static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
 +                                       struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 rx_status, ustat_val, cl37_fsm_received;
 +      DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
 +      /* Step 1: Make sure signal is detected */
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_RX0,
 +                        MDIO_RX0_RX_STATUS,
 +                        &rx_status);
 +      if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
 +          (MDIO_RX0_RX_STATUS_SIGDET)) {
 +              DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73. "
 +                           "rx_status(0x80b0) = 0x%x\n", rx_status);
 +              CL22_WR_OVER_CL45(bp, phy,
 +                                MDIO_REG_BANK_CL73_IEEEB0,
 +                                MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
 +                                MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
 +              return;
 +      }
 +      /* Step 2: Check CL73 state machine */
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_CL73_USERB0,
 +                        MDIO_CL73_USERB0_CL73_USTAT1,
 +                        &ustat_val);
 +      if ((ustat_val &
 +           (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
 +            MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
 +          (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
 +            MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
 +              DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
 +                           "ustat_val(0x8371) = 0x%x\n", ustat_val);
 +              return;
 +      }
 +      /*
 +       * Step 3: Check CL37 Message Pages received to indicate LP
 +       * supports only CL37
 +       */
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_REMOTE_PHY,
 +                        MDIO_REMOTE_PHY_MISC_RX_STATUS,
 +                        &cl37_fsm_received);
 +      if ((cl37_fsm_received &
 +           (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
 +           MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
 +          (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
 +            MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
 +              DP(NETIF_MSG_LINK, "No CL37 FSM messages were received. "
 +                           "misc_rx_status(0x8330) = 0x%x\n",
 +                       cl37_fsm_received);
 +              return;
 +      }
 +      /*
 +       * The combined cl37/cl73 fsm state information indicates that we
 +       * are connected to a device which does not support cl73, but does
 +       * support cl37 BAM. In this case we disable cl73 and restart
 +       * cl37 auto-neg.
 +       */
 +
 +      /* Disable CL73 */
 +      CL22_WR_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_CL73_IEEEB0,
 +                        MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
 +                        0);
 +      /* Restart CL37 autoneg */
 +      bnx2x_restart_autoneg(phy, params, 0);
 +      DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
 +}
 +
 +static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars,
 +                                u32 gp_status)
 +{
 +      if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
 +              vars->link_status |=
 +                      LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
 +
 +      if (bnx2x_direct_parallel_detect_used(phy, params))
 +              vars->link_status |=
 +                      LINK_STATUS_PARALLEL_DETECTION_USED;
 +}
 +static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
 +                                   struct link_params *params,
 +                                    struct link_vars *vars,
 +                                    u16 is_link_up,
 +                                    u16 speed_mask,
 +                                    u16 is_duplex)
 +{
 +      struct bnx2x *bp = params->bp;
 +      if (phy->req_line_speed == SPEED_AUTO_NEG)
 +              vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
 +      if (is_link_up) {
 +              DP(NETIF_MSG_LINK, "phy link up\n");
 +
 +              vars->phy_link_up = 1;
 +              vars->link_status |= LINK_STATUS_LINK_UP;
 +
 +              switch (speed_mask) {
 +              case GP_STATUS_10M:
 +                      vars->line_speed = SPEED_10;
 +                      if (vars->duplex == DUPLEX_FULL)
 +                              vars->link_status |= LINK_10TFD;
 +                      else
 +                              vars->link_status |= LINK_10THD;
 +                      break;
 +
 +              case GP_STATUS_100M:
 +                      vars->line_speed = SPEED_100;
 +                      if (vars->duplex == DUPLEX_FULL)
 +                              vars->link_status |= LINK_100TXFD;
 +                      else
 +                              vars->link_status |= LINK_100TXHD;
 +                      break;
 +
 +              case GP_STATUS_1G:
 +              case GP_STATUS_1G_KX:
 +                      vars->line_speed = SPEED_1000;
 +                      if (vars->duplex == DUPLEX_FULL)
 +                              vars->link_status |= LINK_1000TFD;
 +                      else
 +                              vars->link_status |= LINK_1000THD;
 +                      break;
 +
 +              case GP_STATUS_2_5G:
 +                      vars->line_speed = SPEED_2500;
 +                      if (vars->duplex == DUPLEX_FULL)
 +                              vars->link_status |= LINK_2500TFD;
 +                      else
 +                              vars->link_status |= LINK_2500THD;
 +                      break;
 +
 +              case GP_STATUS_5G:
 +              case GP_STATUS_6G:
 +                      DP(NETIF_MSG_LINK,
 +                                "link speed unsupported gp_status 0x%x\n",
 +                                speed_mask);
 +                      return -EINVAL;
 +
 +              case GP_STATUS_10G_KX4:
 +              case GP_STATUS_10G_HIG:
 +              case GP_STATUS_10G_CX4:
 +              case GP_STATUS_10G_KR:
 +              case GP_STATUS_10G_SFI:
 +              case GP_STATUS_10G_XFI:
 +                      vars->line_speed = SPEED_10000;
 +                      vars->link_status |= LINK_10GTFD;
 +                      break;
 +              case GP_STATUS_20G_DXGXS:
 +                      vars->line_speed = SPEED_20000;
 +                      vars->link_status |= LINK_20GTFD;
 +                      break;
 +              default:
 +                      DP(NETIF_MSG_LINK,
 +                                "link speed unsupported gp_status 0x%x\n",
 +                                speed_mask);
 +                      return -EINVAL;
 +              }
 +      } else { /* link_down */
 +              DP(NETIF_MSG_LINK, "phy link down\n");
 +
 +              vars->phy_link_up = 0;
 +
 +              vars->duplex = DUPLEX_FULL;
 +              vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +              vars->mac_type = MAC_TYPE_NONE;
 +      }
 +      DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
 +                  vars->phy_link_up, vars->line_speed);
 +      return 0;
 +}
 +
 +static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
 +                                    struct link_params *params,
 +                                    struct link_vars *vars)
 +{
 +
 +      struct bnx2x *bp = params->bp;
 +
 +      u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
 +      int rc = 0;
 +
 +      /* Read gp_status */
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_GP_STATUS,
 +                        MDIO_GP_STATUS_TOP_AN_STATUS1,
 +                        &gp_status);
 +      if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
 +              duplex = DUPLEX_FULL;
 +      if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
 +              link_up = 1;
 +      speed_mask = gp_status & GP_STATUS_SPEED_MASK;
 +      DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
 +                     gp_status, link_up, speed_mask);
 +      rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
 +                                       duplex);
 +      if (rc == -EINVAL)
 +              return rc;
 +
 +      if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
 +              if (SINGLE_MEDIA_DIRECT(params)) {
 +                      bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
 +                      if (phy->req_line_speed == SPEED_AUTO_NEG)
 +                              bnx2x_xgxs_an_resolve(phy, params, vars,
 +                                                    gp_status);
 +              }
 +      } else { /* link_down */
 +              if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +                  SINGLE_MEDIA_DIRECT(params)) {
 +                      /* Check signal is detected */
 +                      bnx2x_check_fallback_to_cl37(phy, params);
 +              }
 +      }
 +
 +      DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
 +                 vars->duplex, vars->flow_ctrl, vars->link_status);
 +      return rc;
 +}
 +
 +static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
 +                                   struct link_params *params,
 +                                   struct link_vars *vars)
 +{
 +
 +      struct bnx2x *bp = params->bp;
 +
 +      u8 lane;
 +      u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
 +      int rc = 0;
 +      lane = bnx2x_get_warpcore_lane(phy, params);
 +      /* Read gp_status */
 +      if (phy->req_line_speed > SPEED_10000) {
 +              u16 temp_link_up;
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              1, &temp_link_up);
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              1, &link_up);
 +              DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
 +                             temp_link_up, link_up);
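 +              /* Only bit 2 of the PCS RX link status is used as link up here */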
 +              link_up &= (1<<2);
 +              if (link_up)
 +                      bnx2x_ext_phy_resolve_fc(phy, params, vars);
 +      } else {
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
 +              DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
 +              /* Check for either KR or generic link up. */
 +              gp_status1 = ((gp_status1 >> 8) & 0xf) |
 +                      ((gp_status1 >> 12) & 0xf);
 +              link_up = gp_status1 & (1 << lane);
 +              if (link_up && SINGLE_MEDIA_DIRECT(params)) {
 +                      u16 pd, gp_status4;
 +                      if (phy->req_line_speed == SPEED_AUTO_NEG) {
 +                              /* Check Autoneg complete */
 +                              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                                              MDIO_WC_REG_GP2_STATUS_GP_2_4,
 +                                              &gp_status4);
 +                              if (gp_status4 & ((1<<12)<<lane))
 +                                      vars->link_status |=
 +                                      LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
 +
 +                              /* Check parallel detect used */
 +                              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                                              MDIO_WC_REG_PAR_DET_10G_STATUS,
 +                                              &pd);
 +                              if (pd & (1<<15))
 +                                      vars->link_status |=
 +                                      LINK_STATUS_PARALLEL_DETECTION_USED;
 +                      }
 +                      bnx2x_ext_phy_resolve_fc(phy, params, vars);
 +              }
 +      }
 +
 +      if (lane < 2) {
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
 +      } else {
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
 +      }
 +      DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
 +
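 +      /* Each GP2 status register reports the speed for two lanes; even
 +       * lanes use the low byte, so shift it up before masking the speed
 +       * field in bits [13:8].
 +       */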
 +      if ((lane & 1) == 0)
 +              gp_speed <<= 8;
 +      gp_speed &= 0x3f00;
 +
 +
 +      rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
 +                                       duplex);
 +
 +      DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
 +                 vars->duplex, vars->flow_ctrl, vars->link_status);
 +      return rc;
 +}
 +static void bnx2x_set_gmii_tx_driver(struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      struct bnx2x_phy *phy = &params->phy[INT_PHY];
 +      u16 lp_up2;
 +      u16 tx_driver;
 +      u16 bank;
 +
 +      /* read precomp */
 +      CL22_RD_OVER_CL45(bp, phy,
 +                        MDIO_REG_BANK_OVER_1G,
 +                        MDIO_OVER_1G_LP_UP2, &lp_up2);
 +
 +      /* bits [10:7] at lp_up2, positioned at [15:12] */
 +      lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
 +                 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
 +                MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
 +
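 +      /* Nothing to do if the link partner did not advertise any preemphasis */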
 +      if (lp_up2 == 0)
 +              return;
 +
 +      for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
 +            bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
 +              CL22_RD_OVER_CL45(bp, phy,
 +                                bank,
 +                                MDIO_TX0_TX_DRIVER, &tx_driver);
 +
 +              /* replace tx_driver bits [15:12] */
 +              if (lp_up2 !=
 +                  (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
 +                      tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
 +                      tx_driver |= lp_up2;
 +                      CL22_WR_OVER_CL45(bp, phy,
 +                                        bank,
 +                                        MDIO_TX0_TX_DRIVER, tx_driver);
 +              }
 +      }
 +}
 +
 +static int bnx2x_emac_program(struct link_params *params,
 +                            struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +      u16 mode = 0;
 +
 +      DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
 +      bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
 +                     EMAC_REG_EMAC_MODE,
 +                     (EMAC_MODE_25G_MODE |
 +                      EMAC_MODE_PORT_MII_10M |
 +                      EMAC_MODE_HALF_DUPLEX));
 +      switch (vars->line_speed) {
 +      case SPEED_10:
 +              mode |= EMAC_MODE_PORT_MII_10M;
 +              break;
 +
 +      case SPEED_100:
 +              mode |= EMAC_MODE_PORT_MII;
 +              break;
 +
 +      case SPEED_1000:
 +              mode |= EMAC_MODE_PORT_GMII;
 +              break;
 +
 +      case SPEED_2500:
 +              mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
 +              break;
 +
 +      default:
 +              /* 10G not valid for EMAC */
 +              DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
 +                         vars->line_speed);
 +              return -EINVAL;
 +      }
 +
 +      if (vars->duplex == DUPLEX_HALF)
 +              mode |= EMAC_MODE_HALF_DUPLEX;
 +      bnx2x_bits_en(bp,
 +                    GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
 +                    mode);
 +
 +      bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
 +      return 0;
 +}
 +
 +static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
 +                                struct link_params *params)
 +{
 +
 +      u16 bank, i = 0;
 +      struct bnx2x *bp = params->bp;
 +
 +      for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
 +            bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
 +                      CL22_WR_OVER_CL45(bp, phy,
 +                                        bank,
 +                                        MDIO_RX0_RX_EQ_BOOST,
 +                                        phy->rx_preemphasis[i]);
 +      }
 +
 +      for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
 +                    bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
 +                      CL22_WR_OVER_CL45(bp, phy,
 +                                        bank,
 +                                        MDIO_TX0_TX_DRIVER,
 +                                        phy->tx_preemphasis[i]);
 +      }
 +}
 +
 +static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
 +                                 struct link_params *params,
 +                                 struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
 +                        (params->loopback_mode == LOOPBACK_XGXS));
 +      if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
 +              if (SINGLE_MEDIA_DIRECT(params) &&
 +                  (params->feature_config_flags &
 +                   FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
 +                      bnx2x_set_preemphasis(phy, params);
 +
 +              /* forced speed requested? */
 +              if (vars->line_speed != SPEED_AUTO_NEG ||
 +                  (SINGLE_MEDIA_DIRECT(params) &&
 +                   params->loopback_mode == LOOPBACK_EXT)) {
 +                      DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
 +
 +                      /* disable autoneg */
 +                      bnx2x_set_autoneg(phy, params, vars, 0);
 +
 +                      /* program speed and duplex */
 +                      bnx2x_program_serdes(phy, params, vars);
 +
 +              } else { /* AN_mode */
 +                      DP(NETIF_MSG_LINK, "not SGMII, AN\n");
 +
 +                      /* AN enabled */
 +                      bnx2x_set_brcm_cl37_advertisement(phy, params);
 +
 +                      /* program duplex & pause advertisement (for aneg) */
 +                      bnx2x_set_ieee_aneg_advertisement(phy, params,
 +                                                        vars->ieee_fc);
 +
 +                      /* enable autoneg */
 +                      bnx2x_set_autoneg(phy, params, vars, enable_cl73);
 +
 +                      /* enable and restart AN */
 +                      bnx2x_restart_autoneg(phy, params, enable_cl73);
 +              }
 +
 +      } else { /* SGMII mode */
 +              DP(NETIF_MSG_LINK, "SGMII\n");
 +
 +              bnx2x_initialize_sgmii_process(phy, params, vars);
 +      }
 +}
 +
 +static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
 +                        struct link_params *params,
 +                        struct link_vars *vars)
 +{
 +      int rc;
 +      vars->phy_flags |= PHY_XGXS_FLAG;
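 +      /*
 +       * Use SGMII when 10/100 is explicitly requested, when the speed
 +       * capability mask is limited to below 1G, or when the SerDes is
 +       * directly connected; otherwise stay in XGXS mode.
 +       */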
 +      if ((phy->req_line_speed &&
 +           ((phy->req_line_speed == SPEED_100) ||
 +            (phy->req_line_speed == SPEED_10))) ||
 +          (!phy->req_line_speed &&
 +           (phy->speed_cap_mask >=
 +            PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
 +           (phy->speed_cap_mask <
 +            PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
 +          (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
 +              vars->phy_flags |= PHY_SGMII_FLAG;
 +      else
 +              vars->phy_flags &= ~PHY_SGMII_FLAG;
 +
 +      bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
 +      bnx2x_set_aer_mmd(params, phy);
 +      if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
 +              bnx2x_set_master_ln(params, phy);
 +
 +      rc = bnx2x_reset_unicore(params, phy, 0);
 +      /* reset the SerDes and wait for reset bit return low */
 +      if (rc != 0)
 +              return rc;
 +
 +      bnx2x_set_aer_mmd(params, phy);
 +      /* setting the masterLn_def again after the reset */
 +      if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
 +              bnx2x_set_master_ln(params, phy);
 +              bnx2x_set_swap_lanes(params, phy);
 +      }
 +
 +      return rc;
 +}
 +
 +static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
 +                                   struct bnx2x_phy *phy,
 +                                   struct link_params *params)
 +{
 +      u16 cnt, ctrl;
 +      /* Wait for soft reset to get cleared up to 1 sec */
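 +      /* Poll bit 15 (soft reset) of the PHY control register, up to
 +       * 1000 iterations with 1ms sleeps.
 +       */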
 +      for (cnt = 0; cnt < 1000; cnt++) {
 +              if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
 +                      bnx2x_cl22_read(bp, phy,
 +                              MDIO_PMA_REG_CTRL, &ctrl);
 +              else
 +                      bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_CTRL, &ctrl);
 +              if (!(ctrl & (1<<15)))
 +                      break;
 +              msleep(1);
 +      }
 +
 +      if (cnt == 1000)
 +              netdev_err(bp->dev,  "Warning: PHY was not initialized,"
 +                                    " Port %d\n",
 +                       params->port);
 +      DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
 +      return cnt;
 +}
 +
 +static void bnx2x_link_int_enable(struct link_params *params)
 +{
 +      u8 port = params->port;
 +      u32 mask;
 +      struct bnx2x *bp = params->bp;
 +
 +      /* Setting the status to report on link up for either XGXS or SerDes */
 +      if (CHIP_IS_E3(bp)) {
 +              mask = NIG_MASK_XGXS0_LINK_STATUS;
 +              if (!(SINGLE_MEDIA_DIRECT(params)))
 +                      mask |= NIG_MASK_MI_INT;
 +      } else if (params->switch_cfg == SWITCH_CFG_10G) {
 +              mask = (NIG_MASK_XGXS0_LINK10G |
 +                      NIG_MASK_XGXS0_LINK_STATUS);
 +              DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
 +              if (!(SINGLE_MEDIA_DIRECT(params)) &&
 +                      params->phy[INT_PHY].type !=
 +                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
 +                      mask |= NIG_MASK_MI_INT;
 +                      DP(NETIF_MSG_LINK, "enabled external phy int\n");
 +              }
 +
 +      } else { /* SerDes */
 +              mask = NIG_MASK_SERDES0_LINK_STATUS;
 +              DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
 +              if (!(SINGLE_MEDIA_DIRECT(params)) &&
 +                      params->phy[INT_PHY].type !=
 +                              PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
 +                      mask |= NIG_MASK_MI_INT;
 +                      DP(NETIF_MSG_LINK, "enabled external phy int\n");
 +              }
 +      }
 +      bnx2x_bits_en(bp,
 +                    NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
 +                    mask);
 +
 +      DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
 +               (params->switch_cfg == SWITCH_CFG_10G),
 +               REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
 +      DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
 +               REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
 +               REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
 +               REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
 +      DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
 +         REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
 +         REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
 +}
 +
 +static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
 +                                   u8 exp_mi_int)
 +{
 +      u32 latch_status = 0;
 +
 +      /*
 +       * Disable the MI INT (external phy int) by writing 1 to the
 +       * status register. The link-down indication is an active-high
 +       * signal, so in this case we need to write the status to clear
 +       * the XOR.
 +       */
 +      /* Read Latched signals */
 +      latch_status = REG_RD(bp,
 +                                  NIG_REG_LATCH_STATUS_0 + port*8);
 +      DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
 +      /* Handle only those with latched-signal=up.*/
 +      if (exp_mi_int)
 +              bnx2x_bits_en(bp,
 +                            NIG_REG_STATUS_INTERRUPT_PORT0
 +                            + port*4,
 +                            NIG_STATUS_EMAC0_MI_INT);
 +      else
 +              bnx2x_bits_dis(bp,
 +                             NIG_REG_STATUS_INTERRUPT_PORT0
 +                             + port*4,
 +                             NIG_STATUS_EMAC0_MI_INT);
 +
 +      if (latch_status & 1) {
 +
 +              /* For all latched-signal=up : Re-Arm Latch signals */
 +              REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
 +                     (latch_status & 0xfffe) | (latch_status & 1));
 +      }
 +      /* For all latched-signal=up,Write original_signal to status */
 +}
 +
 +static void bnx2x_link_int_ack(struct link_params *params,
 +                             struct link_vars *vars, u8 is_10g_plus)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +      u32 mask;
 +      /*
 +       * First reset all status bits; we assume only one line will
 +       * change at a time
 +       */
 +      bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
 +                     (NIG_STATUS_XGXS0_LINK10G |
 +                      NIG_STATUS_XGXS0_LINK_STATUS |
 +                      NIG_STATUS_SERDES0_LINK_STATUS));
 +      if (vars->phy_link_up) {
 +              if (USES_WARPCORE(bp))
 +                      mask = NIG_STATUS_XGXS0_LINK_STATUS;
 +              else {
 +                      if (is_10g_plus)
 +                              mask = NIG_STATUS_XGXS0_LINK10G;
 +                      else if (params->switch_cfg == SWITCH_CFG_10G) {
 +                              /*
 +                               * Disable the link interrupt by writing 1 to
 +                               * the relevant lane in the status register
 +                               */
 +                              u32 ser_lane =
 +                                      ((params->lane_config &
 +                                  PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
 +                                  PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 +                              mask = ((1 << ser_lane) <<
 +                                     NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
 +                      } else
 +                              mask = NIG_STATUS_SERDES0_LINK_STATUS;
 +              }
 +              DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
 +                             mask);
 +              bnx2x_bits_en(bp,
 +                            NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
 +                            mask);
 +      }
 +}
 +
 +static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
 +{
 +      u8 *str_ptr = str;
 +      u32 mask = 0xf0000000;
 +      u8 shift = 8*4;
 +      u8 digit;
 +      u8 remove_leading_zeros = 1;
 +      if (*len < 10) {
 +              /* Need more than 10 chars for this format */
 +              *str_ptr = '\0';
 +              (*len)--;
 +              return -EINVAL;
 +      }
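 +      /*
 +       * The 32-bit value is rendered as two dotted hex halves with
 +       * leading zeros stripped, e.g. 0x0102000a becomes "102.a".
 +       */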
 +      while (shift > 0) {
 +
 +              shift -= 4;
 +              digit = ((num & mask) >> shift);
 +              if (digit == 0 && remove_leading_zeros) {
 +                      mask = mask >> 4;
 +                      continue;
 +              } else if (digit < 0xa)
 +                      *str_ptr = digit + '0';
 +              else
 +                      *str_ptr = digit - 0xa + 'a';
 +              remove_leading_zeros = 0;
 +              str_ptr++;
 +              (*len)--;
 +              mask = mask >> 4;
 +              if (shift == 4*4) {
 +                      *str_ptr = '.';
 +                      str_ptr++;
 +                      (*len)--;
 +                      remove_leading_zeros = 1;
 +              }
 +      }
 +      return 0;
 +}
 +
 +
 +static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
 +{
 +      str[0] = '\0';
 +      (*len)--;
 +      return 0;
 +}
 +
 +int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
 +                               u8 *version, u16 len)
 +{
 +      struct bnx2x *bp;
 +      u32 spirom_ver = 0;
 +      int status = 0;
 +      u8 *ver_p = version;
 +      u16 remain_len = len;
 +      if (version == NULL || params == NULL)
 +              return -EINVAL;
 +      bp = params->bp;
 +
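 +      /*
 +       * The resulting string is "<phy1_ver>" for a single external phy,
 +       * or "<phy1_ver>/<phy2_ver>" on dual-media boards.
 +       */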
 +      /* Extract first external phy*/
 +      version[0] = '\0';
 +      spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
 +
 +      if (params->phy[EXT_PHY1].format_fw_ver) {
 +              status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
 +                                                            ver_p,
 +                                                            &remain_len);
 +              ver_p += (len - remain_len);
 +      }
 +      if ((params->num_phys == MAX_PHYS) &&
 +          (params->phy[EXT_PHY2].ver_addr != 0)) {
 +              spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
 +              if (params->phy[EXT_PHY2].format_fw_ver) {
 +                      *ver_p = '/';
 +                      ver_p++;
 +                      remain_len--;
 +                      status |= params->phy[EXT_PHY2].format_fw_ver(
 +                              spirom_ver,
 +                              ver_p,
 +                              &remain_len);
 +                      ver_p = version + (len - remain_len);
 +              }
 +      }
 +      *ver_p = '\0';
 +      return status;
 +}
 +
 +static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
 +                                  struct link_params *params)
 +{
 +      u8 port = params->port;
 +      struct bnx2x *bp = params->bp;
 +
 +      if (phy->req_line_speed != SPEED_1000) {
 +              u32 md_devad = 0;
 +
 +              DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
 +
 +              if (!CHIP_IS_E3(bp)) {
 +                      /* change the uni_phy_addr in the nig */
 +                      md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
 +                                             port*0x18));
 +
 +                      REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
 +                             0x5);
 +              }
 +
 +              bnx2x_cl45_write(bp, phy,
 +                               5,
 +                               (MDIO_REG_BANK_AER_BLOCK +
 +                                (MDIO_AER_BLOCK_AER_REG & 0xf)),
 +                               0x2800);
 +
 +              bnx2x_cl45_write(bp, phy,
 +                               5,
 +                               (MDIO_REG_BANK_CL73_IEEEB0 +
 +                                (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
 +                               0x6041);
 +              msleep(200);
 +              /* set aer mmd back */
 +              bnx2x_set_aer_mmd(params, phy);
 +
 +              if (!CHIP_IS_E3(bp)) {
 +                      /* and md_devad */
 +                      REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
 +                             md_devad);
 +              }
 +      } else {
 +              u16 mii_ctrl;
 +              DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
 +              bnx2x_cl45_read(bp, phy, 5,
 +                              (MDIO_REG_BANK_COMBO_IEEE0 +
 +                              (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
 +                              &mii_ctrl);
 +              bnx2x_cl45_write(bp, phy, 5,
 +                               (MDIO_REG_BANK_COMBO_IEEE0 +
 +                               (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
 +                               mii_ctrl |
 +                               MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
 +      }
 +}
 +
 +int bnx2x_set_led(struct link_params *params,
 +                struct link_vars *vars, u8 mode, u32 speed)
 +{
 +      u8 port = params->port;
 +      u16 hw_led_mode = params->hw_led_mode;
 +      int rc = 0;
 +      u8 phy_idx;
 +      u32 tmp;
 +      u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
 +      DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
 +               speed, hw_led_mode);
 +      /* In case any external PHY provides its own LED control, invoke it */
 +      for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
 +              if (params->phy[phy_idx].set_link_led) {
 +                      params->phy[phy_idx].set_link_led(
 +                              &params->phy[phy_idx], params, mode);
 +              }
 +      }
 +
 +      switch (mode) {
 +      case LED_MODE_FRONT_PANEL_OFF:
 +      case LED_MODE_OFF:
 +              REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
 +              REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
 +                     SHARED_HW_CFG_LED_MAC1);
 +
 +              tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 +              EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
 +              break;
 +
 +      case LED_MODE_OPER:
 +              /*
 +               * For all other phys, OPER mode is the same as ON, so in
 +               * case the link is down, do nothing
 +               */
 +              if (!vars->link_up)
 +                      break;
 +      case LED_MODE_ON:
 +              if (((params->phy[EXT_PHY1].type ==
 +                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
 +                       (params->phy[EXT_PHY1].type ==
 +                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
 +                  CHIP_IS_E2(bp) && params->num_phys == 2) {
 +                      /*
 +                       * This is a work-around for E2+8727 Configurations
 +                       */
 +                      if (mode == LED_MODE_ON ||
 +                              speed == SPEED_10000){
 +                              REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
 +                              REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
 +
 +                              tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 +                              EMAC_WR(bp, EMAC_REG_EMAC_LED,
 +                                      (tmp | EMAC_LED_OVERRIDE));
 +                              /*
 +                               * return here without enabling traffic
-                       REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
++                               * LED blink and setting rate in ON mode.
 +                               * In oper mode, enabling LED blink
 +                               * and setting rate is needed.
 +                               */
 +                              if (mode == LED_MODE_ON)
 +                                      return rc;
 +                      }
 +              } else if (SINGLE_MEDIA_DIRECT(params)) {
 +                      /*
 +                       * This is a work-around for HW issue found when link
 +                       * is up in CL73
 +                       */
-       .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                          FLAGS_TX_ERROR_CHECK),
++                      if ((!CHIP_IS_E3(bp)) ||
++                          (CHIP_IS_E3(bp) &&
++                           mode == LED_MODE_ON))
++                              REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
++
 +                      if (CHIP_IS_E1x(bp) ||
 +                          CHIP_IS_E2(bp) ||
 +                          (mode == LED_MODE_ON))
 +                              REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
 +                      else
 +                              REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
 +                                     hw_led_mode);
 +              } else
 +                      REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
 +
 +              REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
 +              /* Set blinking rate to ~15.9Hz */
 +              REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
 +                     LED_BLINK_RATE_VAL);
 +              REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
 +                     port*4, 1);
 +              tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 +              EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
 +
 +              if (CHIP_IS_E1(bp) &&
 +                  ((speed == SPEED_2500) ||
 +                   (speed == SPEED_1000) ||
 +                   (speed == SPEED_100) ||
 +                   (speed == SPEED_10))) {
 +                      /*
 +                       * On Everest 1 Ax chip versions for speeds less than
 +                       * 10G LED scheme is different
 +                       */
 +                      REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
 +                             + port*4, 1);
 +                      REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
 +                             port*4, 0);
 +                      REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
 +                             port*4, 1);
 +              }
 +              break;
 +
 +      default:
 +              rc = -EINVAL;
 +              DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
 +                       mode);
 +              break;
 +      }
 +      return rc;
 +
 +}
 +
 +/*
 + * This function reflects the actual link state as read DIRECTLY from the
 + * HW
 + */
 +int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
 +                  u8 is_serdes)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 gp_status = 0, phy_index = 0;
 +      u8 ext_phy_link_up = 0, serdes_phy_type;
 +      struct link_vars temp_vars;
 +      struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
 +
 +      if (CHIP_IS_E3(bp)) {
 +              u16 link_up;
 +              if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
 +                  > SPEED_10000) {
 +                      /* Check 20G link */
 +                      bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
 +                                      1, &link_up);
 +                      bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
 +                                      1, &link_up);
 +                      link_up &= (1<<2);
 +              } else {
 +                      /* Check 10G link and below*/
 +                      u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
 +                      bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
 +                                      MDIO_WC_REG_GP2_STATUS_GP_2_1,
 +                                      &gp_status);
 +                      gp_status = ((gp_status >> 8) & 0xf) |
 +                              ((gp_status >> 12) & 0xf);
 +                      link_up = gp_status & (1 << lane);
 +              }
 +              if (!link_up)
 +                      return -ESRCH;
 +      } else {
 +              CL22_RD_OVER_CL45(bp, int_phy,
 +                        MDIO_REG_BANK_GP_STATUS,
 +                        MDIO_GP_STATUS_TOP_AN_STATUS1,
 +                        &gp_status);
 +              /* link is up only if both local phy and external phy are up */
 +              if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
 +                      return -ESRCH;
 +      }
 +      /* In XGXS loopback mode, do not check external PHY */
 +      if (params->loopback_mode == LOOPBACK_XGXS)
 +              return 0;
 +
 +      switch (params->num_phys) {
 +      case 1:
 +              /* No external PHY */
 +              return 0;
 +      case 2:
 +              ext_phy_link_up = params->phy[EXT_PHY1].read_status(
 +                      &params->phy[EXT_PHY1],
 +                      params, &temp_vars);
 +              break;
 +      case 3: /* Dual Media */
 +              for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 +                    phy_index++) {
 +                      serdes_phy_type = ((params->phy[phy_index].media_type ==
 +                                          ETH_PHY_SFP_FIBER) ||
 +                                         (params->phy[phy_index].media_type ==
 +                                          ETH_PHY_XFP_FIBER) ||
 +                                         (params->phy[phy_index].media_type ==
 +                                          ETH_PHY_DA_TWINAX));
 +
 +                      if (is_serdes != serdes_phy_type)
 +                              continue;
 +                      if (params->phy[phy_index].read_status) {
 +                              ext_phy_link_up |=
 +                                      params->phy[phy_index].read_status(
 +                                              &params->phy[phy_index],
 +                                              params, &temp_vars);
 +                      }
 +              }
 +              break;
 +      }
 +      if (ext_phy_link_up)
 +              return 0;
 +      return -ESRCH;
 +}
 +
 +static int bnx2x_link_initialize(struct link_params *params,
 +                               struct link_vars *vars)
 +{
 +      int rc = 0;
 +      u8 phy_index, non_ext_phy;
 +      struct bnx2x *bp = params->bp;
 +      /*
 +       * In case of external phy existence, the line speed would be the
 +       * line speed linked up by the external phy. In case it is direct
 +       * only, then the line_speed during initialization will be
 +       * equal to the req_line_speed
 +       */
 +      vars->line_speed = params->phy[INT_PHY].req_line_speed;
 +
 +      /*
 +       * Initialize the internal phy in case this is a direct board
 +       * (no external phys), or this board has an external phy which
 +       * requires the internal XGXS to be initialized first.
 +       */
 +      if (!USES_WARPCORE(bp))
 +              bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
 +      /* init ext phy and enable link state int */
 +      non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
 +                     (params->loopback_mode == LOOPBACK_XGXS));
 +
 +      if (non_ext_phy ||
 +          (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
 +          (params->loopback_mode == LOOPBACK_EXT_PHY)) {
 +              struct bnx2x_phy *phy = &params->phy[INT_PHY];
 +              if (vars->line_speed == SPEED_AUTO_NEG &&
 +                  (CHIP_IS_E1x(bp) ||
 +                   CHIP_IS_E2(bp)))
 +                      bnx2x_set_parallel_detection(phy, params);
 +              if (params->phy[INT_PHY].config_init)
 +                      params->phy[INT_PHY].config_init(phy,
 +                                                       params,
 +                                                       vars);
 +      }
 +
 +      /* Init external phy*/
 +      if (non_ext_phy) {
 +              if (params->phy[INT_PHY].supported &
 +                  SUPPORTED_FIBRE)
 +                      vars->link_status |= LINK_STATUS_SERDES_LINK;
 +      } else {
 +              for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 +                    phy_index++) {
 +                      /*
 +                       * No need to initialize second phy in case of first
 +                       * phy only selection. In case of second phy, we do
 +                       * need to initialize the first phy, since they are
 +                       * connected.
 +                       */
 +                      if (params->phy[phy_index].supported &
 +                          SUPPORTED_FIBRE)
 +                              vars->link_status |= LINK_STATUS_SERDES_LINK;
 +
 +                      if (phy_index == EXT_PHY2 &&
 +                          (bnx2x_phy_selection(params) ==
 +                           PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
 +                              DP(NETIF_MSG_LINK,
 +                                 "Not initializing second phy\n");
 +                              continue;
 +                      }
 +                      params->phy[phy_index].config_init(
 +                              &params->phy[phy_index],
 +                              params, vars);
 +              }
 +      }
 +      /* Reset the interrupt indication after phy was initialized */
 +      bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
 +                     params->port*4,
 +                     (NIG_STATUS_XGXS0_LINK10G |
 +                      NIG_STATUS_XGXS0_LINK_STATUS |
 +                      NIG_STATUS_SERDES0_LINK_STATUS |
 +                      NIG_MASK_MI_INT));
 +      bnx2x_update_mng(params, vars->link_status);
 +      return rc;
 +}
 +
 +static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
 +                               struct link_params *params)
 +{
 +      /* reset the SerDes/XGXS */
 +      REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
 +             (0x1ff << (params->port*16)));
 +}
 +
 +static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
 +                                      struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 gpio_port;
 +      /* HW reset */
 +      if (CHIP_IS_E2(bp))
 +              gpio_port = BP_PATH(bp);
 +      else
 +              gpio_port = params->port;
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 +                     MISC_REGISTERS_GPIO_OUTPUT_LOW,
 +                     gpio_port);
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 +                     MISC_REGISTERS_GPIO_OUTPUT_LOW,
 +                     gpio_port);
 +      DP(NETIF_MSG_LINK, "reset external PHY\n");
 +}
 +
 +static int bnx2x_update_link_down(struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +
 +      DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
 +      bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
 +      vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
 +      /* indicate no mac active */
 +      vars->mac_type = MAC_TYPE_NONE;
 +
 +      /* update shared memory */
 +      vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
 +                             LINK_STATUS_LINK_UP |
 +                             LINK_STATUS_PHYSICAL_LINK_FLAG |
 +                             LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
 +                             LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
 +                             LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
 +                             LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK);
 +      vars->line_speed = 0;
 +      bnx2x_update_mng(params, vars->link_status);
 +
 +      /* activate nig drain */
 +      REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
 +
 +      /* disable emac */
 +      if (!CHIP_IS_E3(bp))
 +              REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 +
 +      msleep(10);
 +      /* reset BigMac/Xmac */
 +      if (CHIP_IS_E1x(bp) ||
 +          CHIP_IS_E2(bp)) {
 +              bnx2x_bmac_rx_disable(bp, params->port);
 +              REG_WR(bp, GRCBASE_MISC +
 +                     MISC_REGISTERS_RESET_REG_2_CLEAR,
 +             (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 +      }
 +      if (CHIP_IS_E3(bp))
 +              bnx2x_xmac_disable(params);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_update_link_up(struct link_params *params,
 +                              struct link_vars *vars,
 +                              u8 link_10g)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port = params->port;
 +      int rc = 0;
 +
 +      vars->link_status |= (LINK_STATUS_LINK_UP |
 +                            LINK_STATUS_PHYSICAL_LINK_FLAG);
 +      vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
 +
 +      if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
 +              vars->link_status |=
 +                      LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
 +
 +      if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
 +              vars->link_status |=
 +                      LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
 +      if (USES_WARPCORE(bp)) {
 +              if (link_10g) {
 +                      if (bnx2x_xmac_enable(params, vars, 0) ==
 +                          -ESRCH) {
 +                              DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
 +                              vars->link_up = 0;
 +                              vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
 +                              vars->link_status &= ~LINK_STATUS_LINK_UP;
 +                      }
 +              } else
 +                      bnx2x_umac_enable(params, vars, 0);
 +              bnx2x_set_led(params, vars,
 +                            LED_MODE_OPER, vars->line_speed);
 +      }
 +      if ((CHIP_IS_E1x(bp) ||
 +           CHIP_IS_E2(bp))) {
 +              if (link_10g) {
 +                      if (bnx2x_bmac_enable(params, vars, 0) ==
 +                          -ESRCH) {
 +                              DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
 +                              vars->link_up = 0;
 +                              vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
 +                              vars->link_status &= ~LINK_STATUS_LINK_UP;
 +                      }
 +
 +                      bnx2x_set_led(params, vars,
 +                                    LED_MODE_OPER, SPEED_10000);
 +              } else {
 +                      rc = bnx2x_emac_program(params, vars);
 +                      bnx2x_emac_enable(params, vars, 0);
 +
 +                      /* AN complete? */
 +                      if ((vars->link_status &
 +                           LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
 +                          && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
 +                          SINGLE_MEDIA_DIRECT(params))
 +                              bnx2x_set_gmii_tx_driver(params);
 +              }
 +      }
 +
 +      /* PBF - link up */
 +      if (CHIP_IS_E1x(bp))
 +              rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
 +                                     vars->line_speed);
 +
 +      /* disable drain */
 +      REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
 +
 +      /* update shared memory */
 +      bnx2x_update_mng(params, vars->link_status);
 +      msleep(20);
 +      return rc;
 +}
 +/*
 + * The bnx2x_link_update function should be called upon link
 + * interrupt.
 + * Link is considered up as follows:
 + * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
 + *   to be up
 + * - SINGLE_MEDIA - The link between the 577xx and the external
 + *   phy (XGXS) needs to be up, as well as the external link of
 + *   the phy (PHY_EXT1)
 + * - DUAL_MEDIA - The link between the 577xx and the first
 + *   external phy needs to be up, and at least one of the 2
 + *   external phy links must be up.
 + */
 +int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      struct link_vars phy_vars[MAX_PHYS];
 +      u8 port = params->port;
 +      u8 link_10g_plus, phy_index;
 +      u8 ext_phy_link_up = 0, cur_link_up;
 +      int rc = 0;
 +      u8 is_mi_int = 0;
 +      u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
 +      u8 active_external_phy = INT_PHY;
 +      vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
 +      for (phy_index = INT_PHY; phy_index < params->num_phys;
 +            phy_index++) {
 +              phy_vars[phy_index].flow_ctrl = 0;
 +              phy_vars[phy_index].link_status = 0;
 +              phy_vars[phy_index].line_speed = 0;
 +              phy_vars[phy_index].duplex = DUPLEX_FULL;
 +              phy_vars[phy_index].phy_link_up = 0;
 +              phy_vars[phy_index].link_up = 0;
 +              phy_vars[phy_index].fault_detected = 0;
 +      }
 +
 +      if (USES_WARPCORE(bp))
 +              bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);
 +
 +      DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
 +               port, (vars->phy_flags & PHY_XGXS_FLAG),
 +               REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
 +
 +      is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
 +                              port*0x18) > 0);
 +      DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
 +               REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
 +               is_mi_int,
 +               REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
 +
 +      DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
 +        REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
 +        REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
 +
 +      /* disable emac */
 +      if (!CHIP_IS_E3(bp))
 +              REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 +
 +      /*
 +       * Step 1:
 +       * Check external link change only for external phys, and apply
 +       * priority selection between them in case the link on both phys
 +       * is up. Note that instead of the common vars, a temporary
 +       * vars argument is used since each phy may have a different
 +       * link/speed/duplex result
 +       */
 +      for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 +            phy_index++) {
 +              struct bnx2x_phy *phy = &params->phy[phy_index];
 +              if (!phy->read_status)
 +                      continue;
 +              /* Read link status and params of this ext phy */
 +              cur_link_up = phy->read_status(phy, params,
 +                                             &phy_vars[phy_index]);
 +              if (cur_link_up) {
 +                      DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
 +                                 phy_index);
 +              } else {
 +                      DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
 +                                 phy_index);
 +                      continue;
 +              }
 +
 +              if (!ext_phy_link_up) {
 +                      ext_phy_link_up = 1;
 +                      active_external_phy = phy_index;
 +              } else {
 +                      switch (bnx2x_phy_selection(params)) {
 +                      case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
 +                      case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
 +                      /*
 +                       * In this option, the first PHY makes sure to pass the
 +                       * traffic through itself only.
 +                       * It's not clear how to reset the link on the second phy
 +                       */
 +                              active_external_phy = EXT_PHY1;
 +                              break;
 +                      case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
 +                      /*
 +                       * In this option, the first PHY makes sure to pass the
 +                       * traffic through the second PHY.
 +                       */
 +                              active_external_phy = EXT_PHY2;
 +                              break;
 +                      default:
 +                      /*
 +                       * Link indication on both PHYs with the following cases
 +                       * is invalid:
 +                       * - FIRST_PHY means that second phy wasn't initialized,
 +                       * hence its link is expected to be down
 +                       * - SECOND_PHY means that first phy should not be able
 +                       * to link up by itself (using configuration)
 +                       * - DEFAULT should be overridden during initialization
 +                       */
 +                              DP(NETIF_MSG_LINK, "Invalid link indication"
 +                                         " mpc=0x%x. DISABLING LINK !!!\n",
 +                                         params->multi_phy_config);
 +                              ext_phy_link_up = 0;
 +                              break;
 +                      }
 +              }
 +      }
 +      prev_line_speed = vars->line_speed;
 +      /*
 +       * Step 2:
 +       * Read the status of the internal phy. In case of
 +       * DIRECT_SINGLE_MEDIA board, this link is the external link,
 +       * otherwise this is the link between the 577xx and the first
 +       * external phy
 +       */
 +      if (params->phy[INT_PHY].read_status)
 +              params->phy[INT_PHY].read_status(
 +                      &params->phy[INT_PHY],
 +                      params, vars);
 +      /*
 +       * The INT_PHY flow control resides in the vars. This includes the
 +       * case where the speed or flow control are not set to AUTO.
 +       * Otherwise, the active external phy flow control result is set
 +       * to the vars. The ext_phy_line_speed is needed to check if the
 +       * speed is different between the internal phy and external phy.
 +       * This case may be the result of an intermediate link speed change.
 +       */
 +      if (active_external_phy > INT_PHY) {
 +              vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
 +              /*
 +               * Link speed is taken from the XGXS. AN and FC result from
 +               * the external phy.
 +               */
 +              vars->link_status |= phy_vars[active_external_phy].link_status;
 +
 +              /*
 +               * if active_external_phy is the first PHY and link is up -
 +               * disable TX on the second external PHY
 +               */
 +              if (active_external_phy == EXT_PHY1) {
 +                      if (params->phy[EXT_PHY2].phy_specific_func) {
 +                              DP(NETIF_MSG_LINK,
 +                                 "Disabling TX on EXT_PHY2\n");
 +                              params->phy[EXT_PHY2].phy_specific_func(
 +                                      &params->phy[EXT_PHY2],
 +                                      params, DISABLE_TX);
 +                      }
 +              }
 +
 +              ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
 +              vars->duplex = phy_vars[active_external_phy].duplex;
 +              if (params->phy[active_external_phy].supported &
 +                  SUPPORTED_FIBRE)
 +                      vars->link_status |= LINK_STATUS_SERDES_LINK;
 +              else
 +                      vars->link_status &= ~LINK_STATUS_SERDES_LINK;
 +              DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
 +                         active_external_phy);
 +      }
 +
 +      for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 +            phy_index++) {
 +              if (params->phy[phy_index].flags &
 +                  FLAGS_REARM_LATCH_SIGNAL) {
 +                      bnx2x_rearm_latch_signal(bp, port,
 +                                               phy_index ==
 +                                               active_external_phy);
 +                      break;
 +              }
 +      }
 +      DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
 +                 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
 +                 vars->link_status, ext_phy_line_speed);
 +      /*
 +       * Upon link speed change, set the NIG into drain mode. This deals
 +       * with a possible FIFO glitch due to the clock change when the
 +       * speed is decreased without a link-down indication
 +       */
 +
 +      if (vars->phy_link_up) {
 +              if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
 +                  (ext_phy_line_speed != vars->line_speed)) {
 +                      DP(NETIF_MSG_LINK, "Internal link speed %d is"
 +                                 " different than the external"
 +                                 " link speed %d\n", vars->line_speed,
 +                                 ext_phy_line_speed);
 +                      vars->phy_link_up = 0;
 +              } else if (prev_line_speed != vars->line_speed) {
 +                      REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
 +                             0);
 +                      msleep(1);
 +              }
 +      }
 +
 +      /* anything 10G and over uses the BMAC */
 +      link_10g_plus = (vars->line_speed >= SPEED_10000);
 +
 +      bnx2x_link_int_ack(params, vars, link_10g_plus);
 +
 +      /*
 +       * In case the external phy link is up and the internal link is
 +       * down (not initialized yet, probably right after link
 +       * initialization), it needs to be initialized.
 +       * Note that after a link down-up as a result of a cable plug,
 +       * the xgxs link would probably come back up without the need to
 +       * re-initialize it
 +       */
 +      if (!(SINGLE_MEDIA_DIRECT(params))) {
 +              DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
 +                         " init_preceding = %d\n", ext_phy_link_up,
 +                         vars->phy_link_up,
 +                         params->phy[EXT_PHY1].flags &
 +                         FLAGS_INIT_XGXS_FIRST);
 +              if (!(params->phy[EXT_PHY1].flags &
 +                    FLAGS_INIT_XGXS_FIRST)
 +                  && ext_phy_link_up && !vars->phy_link_up) {
 +                      vars->line_speed = ext_phy_line_speed;
 +                      if (vars->line_speed < SPEED_1000)
 +                              vars->phy_flags |= PHY_SGMII_FLAG;
 +                      else
 +                              vars->phy_flags &= ~PHY_SGMII_FLAG;
 +
 +                      if (params->phy[INT_PHY].config_init)
 +                              params->phy[INT_PHY].config_init(
 +                                      &params->phy[INT_PHY], params,
 +                                              vars);
 +              }
 +      }
 +      /*
 +       * Link is up only if both local phy and external phy (in case of
 +       * non-direct board) are up and no fault detected on active PHY.
 +       */
 +      vars->link_up = (vars->phy_link_up &&
 +                       (ext_phy_link_up ||
 +                        SINGLE_MEDIA_DIRECT(params)) &&
 +                       (phy_vars[active_external_phy].fault_detected == 0));
 +
 +      if (vars->link_up)
 +              rc = bnx2x_update_link_up(params, vars, link_10g_plus);
 +      else
 +              rc = bnx2x_update_link_down(params, vars);
 +
 +      return rc;
 +}
 +
 +
 +/*****************************************************************************/
 +/*                        External Phy section                             */
 +/*****************************************************************************/
 +void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
 +{
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 +                     MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 +      msleep(1);
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 +                     MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 +}
 +
 +static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
 +                                    u32 spirom_ver, u32 ver_addr)
 +{
 +      DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
 +               (u16)(spirom_ver>>16), (u16)spirom_ver, port);
 +
 +      if (ver_addr)
 +              REG_WR(bp, ver_addr, spirom_ver);
 +}
 +
 +static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
 +                                    struct bnx2x_phy *phy,
 +                                    u8 port)
 +{
 +      u16 fw_ver1, fw_ver2;
 +
 +      bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
 +      bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_ROM_VER2, &fw_ver2);
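 +      /* ROM_VER1 goes in the upper 16 bits, ROM_VER2 in the lower 16 */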
 +      bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
 +                                phy->ver_addr);
 +}
 +
 +static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
 +                                     struct bnx2x_phy *phy,
 +                                     struct link_vars *vars)
 +{
 +      u16 val;
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD,
 +                      MDIO_AN_REG_STATUS, &val);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD,
 +                      MDIO_AN_REG_STATUS, &val);
 +      if (val & (1<<5))
 +              vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
 +      if ((val & (1<<0)) == 0)
 +              vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
 +}
 +
 +/******************************************************************/
 +/*            common BCM8073/BCM8727 PHY SECTION                */
 +/******************************************************************/
 +static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      if (phy->req_line_speed == SPEED_10 ||
 +          phy->req_line_speed == SPEED_100) {
 +              vars->flow_ctrl = phy->req_flow_ctrl;
 +              return;
 +      }
 +
 +      if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
 +          (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
 +              u16 pause_result;
 +              u16 ld_pause;           /* local */
 +              u16 lp_pause;           /* link partner */
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_AN_DEVAD,
 +                              MDIO_AN_REG_CL37_FC_LD, &ld_pause);
 +
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_AN_DEVAD,
 +                              MDIO_AN_REG_CL37_FC_LP, &lp_pause);
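 +              /*
 +               * Combine the CL37 pause bits: the local advertisement ends
 +               * up in the higher bits and the link partner's in the lower
 +               * bits, then resolve the final flow control from the pair.
 +               */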
 +              pause_result = (ld_pause &
 +                              MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
 +              pause_result |= (lp_pause &
 +                               MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
 +
 +              bnx2x_pause_resolve(vars, pause_result);
 +              DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
 +                         pause_result);
 +      }
 +}
 +static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
 +                                           struct bnx2x_phy *phy,
 +                                           u8 port)
 +{
 +      u32 count = 0;
 +      u16 fw_ver1, fw_msgout;
 +      int rc = 0;
 +
 +      /* Boot port from external ROM  */
 +      /* EDC grst */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_GEN_CTRL,
 +                       0x0001);
 +
 +      /* ucode reboot and rst */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_GEN_CTRL,
 +                       0x008c);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 +
 +      /* Reset internal microprocessor */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_GEN_CTRL,
 +                       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 +
 +      /* Release srst bit */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_GEN_CTRL,
 +                       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 +
 +      /* Delay 100ms per the PHY specifications */
 +      msleep(100);
 +
 +      /* The 8073 sometimes takes longer to download */
 +      do {
 +              count++;
 +              if (count > 300) {
 +                      DP(NETIF_MSG_LINK,
 +                               "bnx2x_8073_8727_external_rom_boot port %x:"
 +                               "Download failed. fw version = 0x%x\n",
 +                               port, fw_ver1);
 +                      rc = -EINVAL;
 +                      break;
 +              }
 +
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_ROM_VER1, &fw_ver1);
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
 +
 +              msleep(1);
 +      } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
 +                      ((fw_msgout & 0xff) != 0x03 && (phy->type ==
 +                      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
 +
 +      /* Clear ser_boot_ctl bit */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 +      bnx2x_save_bcm_spirom_ver(bp, phy, port);
 +
 +      DP(NETIF_MSG_LINK,
 +               "bnx2x_8073_8727_external_rom_boot port %x:"
 +               "Download complete. fw version = 0x%x\n",
 +               port, fw_ver1);
 +
 +      return rc;
 +}
 +
 +/******************************************************************/
 +/*                    BCM8073 PHY SECTION                       */
 +/******************************************************************/
 +static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
 +{
 +      /* This is required only for the 8073 A1, firmware version 0x102 */
 +      u16 val;
 +
 +      /* Read 8073 HW revision*/
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_8073_CHIP_REV, &val);
 +
 +      if (val != 1) {
 +              /* No need to workaround in 8073 A1 */
 +              return 0;
 +      }
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_ROM_VER2, &val);
 +
 +      /* SNR should be applied only for version 0x102 */
 +      if (val != 0x102)
 +              return 0;
 +
 +      return 1;
 +}
 +
 +static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
 +{
 +      u16 val, cnt, cnt1;
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_8073_CHIP_REV, &val);
 +
 +      if (val > 0) {
 +              /* No need to workaround in 8073 A1 */
 +              return 0;
 +      }
 +      /* XAUI workaround in 8073 A0: */
 +
 +      /*
 +       * After loading the boot ROM and restarting Autoneg, poll
 +       * Dev1, Reg $C820:
 +       */
 +
 +      for (cnt = 0; cnt < 1000; cnt++) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
 +                              &val);
 +                /*
 +                 * If bit [14] = 0 or bit [13] = 0, continue on with
 +                 * system initialization (XAUI work-around not required, as
 +                 * these bits indicate 2.5G or 1G link up).
 +                 */
 +              if (!(val & (1<<14)) || !(val & (1<<13))) {
 +                      DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
 +                      return 0;
 +              } else if (!(val & (1<<15))) {
 +                      DP(NETIF_MSG_LINK, "bit 15 went off\n");
 +                      /*
 +                       * If bit 15 is 0, then poll Dev1, Reg $C841 until its
 +                       * MSB (bit 15) goes to 1 (indicating that the XAUI
 +                       * workaround has completed), then continue on with
 +                       * system initialization.
 +                       */
 +                      for (cnt1 = 0; cnt1 < 1000; cnt1++) {
 +                              bnx2x_cl45_read(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_8073_XAUI_WA, &val);
 +                              if (val & (1<<15)) {
 +                                      DP(NETIF_MSG_LINK,
 +                                        "XAUI workaround has completed\n");
 +                                      return 0;
 +                               }
 +                               msleep(3);
 +                      }
 +                      break;
 +              }
 +              msleep(3);
 +      }
 +      DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
 +      return -EINVAL;
 +}
 +
 +static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
 +{
 +      /* Force KR or KX */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
 +}
 +
 +static void bnx2x_8073_set_pause_cl37(struct link_params *params,
 +                                    struct bnx2x_phy *phy,
 +                                    struct link_vars *vars)
 +{
 +      u16 cl37_val;
 +      struct bnx2x *bp = params->bp;
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
 +
 +      cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
 +      /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
 +      bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
 +      if ((vars->ieee_fc &
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
 +              cl37_val |=  MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
 +      }
 +      if ((vars->ieee_fc &
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
 +              cl37_val |=  MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
 +      }
 +      if ((vars->ieee_fc &
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
 +          MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
 +              cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
 +      }
 +      DP(NETIF_MSG_LINK,
 +               "Ext phy AN advertize cl37 0x%x\n", cl37_val);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
 +      msleep(500);
 +}
 +
 +static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val = 0, tmp1;
 +      u8 gpio_port;
 +      DP(NETIF_MSG_LINK, "Init 8073\n");
 +
 +      if (CHIP_IS_E2(bp))
 +              gpio_port = BP_PATH(bp);
 +      else
 +              gpio_port = params->port;
 +      /* Restore normal power mode*/
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 +                     MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 +
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 +                     MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 +
 +      /* enable LASI */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,  0x0004);
 +
 +      bnx2x_8073_set_pause_cl37(params, phy, vars);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
 +
 +      DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 +
 +      /* Swap polarity if required - Must be done only in non-1G mode */
 +      if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
 +              /* Configure the 8073 to swap _P and _N of the KR lines */
 +              DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
 +              /* 10G Rx/Tx and 1G Tx signal polarity swap */
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
 +                               (val | (3<<9)));
 +      }
 +
 +
 +      /* Enable CL37 BAM */
 +      if (REG_RD(bp, params->shmem_base +
 +                       offsetof(struct shmem_region, dev_info.
 +                                port_hw_config[params->port].default_cfg)) &
 +          PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
 +
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_AN_DEVAD,
 +                              MDIO_AN_REG_8073_BAM, &val);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD,
 +                               MDIO_AN_REG_8073_BAM, val | 1);
 +              DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
 +      }
 +      if (params->loopback_mode == LOOPBACK_EXT) {
 +              bnx2x_807x_force_10G(bp, phy);
 +              DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
 +              return 0;
 +      } else {
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
 +      }
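 +      /*
 +       * Advertisement bits: bit 7 selects 10G, bit 5 selects 1G (2.5G also
 +       * requires the 1G advertisement)
 +       */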
 +      if (phy->req_line_speed != SPEED_AUTO_NEG) {
 +              if (phy->req_line_speed == SPEED_10000) {
 +                      val = (1<<7);
 +              } else if (phy->req_line_speed ==  SPEED_2500) {
 +                      val = (1<<5);
 +                      /*
 +                       * Note that 2.5G works only when used with 1G
 +                       * advertisement
 +                       */
 +              } else
 +                      val = (1<<5);
 +      } else {
 +              val = 0;
 +              if (phy->speed_cap_mask &
 +                      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 +                      val |= (1<<7);
 +
 +              /* Note that 2.5G works only when used with 1G advertisement */
 +              if (phy->speed_cap_mask &
 +                      (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
 +                       PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
 +                      val |= (1<<5);
 +              DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
 +      }
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
 +      bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
 +
 +      if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
 +           (phy->req_line_speed == SPEED_AUTO_NEG)) ||
 +          (phy->req_line_speed == SPEED_2500)) {
 +              u16 phy_ver;
 +              /* Allow 2.5G for A1 and above */
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
 +                              &phy_ver);
 +              DP(NETIF_MSG_LINK, "Add 2.5G\n");
 +              if (phy_ver > 0)
 +                      tmp1 |= 1;
 +              else
 +                      tmp1 &= 0xfffe;
 +      } else {
 +              DP(NETIF_MSG_LINK, "Disable 2.5G\n");
 +              tmp1 &= 0xfffe;
 +      }
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
 +      /* Add support for CL37 (passive mode) II */
 +
 +      bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
 +                       (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
 +                                0x20 : 0x40)));
 +
 +      /* Add support for CL37 (passive mode) III */
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 +
 +      /*
 +       * The SNR will improve by about 2dB by changing the BW and FFE main
 +       * tap. The remaining commands are executed after the link is up.
 +       * Change the FFE main cursor to 5 in the EDC register.
 +       */
 +      if (bnx2x_8073_is_snr_needed(bp, phy))
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
 +                               0xFB0C);
 +
 +      /* Enable FEC (Forward Error Correction) Request in the AN */
 +      bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
 +      tmp1 |= (1<<15);
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
 +
 +      bnx2x_ext_phy_set_pause(params, phy, vars);
 +
 +      /* Restart autoneg */
 +      msleep(500);
 +      bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
 +      DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
 +                 ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
 +      return 0;
 +}
 +
 +static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
 +                               struct link_params *params,
 +                               struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 link_up = 0;
 +      u16 val1, val2;
 +      u16 link_status = 0;
 +      u16 an1000_status = 0;
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
 +
 +      DP(NETIF_MSG_LINK, "8073 LASI status 0x%x\n", val1);
 +
 +      /* clear the interrupt LASI status register */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
 +      DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
 +      /* Clear MSG-OUT */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 +
 +      /* Check the LASI */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
 +
 +      DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
 +
 +      /* Check the link status */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
 +      DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
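 +      /* Bit 2 of the PMA status register (1.1.2) reflects the Rx link */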
 +      link_up = ((val1 & 4) == 4);
 +      DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
 +
 +      if (link_up &&
 +           ((phy->req_line_speed != SPEED_10000))) {
 +              if (bnx2x_8073_xaui_wa(bp, phy) != 0)
 +                      return 0;
 +      }
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
 +
 +      /* Check the link status on 1.1.2 */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
 +      DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x, "
 +                 "an_link_status=0x%x\n", val2, val1, an1000_status);
 +
 +      link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
 +      if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
 +              /*
 +               * The SNR will improve by about 2dB by changing the BW and FFE
 +               * main tap. The first write to change the FFE main tap is done
 +               * before restarting AN. Change the PLL bandwidth in the EDC
 +               * register.
 +               */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
 +                               0x26BC);
 +
 +              /* Change CDR Bandwidth in EDC register */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
 +                               0x0333);
 +      }
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
 +                      &link_status);
 +
 +      /* Bits 0..2 --> speed detected, bits 13..15--> link is down */
 +      if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
 +              link_up = 1;
 +              vars->line_speed = SPEED_10000;
 +              DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
 +                         params->port);
 +      } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
 +              link_up = 1;
 +              vars->line_speed = SPEED_2500;
 +              DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
 +                         params->port);
 +      } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
 +              link_up = 1;
 +              vars->line_speed = SPEED_1000;
 +              DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
 +                         params->port);
 +      } else {
 +              link_up = 0;
 +              DP(NETIF_MSG_LINK, "port %x: External link is down\n",
 +                         params->port);
 +      }
 +
 +      if (link_up) {
 +              /* Swap polarity if required */
 +              if (params->lane_config &
 +                  PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
 +                      /* Configure the 8073 to swap P and N of the KR lines */
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_XS_DEVAD,
 +                                      MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
 +                      /*
 +                       * Set bit 3 to invert Rx in 1G mode and clear this bit
 +                       * when it's in 10G mode.
 +                       */
 +                      if (vars->line_speed == SPEED_1000) {
 +                              DP(NETIF_MSG_LINK, "Swapping 1G polarity for "
 +                                            "the 8073\n");
 +                              val1 |= (1<<3);
 +                      } else
 +                              val1 &= ~(1<<3);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_XS_DEVAD,
 +                                       MDIO_XS_REG_8073_RX_CTRL_PCIE,
 +                                       val1);
 +              }
 +              bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 +              bnx2x_8073_resolve_fc(phy, params, vars);
 +              vars->duplex = DUPLEX_FULL;
 +      }
 +      return link_up;
 +}
 +
 +static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
 +                                struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 gpio_port;
 +      if (CHIP_IS_E2(bp))
 +              gpio_port = BP_PATH(bp);
 +      else
 +              gpio_port = params->port;
 +      DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
 +         gpio_port);
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 +                     MISC_REGISTERS_GPIO_OUTPUT_LOW,
 +                     gpio_port);
 +}
 +
 +/******************************************************************/
 +/*                    BCM8705 PHY SECTION                       */
 +/******************************************************************/
 +static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "init 8705\n");
 +      /* Restore normal power mode*/
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 +                     MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 +      /* HW reset */
 +      bnx2x_ext_phy_hw_reset(bp, params->port);
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
 +      bnx2x_wait_reset_complete(bp, phy, params);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
 +      /* BCM8705 doesn't have microcode, hence the 0 */
 +      bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
 +      return 0;
 +}
 +
 +static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
 +                               struct link_params *params,
 +                               struct link_vars *vars)
 +{
 +      u8 link_up = 0;
 +      u16 val1, rx_sd;
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "read status 8705\n");
 +      bnx2x_cl45_read(bp, phy,
 +                    MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
 +      DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                    MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
 +      DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                    MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                    MDIO_PMA_DEVAD, 0xc809, &val1);
 +      bnx2x_cl45_read(bp, phy,
 +                    MDIO_PMA_DEVAD, 0xc809, &val1);
 +
 +      DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
 +      link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
 +      if (link_up) {
 +              vars->line_speed = SPEED_10000;
 +              bnx2x_ext_phy_resolve_fc(phy, params, vars);
 +      }
 +      return link_up;
 +}
 +
 +/******************************************************************/
 +/*                    SFP+ module Section                       */
 +/******************************************************************/
 +static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
 +                                         struct bnx2x_phy *phy,
 +                                         u8 pmd_dis)
 +{
 +      struct bnx2x *bp = params->bp;
 +      /*
 +       * Disable transmitter only for bootcodes which can enable it afterwards
 +       * (for D3 link)
 +       */
 +      if (pmd_dis) {
 +              if (params->feature_config_flags &
 +                   FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED)
 +                      DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n");
 +              else {
 +                      DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n");
 +                      return;
 +              }
 +      } else
 +              DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n");
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_TX_DISABLE, pmd_dis);
 +}
 +
 +static u8 bnx2x_get_gpio_port(struct link_params *params)
 +{
 +      u8 gpio_port;
 +      u32 swap_val, swap_override;
 +      struct bnx2x *bp = params->bp;
 +      if (CHIP_IS_E2(bp))
 +              gpio_port = BP_PATH(bp);
 +      else
 +              gpio_port = params->port;
 +      swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
 +      swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
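 +      /* Flip the GPIO port when the port-swap strap is set and overridden */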
 +      return gpio_port ^ (swap_val && swap_override);
 +}
 +
 +static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params,
 +                                         struct bnx2x_phy *phy,
 +                                         u8 tx_en)
 +{
 +      u16 val;
 +      u8 port = params->port;
 +      struct bnx2x *bp = params->bp;
 +      u32 tx_en_mode;
 +
 +      /* Disable/Enable transmitter (TX laser of the SFP+ module) */
 +      tx_en_mode = REG_RD(bp, params->shmem_base +
 +                          offsetof(struct shmem_region,
 +                                   dev_info.port_hw_config[port].sfp_ctrl)) &
 +              PORT_HW_CFG_TX_LASER_MASK;
 +      DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
 +                         "mode = %x\n", tx_en, port, tx_en_mode);
 +      switch (tx_en_mode) {
 +      case PORT_HW_CFG_TX_LASER_MDIO:
 +
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_PHY_IDENTIFIER,
 +                              &val);
 +
 +              if (tx_en)
 +                      val &= ~(1<<15);
 +              else
 +                      val |= (1<<15);
 +
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_PHY_IDENTIFIER,
 +                               val);
 +      break;
 +      case PORT_HW_CFG_TX_LASER_GPIO0:
 +      case PORT_HW_CFG_TX_LASER_GPIO1:
 +      case PORT_HW_CFG_TX_LASER_GPIO2:
 +      case PORT_HW_CFG_TX_LASER_GPIO3:
 +      {
 +              u16 gpio_pin;
 +              u8 gpio_port, gpio_mode;
 +              if (tx_en)
 +                      gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
 +              else
 +                      gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
 +
 +              gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
 +              gpio_port = bnx2x_get_gpio_port(params);
 +              bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
 +              break;
 +      }
 +      default:
 +              DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
 +              break;
 +      }
 +}
 +
 +static void bnx2x_sfp_set_transmitter(struct link_params *params,
 +                                    struct bnx2x_phy *phy,
 +                                    u8 tx_en)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en);
 +      if (CHIP_IS_E3(bp))
 +              bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
 +      else
 +              bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
 +}
 +
 +static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 +                                           struct link_params *params,
 +                                           u16 addr, u8 byte_cnt, u8 *o_buf)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val = 0;
 +      u16 i;
 +      if (byte_cnt > 16) {
 +              DP(NETIF_MSG_LINK,
 +                 "Reading from eeprom is limited to 16 bytes\n");
 +              return -EINVAL;
 +      }
 +      /* Set the read command byte count */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
 +                       (byte_cnt | 0xa000));
 +
 +      /* Set the read command address */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
 +                       addr);
 +
 +      /* Activate read command */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 +                       0x2c0f);
 +
 +      /* Wait up to 500us for command complete status */
 +      for (i = 0; i < 100; i++) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 +              if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 +                  MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
 +                      break;
 +              udelay(5);
 +      }
 +
 +      if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
 +                  MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
 +              DP(NETIF_MSG_LINK,
 +                       "Got bad status 0x%x when reading from SFP+ EEPROM\n",
 +                       (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
 +              return -EINVAL;
 +      }
 +
 +      /* Read the buffer */
 +      for (i = 0; i < byte_cnt; i++) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
 +              o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
 +      }
 +
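 +      /* Wait for the two-wire interface to return to idle */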
 +      for (i = 0; i < 100; i++) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 +              if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 +                  MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 +                      return 0;
 +              msleep(1);
 +      }
 +      return -EINVAL;
 +}
 +
 +static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 +                                               struct link_params *params,
 +                                               u16 addr, u8 byte_cnt,
 +                                               u8 *o_buf)
 +{
 +      int rc = 0;
 +      u8 i, j = 0, cnt = 0;
 +      u32 data_array[4];
 +      u16 addr32;
 +      struct bnx2x *bp = params->bp;
 +      /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
 +                                      " addr %d, cnt %d\n",
 +                                      addr, byte_cnt);*/
 +      if (byte_cnt > 16) {
 +              DP(NETIF_MSG_LINK,
 +                 "Reading from eeprom is limited to 16 bytes\n");
 +              return -EINVAL;
 +      }
 +
 +      /* 4 byte aligned address */
 +      addr32 = addr & (~0x3);
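 +      /* Retry the two-wire read up to I2C_WA_RETRY_CNT times on failure */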
 +      do {
 +              rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
 +                                  data_array);
 +      } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
 +
 +      if (rc == 0) {
 +              for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
 +                      o_buf[j] = *((u8 *)data_array + i);
 +                      j++;
 +              }
 +      }
 +
 +      return rc;
 +}
 +
 +static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 +                                           struct link_params *params,
 +                                           u16 addr, u8 byte_cnt, u8 *o_buf)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val, i;
 +
 +      if (byte_cnt > 16) {
 +              DP(NETIF_MSG_LINK,
 +                 "Reading from eeprom is limited to 16 bytes\n");
 +              return -EINVAL;
 +      }
 +
 +      /* Need to read from 1.8000 to clear it */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 +                      &val);
 +
 +      /* Set the read command byte count */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
 +                       ((byte_cnt < 2) ? 2 : byte_cnt));
 +
 +      /* Set the read command address */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
 +                       addr);
 +      /* Set the destination address */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       0x8004,
 +                       MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
 +
 +      /* Activate read command */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 +                       0x8002);
 +      /*
 +       * Wait appropriate time for two-wire command to finish before
 +       * polling the status register
 +       */
 +      msleep(1);
 +
 +      /* Wait up to 500us for command complete status */
 +      for (i = 0; i < 100; i++) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 +              if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 +                  MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
 +                      break;
 +              udelay(5);
 +      }
 +
 +      if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
 +                  MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
 +              DP(NETIF_MSG_LINK,
 +                       "Got bad status 0x%x when reading from SFP+ EEPROM\n",
 +                       (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
 +              return -EFAULT;
 +      }
 +
 +      /* Read the buffer */
 +      for (i = 0; i < byte_cnt; i++) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
 +              o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
 +      }
 +
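 +      /* Wait for the two-wire interface to return to idle */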
 +      for (i = 0; i < 100; i++) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 +              if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 +                  MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 +                      return 0;
 +              msleep(1);
 +      }
 +
 +      return -EINVAL;
 +}
 +
 +int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 +                               struct link_params *params, u16 addr,
 +                               u8 byte_cnt, u8 *o_buf)
 +{
 +      int rc = -EINVAL;
 +      switch (phy->type) {
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
 +              rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
 +                                                     byte_cnt, o_buf);
 +      break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
 +              rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
 +                                                     byte_cnt, o_buf);
 +      break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
 +              rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
 +                                                         byte_cnt, o_buf);
 +      break;
 +      }
 +      return rc;
 +}
 +
 +static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 +                            struct link_params *params,
 +                            u16 *edc_mode)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u32 sync_offset = 0, phy_idx, media_types;
 +      u8 val, check_limiting_mode = 0;
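 +      /* Default to limiting mode until the module type says otherwise */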
 +      *edc_mode = EDC_MODE_LIMITING;
 +
 +      phy->media_type = ETH_PHY_UNSPECIFIED;
 +      /* First check for copper cable */
 +      if (bnx2x_read_sfp_module_eeprom(phy,
 +                                       params,
 +                                       SFP_EEPROM_CON_TYPE_ADDR,
 +                                       1,
 +                                       &val) != 0) {
 +              DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
 +              return -EINVAL;
 +      }
 +
 +      switch (val) {
 +      case SFP_EEPROM_CON_TYPE_VAL_COPPER:
 +      {
 +              u8 copper_module_type;
 +              phy->media_type = ETH_PHY_DA_TWINAX;
 +              /*
 +               * Check if it's an active cable (includes SFP+ module)
 +               * or a passive cable
 +               */
 +              if (bnx2x_read_sfp_module_eeprom(phy,
 +                                             params,
 +                                             SFP_EEPROM_FC_TX_TECH_ADDR,
 +                                             1,
 +                                             &copper_module_type) != 0) {
 +                      DP(NETIF_MSG_LINK,
 +                              "Failed to read copper-cable-type"
 +                              " from SFP+ EEPROM\n");
 +                      return -EINVAL;
 +              }
 +
 +              if (copper_module_type &
 +                  SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
 +                      DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
 +                      check_limiting_mode = 1;
 +              } else if (copper_module_type &
 +                      SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
 +                              DP(NETIF_MSG_LINK,
 +                                 "Passive Copper cable detected\n");
 +                              *edc_mode =
 +                                    EDC_MODE_PASSIVE_DAC;
 +              } else {
 +                      DP(NETIF_MSG_LINK,
 +                         "Unknown copper-cable-type 0x%x !!!\n",
 +                         copper_module_type);
 +                      return -EINVAL;
 +              }
 +              break;
 +      }
 +      case SFP_EEPROM_CON_TYPE_VAL_LC:
 +              phy->media_type = ETH_PHY_SFP_FIBER;
 +              DP(NETIF_MSG_LINK, "Optic module detected\n");
 +              check_limiting_mode = 1;
 +              break;
 +      default:
 +              DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
 +                       val);
 +              return -EINVAL;
 +      }
 +      sync_offset = params->shmem_base +
 +              offsetof(struct shmem_region,
 +                       dev_info.port_hw_config[params->port].media_type);
 +      media_types = REG_RD(bp, sync_offset);
 +      /* Update media type for non-PMF sync */
 +      for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
 +              if (&(params->phy[phy_idx]) == phy) {
 +                      media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
 +                              (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
 +                      media_types |= ((phy->media_type &
 +                                      PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
 +                              (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
 +                      break;
 +              }
 +      }
 +      REG_WR(bp, sync_offset, media_types);
 +      if (check_limiting_mode) {
 +              u8 options[SFP_EEPROM_OPTIONS_SIZE];
 +              if (bnx2x_read_sfp_module_eeprom(phy,
 +                                               params,
 +                                               SFP_EEPROM_OPTIONS_ADDR,
 +                                               SFP_EEPROM_OPTIONS_SIZE,
 +                                               options) != 0) {
 +                      DP(NETIF_MSG_LINK,
 +                         "Failed to read Option field from module EEPROM\n");
 +                      return -EINVAL;
 +              }
 +              if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
 +                      *edc_mode = EDC_MODE_LINEAR;
 +              else
 +                      *edc_mode = EDC_MODE_LIMITING;
 +      }
 +      DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
 +      return 0;
 +}
 +/*
 + * This function reads the relevant fields from the module (SFP+) and verifies
 + * that it is compliant with this board
 + */
 +static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
 +                                 struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u32 val, cmd;
 +      u32 fw_resp, fw_cmd_param;
 +      char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
 +      char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
 +      phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
 +      val = REG_RD(bp, params->shmem_base +
 +                       offsetof(struct shmem_region, dev_info.
 +                                port_feature_config[params->port].config));
 +      if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 +          PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
 +              DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
 +              return 0;
 +      }
 +
 +      if (params->feature_config_flags &
 +          FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
 +              /* Use specific phy request */
 +              cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
 +      } else if (params->feature_config_flags &
 +                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
 +              /* Use first phy request only in case of non-dual media*/
 +              if (DUAL_MEDIA(params)) {
 +                      DP(NETIF_MSG_LINK,
 +                         "FW does not support OPT MDL verification\n");
 +                      return -EINVAL;
 +              }
 +              cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
 +      } else {
 +              /* No support in OPT MDL detection */
 +              DP(NETIF_MSG_LINK,
 +                 "FW does not support OPT MDL verification\n");
 +              return -EINVAL;
 +      }
 +
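 +      /* Ask the FW to verify the module against the approved vendor list */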
 +      fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
 +      fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
 +      if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
 +              DP(NETIF_MSG_LINK, "Approved module\n");
 +              return 0;
 +      }
 +
 +      /* format the warning message */
 +      if (bnx2x_read_sfp_module_eeprom(phy,
 +                                       params,
 +                                       SFP_EEPROM_VENDOR_NAME_ADDR,
 +                                       SFP_EEPROM_VENDOR_NAME_SIZE,
 +                                       (u8 *)vendor_name))
 +              vendor_name[0] = '\0';
 +      else
 +              vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
 +      if (bnx2x_read_sfp_module_eeprom(phy,
 +                                       params,
 +                                       SFP_EEPROM_PART_NO_ADDR,
 +                                       SFP_EEPROM_PART_NO_SIZE,
 +                                       (u8 *)vendor_pn))
 +              vendor_pn[0] = '\0';
 +      else
 +              vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
 +
 +      netdev_err(bp->dev,  "Warning: Unqualified SFP+ module detected,"
 +                            " Port %d from %s part number %s\n",
 +                       params->port, vendor_name, vendor_pn);
 +      phy->flags |= FLAGS_SFP_NOT_APPROVED;
 +      return -EINVAL;
 +}
 +
 +static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 +                                               struct link_params *params)
 +
 +{
 +      u8 val;
 +      struct bnx2x *bp = params->bp;
 +      u16 timeout;
 +      /*
 +       * Initialization time after hot-plug may take up to 300ms for
 +       * some PHY types (e.g. JDSU)
 +       */
 +
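 +      /* Poll the module EEPROM every 5ms, for up to 300ms in total */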
 +      for (timeout = 0; timeout < 60; timeout++) {
 +              if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
 +                  == 0) {
 +                      DP(NETIF_MSG_LINK,
 +                         "SFP+ module initialization took %d ms\n",
 +                         timeout * 5);
 +                      return 0;
 +              }
 +              msleep(5);
 +      }
 +      return -EINVAL;
 +}
 +
 +static void bnx2x_8727_power_module(struct bnx2x *bp,
 +                                  struct bnx2x_phy *phy,
 +                                  u8 is_power_up) {
 +      /* Make sure the GPIOs are not being used for LED mode */
 +      u16 val;
 +      /*
 +       * In the GPIO register, bit 4 is used to determine if the GPIOs are
 +       * operating as INPUT or as OUTPUT. A value of 1 selects input, 0
 +       * selects output.
 +       * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
 +       * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1
 +       * where the 1st bit is the over-current (input only), and the 2nd bit
 +       * is for power (output only).
 +       *
 +       * In case the NOC feature is disabled and power is up, set GPIO
 +       * control as input to enable listening for the over-current indication
 +       */
 +      if (phy->flags & FLAGS_NOC)
 +              return;
 +      if (is_power_up)
 +              val = (1<<4);
 +      else
 +              /*
 +               * Set GPIO control to OUTPUT, and set the power bit
 +               * according to is_power_up
 +               */
 +              val = (1<<1);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_8727_GPIO_CTRL,
 +                       val);
 +}
 +
 +static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
 +                                      struct bnx2x_phy *phy,
 +                                      u16 edc_mode)
 +{
 +      u16 cur_limiting_mode;
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_ROM_VER2,
 +                      &cur_limiting_mode);
 +      DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
 +               cur_limiting_mode);
 +
 +      if (edc_mode == EDC_MODE_LIMITING) {
 +              DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_ROM_VER2,
 +                               EDC_MODE_LIMITING);
 +      } else { /* LRM mode (default) */
 +
 +              DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
 +
 +              /*
 +               * Changing to LRM mode takes quite a few seconds. So do it only
 +               * if the current mode is limiting (the default is LRM)
 +               */
 +              if (cur_limiting_mode != EDC_MODE_LIMITING)
 +                      return 0;
 +
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_LRM_MODE,
 +                               0);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_ROM_VER2,
 +                               0x128);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_MISC_CTRL0,
 +                               0x4008);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_LRM_MODE,
 +                               0xaaaa);
 +      }
 +      return 0;
 +}
 +
 +static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
 +                                      struct bnx2x_phy *phy,
 +                                      u16 edc_mode)
 +{
 +      u16 phy_identifier;
 +      u16 rom_ver2_val;
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_PHY_IDENTIFIER,
 +                      &phy_identifier);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_PHY_IDENTIFIER,
 +                       (phy_identifier & ~(1<<9)));
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_ROM_VER2,
 +                      &rom_ver2_val);
 +      /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_ROM_VER2,
 +                       (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_PHY_IDENTIFIER,
 +                       (phy_identifier | (1<<9)));
 +
 +      return 0;
 +}
 +
 +static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
 +                                   struct link_params *params,
 +                                   u32 action)
 +{
 +      struct bnx2x *bp = params->bp;
 +
 +      switch (action) {
 +      case DISABLE_TX:
 +              bnx2x_sfp_set_transmitter(params, phy, 0);
 +              break;
 +      case ENABLE_TX:
 +              if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
 +                      bnx2x_sfp_set_transmitter(params, phy, 1);
 +              break;
 +      default:
 +              DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
 +                 action);
 +              return;
 +      }
 +}
 +
 +static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
 +                                         u8 gpio_mode)
 +{
 +      struct bnx2x *bp = params->bp;
 +
 +      u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
 +                          offsetof(struct shmem_region,
 +                      dev_info.port_hw_config[params->port].sfp_ctrl)) &
 +              PORT_HW_CFG_FAULT_MODULE_LED_MASK;
 +      switch (fault_led_gpio) {
 +      case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
 +              return;
 +      case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
 +      case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
 +      case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
 +      case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
 +      {
 +              u8 gpio_port = bnx2x_get_gpio_port(params);
 +              u16 gpio_pin = fault_led_gpio -
 +                      PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
 +              DP(NETIF_MSG_LINK, "Set fault module-detected led "
 +                                 "pin %x port %x mode %x\n",
 +                             gpio_pin, gpio_port, gpio_mode);
 +              bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
 +      }
 +      break;
 +      default:
 +              DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
 +                             fault_led_gpio);
 +      }
 +}
 +
 +static void bnx2x_set_e3_module_fault_led(struct link_params *params,
 +                                        u8 gpio_mode)
 +{
 +      u32 pin_cfg;
 +      u8 port = params->port;
 +      struct bnx2x *bp = params->bp;
 +      pin_cfg = (REG_RD(bp, params->shmem_base +
 +                       offsetof(struct shmem_region,
 +                                dev_info.port_hw_config[port].e3_sfp_ctrl)) &
 +              PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
 +              PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
 +      DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n",
 +                     gpio_mode, pin_cfg);
 +      bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode);
 +}
 +
 +static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
 +                                         u8 gpio_mode)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
 +      if (CHIP_IS_E3(bp)) {
 +              /*
 +               * Low ==> the SFP+ module is supported, otherwise
 +               * High ==> the SFP+ module is not on the approved vendor list
 +               */
 +              bnx2x_set_e3_module_fault_led(params, gpio_mode);
 +      } else
 +              bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
 +}
 +
 +static void bnx2x_warpcore_power_module(struct link_params *params,
 +                                      struct bnx2x_phy *phy,
 +                                      u8 power)
 +{
 +      u32 pin_cfg;
 +      struct bnx2x *bp = params->bp;
 +
 +      pin_cfg = (REG_RD(bp, params->shmem_base +
 +                        offsetof(struct shmem_region,
 +                      dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
 +                      PORT_HW_CFG_E3_PWR_DIS_MASK) >>
 +                      PORT_HW_CFG_E3_PWR_DIS_SHIFT;
 +
 +      if (pin_cfg == PIN_CFG_NA)
 +              return;
 +      DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
 +                     power, pin_cfg);
 +      /*
 +       * Low ==> corresponding SFP+ module is powered
 +       * High ==> the SFP+ module is powered down
 +       */
 +      bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
 +}
 +
 +static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
 +                                  struct link_params *params)
 +{
 +      bnx2x_warpcore_power_module(params, phy, 0);
 +}
 +
 +static void bnx2x_power_sfp_module(struct link_params *params,
 +                                 struct bnx2x_phy *phy,
 +                                 u8 power)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power);
 +
 +      switch (phy->type) {
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
 +              bnx2x_8727_power_module(params->bp, phy, power);
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
 +              bnx2x_warpcore_power_module(params, phy, power);
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
 +                                           struct bnx2x_phy *phy,
 +                                           u16 edc_mode)
 +{
 +      u16 val = 0;
 +      u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
 +      struct bnx2x *bp = params->bp;
 +
 +      u8 lane = bnx2x_get_warpcore_lane(phy, params);
 +      /* This is a global register which controls all lanes */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
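 +      /* Each lane owns a 4-bit mode field within this register */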
 +      val &= ~(0xf << (lane << 2));
 +
 +      switch (edc_mode) {
 +      case EDC_MODE_LINEAR:
 +      case EDC_MODE_LIMITING:
 +              mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
 +              break;
 +      case EDC_MODE_PASSIVE_DAC:
 +              mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      val |= (mode << (lane << 2));
 +      bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 +                       MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
 +      /* A read-back is required here */
 +      bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                      MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
 +
 +      /* Restart microcode to re-read the new mode */
 +      bnx2x_warpcore_reset_lane(bp, phy, 1);
 +      bnx2x_warpcore_reset_lane(bp, phy, 0);
 +
 +}
 +
 +static void bnx2x_set_limiting_mode(struct link_params *params,
 +                                  struct bnx2x_phy *phy,
 +                                  u16 edc_mode)
 +{
 +      switch (phy->type) {
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
 +              bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode);
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
 +              bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode);
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
 +              bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode);
 +              break;
 +      }
 +}
 +
 +int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
 +                             struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 edc_mode;
 +      int rc = 0;
 +
 +      u32 val = REG_RD(bp, params->shmem_base +
 +                           offsetof(struct shmem_region, dev_info.
 +                                   port_feature_config[params->port].config));
 +
 +      DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
 +               params->port);
 +      /* Power up module */
 +      bnx2x_power_sfp_module(params, phy, 1);
 +      if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
 +              DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
 +              return -EINVAL;
 +      } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
 +              /* check SFP+ module compatibility */
 +              DP(NETIF_MSG_LINK, "Module verification failed!!\n");
 +              rc = -EINVAL;
 +              /* Turn on fault module-detected led */
 +              bnx2x_set_sfp_module_fault_led(params,
 +                                             MISC_REGISTERS_GPIO_HIGH);
 +
 +              /* Check whether the SFP+ module needs to be powered down */
 +              if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 +                   PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
 +                      DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
 +                      bnx2x_power_sfp_module(params, phy, 0);
 +                      return rc;
 +              }
 +      } else {
 +              /* Turn off fault module-detected led */
 +              bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
 +      }
 +
 +      /*
 +       * Check and set limiting mode / LRM mode on 8726. On 8727 it
 +       * is done automatically
 +       */
 +      bnx2x_set_limiting_mode(params, phy, edc_mode);
 +
 +      /*
 +       * Enable transmit for this module if the module is approved, or
 +       * if unapproved modules should also enable the Tx laser
 +       */
 +      if (rc == 0 ||
 +          (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
 +          PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
 +              bnx2x_sfp_set_transmitter(params, phy, 1);
 +      else
 +              bnx2x_sfp_set_transmitter(params, phy, 0);
 +
 +      return rc;
 +}
 +
 +void bnx2x_handle_module_detect_int(struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      struct bnx2x_phy *phy;
 +      u32 gpio_val;
 +      u8 gpio_num, gpio_port;
 +      if (CHIP_IS_E3(bp))
 +              phy = &params->phy[INT_PHY];
 +      else
 +              phy = &params->phy[EXT_PHY1];
 +
 +      if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
 +                                    params->port, &gpio_num, &gpio_port) ==
 +          -EINVAL) {
 +              DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
 +              return;
 +      }
 +
 +      /* Set valid module led off */
 +      bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
 +
 +      /* Get current gpio val reflecting module plugged in / out*/
 +      gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
 +
 +      /* Call the handling function in case module is detected */
 +      if (gpio_val == 0) {
 +              bnx2x_power_sfp_module(params, phy, 1);
 +              bnx2x_set_gpio_int(bp, gpio_num,
 +                                 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
 +                                 gpio_port);
 +              if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
 +                      bnx2x_sfp_module_detection(phy, params);
 +              else
 +                      DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
 +      } else {
 +              u32 val = REG_RD(bp, params->shmem_base +
 +                               offsetof(struct shmem_region, dev_info.
 +                                        port_feature_config[params->port].
 +                                        config));
 +              bnx2x_set_gpio_int(bp, gpio_num,
 +                                 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
 +                                 gpio_port);
 +              /*
 +               * Module was plugged out.
 +               * Disable transmit for this module
 +               */
 +              phy->media_type = ETH_PHY_NOT_PRESENT;
 +              if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 +                   PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
 +                  CHIP_IS_E3(bp))
 +                      bnx2x_sfp_set_transmitter(params, phy, 0);
 +      }
 +}
 +
 +/******************************************************************/
 +/*            Used by 8706 and 8727                             */
 +/******************************************************************/
 +static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
 +                               struct bnx2x_phy *phy,
 +                               u16 alarm_status_offset,
 +                               u16 alarm_ctrl_offset)
 +{
 +      u16 alarm_status, val;
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, alarm_status_offset,
 +                      &alarm_status);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, alarm_status_offset,
 +                      &alarm_status);
 +      /* Mask or enable the fault event. */
 +      bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
 +      if (alarm_status & (1<<0))
 +              val &= ~(1<<0);
 +      else
 +              val |= (1<<0);
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
 +}
 +/******************************************************************/
 +/*            common BCM8706/BCM8726 PHY SECTION                */
 +/******************************************************************/
 +static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
 +                                    struct link_params *params,
 +                                    struct link_vars *vars)
 +{
 +      u8 link_up = 0;
 +      u16 val1, val2, rx_sd, pcs_status;
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
 +      /* Clear RX Alarm*/
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
 +
 +      bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
 +                           MDIO_PMA_LASI_TXCTRL);
 +
 +      /* clear LASI indication*/
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
 +      DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
 +
 +      DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
 +                      " link_status 0x%x\n", rx_sd, pcs_status, val2);
 +      /*
 +       * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
 +       * are set, or if the autoneg bit 1 is set
 +       */
 +      link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
 +      if (link_up) {
 +              if (val2 & (1<<1))
 +                      vars->line_speed = SPEED_1000;
 +              else
 +                      vars->line_speed = SPEED_10000;
 +              bnx2x_ext_phy_resolve_fc(phy, params, vars);
 +              vars->duplex = DUPLEX_FULL;
 +      }
 +
 +      /* Capture 10G link fault. Read twice to clear stale value. */
 +      if (vars->line_speed == SPEED_10000) {
 +              bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
 +                          MDIO_PMA_LASI_TXSTAT, &val1);
 +              bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
 +                          MDIO_PMA_LASI_TXSTAT, &val1);
 +              if (val1 & (1<<0))
 +                      vars->fault_detected = 1;
 +      }
 +
 +      return link_up;
 +}
 +
 +/******************************************************************/
 +/*                    BCM8706 PHY SECTION                       */
 +/******************************************************************/
 +static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
 +                               struct link_params *params,
 +                               struct link_vars *vars)
 +{
 +      u32 tx_en_mode;
 +      u16 cnt, val, tmp1;
 +      struct bnx2x *bp = params->bp;
 +
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 +                     MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 +      /* HW reset */
 +      bnx2x_ext_phy_hw_reset(bp, params->port);
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
 +      bnx2x_wait_reset_complete(bp, phy, params);
 +
 +      /* Wait until fw is loaded */
 +      for (cnt = 0; cnt < 100; cnt++) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
 +              if (val)
 +                      break;
 +              msleep(10);
 +      }
 +      DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
 +      if ((params->feature_config_flags &
 +           FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
 +              u8 i;
 +              u16 reg;
 +              for (i = 0; i < 4; i++) {
 +                      reg = MDIO_XS_8706_REG_BANK_RX0 +
 +                              i*(MDIO_XS_8706_REG_BANK_RX1 -
 +                                 MDIO_XS_8706_REG_BANK_RX0);
 +                      bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
 +                      /* Clear first 3 bits of the control */
 +                      val &= ~0x7;
 +                      /* Set control bits according to configuration */
 +                      val |= (phy->rx_preemphasis[i] & 0x7);
 +                      DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
 +                                 " reg 0x%x <-- val 0x%x\n", reg, val);
 +                      bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
 +              }
 +      }
 +      /* Force speed */
 +      if (phy->req_line_speed == SPEED_10000) {
 +              DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
 +
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
 +                               0);
 +              /* Arm LASI for link and Tx fault. */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
 +      } else {
 +              /* Force 1Gbps using autoneg with 1G advertisement */
 +
 +              /* Allow CL37 through CL73 */
 +              DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
 +
 +              /* Enable Full-Duplex advertisement on CL37 */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
 +              /* Enable CL37 AN */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 +              /* 1G support */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
 +
 +              /* Enable clause 73 AN */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 +                               0x0400);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
 +                               0x0004);
 +      }
 +      bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
 +
 +      /*
 +       * If the TX laser is controlled by GPIO_0, do not let the PHY go
 +       * into low power mode while the TX laser is disabled
 +       */
 +
 +      tx_en_mode = REG_RD(bp, params->shmem_base +
 +                          offsetof(struct shmem_region,
 +                              dev_info.port_hw_config[params->port].sfp_ctrl))
 +                      & PORT_HW_CFG_TX_LASER_MASK;
 +
 +      if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
 +              DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
 +              bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
 +              tmp1 |= 0x1;
 +              bnx2x_cl45_write(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
 +      }
 +
 +      return 0;
 +}
 +
 +static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      return bnx2x_8706_8726_read_status(phy, params, vars);
 +}
 +
 +/******************************************************************/
 +/*                    BCM8726 PHY SECTION                       */
 +/******************************************************************/
 +static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
 +                                     struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
 +}
 +
 +static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
 +                                       struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      /* Need to wait 100ms after reset */
 +      msleep(100);
 +
 +      /* Micro controller re-boot */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
 +
 +      /* Set soft reset */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_GEN_CTRL,
 +                       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_GEN_CTRL,
 +                       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 +
 +      /* wait for 150ms for microcode load */
 +      msleep(150);
 +
 +      /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 +
 +      msleep(200);
 +      bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
 +}
 +
 +static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
 +                               struct link_params *params,
 +                               struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val1;
 +      u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
 +      if (link_up) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
 +                              &val1);
 +              if (val1 & (1<<15)) {
 +                      DP(NETIF_MSG_LINK, "Tx is disabled\n");
 +                      link_up = 0;
 +                      vars->line_speed = 0;
 +              }
 +      }
 +      return link_up;
 +}
 +
 +
 +static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 +      bnx2x_wait_reset_complete(bp, phy, params);
 +
 +      bnx2x_8726_external_rom_boot(phy, params);
 +
 +      /*
 +       * Need to call module detection on initialization, since the module
 +       * detection triggered by actual module insertion might occur before
 +       * the driver is loaded; when the driver is loaded it resets all
 +       * registers, including the transmitter
 +       */
 +      bnx2x_sfp_module_detection(phy, params);
 +
 +      if (phy->req_line_speed == SPEED_1000) {
 +              DP(NETIF_MSG_LINK, "Setting 1G force\n");
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 +                               0x400);
 +      } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +                 (phy->speed_cap_mask &
 +                    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
 +                 ((phy->speed_cap_mask &
 +                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
 +                  PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
 +              DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
 +              /* Set Flow control */
 +              bnx2x_ext_phy_set_pause(params, phy, vars);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 +              bnx2x_cl45_write(bp, phy,
 +                              MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
 +              /*
 +               * Enable RX-ALARM control to receive interrupt for 1G speed
 +               * change
 +               */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 +                               0x400);
 +
 +      } else { /* Default 10G. Set only LASI control */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
 +      }
 +
 +      /* Set TX PreEmphasis if needed */
 +      if ((params->feature_config_flags &
 +           FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
 +              DP(NETIF_MSG_LINK,
 +                 "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
 +                       phy->tx_preemphasis[0],
 +                       phy->tx_preemphasis[1]);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_8726_TX_CTRL1,
 +                               phy->tx_preemphasis[0]);
 +
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_8726_TX_CTRL2,
 +                               phy->tx_preemphasis[1]);
 +      }
 +
 +      return 0;
 +
 +}
 +
 +static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
 +                                struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
 +      /* Set serial boot control for external load */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_GEN_CTRL, 0x0001);
 +}
 +
 +/******************************************************************/
 +/*                    BCM8727 PHY SECTION                       */
 +/******************************************************************/
 +
 +static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
 +                                  struct link_params *params, u8 mode)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 led_mode_bitmask = 0;
 +      u16 gpio_pins_bitmask = 0;
 +      u16 val;
 +      /* Only the NOC flavor requires the LED to be set explicitly */
 +      if (!(phy->flags & FLAGS_NOC))
 +              return;
 +      switch (mode) {
 +      case LED_MODE_FRONT_PANEL_OFF:
 +      case LED_MODE_OFF:
 +              led_mode_bitmask = 0;
 +              gpio_pins_bitmask = 0x03;
 +              break;
 +      case LED_MODE_ON:
 +              led_mode_bitmask = 0;
 +              gpio_pins_bitmask = 0x02;
 +              break;
 +      case LED_MODE_OPER:
 +              led_mode_bitmask = 0x60;
 +              gpio_pins_bitmask = 0x11;
 +              break;
 +      }
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_8727_PCS_OPT_CTRL,
 +                      &val);
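 +      /* Replace the LED mode field (bits 6:4) with the requested mode */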
 +      val &= 0xff8f;
 +      val |= led_mode_bitmask;
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_8727_PCS_OPT_CTRL,
 +                       val);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_8727_GPIO_CTRL,
 +                      &val);
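 +      /* Replace the GPIO pin field (bits 4:0) with the requested pin state */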
 +      val &= 0xffe0;
 +      val |= gpio_pins_bitmask;
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_8727_GPIO_CTRL,
 +                       val);
 +}
 +static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
 +                              struct link_params *params) {
 +      u32 swap_val, swap_override;
 +      u8 port;
 +      /*
 +       * The PHY reset is controlled by GPIO 1. Fake the port number
 +       * to cancel the swap done in set_gpio()
 +       */
 +      struct bnx2x *bp = params->bp;
 +      swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
 +      swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
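 +      /* Port 0 only when both the swap strap and its override are set */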
 +      port = (swap_val && swap_override) ^ 1;
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 +                     MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 +}
 +
 +static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      u32 tx_en_mode;
 +      u16 tmp1, val, mod_abs, tmp2;
 +      u16 rx_alarm_ctrl_val;
 +      u16 lasi_ctrl_val;
 +      struct bnx2x *bp = params->bp;
 +      /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
 +
 +      bnx2x_wait_reset_complete(bp, phy, params);
 +      rx_alarm_ctrl_val = (1<<2) | (1<<5);
 +      /* Should be 0x6 to enable XS on Tx side. */
 +      lasi_ctrl_val = 0x0006;
 +
 +      DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
 +      /* enable LASI */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 +                       rx_alarm_ctrl_val);
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
 +                       0);
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
 +
 +      /*
 +       * Initially configure MOD_ABS to interrupt when the module is
 +       * present (bit 8)
 +       */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
 +      /*
 +       * Set EDC off by setting OPTXLOS signal input to low (bit 9).
 +       * When the EDC is off it locks onto a reference clock and avoids
 +       * becoming 'lost'
 +       */
 +      mod_abs &= ~(1<<8);
 +      if (!(phy->flags & FLAGS_NOC))
 +              mod_abs &= ~(1<<9);
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 +
 +
 +      /* Enable/Disable PHY transmitter output */
 +      bnx2x_set_disable_pmd_transmit(params, phy, 0);
 +
 +      /* Make MOD_ABS give interrupt on change */
 +      bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
 +                      &val);
 +      val |= (1<<12);
 +      if (phy->flags & FLAGS_NOC)
 +              val |= (3<<5);
 +
 +      /*
 +       * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
 +       * status, which reflects SFP+ module over-current
 +       */
 +      if (!(phy->flags & FLAGS_NOC))
 +              val &= 0xff8f; /* Reset bits 4-6 */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
 +
 +      bnx2x_8727_power_module(bp, phy, 1);
 +
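 +      /* Clear latched MSG-OUT and RX alarm status (clear-on-read) */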
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
 +
 +      /* Set option 1G speed */
 +      if (phy->req_line_speed == SPEED_1000) {
 +              DP(NETIF_MSG_LINK, "Setting 1G force\n");
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
 +              DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
 +              /*
 +               * Power down the XAUI until link is up in case of dual-media
 +               * and 1G
 +               */
 +              if (DUAL_MEDIA(params)) {
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_8727_PCS_GP, &val);
 +                      val |= (3<<10);
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8727_PCS_GP, val);
 +              }
 +      } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +                 ((phy->speed_cap_mask &
 +                   PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
 +                 ((phy->speed_cap_mask &
 +                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
 +                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
 +
 +              DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
 +      } else {
 +              /*
 +               * Since the 8727 has only a single reset pin, the 10G
 +               * registers need to be set explicitly even though they
 +               * are the defaults
 +               */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
 +                               0x0020);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
 +                               0x0008);
 +      }
 +
 +      /*
 +       * Set the 2-wire transfer rate of the SFP+ module EEPROM
 +       * to 100 kHz, since some DACs (direct attach cables) do
 +       * not work at 400 kHz.
 +       */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
 +                       0xa001);
 +
 +      /* Set TX PreEmphasis if needed */
 +      if ((params->feature_config_flags &
 +           FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
 +              DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
 +                         phy->tx_preemphasis[0],
 +                         phy->tx_preemphasis[1]);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
 +                               phy->tx_preemphasis[0]);
 +
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
 +                               phy->tx_preemphasis[1]);
 +      }
 +
 +      /*
 +       * If the TX laser is controlled by GPIO_0, do not let the PHY go
 +       * into low power mode while the TX laser is disabled
 +       */
 +      tx_en_mode = REG_RD(bp, params->shmem_base +
 +                          offsetof(struct shmem_region,
 +                              dev_info.port_hw_config[params->port].sfp_ctrl))
 +                      & PORT_HW_CFG_TX_LASER_MASK;
 +
 +      if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
 +
 +              DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
 +              bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
 +              tmp2 |= 0x1000;
 +              tmp2 &= 0xFFEF;
 +              bnx2x_cl45_write(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
 +      }
 +
 +      return 0;
 +}
 +
 +static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 +                                    struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 mod_abs, rx_alarm_status;
 +      u32 val = REG_RD(bp, params->shmem_base +
 +                           offsetof(struct shmem_region, dev_info.
 +                                    port_feature_config[params->port].
 +                                    config));
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
 +      if (mod_abs & (1<<8)) {
 +
 +              /* Module is absent */
 +              DP(NETIF_MSG_LINK,
 +                 "MOD_ABS indication shows module is absent\n");
 +              phy->media_type = ETH_PHY_NOT_PRESENT;
 +              /*
 +               * 1. Set mod_abs to detect next module
 +               *    presence event
 +               * 2. Set EDC off by setting OPTXLOS signal input to low
 +               *    (bit 9).
 +               *    When the EDC is off it locks onto a reference clock and
 +               *    avoids becoming 'lost'.
 +               */
 +              mod_abs &= ~(1<<8);
 +              if (!(phy->flags & FLAGS_NOC))
 +                      mod_abs &= ~(1<<9);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 +
 +              /*
 +               * Clear RX alarm since it stays up as long as
 +               * the mod_abs wasn't changed
 +               */
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
 +
 +      } else {
 +              /* Module is present */
 +              DP(NETIF_MSG_LINK,
 +                 "MOD_ABS indication shows module is present\n");
 +              /*
 +               * First disable the transmitter; if the module is ok, module
 +               * detection will re-enable it.
 +               * 1. Set mod_abs to detect the next module absent event (bit 8)
 +               * 2. Restore the default polarity of the OPRXLOS signal so that
 +               *    it correctly indicates the presence or absence of the Rx
 +               *    signal (bit 9)
 +               */
 +              mod_abs |= (1<<8);
 +              if (!(phy->flags & FLAGS_NOC))
 +                      mod_abs |= (1<<9);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 +
 +              /*
 +               * Clear RX alarm since it stays up as long as the mod_abs
 +               * wasn't changed. This needs to be done before calling
 +               * module detection, otherwise it will clear the link update
 +               * alarm
 +               */
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
 +
 +
 +              if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 +                  PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
 +                      bnx2x_sfp_set_transmitter(params, phy, 0);
 +
 +              if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
 +                      bnx2x_sfp_module_detection(phy, params);
 +              else
 +                      DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
 +      }
 +
 +      DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
 +                 rx_alarm_status);
 +      /* No need to check link status in case of module plugged in/out */
 +}
 +
 +static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 +                               struct link_params *params,
 +                               struct link_vars *vars)
 +
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 link_up = 0, oc_port = params->port;
 +      u16 link_status = 0;
 +      u16 rx_alarm_status, lasi_ctrl, val1;
 +
 +      /* If PHY is not initialized, do not check link status */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
 +                      &lasi_ctrl);
 +      if (!lasi_ctrl)
 +              return 0;
 +
 +      /* Check the LASI on Rx */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
 +                      &rx_alarm_status);
 +      vars->line_speed = 0;
 +      DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS  0x%x\n", rx_alarm_status);
 +
 +      bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
 +                           MDIO_PMA_LASI_TXCTRL);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
 +
 +      DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
 +
 +      /* Clear MSG-OUT */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 +
 +      /*
 +       * If a module is present and over-current needs to be
 +       * checked (i.e. not a NOC flavor)
 +       */
 +      if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
 +              /* Check over-current using 8727 GPIO0 input*/
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
 +                              &val1);
 +
 +              if ((val1 & (1<<8)) == 0) {
 +                      if (!CHIP_IS_E1x(bp))
 +                              oc_port = BP_PATH(bp) + (params->port << 1);
 +                      DP(NETIF_MSG_LINK,
 +                         "8727 Power fault has been detected on port %d\n",
 +                         oc_port);
 +                      netdev_err(bp->dev, "Error:  Power fault on Port %d has"
 +                                          " been detected and the power to "
 +                                          "that SFP+ module has been removed"
 +                                          " to prevent failure of the card."
 +                                          " Please remove the SFP+ module and"
 +                                          " restart the system to clear this"
 +                                          " error.\n",
 +                       oc_port);
 +                      /* Disable all RX_ALARMs except for mod_abs */
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_LASI_RXCTRL, (1<<5));
 +
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
 +                      /* Wait for module_absent_event */
 +                      val1 |= (1<<8);
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_PHY_IDENTIFIER, val1);
 +                      /* Clear RX alarm */
 +                      bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
 +                      return 0;
 +              }
 +      } /* Over current check */
 +
 +      /* When module absent bit is set, check module */
 +      if (rx_alarm_status & (1<<5)) {
 +              bnx2x_8727_handle_mod_abs(phy, params);
 +              /* Enable all mod_abs and link detection bits */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 +                               ((1<<5) | (1<<2)));
 +      }
 +      DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
 +      bnx2x_8727_specific_func(phy, params, ENABLE_TX);
 +      /* If transmitter is disabled, ignore false link up indication */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
 +      if (val1 & (1<<15)) {
 +              DP(NETIF_MSG_LINK, "Tx is disabled\n");
 +              return 0;
 +      }
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
 +
 +      /*
 +       * Bits 0..2  --> speed detected
 +       * Bits 13..15 --> link is down
 +       */
 +      if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
 +              link_up = 1;
 +              vars->line_speed = SPEED_10000;
 +              DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
 +                         params->port);
 +      } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
 +              link_up = 1;
 +              vars->line_speed = SPEED_1000;
 +              DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
 +                         params->port);
 +      } else {
 +              link_up = 0;
 +              DP(NETIF_MSG_LINK, "port %x: External link is down\n",
 +                         params->port);
 +      }
 +
 +      /* Capture 10G link fault. */
 +      if (vars->line_speed == SPEED_10000) {
 +              bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
 +                          MDIO_PMA_LASI_TXSTAT, &val1);
 +
 +              bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
 +                          MDIO_PMA_LASI_TXSTAT, &val1);
 +
 +              if (val1 & (1<<0)) {
 +                      vars->fault_detected = 1;
 +              }
 +      }
 +
 +      if (link_up) {
 +              bnx2x_ext_phy_resolve_fc(phy, params, vars);
 +              vars->duplex = DUPLEX_FULL;
 +              DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
 +      }
 +
 +      if ((DUAL_MEDIA(params)) &&
 +          (phy->req_line_speed == SPEED_1000)) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_8727_PCS_GP, &val1);
 +              /*
 +               * In case of dual-media board and 1G, power up the XAUI side,
 +               * otherwise power it down. For 10G it is done automatically
 +               */
 +              if (link_up)
 +                      val1 &= ~(3<<10);
 +              else
 +                      val1 |= (3<<10);
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_8727_PCS_GP, val1);
 +      }
 +      return link_up;
 +}
 +
 +static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 +                                struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +
 +      /* Enable/Disable PHY transmitter output */
 +      bnx2x_set_disable_pmd_transmit(params, phy, 1);
 +
 +      /* Disable Transmitter */
 +      bnx2x_sfp_set_transmitter(params, phy, 0);
 +      /* Clear LASI */
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
 +
 +}
 +
 +/******************************************************************/
 +/*            BCM8481/BCM84823/BCM84833 PHY SECTION             */
 +/******************************************************************/
 +static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 +                                         struct link_params *params)
 +{
 +      u16 val, fw_ver1, fw_ver2, cnt;
 +      u8 port;
 +      struct bnx2x *bp = params->bp;
 +
 +      port = params->port;
 +
 +      /* For the 32-bit registers in 848xx, access via MDIO2ARM interface. */
 +      /* (1) Set register 0xc200_0014 (SPI_BRIDGE_CTRL_2) to 0x03000000 */
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
 +
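 +      /* Poll for MDIO2ARM command completion (bit 0 of register 0xA818) */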
 +      for (cnt = 0; cnt < 100; cnt++) {
 +              bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
 +              if (val & 1)
 +                      break;
 +              udelay(5);
 +      }
 +      if (cnt == 100) {
 +              DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
 +              bnx2x_save_spirom_version(bp, port, 0,
 +                                        phy->ver_addr);
 +              return;
 +      }
 +
 +
 +      /* (2) Read register 0xc200_0000 (SPI_FW_STATUS) */
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
 +      for (cnt = 0; cnt < 100; cnt++) {
 +              bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
 +              if (val & 1)
 +                      break;
 +              udelay(5);
 +      }
 +      if (cnt == 100) {
 +              DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
 +              bnx2x_save_spirom_version(bp, port, 0,
 +                                        phy->ver_addr);
 +              return;
 +      }
 +
 +      /* lower 16 bits of the register SPI_FW_STATUS */
 +      bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
 +      /* upper 16 bits of register SPI_FW_STATUS */
 +      bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
 +
 +      bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
 +                                phy->ver_addr);
 +}
 +
 +static void bnx2x_848xx_set_led(struct bnx2x *bp,
 +                              struct bnx2x_phy *phy)
 +{
 +      u16 val;
 +
 +      /* PHYC_CTL_LED_CTL */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
 +      val &= 0xFE00;
 +      val |= 0x0092;
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_8481_LINK_SIGNAL, val);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_8481_LED1_MASK,
 +                       0x80);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_8481_LED2_MASK,
 +                       0x18);
 +
 +      /* Select activity source by Tx and Rx, as suggested by PHY AE */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_8481_LED3_MASK,
 +                       0x0006);
 +
 +      /* Select the closest activity blink rate to that in 10/100/1000 */
 +      bnx2x_cl45_write(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_8481_LED3_BLINK,
 +                      0);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
 +      val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
 +
 +      /* 'Interrupt Mask' */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_AN_DEVAD,
 +                       0xFFFB, 0xFFFD);
 +}
 +
 +static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
 +                                     struct link_params *params,
 +                                     struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 autoneg_val, an_1000_val, an_10_100_val;
 +      u16 tmp_req_line_speed;
 +
 +      tmp_req_line_speed = phy->req_line_speed;
 +      if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
 +              if (phy->req_line_speed == SPEED_10000)
 +                      phy->req_line_speed = SPEED_AUTO_NEG;
 +
 +      /*
 +       * This PHY uses the NIG latch mechanism since the link indication
 +       * arrives through its LED4 and not via its LASI signal, so we
 +       * get a steady signal instead of clear-on-read
 +       */
 +      bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
 +                    1 << NIG_LATCH_BC_ENABLE_MI_INT);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
 +
 +      bnx2x_848xx_set_led(bp, phy);
 +
 +      /* set 1000 speed advertisement */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
 +                      &an_1000_val);
 +
 +      bnx2x_ext_phy_set_pause(params, phy, vars);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD,
 +                      MDIO_AN_REG_8481_LEGACY_AN_ADV,
 +                      &an_10_100_val);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
 +                      &autoneg_val);
 +      /* Disable forced speed */
 +      autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
 +      an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
 +
 +      if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +           (phy->speed_cap_mask &
 +           PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
 +          (phy->req_line_speed == SPEED_1000)) {
 +              an_1000_val |= (1<<8);
 +              autoneg_val |= (1<<9 | 1<<12);
 +              if (phy->req_duplex == DUPLEX_FULL)
 +                      an_1000_val |= (1<<9);
 +              DP(NETIF_MSG_LINK, "Advertising 1G\n");
 +      } else
 +              an_1000_val &= ~((1<<8) | (1<<9));
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
 +                       an_1000_val);
 +
 +      /* set 100 speed advertisement */
 +      if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +           (phy->speed_cap_mask &
 +            (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
 +             PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
 +           (phy->supported &
 +            (SUPPORTED_100baseT_Half |
 +             SUPPORTED_100baseT_Full)))) {
 +              an_10_100_val |= (1<<7);
 +              /* Enable autoneg and restart autoneg for legacy speeds */
 +              autoneg_val |= (1<<9 | 1<<12);
 +
 +              if (phy->req_duplex == DUPLEX_FULL)
 +                      an_10_100_val |= (1<<8);
 +              DP(NETIF_MSG_LINK, "Advertising 100M\n");
 +      }
 +      /* set 10 speed advertisement */
 +      if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +           (phy->speed_cap_mask &
 +            (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
 +             PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
 +           (phy->supported &
 +            (SUPPORTED_10baseT_Half |
 +             SUPPORTED_10baseT_Full)))) {
 +              an_10_100_val |= (1<<5);
 +              autoneg_val |= (1<<9 | 1<<12);
 +              if (phy->req_duplex == DUPLEX_FULL)
 +                      an_10_100_val |= (1<<6);
 +              DP(NETIF_MSG_LINK, "Advertising 10M\n");
 +      }
 +
 +      /* Only 10/100 are allowed to work in FORCE mode */
 +      if ((phy->req_line_speed == SPEED_100) &&
 +          (phy->supported &
 +           (SUPPORTED_100baseT_Half |
 +            SUPPORTED_100baseT_Full))) {
 +              autoneg_val |= (1<<13);
 +              /* Enable AUTO-MDIX when autoneg is disabled */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
 +                               (1<<15 | 1<<9 | 7<<0));
 +              DP(NETIF_MSG_LINK, "Setting 100M force\n");
 +      }
 +      if ((phy->req_line_speed == SPEED_10) &&
 +          (phy->supported &
 +           (SUPPORTED_10baseT_Half |
 +            SUPPORTED_10baseT_Full))) {
 +              /* Enable AUTO-MDIX when autoneg is disabled */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
 +                               (1<<15 | 1<<9 | 7<<0));
 +              DP(NETIF_MSG_LINK, "Setting 10M force\n");
 +      }
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
 +                       an_10_100_val);
 +
 +      if (phy->req_duplex == DUPLEX_FULL)
 +              autoneg_val |= (1<<8);
 +
 +      /*
 +       * Always write this if this is not 84833.
 +       * For 84833, write it only when it's a forced speed.
 +       */
 +      if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
 +              ((autoneg_val & (1<<12)) == 0))
 +              bnx2x_cl45_write(bp, phy,
 +                       MDIO_AN_DEVAD,
 +                       MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
 +
 +      if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +          (phy->speed_cap_mask &
 +           PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
 +              (phy->req_line_speed == SPEED_10000)) {
 +                      DP(NETIF_MSG_LINK, "Advertising 10G\n");
 +                      /* Restart autoneg for 10G*/
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
 +                               0x3200);
 +      } else
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD,
 +                               MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
 +                               1);
 +
 +      /* Save spirom version */
 +      bnx2x_save_848xx_spirom_version(phy, params);
 +
 +      phy->req_line_speed = tmp_req_line_speed;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      /* Restore normal power mode*/
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 +                     MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 +
 +      /* HW reset */
 +      bnx2x_ext_phy_hw_reset(bp, params->port);
 +      bnx2x_wait_reset_complete(bp, phy, params);
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 +      return bnx2x_848xx_cmn_config_init(phy, params, vars);
 +}
 +
 +
 +#define PHY84833_HDSHK_WAIT 300
 +static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
 +                                 struct link_params *params,
 +                                 struct link_vars *vars)
 +{
 +      u32 idx;
 +      u32 pair_swap;
 +      u16 val;
 +      u16 data;
 +      struct bnx2x *bp = params->bp;
 +      /* Do pair swap */
 +
 +      /* Check for configuration. */
 +      pair_swap = REG_RD(bp, params->shmem_base +
 +                         offsetof(struct shmem_region,
 +                      dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
 +              PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
 +
 +      if (pair_swap == 0)
 +              return 0;
 +
 +      data = (u16)pair_swap;
 +
 +      /* Write CMD_OPEN_OVERRIDE to STATUS reg */
 +      bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                      MDIO_84833_TOP_CFG_SCRATCH_REG2,
 +                      PHY84833_CMD_OPEN_OVERRIDE);
 +      for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
 +              bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
 +                              MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
 +              if (val == PHY84833_CMD_OPEN_FOR_CMDS)
 +                      break;
 +              msleep(1);
 +      }
 +      if (idx >= PHY84833_HDSHK_WAIT) {
 +              DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
 +              return -EINVAL;
 +      }
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                      MDIO_84833_TOP_CFG_SCRATCH_REG4,
 +                      data);
 +      /* Issue pair swap command */
 +      bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                      MDIO_84833_TOP_CFG_SCRATCH_REG0,
 +                      PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
 +      for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
 +              bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
 +                              MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
 +              if ((val == PHY84833_CMD_COMPLETE_PASS) ||
 +                      (val == PHY84833_CMD_COMPLETE_ERROR))
 +                      break;
 +              msleep(1);
 +      }
 +      if ((idx >= PHY84833_HDSHK_WAIT) ||
 +              (val == PHY84833_CMD_COMPLETE_ERROR)) {
 +              DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
 +              return -EINVAL;
 +      }
 +      bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                      MDIO_84833_TOP_CFG_SCRATCH_REG2,
 +                      PHY84833_CMD_CLEAR_COMPLETE);
 +      DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
 +      return 0;
 +}
 +
 +
 +static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
 +                                    u32 shmem_base_path[],
 +                                    u32 chip_id)
 +{
 +      u32 reset_pin[2];
 +      u32 idx;
 +      u8 reset_gpios;
 +      if (CHIP_IS_E3(bp)) {
 +              /* Assume that these will be GPIOs, not EPIOs. */
 +              for (idx = 0; idx < 2; idx++) {
 +                      /* Map config param to register bit. */
 +                      reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
 +                              offsetof(struct shmem_region,
 +                              dev_info.port_hw_config[0].e3_cmn_pin_cfg));
 +                      reset_pin[idx] = (reset_pin[idx] &
 +                              PORT_HW_CFG_E3_PHY_RESET_MASK) >>
 +                              PORT_HW_CFG_E3_PHY_RESET_SHIFT;
 +                      reset_pin[idx] -= PIN_CFG_GPIO0_P0;
 +                      reset_pin[idx] = (1 << reset_pin[idx]);
 +              }
 +              reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
 +      } else {
 +              /* E2: look in a different place in shmem. */
 +              for (idx = 0; idx < 2; idx++) {
 +                      reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
 +                              offsetof(struct shmem_region,
 +                              dev_info.port_hw_config[0].default_cfg));
 +                      reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
 +                      reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
 +                      reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
 +                      reset_pin[idx] = (1 << reset_pin[idx]);
 +              }
 +              reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
 +      }
 +
 +      return reset_gpios;
 +}
 +
 +static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
 +                              struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 reset_gpios;
 +      u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base +
 +                              offsetof(struct shmem2_region,
 +                              other_shmem_base_addr));
 +
 +      u32 shmem_base_path[2];
 +      shmem_base_path[0] = params->shmem_base;
 +      shmem_base_path[1] = other_shmem_base_addr;
 +
 +      reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path,
 +                                                params->chip_id);
 +
 +      bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
 +      udelay(10);
 +      DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
 +              reset_gpios);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
 +                                              u32 shmem_base_path[],
 +                                              u32 chip_id)
 +{
 +      u8 reset_gpios;
 +
 +      reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
 +
 +      bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
 +      udelay(10);
 +      bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
 +      msleep(800);
 +      DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
 +              reset_gpios);
 +
 +      return 0;
 +}
 +
 +#define PHY84833_CONSTANT_LATENCY 1193
 +static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 +                                 struct link_params *params,
 +                                 struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port, initialize = 1;
 +      u16 val;
 +      u16 temp;
 +      u32 actual_phy_selection, cms_enable, idx;
 +      int rc = 0;
 +
 +      msleep(1);
 +
 +      if (!(CHIP_IS_E1(bp)))
 +              port = BP_PATH(bp);
 +      else
 +              port = params->port;
 +
 +      if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
 +              bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 +                             MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 +                             port);
 +      } else {
 +              /* MDIO reset */
 +              bnx2x_cl45_write(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_CTRL, 0x8000);
 +              /* Bring PHY out of super isolate mode */
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_CTL_DEVAD,
 +                              MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
 +              val &= ~MDIO_84833_SUPER_ISOLATE;
 +              bnx2x_cl45_write(bp, phy,
 +                              MDIO_CTL_DEVAD,
 +                              MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
 +      }
 +
 +      bnx2x_wait_reset_complete(bp, phy, params);
 +
 +      /* Wait for GPHY to come out of reset */
 +      msleep(50);
 +
 +      if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
 +              bnx2x_84833_pair_swap_cfg(phy, params, vars);
 +
 +      /*
 +       * BCM84823 requires that XGXS links up first @ 10G for normal behavior
 +       */
 +      temp = vars->line_speed;
 +      vars->line_speed = SPEED_10000;
 +      bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
 +      bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
 +      vars->line_speed = temp;
 +
 +      /* Set dual-media configuration according to configuration */
 +
 +      bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
 +                      MDIO_CTL_REG_84823_MEDIA, &val);
 +      val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
 +               MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
 +               MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
 +               MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
 +               MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
 +
 +      if (CHIP_IS_E3(bp)) {
 +              val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
 +                       MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
 +      } else {
 +              val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
 +                      MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
 +      }
 +
 +      actual_phy_selection = bnx2x_phy_selection(params);
 +
 +      switch (actual_phy_selection) {
 +      case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
 +              /* Do nothing. Essentially this is like the priority copper */
 +              break;
 +      case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
 +              val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
 +              break;
 +      case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
 +              val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
 +              break;
 +      case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
 +              /* Do nothing here. The first PHY won't be initialized at all */
 +              break;
 +      case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
 +              val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
 +              initialize = 0;
 +              break;
 +      }
 +      if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
 +              val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
 +
 +      bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                       MDIO_CTL_REG_84823_MEDIA, val);
 +      DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
 +                 params->multi_phy_config, val);
 +
 +      /* AutogrEEEn */
 +      if (params->feature_config_flags &
 +              FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
 +              /* Ensure that f/w is ready */
 +              for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
 +                      bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
 +                                      MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
 +                      if (val == PHY84833_CMD_OPEN_FOR_CMDS)
 +                              break;
 +                      usleep_range(1000, 1000);
 +              }
 +              if (idx >= PHY84833_HDSHK_WAIT) {
 +                      DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
 +                      return -EINVAL;
 +              }
 +
 +              /* Select EEE mode */
 +              bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                              MDIO_84833_TOP_CFG_SCRATCH_REG3,
 +                              0x2);
 +
 +              /* Set Idle and Latency */
 +              bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                              MDIO_84833_TOP_CFG_SCRATCH_REG4,
 +                              PHY84833_CONSTANT_LATENCY + 1);
 +
 +              bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                              MDIO_84833_TOP_CFG_DATA3_REG,
 +                              PHY84833_CONSTANT_LATENCY + 1);
 +
 +              bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                              MDIO_84833_TOP_CFG_DATA4_REG,
 +                              PHY84833_CONSTANT_LATENCY);
 +
 +              /* Send EEE instruction to command register */
 +              bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                              MDIO_84833_TOP_CFG_SCRATCH_REG0,
 +                              PHY84833_DIAG_CMD_SET_EEE_MODE);
 +
 +              /* Ensure that the command has completed */
 +              for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
 +                      bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
 +                                      MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
 +                      if ((val == PHY84833_CMD_COMPLETE_PASS) ||
 +                              (val == PHY84833_CMD_COMPLETE_ERROR))
 +                              break;
 +                      usleep_range(1000, 1000);
 +              }
 +              if ((idx >= PHY84833_HDSHK_WAIT) ||
 +                      (val == PHY84833_CMD_COMPLETE_ERROR)) {
 +                      DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
 +                      return -EINVAL;
 +              }
 +
 +              /* Reset command handler */
 +              bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                          MDIO_84833_TOP_CFG_SCRATCH_REG2,
 +                          PHY84833_CMD_CLEAR_COMPLETE);
 +      }
 +
 +      if (initialize)
 +              rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
 +      else
 +              bnx2x_save_848xx_spirom_version(phy, params);
 +      /* The 84833 PHY has a better mechanism and does not need CMS support. */
 +      if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
 +              cms_enable = REG_RD(bp, params->shmem_base +
 +                      offsetof(struct shmem_region,
 +                      dev_info.port_hw_config[params->port].default_cfg)) &
 +                      PORT_HW_CFG_ENABLE_CMS_MASK;
 +
 +              bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
 +                              MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
 +              if (cms_enable)
 +                      val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
 +              else
 +                      val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
 +              bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
 +                               MDIO_CTL_REG_84823_USER_CTRL_REG, val);
 +      }
 +
 +      return rc;
 +}
 +
 +static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val, val1, val2;
 +      u8 link_up = 0;
 +
 +      /* Check 10G-BaseT link status */
 +      /* Check PMD signal ok */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD, 0xFFFA, &val1);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
 +                      &val2);
 +      DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
 +
 +      /* Check link 10G */
 +      if (val2 & (1<<11)) {
 +              vars->line_speed = SPEED_10000;
 +              vars->duplex = DUPLEX_FULL;
 +              link_up = 1;
 +              bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 +      } else { /* Check Legacy speed link */
 +              u16 legacy_status, legacy_speed;
 +
 +              /* Enable expansion register 0x42 (Operation mode status) */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_AN_DEVAD,
 +                               MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
 +
 +              /* Get legacy speed operation status */
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_AN_DEVAD,
 +                              MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
 +                              &legacy_status);
 +
 +              DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
 +                 legacy_status);
 +              link_up = ((legacy_status & (1<<11)) == (1<<11));
 +              if (link_up) {
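 +                      /* Bits [10:9] of the status word encode the legacy speed */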
 +                      legacy_speed = (legacy_status & (3<<9));
 +                      if (legacy_speed == (0<<9))
 +                              vars->line_speed = SPEED_10;
 +                      else if (legacy_speed == (1<<9))
 +                              vars->line_speed = SPEED_100;
 +                      else if (legacy_speed == (2<<9))
 +                              vars->line_speed = SPEED_1000;
 +                      else /* Should not happen */
 +                              vars->line_speed = 0;
 +
 +                      if (legacy_status & (1<<8))
 +                              vars->duplex = DUPLEX_FULL;
 +                      else
 +                              vars->duplex = DUPLEX_HALF;
 +
 +                      DP(NETIF_MSG_LINK,
 +                         "Link is up in %dMbps, is_duplex_full= %d\n",
 +                         vars->line_speed,
 +                         (vars->duplex == DUPLEX_FULL));
 +                      /* Check legacy speed AN resolution */
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_AN_DEVAD,
 +                                      MDIO_AN_REG_8481_LEGACY_MII_STATUS,
 +                                      &val);
 +                      if (val & (1<<5))
 +                              vars->link_status |=
 +                                      LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_AN_DEVAD,
 +                                      MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
 +                                      &val);
 +                      if ((val & (1<<0)) == 0)
 +                              vars->link_status |=
 +                                      LINK_STATUS_PARALLEL_DETECTION_USED;
 +              }
 +      }
 +      if (link_up) {
 +              DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
 +                         vars->line_speed);
 +              bnx2x_ext_phy_resolve_fc(phy, params, vars);
 +      }
 +
 +      return link_up;
 +}
 +
 +static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
 +{
 +      int status = 0;
 +      u32 spirom_ver;
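 +      /* raw_ver bits [11:7] -> upper word, bits [6:0] -> lower word */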
 +      spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
 +      status = bnx2x_format_ver(spirom_ver, str, len);
 +      return status;
 +}
 +
 +static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
 +                              struct link_params *params)
 +{
 +      bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
 +                     MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
 +      bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
 +                     MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
 +}
 +
 +static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
 +                                      struct link_params *params)
 +{
 +      bnx2x_cl45_write(params->bp, phy,
 +                       MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
 +      bnx2x_cl45_write(params->bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
 +}
 +
 +static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
 +                                 struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port;
 +      u16 val16;
 +
 +      if (!(CHIP_IS_E1(bp)))
 +              port = BP_PATH(bp);
 +      else
 +              port = params->port;
 +
 +      if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
 +              bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 +                             MISC_REGISTERS_GPIO_OUTPUT_LOW,
 +                             port);
 +      } else {
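 +              /* 84833: put the PHY in low-power mode via PMA control */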
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_CTL_DEVAD,
 +                              0x400f, &val16);
 +              bnx2x_cl45_write(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_CTRL, 0x800);
 +      }
 +}
 +
 +static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 +                                   struct link_params *params, u8 mode)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val;
 +      u8 port;
 +
 +      if (!(CHIP_IS_E1(bp)))
 +              port = BP_PATH(bp);
 +      else
 +              port = params->port;
 +
 +      switch (mode) {
 +      case LED_MODE_OFF:
 +
 +              DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port);
 +
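 +              /* When the LEDs are routed via EXTPHY1, clear all four LED masks */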
 +              if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
 +                  SHARED_HW_CFG_LED_EXTPHY1) {
 +
 +                      /* Set LED masks */
 +                      bnx2x_cl45_write(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_8481_LED1_MASK,
 +                                      0x0);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_8481_LED2_MASK,
 +                                      0x0);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_8481_LED3_MASK,
 +                                      0x0);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_8481_LED5_MASK,
 +                                      0x0);
 +
 +              } else {
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED1_MASK,
 +                                       0x0);
 +              }
 +              break;
 +      case LED_MODE_FRONT_PANEL_OFF:
 +
 +              DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
 +                 port);
 +
 +              if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
 +                  SHARED_HW_CFG_LED_EXTPHY1) {
 +
 +                      /* Set LED masks */
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED1_MASK,
 +                                       0x0);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED2_MASK,
 +                                       0x0);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED3_MASK,
 +                                       0x0);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED5_MASK,
 +                                       0x20);
 +
 +              } else {
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED1_MASK,
 +                                       0x0);
 +              }
 +              break;
 +      case LED_MODE_ON:
 +
 +              DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port);
 +
 +              if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
 +                  SHARED_HW_CFG_LED_EXTPHY1) {
 +                      /* Set control reg */
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_8481_LINK_SIGNAL,
 +                                      &val);
 +                      val &= 0x8000;
 +                      val |= 0x2492;
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LINK_SIGNAL,
 +                                       val);
 +
 +                      /* Set LED masks */
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED1_MASK,
 +                                       0x0);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED2_MASK,
 +                                       0x20);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED3_MASK,
 +                                       0x20);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED5_MASK,
 +                                       0x0);
 +              } else {
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED1_MASK,
 +                                       0x20);
 +              }
 +              break;
 +
 +      case LED_MODE_OPER:
 +
 +              DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port);
 +
 +              if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
 +                  SHARED_HW_CFG_LED_EXTPHY1) {
 +
 +                      /* Set control reg */
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_8481_LINK_SIGNAL,
 +                                      &val);
 +
 +                      if (!((val &
 +                             MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
 +                        >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
 +                              DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
 +                              bnx2x_cl45_write(bp, phy,
 +                                               MDIO_PMA_DEVAD,
 +                                               MDIO_PMA_REG_8481_LINK_SIGNAL,
 +                                               0xa492);
 +                      }
 +
 +                      /* Set LED masks */
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED1_MASK,
 +                                       0x10);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED2_MASK,
 +                                       0x80);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED3_MASK,
 +                                       0x98);
 +
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED5_MASK,
 +                                       0x40);
 +
 +              } else {
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LED1_MASK,
 +                                       0x80);
 +
 +                      /* Tell LED3 to blink on source */
 +                      bnx2x_cl45_read(bp, phy,
 +                                      MDIO_PMA_DEVAD,
 +                                      MDIO_PMA_REG_8481_LINK_SIGNAL,
 +                                      &val);
 +                      val &= ~(7<<6);
 +                      val |= (1<<6); /* A83B[8:6]= 1 */
 +                      bnx2x_cl45_write(bp, phy,
 +                                       MDIO_PMA_DEVAD,
 +                                       MDIO_PMA_REG_8481_LINK_SIGNAL,
 +                                       val);
 +              }
 +              break;
 +      }
 +
 +      /*
 +       * This is a workaround for E3+84833 until autoneg
 +       * restart is fixed in f/w
 +       */
 +      if (CHIP_IS_E3(bp)) {
 +              bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 +                              MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
 +      }
 +}
 +
 +/******************************************************************/
 +/*                    54618SE PHY SECTION                       */
 +/******************************************************************/
 +static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
 +                                             struct link_params *params,
 +                                             struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 port;
 +      u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
 +      u32 cfg_pin;
 +
 +      DP(NETIF_MSG_LINK, "54618SE cfg init\n");
 +      usleep_range(1000, 1000);
 +
 +      /*
 +       * This works with E3 only, no need to check the chip
 +       * before determining the port.
 +       */
 +      port = params->port;
 +
 +      cfg_pin = (REG_RD(bp, params->shmem_base +
 +                      offsetof(struct shmem_region,
 +                      dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
 +                      PORT_HW_CFG_E3_PHY_RESET_MASK) >>
 +                      PORT_HW_CFG_E3_PHY_RESET_SHIFT;
 +
 +      /* Drive pin high to bring the GPHY out of reset. */
 +      bnx2x_set_cfg_pin(bp, cfg_pin, 1);
 +
 +      /* wait for GPHY to reset */
 +      msleep(50);
 +
 +      /* reset phy */
 +      bnx2x_cl22_write(bp, phy,
 +                       MDIO_PMA_REG_CTRL, 0x8000);
 +      bnx2x_wait_reset_complete(bp, phy, params);
 +
 +      /* Wait for GPHY to reset */
 +      msleep(50);
 +
 +      /* Configure LED4: set to INTR (0x6). */
 +      /* Accessing shadow register 0xe. */
 +      bnx2x_cl22_write(bp, phy,
 +                      MDIO_REG_GPHY_SHADOW,
 +                      MDIO_REG_GPHY_SHADOW_LED_SEL2);
 +      bnx2x_cl22_read(bp, phy,
 +                      MDIO_REG_GPHY_SHADOW,
 +                      &temp);
 +      temp &= ~(0xf << 4);
 +      temp |= (0x6 << 4);
 +      bnx2x_cl22_write(bp, phy,
 +                      MDIO_REG_GPHY_SHADOW,
 +                      MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
 +      /* Configure INTR based on link status change. */
 +      bnx2x_cl22_write(bp, phy,
 +                      MDIO_REG_INTR_MASK,
 +                      ~MDIO_REG_INTR_MASK_LINK_STATUS);
 +
 +      /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
 +      bnx2x_cl22_write(bp, phy,
 +                      MDIO_REG_GPHY_SHADOW,
 +                      MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
 +      bnx2x_cl22_read(bp, phy,
 +                      MDIO_REG_GPHY_SHADOW,
 +                      &temp);
 +      temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
 +      bnx2x_cl22_write(bp, phy,
 +                      MDIO_REG_GPHY_SHADOW,
 +                      MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
 +
 +      /* Set up fc */
 +      /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
 +      bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
 +      fc_val = 0;
 +      if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
 +                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
 +              fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
 +
 +      if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
 +                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
 +              fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
 +
 +      /* read all advertisement */
 +      bnx2x_cl22_read(bp, phy,
 +                      0x09,
 +                      &an_1000_val);
 +
 +      bnx2x_cl22_read(bp, phy,
 +                      0x04,
 +                      &an_10_100_val);
 +
 +      bnx2x_cl22_read(bp, phy,
 +                      MDIO_PMA_REG_CTRL,
 +                      &autoneg_val);
 +
 +      /* Disable forced speed */
 +      autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
 +      an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) |
 +                         (1<<11));
 +
 +      if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +                      (phy->speed_cap_mask &
 +                      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
 +                      (phy->req_line_speed == SPEED_1000)) {
 +              an_1000_val |= (1<<8);
 +              autoneg_val |= (1<<9 | 1<<12);
 +              if (phy->req_duplex == DUPLEX_FULL)
 +                      an_1000_val |= (1<<9);
 +              DP(NETIF_MSG_LINK, "Advertising 1G\n");
 +      } else
 +              an_1000_val &= ~((1<<8) | (1<<9));
 +
 +      bnx2x_cl22_write(bp, phy,
 +                      0x09,
 +                      an_1000_val);
 +      bnx2x_cl22_read(bp, phy,
 +                      0x09,
 +                      &an_1000_val);
 +
 +      /* set 100 speed advertisement */
 +      if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +                      (phy->speed_cap_mask &
 +                      (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
 +                      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
 +              an_10_100_val |= (1<<7);
 +              /* Enable autoneg and restart autoneg for legacy speeds */
 +              autoneg_val |= (1<<9 | 1<<12);
 +
 +              if (phy->req_duplex == DUPLEX_FULL)
 +                      an_10_100_val |= (1<<8);
 +              DP(NETIF_MSG_LINK, "Advertising 100M\n");
 +      }
 +
 +      /* set 10 speed advertisement */
 +      if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
 +                      (phy->speed_cap_mask &
 +                      (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
 +                      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
 +              an_10_100_val |= (1<<5);
 +              autoneg_val |= (1<<9 | 1<<12);
 +              if (phy->req_duplex == DUPLEX_FULL)
 +                      an_10_100_val |= (1<<6);
 +              DP(NETIF_MSG_LINK, "Advertising 10M\n");
 +      }
 +
 +      /* Only 10/100 are allowed to work in FORCE mode */
 +      if (phy->req_line_speed == SPEED_100) {
 +              autoneg_val |= (1<<13);
 +              /* Enable AUTO-MDIX when autoneg is disabled */
 +              bnx2x_cl22_write(bp, phy,
 +                              0x18,
 +                              (1<<15 | 1<<9 | 7<<0));
 +              DP(NETIF_MSG_LINK, "Setting 100M force\n");
 +      }
 +      if (phy->req_line_speed == SPEED_10) {
 +              /* Enable AUTO-MDIX when autoneg is disabled */
 +              bnx2x_cl22_write(bp, phy,
 +                              0x18,
 +                              (1<<15 | 1<<9 | 7<<0));
 +              DP(NETIF_MSG_LINK, "Setting 10M force\n");
 +      }
 +
 +      /* Check if we should turn on Auto-GrEEEn */
 +      bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
 +      if (temp == MDIO_REG_GPHY_ID_54618SE) {
 +              if (params->feature_config_flags &
 +                  FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
 +                      temp = 6;
 +                      DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
 +              } else {
 +                      temp = 0;
 +                      DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
 +              }
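 +              /*
 +               * Write the EEE advertisement indirectly through the
 +               * clause-22 CL45 address/data access registers.
 +               */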
 +              bnx2x_cl22_write(bp, phy,
 +                               MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
 +              bnx2x_cl22_write(bp, phy,
 +                               MDIO_REG_GPHY_CL45_DATA_REG,
 +                               MDIO_REG_GPHY_EEE_ADV);
 +              bnx2x_cl22_write(bp, phy,
 +                               MDIO_REG_GPHY_CL45_ADDR_REG,
 +                               (0x1 << 14) | MDIO_AN_DEVAD);
 +              bnx2x_cl22_write(bp, phy,
 +                               MDIO_REG_GPHY_CL45_DATA_REG,
 +                               temp);
 +      }
 +
 +      bnx2x_cl22_write(bp, phy,
 +                      0x04,
 +                      an_10_100_val | fc_val);
 +
 +      if (phy->req_duplex == DUPLEX_FULL)
 +              autoneg_val |= (1<<8);
 +
 +      bnx2x_cl22_write(bp, phy,
 +                      MDIO_PMA_REG_CTRL, autoneg_val);
 +
 +      return 0;
 +}
 +
 +static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy,
 +                                     struct link_params *params, u8 mode)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode);
 +      switch (mode) {
 +      case LED_MODE_FRONT_PANEL_OFF:
 +      case LED_MODE_OFF:
 +      case LED_MODE_OPER:
 +      case LED_MODE_ON:
 +      default:
 +              break;
 +      }
 +      return;
 +}
 +
 +static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
 +                                   struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u32 cfg_pin;
 +      u8 port;
 +
 +      /*
 +       * In case of no EPIO routed to reset the GPHY, put it
 +       * in low power mode.
 +       */
 +      bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
 +      /*
 +       * This works with E3 only, no need to check the chip
 +       * before determining the port.
 +       */
 +      port = params->port;
 +      cfg_pin = (REG_RD(bp, params->shmem_base +
 +                      offsetof(struct shmem_region,
 +                      dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
 +                      PORT_HW_CFG_E3_PHY_RESET_MASK) >>
 +                      PORT_HW_CFG_E3_PHY_RESET_SHIFT;
 +
 +      /* Drive pin low to put GPHY in reset. */
 +      bnx2x_set_cfg_pin(bp, cfg_pin, 0);
 +}
 +
 +static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
 +                                  struct link_params *params,
 +                                  struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val;
 +      u8 link_up = 0;
 +      u16 legacy_status, legacy_speed;
 +
 +      /* Get speed operation status */
 +      bnx2x_cl22_read(bp, phy,
 +                      0x19,
 +                      &legacy_status);
 +      DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
 +
 +      /* Read status to clear the PHY interrupt. */
 +      bnx2x_cl22_read(bp, phy,
 +                      MDIO_REG_INTR_STATUS,
 +                      &val);
 +
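 +      /* Bit 2 of the 0x19 status indicates link-up */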
 +      link_up = ((legacy_status & (1<<2)) == (1<<2));
 +
 +      if (link_up) {
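 +              /* Bits [10:8] of the status word encode the resolved speed and duplex */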
 +              legacy_speed = (legacy_status & (7<<8));
 +              if (legacy_speed == (7<<8)) {
 +                      vars->line_speed = SPEED_1000;
 +                      vars->duplex = DUPLEX_FULL;
 +              } else if (legacy_speed == (6<<8)) {
 +                      vars->line_speed = SPEED_1000;
 +                      vars->duplex = DUPLEX_HALF;
 +              } else if (legacy_speed == (5<<8)) {
 +                      vars->line_speed = SPEED_100;
 +                      vars->duplex = DUPLEX_FULL;
 +              }
 +              /* Omitting 100Base-T4 for now */
 +              else if (legacy_speed == (3<<8)) {
 +                      vars->line_speed = SPEED_100;
 +                      vars->duplex = DUPLEX_HALF;
 +              } else if (legacy_speed == (2<<8)) {
 +                      vars->line_speed = SPEED_10;
 +                      vars->duplex = DUPLEX_FULL;
 +              } else if (legacy_speed == (1<<8)) {
 +                      vars->line_speed = SPEED_10;
 +                      vars->duplex = DUPLEX_HALF;
 +              } else /* Should not happen */
 +                      vars->line_speed = 0;
 +
 +              DP(NETIF_MSG_LINK,
 +                 "Link is up in %dMbps, is_duplex_full= %d\n",
 +                 vars->line_speed,
 +                 (vars->duplex == DUPLEX_FULL));
 +
 +              /* Check legacy speed AN resolution */
 +              bnx2x_cl22_read(bp, phy,
 +                              0x01,
 +                              &val);
 +              if (val & (1<<5))
 +                      vars->link_status |=
 +                              LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
 +              bnx2x_cl22_read(bp, phy,
 +                              0x06,
 +                              &val);
 +              if ((val & (1<<0)) == 0)
 +                      vars->link_status |=
 +                              LINK_STATUS_PARALLEL_DETECTION_USED;
 +
 +              DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
 +                         vars->line_speed);
 +
 +              /* Report whether EEE is resolved. */
 +              bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
 +              if (val == MDIO_REG_GPHY_ID_54618SE) {
 +                      if (vars->link_status &
 +                          LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
 +                              val = 0;
 +                      else {
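 +                              /*
 +                               * Read the EEE resolution through the
 +                               * CL45 address/data access registers.
 +                               */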
 +                              bnx2x_cl22_write(bp, phy,
 +                                      MDIO_REG_GPHY_CL45_ADDR_REG,
 +                                      MDIO_AN_DEVAD);
 +                              bnx2x_cl22_write(bp, phy,
 +                                      MDIO_REG_GPHY_CL45_DATA_REG,
 +                                      MDIO_REG_GPHY_EEE_RESOLVED);
 +                              bnx2x_cl22_write(bp, phy,
 +                                      MDIO_REG_GPHY_CL45_ADDR_REG,
 +                                      (0x1 << 14) | MDIO_AN_DEVAD);
 +                              bnx2x_cl22_read(bp, phy,
 +                                      MDIO_REG_GPHY_CL45_DATA_REG,
 +                                      &val);
 +                      }
 +                      DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
 +              }
 +
 +              bnx2x_ext_phy_resolve_fc(phy, params, vars);
 +      }
 +      return link_up;
 +}
 +
 +static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
 +                                        struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 val;
 +      u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
 +
 +      DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n");
 +
 +      /* Enable master/slave manual mode and set to master */
 +      /* mii write 9 [bits set 11 12] */
 +      bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
 +
 +      /* forced 1G and disable autoneg */
 +      /* set val [mii read 0] */
 +      /* set val [expr $val & [bits clear 6 12 13]] */
 +      /* set val [expr $val | [bits set 6 8]] */
 +      /* mii write 0 $val */
 +      bnx2x_cl22_read(bp, phy, 0x00, &val);
 +      val &= ~((1<<6) | (1<<12) | (1<<13));
 +      val |= (1<<6) | (1<<8);
 +      bnx2x_cl22_write(bp, phy, 0x00, val);
 +
 +      /* Set external loopback and Tx using 6dB coding */
 +      /* mii write 0x18 7 */
 +      /* set val [mii read 0x18] */
 +      /* mii write 0x18 [expr $val | [bits set 10 15]] */
 +      bnx2x_cl22_write(bp, phy, 0x18, 7);
 +      bnx2x_cl22_read(bp, phy, 0x18, &val);
 +      bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
 +
 +      /* This register opens the gate for the UMAC despite its name */
 +      REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
 +
 +      /*
 +       * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
 +       * length used by the MAC receive logic to check frames.
 +       */
 +      REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
 +}
 +
 +/******************************************************************/
 +/*                    SFX7101 PHY SECTION                       */
 +/******************************************************************/
 +static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
 +                                     struct link_params *params)
 +{
 +      struct bnx2x *bp = params->bp;
 +      /* SFX7101_XGXS_TEST1 */
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
 +}
 +
 +static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
 +                                struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      u16 fw_ver1, fw_ver2, val;
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
 +
 +      /* Restore normal power mode*/
 +      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 +                     MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 +      /* HW reset */
 +      bnx2x_ext_phy_hw_reset(bp, params->port);
 +      bnx2x_wait_reset_complete(bp, phy, params);
 +
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
 +      DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
 +
 +      bnx2x_ext_phy_set_pause(params, phy, vars);
 +      /* Restart autoneg */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
 +      val |= 0x200;
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
 +
 +      /* Save spirom version */
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
 +      bnx2x_save_spirom_version(bp, params->port,
 +                                (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
 +      return 0;
 +}
 +
 +static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
 +                               struct link_params *params,
 +                               struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 link_up;
 +      u16 val1, val2;
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
 +      DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
 +                 val2, val1);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
 +      DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
 +                 val2, val1);
 +      link_up = ((val1 & 4) == 4);
 +      /* if link is up print the AN outcome of the SFX7101 PHY */
 +      if (link_up) {
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
 +                              &val2);
 +              vars->line_speed = SPEED_10000;
 +              vars->duplex = DUPLEX_FULL;
 +              DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
 +                         val2, (val2 & (1<<14)));
 +              bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 +              bnx2x_ext_phy_resolve_fc(phy, params, vars);
 +      }
 +      return link_up;
 +}
 +
 +static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
 +{
 +      if (*len < 5)
 +              return -EINVAL;
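 +      /* Expose the four version bytes directly as a NUL-terminated string */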
 +      str[0] = (spirom_ver & 0xFF);
 +      str[1] = (spirom_ver & 0xFF00) >> 8;
 +      str[2] = (spirom_ver & 0xFF0000) >> 16;
 +      str[3] = (spirom_ver & 0xFF000000) >> 24;
 +      str[4] = '\0';
 +      *len -= 5;
 +      return 0;
 +}
 +
 +void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
 +{
 +      u16 val, cnt;
 +
 +      bnx2x_cl45_read(bp, phy,
 +                      MDIO_PMA_DEVAD,
 +                      MDIO_PMA_REG_7101_RESET, &val);
 +
 +      for (cnt = 0; cnt < 10; cnt++) {
 +              msleep(50);
 +              /* Writes a self-clearing reset */
 +              bnx2x_cl45_write(bp, phy,
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_7101_RESET,
 +                               (val | (1<<15)));
 +              /* Wait for clear */
 +              bnx2x_cl45_read(bp, phy,
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_7101_RESET, &val);
 +
 +              if ((val & (1<<15)) == 0)
 +                      break;
 +      }
 +}
 +
 +static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
 +                              struct link_params *params) {
 +      /* Low power mode is controlled by GPIO 2 */
 +      bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
 +                     MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
 +      /* The PHY reset is controlled by GPIO 1 */
 +      bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
 +                     MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
 +}
 +
 +static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
 +                                  struct link_params *params, u8 mode)
 +{
 +      u16 val = 0;
 +      struct bnx2x *bp = params->bp;
 +      switch (mode) {
 +      case LED_MODE_FRONT_PANEL_OFF:
 +      case LED_MODE_OFF:
 +              val = 2;
 +              break;
 +      case LED_MODE_ON:
 +              val = 1;
 +              break;
 +      case LED_MODE_OPER:
 +              val = 0;
 +              break;
 +      }
 +      bnx2x_cl45_write(bp, phy,
 +                       MDIO_PMA_DEVAD,
 +                       MDIO_PMA_REG_7107_LINK_LED_CNTL,
 +                       val);
 +}
 +
 +/******************************************************************/
 +/*                    STATIC PHY DECLARATION                    */
 +/******************************************************************/
 +
 +static struct bnx2x_phy phy_null = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
 +      .addr           = 0,
 +      .def_md_devad   = 0,
 +      .flags          = FLAGS_INIT_XGXS_FIRST,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = 0,
 +      .media_type     = ETH_PHY_NOT_PRESENT,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)NULL,
 +      .read_status    = (read_status_t)NULL,
 +      .link_reset     = (link_reset_t)NULL,
 +      .config_loopback = (config_loopback_t)NULL,
 +      .format_fw_ver  = (format_fw_ver_t)NULL,
 +      .hw_reset       = (hw_reset_t)NULL,
 +      .set_link_led   = (set_link_led_t)NULL,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +
 +static struct bnx2x_phy phy_serdes = {
 +      .type           = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = 0,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10baseT_Half |
 +                         SUPPORTED_10baseT_Full |
 +                         SUPPORTED_100baseT_Half |
 +                         SUPPORTED_100baseT_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_2500baseX_Full |
 +                         SUPPORTED_TP |
 +                         SUPPORTED_Autoneg |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_BASE_T,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_xgxs_config_init,
 +      .read_status    = (read_status_t)bnx2x_link_settings_status,
 +      .link_reset     = (link_reset_t)bnx2x_int_link_reset,
 +      .config_loopback = (config_loopback_t)NULL,
 +      .format_fw_ver  = (format_fw_ver_t)NULL,
 +      .hw_reset       = (hw_reset_t)NULL,
 +      .set_link_led   = (set_link_led_t)NULL,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +
 +static struct bnx2x_phy phy_xgxs = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = 0,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10baseT_Half |
 +                         SUPPORTED_10baseT_Full |
 +                         SUPPORTED_100baseT_Half |
 +                         SUPPORTED_100baseT_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_2500baseX_Full |
 +                         SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_FIBRE |
 +                         SUPPORTED_Autoneg |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_CX4,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_xgxs_config_init,
 +      .read_status    = (read_status_t)bnx2x_link_settings_status,
 +      .link_reset     = (link_reset_t)bnx2x_int_link_reset,
 +      .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
 +      .format_fw_ver  = (format_fw_ver_t)NULL,
 +      .hw_reset       = (hw_reset_t)NULL,
 +      .set_link_led   = (set_link_led_t)NULL,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +static struct bnx2x_phy phy_warpcore = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
-       .flags          = (FLAGS_INIT_XGXS_FIRST |
-                          FLAGS_TX_ERROR_CHECK),
++      .flags          = FLAGS_HW_LOCK_REQUIRED,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10baseT_Half |
 +                           SUPPORTED_10baseT_Full |
 +                           SUPPORTED_100baseT_Half |
 +                           SUPPORTED_100baseT_Full |
 +                           SUPPORTED_1000baseT_Full |
 +                           SUPPORTED_10000baseT_Full |
 +                           SUPPORTED_20000baseKR2_Full |
 +                           SUPPORTED_20000baseMLD2_Full |
 +                           SUPPORTED_FIBRE |
 +                           SUPPORTED_Autoneg |
 +                           SUPPORTED_Pause |
 +                           SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_UNSPECIFIED,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_warpcore_config_init,
 +      .read_status    = (read_status_t)bnx2x_warpcore_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_warpcore_link_reset,
 +      .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
 +      .format_fw_ver  = (format_fw_ver_t)NULL,
 +      .hw_reset       = (hw_reset_t)bnx2x_warpcore_hw_reset,
 +      .set_link_led   = (set_link_led_t)NULL,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +
 +static struct bnx2x_phy phy_7101 = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = FLAGS_FAN_FAILURE_DET_REQ,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_TP |
 +                         SUPPORTED_Autoneg |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_BASE_T,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_7101_config_init,
 +      .read_status    = (read_status_t)bnx2x_7101_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_common_ext_link_reset,
 +      .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
 +      .format_fw_ver  = (format_fw_ver_t)bnx2x_7101_format_ver,
 +      .hw_reset       = (hw_reset_t)bnx2x_7101_hw_reset,
 +      .set_link_led   = (set_link_led_t)bnx2x_7101_set_link_led,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +static struct bnx2x_phy phy_8073 = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = FLAGS_HW_LOCK_REQUIRED,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_2500baseX_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_FIBRE |
 +                         SUPPORTED_Autoneg |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_KR,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_8073_config_init,
 +      .read_status    = (read_status_t)bnx2x_8073_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_8073_link_reset,
 +      .config_loopback = (config_loopback_t)NULL,
 +      .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
 +      .hw_reset       = (hw_reset_t)NULL,
 +      .set_link_led   = (set_link_led_t)NULL,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +static struct bnx2x_phy phy_8705 = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = FLAGS_INIT_XGXS_FIRST,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_FIBRE |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_XFP_FIBER,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_8705_config_init,
 +      .read_status    = (read_status_t)bnx2x_8705_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_common_ext_link_reset,
 +      .config_loopback = (config_loopback_t)NULL,
 +      .format_fw_ver  = (format_fw_ver_t)bnx2x_null_format_ver,
 +      .hw_reset       = (hw_reset_t)NULL,
 +      .set_link_led   = (set_link_led_t)NULL,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +static struct bnx2x_phy phy_8706 = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
-                          FLAGS_INIT_XGXS_FIRST |
-                          FLAGS_TX_ERROR_CHECK),
++      .flags          = FLAGS_INIT_XGXS_FIRST,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_FIBRE |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_SFP_FIBER,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_8706_config_init,
 +      .read_status    = (read_status_t)bnx2x_8706_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_common_ext_link_reset,
 +      .config_loopback = (config_loopback_t)NULL,
 +      .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
 +      .hw_reset       = (hw_reset_t)NULL,
 +      .set_link_led   = (set_link_led_t)NULL,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +
 +static struct bnx2x_phy phy_8726 = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = (FLAGS_HW_LOCK_REQUIRED |
-       .flags          = (FLAGS_FAN_FAILURE_DET_REQ |
-                          FLAGS_TX_ERROR_CHECK),
++                         FLAGS_INIT_XGXS_FIRST),
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_Autoneg |
 +                         SUPPORTED_FIBRE |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_NOT_PRESENT,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_8726_config_init,
 +      .read_status    = (read_status_t)bnx2x_8726_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_8726_link_reset,
 +      .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
 +      .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
 +      .hw_reset       = (hw_reset_t)NULL,
 +      .set_link_led   = (set_link_led_t)NULL,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +
 +static struct bnx2x_phy phy_8727 = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
++      .flags          = FLAGS_FAN_FAILURE_DET_REQ,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_FIBRE |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_NOT_PRESENT,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_8727_config_init,
 +      .read_status    = (read_status_t)bnx2x_8727_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_8727_link_reset,
 +      .config_loopback = (config_loopback_t)NULL,
 +      .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
 +      .hw_reset       = (hw_reset_t)bnx2x_8727_hw_reset,
 +      .set_link_led   = (set_link_led_t)bnx2x_8727_set_link_led,
 +      .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
 +};
 +static struct bnx2x_phy phy_8481 = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = FLAGS_FAN_FAILURE_DET_REQ |
 +                        FLAGS_REARM_LATCH_SIGNAL,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10baseT_Half |
 +                         SUPPORTED_10baseT_Full |
 +                         SUPPORTED_100baseT_Half |
 +                         SUPPORTED_100baseT_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_TP |
 +                         SUPPORTED_Autoneg |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_BASE_T,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_8481_config_init,
 +      .read_status    = (read_status_t)bnx2x_848xx_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_8481_link_reset,
 +      .config_loopback = (config_loopback_t)NULL,
 +      .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
 +      .hw_reset       = (hw_reset_t)bnx2x_8481_hw_reset,
 +      .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +
 +static struct bnx2x_phy phy_84823 = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = FLAGS_FAN_FAILURE_DET_REQ |
 +                        FLAGS_REARM_LATCH_SIGNAL,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10baseT_Half |
 +                         SUPPORTED_10baseT_Full |
 +                         SUPPORTED_100baseT_Half |
 +                         SUPPORTED_100baseT_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_TP |
 +                         SUPPORTED_Autoneg |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_BASE_T,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_848x3_config_init,
 +      .read_status    = (read_status_t)bnx2x_848xx_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
 +      .config_loopback = (config_loopback_t)NULL,
 +      .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
 +      .hw_reset       = (hw_reset_t)NULL,
 +      .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +
 +static struct bnx2x_phy phy_84833 = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = FLAGS_FAN_FAILURE_DET_REQ |
 +                          FLAGS_REARM_LATCH_SIGNAL,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_100baseT_Half |
 +                         SUPPORTED_100baseT_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_10000baseT_Full |
 +                         SUPPORTED_TP |
 +                         SUPPORTED_Autoneg |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_BASE_T,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_848x3_config_init,
 +      .read_status    = (read_status_t)bnx2x_848xx_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
 +      .config_loopback = (config_loopback_t)NULL,
 +      .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
 +      .hw_reset       = (hw_reset_t)bnx2x_84833_hw_reset_phy,
 +      .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +
 +static struct bnx2x_phy phy_54618se = {
 +      .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
 +      .addr           = 0xff,
 +      .def_md_devad   = 0,
 +      .flags          = FLAGS_INIT_XGXS_FIRST,
 +      .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
 +      .mdio_ctrl      = 0,
 +      .supported      = (SUPPORTED_10baseT_Half |
 +                         SUPPORTED_10baseT_Full |
 +                         SUPPORTED_100baseT_Half |
 +                         SUPPORTED_100baseT_Full |
 +                         SUPPORTED_1000baseT_Full |
 +                         SUPPORTED_TP |
 +                         SUPPORTED_Autoneg |
 +                         SUPPORTED_Pause |
 +                         SUPPORTED_Asym_Pause),
 +      .media_type     = ETH_PHY_BASE_T,
 +      .ver_addr       = 0,
 +      .req_flow_ctrl  = 0,
 +      .req_line_speed = 0,
 +      .speed_cap_mask = 0,
 +      .req_duplex     = 0,
 +      .rsrv           = 0,
 +      .config_init    = (config_init_t)bnx2x_54618se_config_init,
 +      .read_status    = (read_status_t)bnx2x_54618se_read_status,
 +      .link_reset     = (link_reset_t)bnx2x_54618se_link_reset,
 +      .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
 +      .format_fw_ver  = (format_fw_ver_t)NULL,
 +      .hw_reset       = (hw_reset_t)NULL,
 +      .set_link_led   = (set_link_led_t)bnx2x_54618se_set_link_led,
 +      .phy_specific_func = (phy_specific_func_t)NULL
 +};
 +/*****************************************************************/
 +/*                                                               */
 +/* Populate the phy according to the hardware configuration.    */
 +/* Main function: bnx2x_populate_phy                             */
 +/*                                                               */
 +/*****************************************************************/
 +
 +static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
 +                                   struct bnx2x_phy *phy, u8 port,
 +                                   u8 phy_index)
 +{
 +      /* Get the 4 lanes xgxs config rx and tx */
 +      u32 rx = 0, tx = 0, i;
 +      for (i = 0; i < 2; i++) {
 +              /*
 +               * INT_PHY and EXT_PHY1 share the same value location in the
 +               * shmem. When num_phys is greater than 1, this value
 +               * applies only to EXT_PHY1
 +               */
 +              if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
 +                      rx = REG_RD(bp, shmem_base +
 +                                  offsetof(struct shmem_region,
 +                        dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
 +
 +                      tx = REG_RD(bp, shmem_base +
 +                                  offsetof(struct shmem_region,
 +                        dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
 +              } else {
 +                      rx = REG_RD(bp, shmem_base +
 +                                  offsetof(struct shmem_region,
 +                       dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
 +
 +                      tx = REG_RD(bp, shmem_base +
 +                                  offsetof(struct shmem_region,
 +                       dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
 +              }
 +
 +              phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
 +              phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
 +
 +              phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
 +              phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
 +      }
 +}
 +
 +static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
 +                                  u8 phy_index, u8 port)
 +{
 +      u32 ext_phy_config = 0;
 +      switch (phy_index) {
 +      case EXT_PHY1:
 +              ext_phy_config = REG_RD(bp, shmem_base +
 +                                            offsetof(struct shmem_region,
 +                      dev_info.port_hw_config[port].external_phy_config));
 +              break;
 +      case EXT_PHY2:
 +              ext_phy_config = REG_RD(bp, shmem_base +
 +                                            offsetof(struct shmem_region,
 +                      dev_info.port_hw_config[port].external_phy_config2));
 +              break;
 +      default:
 +              DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
 +              return -EINVAL;
 +      }
 +
 +      return ext_phy_config;
 +}
 +static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
 +                                struct bnx2x_phy *phy)
 +{
 +      u32 phy_addr;
 +      u32 chip_id;
 +      u32 switch_cfg = (REG_RD(bp, shmem_base +
 +                                     offsetof(struct shmem_region,
 +                      dev_info.port_feature_config[port].link_config)) &
 +                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
 +      chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
 +      DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
 +      if (USES_WARPCORE(bp)) {
 +              u32 serdes_net_if;
 +              phy_addr = REG_RD(bp,
 +                                MISC_REG_WC0_CTRL_PHY_ADDR);
 +              *phy = phy_warpcore;
 +              if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
 +                      phy->flags |= FLAGS_4_PORT_MODE;
 +              else
 +                      phy->flags &= ~FLAGS_4_PORT_MODE;
 +              /* Check Dual mode */
 +              serdes_net_if = (REG_RD(bp, shmem_base +
 +                                      offsetof(struct shmem_region, dev_info.
 +                                      port_hw_config[port].default_cfg)) &
 +                               PORT_HW_CFG_NET_SERDES_IF_MASK);
 +              /*
 +               * Set the appropriate supported and flags indications per
 +               * interface type of the chip
 +               */
 +              switch (serdes_net_if) {
 +              case PORT_HW_CFG_NET_SERDES_IF_SGMII:
 +                      phy->supported &= (SUPPORTED_10baseT_Half |
 +                                         SUPPORTED_10baseT_Full |
 +                                         SUPPORTED_100baseT_Half |
 +                                         SUPPORTED_100baseT_Full |
 +                                         SUPPORTED_1000baseT_Full |
 +                                         SUPPORTED_FIBRE |
 +                                         SUPPORTED_Autoneg |
 +                                         SUPPORTED_Pause |
 +                                         SUPPORTED_Asym_Pause);
 +                      phy->media_type = ETH_PHY_BASE_T;
 +                      break;
 +              case PORT_HW_CFG_NET_SERDES_IF_XFI:
 +                      phy->media_type = ETH_PHY_XFP_FIBER;
 +                      break;
 +              case PORT_HW_CFG_NET_SERDES_IF_SFI:
 +                      phy->supported &= (SUPPORTED_1000baseT_Full |
 +                                         SUPPORTED_10000baseT_Full |
 +                                         SUPPORTED_FIBRE |
 +                                         SUPPORTED_Pause |
 +                                         SUPPORTED_Asym_Pause);
 +                      phy->media_type = ETH_PHY_SFP_FIBER;
 +                      break;
 +              case PORT_HW_CFG_NET_SERDES_IF_KR:
 +                      phy->media_type = ETH_PHY_KR;
 +                      phy->supported &= (SUPPORTED_1000baseT_Full |
 +                                         SUPPORTED_10000baseT_Full |
 +                                         SUPPORTED_FIBRE |
 +                                         SUPPORTED_Autoneg |
 +                                         SUPPORTED_Pause |
 +                                         SUPPORTED_Asym_Pause);
 +                      break;
 +              case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
 +                      phy->media_type = ETH_PHY_KR;
 +                      phy->flags |= FLAGS_WC_DUAL_MODE;
 +                      phy->supported &= (SUPPORTED_20000baseMLD2_Full |
 +                                         SUPPORTED_FIBRE |
 +                                         SUPPORTED_Pause |
 +                                         SUPPORTED_Asym_Pause);
 +                      break;
 +              case PORT_HW_CFG_NET_SERDES_IF_KR2:
 +                      phy->media_type = ETH_PHY_KR;
 +                      phy->flags |= FLAGS_WC_DUAL_MODE;
 +                      phy->supported &= (SUPPORTED_20000baseKR2_Full |
 +                                         SUPPORTED_FIBRE |
 +                                         SUPPORTED_Pause |
 +                                         SUPPORTED_Asym_Pause);
 +                      break;
 +              default:
 +                      DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
 +                                     serdes_net_if);
 +                      break;
 +              }
 +
 +              /*
 +               * Enable MDC/MDIO work-around for E3 A0 since free running MDC
 +               * was not set as expected. For B0, ECO will be enabled so there
 +               * won't be an issue there
 +               */
 +              if (CHIP_REV(bp) == CHIP_REV_Ax)
 +                      phy->flags |= FLAGS_MDC_MDIO_WA;
 +              else
 +                      phy->flags |= FLAGS_MDC_MDIO_WA_B0;
 +      } else {
 +              switch (switch_cfg) {
 +              case SWITCH_CFG_1G:
 +                      phy_addr = REG_RD(bp,
 +                                        NIG_REG_SERDES0_CTRL_PHY_ADDR +
 +                                        port * 0x10);
 +                      *phy = phy_serdes;
 +                      break;
 +              case SWITCH_CFG_10G:
 +                      phy_addr = REG_RD(bp,
 +                                        NIG_REG_XGXS0_CTRL_PHY_ADDR +
 +                                        port * 0x18);
 +                      *phy = phy_xgxs;
 +                      break;
 +              default:
 +                      DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
 +                      return -EINVAL;
 +              }
 +      }
 +      phy->addr = (u8)phy_addr;
 +      phy->mdio_ctrl = bnx2x_get_emac_base(bp,
 +                                          SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
 +                                          port);
 +      if (CHIP_IS_E2(bp))
 +              phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
 +      else
 +              phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
 +
 +      DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
 +                 port, phy->addr, phy->mdio_ctrl);
 +
 +      bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
 +      return 0;
 +}
 +
 +static int bnx2x_populate_ext_phy(struct bnx2x *bp,
 +                                u8 phy_index,
 +                                u32 shmem_base,
 +                                u32 shmem2_base,
 +                                u8 port,
 +                                struct bnx2x_phy *phy)
 +{
 +      u32 ext_phy_config, phy_type, config2;
 +      u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
 +      ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
 +                                                phy_index, port);
 +      phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
 +      /* Select the phy type */
 +      switch (phy_type) {
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
 +              mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
 +              *phy = phy_8073;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
 +              *phy = phy_8705;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
 +              *phy = phy_8706;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
 +              mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
 +              *phy = phy_8726;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
 +              /* BCM8727_NOC => BCM8727 no over current */
 +              mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
 +              *phy = phy_8727;
 +              phy->flags |= FLAGS_NOC;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 +              mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
 +              *phy = phy_8727;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
 +              *phy = phy_8481;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
 +              *phy = phy_84823;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
 +              *phy = phy_84833;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
 +              *phy = phy_54618se;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
 +              *phy = phy_7101;
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
 +              *phy = phy_null;
 +              return -EINVAL;
 +      default:
 +              *phy = phy_null;
 +              return 0;
 +      }
 +
 +      phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
 +      bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
 +
 +      /*
 +       * The shmem address of the phy version is located in different
 +       * structures. In case this structure is too old, do not set
 +       * the address.
 +       */
 +      config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
 +                                      dev_info.shared_hw_config.config2));
 +      if (phy_index == EXT_PHY1) {
 +              phy->ver_addr = shmem_base + offsetof(struct shmem_region,
 +                              port_mb[port].ext_phy_fw_version);
 +
 +              /* Check specific mdc mdio settings */
 +              if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
 +                      mdc_mdio_access = config2 &
 +                      SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
 +      } else {
 +              u32 size = REG_RD(bp, shmem2_base);
 +
 +              if (size >
 +                  offsetof(struct shmem2_region, ext_phy_fw_version2)) {
 +                      phy->ver_addr = shmem2_base +
 +                          offsetof(struct shmem2_region,
 +                                   ext_phy_fw_version2[port]);
 +              }
 +              /* Check specific mdc mdio settings */
 +              if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
 +                      mdc_mdio_access = (config2 &
 +                      SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
 +                      (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
 +                       SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
 +      }
 +      phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 +
 +      /*
 +       * In case the mdc/mdio access of the external phy is different from
 +       * the mdc/mdio access of the XGXS, a HW lock must be taken in each
 +       * access to prevent one port from interfering with another port's
 +       * CL45 operations.
 +       */
 +      if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
 +              phy->flags |= FLAGS_HW_LOCK_REQUIRED;
 +      DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
 +                 phy_type, port, phy_index);
 +      DP(NETIF_MSG_LINK, "             addr=0x%x, mdio_ctl=0x%x\n",
 +                 phy->addr, phy->mdio_ctrl);
 +      return 0;
 +}
 +
 +static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
 +                            u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
 +{
 +      int status = 0;
 +      phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
 +      if (phy_index == INT_PHY)
 +              return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
 +      status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
 +                                      port, phy);
 +      return status;
 +}
 +
 +static void bnx2x_phy_def_cfg(struct link_params *params,
 +                            struct bnx2x_phy *phy,
 +                            u8 phy_index)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u32 link_config;
 +      /* Populate the default phy configuration for MF mode */
 +      if (phy_index == EXT_PHY2) {
 +              link_config = REG_RD(bp, params->shmem_base +
 +                                   offsetof(struct shmem_region, dev_info.
 +                      port_feature_config[params->port].link_config2));
 +              phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
 +                                           offsetof(struct shmem_region,
 +                                                    dev_info.
 +                      port_hw_config[params->port].speed_capability_mask2));
 +      } else {
 +              link_config = REG_RD(bp, params->shmem_base +
 +                                   offsetof(struct shmem_region, dev_info.
 +                              port_feature_config[params->port].link_config));
 +              phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
 +                                           offsetof(struct shmem_region,
 +                                                    dev_info.
 +                      port_hw_config[params->port].speed_capability_mask));
 +      }
 +      DP(NETIF_MSG_LINK,
 +         "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x\n",
 +         phy_index, link_config, phy->speed_cap_mask);
 +
 +      phy->req_duplex = DUPLEX_FULL;
 +      switch (link_config  & PORT_FEATURE_LINK_SPEED_MASK) {
 +      case PORT_FEATURE_LINK_SPEED_10M_HALF:
 +              phy->req_duplex = DUPLEX_HALF;
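 +              /* fall through */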
 +      case PORT_FEATURE_LINK_SPEED_10M_FULL:
 +              phy->req_line_speed = SPEED_10;
 +              break;
 +      case PORT_FEATURE_LINK_SPEED_100M_HALF:
 +              phy->req_duplex = DUPLEX_HALF;
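 +              /* fall through */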
 +      case PORT_FEATURE_LINK_SPEED_100M_FULL:
 +              phy->req_line_speed = SPEED_100;
 +              break;
 +      case PORT_FEATURE_LINK_SPEED_1G:
 +              phy->req_line_speed = SPEED_1000;
 +              break;
 +      case PORT_FEATURE_LINK_SPEED_2_5G:
 +              phy->req_line_speed = SPEED_2500;
 +              break;
 +      case PORT_FEATURE_LINK_SPEED_10G_CX4:
 +              phy->req_line_speed = SPEED_10000;
 +              break;
 +      default:
 +              phy->req_line_speed = SPEED_AUTO_NEG;
 +              break;
 +      }
 +
 +      switch (link_config  & PORT_FEATURE_FLOW_CONTROL_MASK) {
 +      case PORT_FEATURE_FLOW_CONTROL_AUTO:
 +              phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
 +              break;
 +      case PORT_FEATURE_FLOW_CONTROL_TX:
 +              phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
 +              break;
 +      case PORT_FEATURE_FLOW_CONTROL_RX:
 +              phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
 +              break;
 +      case PORT_FEATURE_FLOW_CONTROL_BOTH:
 +              phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
 +              break;
 +      default:
 +              phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +              break;
 +      }
 +}
 +
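 +/*
 + * Return the effective phy selection, taking the PHY-swapped configuration
 + * into account: when the phys are swapped, the first/second priority and
 + * selection values are exchanged; otherwise the configured value is
 + * returned as-is.
 + */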
 +u32 bnx2x_phy_selection(struct link_params *params)
 +{
 +      u32 phy_config_swapped, prio_cfg;
 +      u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
 +
 +      phy_config_swapped = params->multi_phy_config &
 +              PORT_HW_CFG_PHY_SWAPPED_ENABLED;
 +
 +      prio_cfg = params->multi_phy_config &
 +                      PORT_HW_CFG_PHY_SELECTION_MASK;
 +
 +      if (phy_config_swapped) {
 +              switch (prio_cfg) {
 +              case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
 +                   return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
 +                   break;
 +              case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
 +                   return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
 +                   break;
 +              case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
 +                   return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
 +                   break;
 +              case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
 +                   return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
 +                   break;
 +              }
 +      } else
 +              return_cfg = prio_cfg;
 +
 +      return return_cfg;
 +}
 +
 +
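 +/*
 + * Probe all phys of the port: populate each phy from shmem (honoring the
 + * PHY-swapped configuration), sync its media type to shmem when not set
 + * yet, load its default configuration and update params->num_phys.
 + */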
 +int bnx2x_phy_probe(struct link_params *params)
 +{
 +      u8 phy_index, actual_phy_idx, link_cfg_idx;
 +      u32 phy_config_swapped, sync_offset, media_types;
 +      struct bnx2x *bp = params->bp;
 +      struct bnx2x_phy *phy;
 +      params->num_phys = 0;
 +      DP(NETIF_MSG_LINK, "Begin phy probe\n");
 +      phy_config_swapped = params->multi_phy_config &
 +              PORT_HW_CFG_PHY_SWAPPED_ENABLED;
 +
 +      for (phy_index = INT_PHY; phy_index < MAX_PHYS;
 +            phy_index++) {
 +              link_cfg_idx = LINK_CONFIG_IDX(phy_index);
 +              actual_phy_idx = phy_index;
 +              if (phy_config_swapped) {
 +                      if (phy_index == EXT_PHY1)
 +                              actual_phy_idx = EXT_PHY2;
 +                      else if (phy_index == EXT_PHY2)
 +                              actual_phy_idx = EXT_PHY1;
 +              }
 +              DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
 +                             " actual_phy_idx %x\n", phy_config_swapped,
 +                         phy_index, actual_phy_idx);
 +              phy = &params->phy[actual_phy_idx];
 +              if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
 +                                     params->shmem2_base, params->port,
 +                                     phy) != 0) {
 +                      params->num_phys = 0;
 +                      DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
 +                                 phy_index);
 +                      for (phy_index = INT_PHY;
 +                            phy_index < MAX_PHYS;
 +                            phy_index++)
 +                              params->phy[phy_index] = phy_null;
 +                      return -EINVAL;
 +              }
 +              if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
 +                      break;
 +
 +              sync_offset = params->shmem_base +
 +                      offsetof(struct shmem_region,
 +                      dev_info.port_hw_config[params->port].media_type);
 +              media_types = REG_RD(bp, sync_offset);
 +
 +              /*
 +               * Update media type for non-PMF sync only for the first time.
 +               * In case the media type changes afterwards, it will be updated
 +               * using the update_status function.
 +               */
 +              if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
 +                                  (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
 +                                   actual_phy_idx))) == 0) {
 +                      media_types |= ((phy->media_type &
 +                                      PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
 +                              (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
 +                               actual_phy_idx));
 +              }
 +              REG_WR(bp, sync_offset, media_types);
 +
 +              bnx2x_phy_def_cfg(params, phy, phy_index);
 +              params->num_phys++;
 +      }
 +
 +      DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
 +      return 0;
 +}
 +
 +void bnx2x_init_bmac_loopback(struct link_params *params,
 +                            struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      vars->link_up = 1;
 +      vars->line_speed = SPEED_10000;
 +      vars->duplex = DUPLEX_FULL;
 +      vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +      vars->mac_type = MAC_TYPE_BMAC;
 +
 +      vars->phy_flags = PHY_XGXS_FLAG;
 +
 +      bnx2x_xgxs_deassert(params);
 +
 +      /* set bmac loopback */
 +      bnx2x_bmac_enable(params, vars, 1);
 +
 +      REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 +}
 +
 +void bnx2x_init_emac_loopback(struct link_params *params,
 +                            struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      vars->link_up = 1;
 +      vars->line_speed = SPEED_1000;
 +      vars->duplex = DUPLEX_FULL;
 +      vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +      vars->mac_type = MAC_TYPE_EMAC;
 +
 +      vars->phy_flags = PHY_XGXS_FLAG;
 +
 +      bnx2x_xgxs_deassert(params);
 +      /* set emac loopback */
 +      bnx2x_emac_enable(params, vars, 1);
 +      bnx2x_emac_program(params, vars);
 +      REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 +}
 +
 +void bnx2x_init_xmac_loopback(struct link_params *params,
 +                            struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      vars->link_up = 1;
 +      if (!params->req_line_speed[0])
 +              vars->line_speed = SPEED_10000;
 +      else
 +              vars->line_speed = params->req_line_speed[0];
 +      vars->duplex = DUPLEX_FULL;
 +      vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +      vars->mac_type = MAC_TYPE_XMAC;
 +      vars->phy_flags = PHY_XGXS_FLAG;
 +      /*
 +       * Set WC to loopback mode since link is required to provide clock
 +       * to the XMAC in 20G mode
 +       */
 +      bnx2x_set_aer_mmd(params, &params->phy[0]);
 +      bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
 +      params->phy[INT_PHY].config_loopback(
 +                      &params->phy[INT_PHY],
 +                      params);
 +
 +      bnx2x_xmac_enable(params, vars, 1);
 +      REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 +}
 +
 +void bnx2x_init_umac_loopback(struct link_params *params,
 +                            struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      vars->link_up = 1;
 +      vars->line_speed = SPEED_1000;
 +      vars->duplex = DUPLEX_FULL;
 +      vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +      vars->mac_type = MAC_TYPE_UMAC;
 +      vars->phy_flags = PHY_XGXS_FLAG;
 +      bnx2x_umac_enable(params, vars, 1);
 +
 +      REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 +}
 +
 +void bnx2x_init_xgxs_loopback(struct link_params *params,
 +                            struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      vars->link_up = 1;
 +      vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +      vars->duplex = DUPLEX_FULL;
 +      if (params->req_line_speed[0] == SPEED_1000)
 +              vars->line_speed = SPEED_1000;
 +      else
 +              vars->line_speed = SPEED_10000;
 +
 +      if (!USES_WARPCORE(bp))
 +              bnx2x_xgxs_deassert(params);
 +      bnx2x_link_initialize(params, vars);
 +
 +      if (params->req_line_speed[0] == SPEED_1000) {
 +              if (USES_WARPCORE(bp))
 +                      bnx2x_umac_enable(params, vars, 0);
 +              else {
 +                      bnx2x_emac_program(params, vars);
 +                      bnx2x_emac_enable(params, vars, 0);
 +              }
 +      } else {
 +              if (USES_WARPCORE(bp))
 +                      bnx2x_xmac_enable(params, vars, 0);
 +              else
 +                      bnx2x_bmac_enable(params, vars, 0);
 +      }
 +
 +      if (params->loopback_mode == LOOPBACK_XGXS) {
 +              /* set 10G XGXS loopback */
 +              params->phy[INT_PHY].config_loopback(
 +                      &params->phy[INT_PHY],
 +                      params);
 +      } else {
 +              /* set external phy loopback */
 +              u8 phy_index;
 +              for (phy_index = EXT_PHY1;
 +                    phy_index < params->num_phys; phy_index++) {
 +                      if (params->phy[phy_index].config_loopback)
 +                              params->phy[phy_index].config_loopback(
 +                                      &params->phy[phy_index],
 +                                      params);
 +              }
 +      }
 +      REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 +
 +      bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
 +}
 +
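 +/*
 + * Main link init entry: clear the link vars, mask the NIG link attentions,
 + * init the EMAC block and then either enter the requested loopback mode
 + * or perform a regular link initialization.
 + */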
 +int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      DP(NETIF_MSG_LINK, "Phy Initialization started\n");
 +      DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
 +                 params->req_line_speed[0], params->req_flow_ctrl[0]);
 +      DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
 +                 params->req_line_speed[1], params->req_flow_ctrl[1]);
 +      vars->link_status = 0;
 +      vars->phy_link_up = 0;
 +      vars->link_up = 0;
 +      vars->line_speed = 0;
 +      vars->duplex = DUPLEX_FULL;
 +      vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +      vars->mac_type = MAC_TYPE_NONE;
 +      vars->phy_flags = 0;
 +
 +      /* disable attentions */
 +      bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
 +                     (NIG_MASK_XGXS0_LINK_STATUS |
 +                      NIG_MASK_XGXS0_LINK10G |
 +                      NIG_MASK_SERDES0_LINK_STATUS |
 +                      NIG_MASK_MI_INT));
 +
 +      bnx2x_emac_init(params, vars);
 +
 +      if (params->num_phys == 0) {
 +              DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
 +              return -EINVAL;
 +      }
 +      set_phy_vars(params, vars);
 +
 +      DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
 +      switch (params->loopback_mode) {
 +      case LOOPBACK_BMAC:
 +              bnx2x_init_bmac_loopback(params, vars);
 +              break;
 +      case LOOPBACK_EMAC:
 +              bnx2x_init_emac_loopback(params, vars);
 +              break;
 +      case LOOPBACK_XMAC:
 +              bnx2x_init_xmac_loopback(params, vars);
 +              break;
 +      case LOOPBACK_UMAC:
 +              bnx2x_init_umac_loopback(params, vars);
 +              break;
 +      case LOOPBACK_XGXS:
 +      case LOOPBACK_EXT_PHY:
 +              bnx2x_init_xgxs_loopback(params, vars);
 +              break;
 +      default:
 +              if (!CHIP_IS_E3(bp)) {
 +                      if (params->switch_cfg == SWITCH_CFG_10G)
 +                              bnx2x_xgxs_deassert(params);
 +                      else
 +                              bnx2x_serdes_deassert(bp, params->port);
 +              }
 +              bnx2x_link_initialize(params, vars);
 +              msleep(30);
 +              bnx2x_link_int_enable(params);
 +              break;
 +      }
 +      return 0;
 +}
 +
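 +/*
 + * Bring the link down on the given port: clear the link status in shmem,
 + * mask attentions, drain the NIG, stop the MACs, reset the external phys
 + * (when requested) and the internal phy, and finally reset the BigMac.
 + */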
 +int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 +                   u8 reset_ext_phy)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u8 phy_index, port = params->port, clear_latch_ind = 0;
 +      DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
 +      /* disable attentions */
 +      vars->link_status = 0;
 +      bnx2x_update_mng(params, vars->link_status);
 +      bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
 +                     (NIG_MASK_XGXS0_LINK_STATUS |
 +                      NIG_MASK_XGXS0_LINK10G |
 +                      NIG_MASK_SERDES0_LINK_STATUS |
 +                      NIG_MASK_MI_INT));
 +
 +      /* activate nig drain */
 +      REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
 +
 +      /* disable nig egress interface */
 +      if (!CHIP_IS_E3(bp)) {
 +              REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
 +              REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
 +      }
 +
 +      /* Stop BigMac rx */
 +      if (!CHIP_IS_E3(bp))
 +              bnx2x_bmac_rx_disable(bp, port);
 +      else
 +              bnx2x_xmac_disable(params);
 +      /* disable emac */
 +      if (!CHIP_IS_E3(bp))
 +              REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 +
 +      msleep(10);
 +      /*
 +       * The PHY reset is controlled by GPIO 1.
 +       * Hold it low.
 +       */
 +      /* clear link led */
 +      bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
 +
 +      if (reset_ext_phy) {
 +              bnx2x_set_mdio_clk(bp, params->chip_id, port);
 +              for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 +                    phy_index++) {
 +                      if (params->phy[phy_index].link_reset) {
 +                              bnx2x_set_aer_mmd(params,
 +                                                &params->phy[phy_index]);
 +                              params->phy[phy_index].link_reset(
 +                                      &params->phy[phy_index],
 +                                      params);
 +                      }
 +                      if (params->phy[phy_index].flags &
 +                          FLAGS_REARM_LATCH_SIGNAL)
 +                              clear_latch_ind = 1;
 +              }
 +      }
 +
 +      if (clear_latch_ind) {
 +              /* Clear latching indication */
 +              bnx2x_rearm_latch_signal(bp, port, 0);
 +              bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
 +                             1 << NIG_LATCH_BC_ENABLE_MI_INT);
 +      }
 +      if (params->phy[INT_PHY].link_reset)
 +              params->phy[INT_PHY].link_reset(
 +                      &params->phy[INT_PHY], params);
 +      /* reset BigMac */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +             (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 +
 +      /* disable nig ingress interface */
 +      if (!CHIP_IS_E3(bp)) {
 +              REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
 +              REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
 +      }
 +      vars->link_up = 0;
 +      vars->phy_flags = 0;
 +      return 0;
 +}
 +
 +/****************************************************************************/
 +/*                            Common function                             */
 +/****************************************************************************/
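 +/*
 + * Common (per-chip) init for the 8073 phy: reset the phy on both ports,
 + * load its external firmware and run the transmitter power-down/up
 + * sequence.
 + */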
 +static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
 +                                    u32 shmem_base_path[],
 +                                    u32 shmem2_base_path[], u8 phy_index,
 +                                    u32 chip_id)
 +{
 +      struct bnx2x_phy phy[PORT_MAX];
 +      struct bnx2x_phy *phy_blk[PORT_MAX];
 +      u16 val;
 +      s8 port = 0;
 +      s8 port_of_path = 0;
 +      u32 swap_val, swap_override;
 +      swap_val = REG_RD(bp,  NIG_REG_PORT_SWAP);
 +      swap_override = REG_RD(bp,  NIG_REG_STRAP_OVERRIDE);
 +      port ^= (swap_val && swap_override);
 +      bnx2x_ext_phy_hw_reset(bp, port);
 +      /* PART1 - Reset both phys */
 +      for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 +              u32 shmem_base, shmem2_base;
 +              /* In E2, the same phy is used for port0 of both paths */
 +              if (CHIP_IS_E1x(bp)) {
 +                      shmem_base = shmem_base_path[0];
 +                      shmem2_base = shmem2_base_path[0];
 +                      port_of_path = port;
 +              } else {
 +                      shmem_base = shmem_base_path[port];
 +                      shmem2_base = shmem2_base_path[port];
 +                      port_of_path = 0;
 +              }
 +
 +              /* Extract the ext phy address for the port */
 +              if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
 +                                     port_of_path, &phy[port]) !=
 +                  0) {
 +                      DP(NETIF_MSG_LINK, "populate_phy failed\n");
 +                      return -EINVAL;
 +              }
 +              /* disable attentions */
 +              bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
 +                             port_of_path*4,
 +                             (NIG_MASK_XGXS0_LINK_STATUS |
 +                              NIG_MASK_XGXS0_LINK10G |
 +                              NIG_MASK_SERDES0_LINK_STATUS |
 +                              NIG_MASK_MI_INT));
 +
 +              /* Need to take the phy out of low power mode in order
 +                      to access its registers */
 +              bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 +                             MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 +                             port);
 +
 +              /* Reset the phy */
 +              bnx2x_cl45_write(bp, &phy[port],
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_CTRL,
 +                               1<<15);
 +      }
 +
 +      /* Add delay of 150ms after reset */
 +      msleep(150);
 +
 +      if (phy[PORT_0].addr & 0x1) {
 +              phy_blk[PORT_0] = &(phy[PORT_1]);
 +              phy_blk[PORT_1] = &(phy[PORT_0]);
 +      } else {
 +              phy_blk[PORT_0] = &(phy[PORT_0]);
 +              phy_blk[PORT_1] = &(phy[PORT_1]);
 +      }
 +
 +      /* PART2 - Download firmware to both phys */
 +      for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 +              if (CHIP_IS_E1x(bp))
 +                      port_of_path = port;
 +              else
 +                      port_of_path = 0;
 +
 +              DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 +                         phy_blk[port]->addr);
 +              if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
 +                                                    port_of_path))
 +                      return -EINVAL;
 +
 +              /* Only set bit 10 = 1 (Tx power down) */
 +              bnx2x_cl45_read(bp, phy_blk[port],
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_TX_POWER_DOWN, &val);
 +
 +              /* Phase1 of TX_POWER_DOWN reset */
 +              bnx2x_cl45_write(bp, phy_blk[port],
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_TX_POWER_DOWN,
 +                               (val | 1<<10));
 +      }
 +
 +      /*
 +       * Toggle the transmitter: power down and then back up with a 600ms
 +       * delay in between
 +       */
 +      msleep(600);
 +
 +      /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
 +      for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 +              /* Phase2 of POWER_DOWN_RESET */
 +              /* Release bit 10 (Release Tx power down) */
 +              bnx2x_cl45_read(bp, phy_blk[port],
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_TX_POWER_DOWN, &val);
 +
 +              bnx2x_cl45_write(bp, phy_blk[port],
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
 +              msleep(15);
 +
 +              /* Read modify write the SPI-ROM version select register */
 +              bnx2x_cl45_read(bp, phy_blk[port],
 +                              MDIO_PMA_DEVAD,
 +                              MDIO_PMA_REG_EDC_FFE_MAIN, &val);
 +              bnx2x_cl45_write(bp, phy_blk[port],
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
 +
 +              /* set GPIO2 back to LOW */
 +              bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 +                             MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 +      }
 +      return 0;
 +}
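 +
 +/*
 + * Common (per-chip) init for the 8726 phy: enable the module detection
 + * interrupt, reset the phy on each port and set the fault-module LED.
 + */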
 +static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
 +                                    u32 shmem_base_path[],
 +                                    u32 shmem2_base_path[], u8 phy_index,
 +                                    u32 chip_id)
 +{
 +      u32 val;
 +      s8 port;
 +      struct bnx2x_phy phy;
 +      /* Use port1 because of the static port-swap */
 +      /* Enable the module detection interrupt */
 +      val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
 +      val |= ((1<<MISC_REGISTERS_GPIO_3)|
 +              (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
 +      REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
 +
 +      bnx2x_ext_phy_hw_reset(bp, 0);
 +      msleep(5);
 +      for (port = 0; port < PORT_MAX; port++) {
 +              u32 shmem_base, shmem2_base;
 +
 +              /* In E2, the same phy is used for port0 of both paths */
 +              if (CHIP_IS_E1x(bp)) {
 +                      shmem_base = shmem_base_path[0];
 +                      shmem2_base = shmem2_base_path[0];
 +              } else {
 +                      shmem_base = shmem_base_path[port];
 +                      shmem2_base = shmem2_base_path[port];
 +              }
 +              /* Extract the ext phy address for the port */
 +              if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
 +                                     port, &phy) !=
 +                  0) {
 +                      DP(NETIF_MSG_LINK, "populate phy failed\n");
 +                      return -EINVAL;
 +              }
 +
 +              /* Reset phy*/
 +              bnx2x_cl45_write(bp, &phy,
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
 +
 +
 +              /* Set fault module detected LED on */
 +              bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
 +                             MISC_REGISTERS_GPIO_HIGH,
 +                             port);
 +      }
 +
 +      return 0;
 +}
 +static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
 +                                       u8 *io_gpio, u8 *io_port)
 +{
 +
 +      u32 phy_gpio_reset = REG_RD(bp, shmem_base +
 +                                        offsetof(struct shmem_region,
 +                              dev_info.port_hw_config[PORT_0].default_cfg));
 +      switch (phy_gpio_reset) {
 +      case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
 +              *io_gpio = 0;
 +              *io_port = 0;
 +              break;
 +      case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
 +              *io_gpio = 1;
 +              *io_port = 0;
 +              break;
 +      case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
 +              *io_gpio = 2;
 +              *io_port = 0;
 +              break;
 +      case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
 +              *io_gpio = 3;
 +              *io_port = 0;
 +              break;
 +      case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
 +              *io_gpio = 0;
 +              *io_port = 1;
 +              break;
 +      case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
 +              *io_gpio = 1;
 +              *io_port = 1;
 +              break;
 +      case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
 +              *io_gpio = 2;
 +              *io_port = 1;
 +              break;
 +      case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
 +              *io_gpio = 3;
 +              *io_port = 1;
 +              break;
 +      default:
 +              /* Don't override the io_gpio and io_port */
 +              break;
 +      }
 +}
 +
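 +/*
 + * Common (per-chip) init for the 8727 phy: reset the phy on both ports via
 + * the configured reset GPIO, load its external firmware and disable the
 + * phy transmitter output.
 + */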
 +static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
 +                                    u32 shmem_base_path[],
 +                                    u32 shmem2_base_path[], u8 phy_index,
 +                                    u32 chip_id)
 +{
 +      s8 port, reset_gpio;
 +      u32 swap_val, swap_override;
 +      struct bnx2x_phy phy[PORT_MAX];
 +      struct bnx2x_phy *phy_blk[PORT_MAX];
 +      s8 port_of_path;
 +      swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
 +      swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 +
 +      reset_gpio = MISC_REGISTERS_GPIO_1;
 +      port = 1;
 +
 +      /*
 +       * Retrieve the gpio/port which controls the reset.
 +       * Default is GPIO1, PORT1.
 +       */
 +      bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
 +                                   (u8 *)&reset_gpio, (u8 *)&port);
 +
 +      /* Calculate the port based on port swap */
 +      port ^= (swap_val && swap_override);
 +
 +      /* Initiate PHY reset*/
 +      bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
 +                     port);
 +      msleep(1);
 +      bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 +                     port);
 +
 +      msleep(5);
 +
 +      /* PART1 - Reset both phys */
 +      for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 +              u32 shmem_base, shmem2_base;
 +
 +              /* In E2, the same phy is used for port0 of both paths */
 +              if (CHIP_IS_E1x(bp)) {
 +                      shmem_base = shmem_base_path[0];
 +                      shmem2_base = shmem2_base_path[0];
 +                      port_of_path = port;
 +              } else {
 +                      shmem_base = shmem_base_path[port];
 +                      shmem2_base = shmem2_base_path[port];
 +                      port_of_path = 0;
 +              }
 +
 +              /* Extract the ext phy address for the port */
 +              if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
 +                                     port_of_path, &phy[port]) !=
 +                                     0) {
 +                      DP(NETIF_MSG_LINK, "populate phy failed\n");
 +                      return -EINVAL;
 +              }
 +              /* disable attentions */
 +              bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
 +                             port_of_path*4,
 +                             (NIG_MASK_XGXS0_LINK_STATUS |
 +                              NIG_MASK_XGXS0_LINK10G |
 +                              NIG_MASK_SERDES0_LINK_STATUS |
 +                              NIG_MASK_MI_INT));
 +
 +
 +              /* Reset the phy */
 +              bnx2x_cl45_write(bp, &phy[port],
 +                               MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 +      }
 +
 +      /* Add delay of 150ms after reset */
 +      msleep(150);
 +      if (phy[PORT_0].addr & 0x1) {
 +              phy_blk[PORT_0] = &(phy[PORT_1]);
 +              phy_blk[PORT_1] = &(phy[PORT_0]);
 +      } else {
 +              phy_blk[PORT_0] = &(phy[PORT_0]);
 +              phy_blk[PORT_1] = &(phy[PORT_1]);
 +      }
 +      /* PART2 - Download firmware to both phys */
 +      for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 +              if (CHIP_IS_E1x(bp))
 +                      port_of_path = port;
 +              else
 +                      port_of_path = 0;
 +              DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 +                         phy_blk[port]->addr);
 +              if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
 +                                                    port_of_path))
 +                      return -EINVAL;
 +              /* Disable PHY transmitter output */
 +              bnx2x_cl45_write(bp, phy_blk[port],
 +                               MDIO_PMA_DEVAD,
 +                               MDIO_PMA_REG_TX_DISABLE, 1);
 +
 +      }
 +      return 0;
 +}
 +
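 +/*
 + * Dispatch the per-phy-type common init routine for external phys that
 + * require one-time, chip-wide initialization.
 + */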
 +static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
 +                                   u32 shmem2_base_path[], u8 phy_index,
 +                                   u32 ext_phy_type, u32 chip_id)
 +{
 +      int rc = 0;
 +
 +      switch (ext_phy_type) {
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
 +              rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
 +                                              shmem2_base_path,
 +                                              phy_index, chip_id);
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
 +              rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
 +                                              shmem2_base_path,
 +                                              phy_index, chip_id);
 +              break;
 +
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
 +              /*
 +               * GPIO1 affects both ports, so it is handled once here in
 +               * the common init rather than per port
 +               */
 +              rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
 +                                              shmem2_base_path,
 +                                              phy_index, chip_id);
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
 +              /*
 +               * The GPIO3 pins are linked, so both need to be toggled
 +               * to obtain the required 2us pulse.
 +               */
 +              rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
 +              break;
 +      case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
 +              rc = -EINVAL;
 +              break;
 +      default:
 +              DP(NETIF_MSG_LINK,
 +                         "ext_phy 0x%x common init not required\n",
 +                         ext_phy_type);
 +              break;
 +      }
 +
 +      if (rc != 0)
 +              netdev_err(bp->dev,  "Warning: PHY was not initialized,"
 +                                    " Port %d\n",
 +                       0);
 +      return rc;
 +}
 +
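 +/*
 + * Entry point for common phy init: skip it if a phy firmware version is
 + * already recorded in shmem, otherwise run the common init for every
 + * external phy type found on port 0.
 + */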
 +int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
 +                        u32 shmem2_base_path[], u32 chip_id)
 +{
 +      int rc = 0;
 +      u32 phy_ver, val;
 +      u8 phy_index = 0;
 +      u32 ext_phy_type, ext_phy_config;
 +      bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
 +      bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
 +      DP(NETIF_MSG_LINK, "Begin common phy init\n");
 +      if (CHIP_IS_E3(bp)) {
 +              /* Enable EPIO */
 +              val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
 +              REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
 +      }
 +      /* Check if common init was already done */
 +      phy_ver = REG_RD(bp, shmem_base_path[0] +
 +                       offsetof(struct shmem_region,
 +                                port_mb[PORT_0].ext_phy_fw_version));
 +      if (phy_ver) {
 +              DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
 +                             phy_ver);
 +              return 0;
 +      }
 +
 +      /* Read the ext_phy_type for arbitrary port(0) */
 +      for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
 +            phy_index++) {
 +              ext_phy_config = bnx2x_get_ext_phy_config(bp,
 +                                                        shmem_base_path[0],
 +                                                        phy_index, 0);
 +              ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
 +              rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
 +                                              shmem2_base_path,
 +                                              phy_index, ext_phy_type,
 +                                              chip_id);
 +      }
 +      return rc;
 +}
 +
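 +/*
 + * E3 only: sample the over-current input pin configured in shmem; on a
 + * power fault, log an error once and latch PHY_OVER_CURRENT_FLAG, and
 + * clear the flag when the fault goes away.
 + */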
 +static void bnx2x_check_over_curr(struct link_params *params,
 +                                struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u32 cfg_pin;
 +      u8 port = params->port;
 +      u32 pin_val;
 +
 +      cfg_pin = (REG_RD(bp, params->shmem_base +
 +                        offsetof(struct shmem_region,
 +                             dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
 +                 PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
 +              PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
 +
 +      /* Ignore check if no external input PIN available */
 +      if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
 +              return;
 +
 +      if (!pin_val) {
 +              if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
 +                      netdev_err(bp->dev, "Error:  Power fault on Port %d has"
 +                                          " been detected and the power to "
 +                                          "that SFP+ module has been removed"
 +                                          " to prevent failure of the card."
 +                                          " Please remove the SFP+ module and"
 +                                          " restart the system to clear this"
 +                                          " error.\n",
 +                       params->port);
 +                      vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
 +              }
 +      } else
 +              vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
 +}
 +
 +static void bnx2x_analyze_link_error(struct link_params *params,
 +                                   struct link_vars *vars, u32 lss_status)
 +{
 +      struct bnx2x *bp = params->bp;
 +      /* Compare new value with previous value */
 +      u8 led_mode;
 +      u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
 +
 +      if ((lss_status ^ half_open_conn) == 0)
 +              return;
 +
 +      /* If values differ */
 +      DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
 +                     half_open_conn, lss_status);
 +
 +      /*
 +       * a. Update shmem->link_status accordingly
 +       * b. Update link_vars->link_up
 +       */
 +      if (lss_status) {
 +              DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
 +              vars->link_status &= ~LINK_STATUS_LINK_UP;
 +              vars->link_up = 0;
 +              vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
 +              /*
 +               * Set LED mode to off since the PHY doesn't know about these
 +               * errors
 +               */
 +              led_mode = LED_MODE_OFF;
 +      } else {
 +              DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
 +              vars->link_status |= LINK_STATUS_LINK_UP;
 +              vars->link_up = 1;
 +              vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
 +              led_mode = LED_MODE_OPER;
 +      }
 +      /* Update the LED according to the link state */
 +      bnx2x_set_led(params, vars, led_mode, SPEED_10000);
 +
 +      /* Update link status in the shared memory */
 +      bnx2x_update_mng(params, vars->link_status);
 +
 +      /* C. Trigger General Attention */
 +      vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
 +      bnx2x_notify_link_changed(bp);
 +}
 +
 +/******************************************************************************
 +* Description:
 +*     This function checks for a half-open connection change indication.
 +*     When such a change occurs, it calls bnx2x_analyze_link_error
 +*     to check whether Remote Fault is set or cleared. Reception of a remote
 +*     fault status message in the MAC indicates that the peer's MAC has
 +*     detected a fault, for example due to a break in the TX side of the
 +*     fiber.
 +*
 +******************************************************************************/
 +static void bnx2x_check_half_open_conn(struct link_params *params,
 +                                     struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u32 lss_status = 0;
 +      u32 mac_base;
 +      /* Only run the check when the link is physically up @ 10G */
 +      if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
 +              return;
 +
 +      if (CHIP_IS_E3(bp) &&
 +          (REG_RD(bp, MISC_REG_RESET_REG_2) &
 +            (MISC_REGISTERS_RESET_REG_2_XMAC))) {
 +              /* Check E3 XMAC */
 +              /*
 +               * Note that link speed cannot be queried here, since it may be
 +               * zero while link is down. In case UMAC is active, LSS will
 +               * simply not be set
 +               */
 +              mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
 +
 +              /* Clear stick bits (Requires rising edge) */
 +              REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
 +              REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
 +                     XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
 +                     XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
 +              if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
 +                      lss_status = 1;
 +
 +              bnx2x_analyze_link_error(params, vars, lss_status);
 +      } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
 +                 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
 +              /* Check E1X / E2 BMAC */
 +              u32 lss_status_reg;
 +              u32 wb_data[2];
 +              mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
 +                      NIG_REG_INGRESS_BMAC0_MEM;
 +              /*  Read BIGMAC_REGISTER_RX_LSS_STATUS */
 +              if (CHIP_IS_E2(bp))
 +                      lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
 +              else
 +                      lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
 +
 +              REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
 +              lss_status = (wb_data[0] > 0);
 +
 +              bnx2x_analyze_link_error(params, vars, lss_status);
 +      }
 +}
 +
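 +/*
 + * Periodic link maintenance: run the half-open connection check for phys
 + * flagged with FLAGS_TX_ERROR_CHECK, and on E3 also check for
 + * over-current.
 + */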
 +void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
 +{
 +      struct bnx2x *bp = params->bp;
 +      u16 phy_idx;
 +      if (!params) {
 +              DP(NETIF_MSG_LINK, "Uninitialized params !\n");
 +              return;
 +      }
 +
 +      for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
 +              if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
 +                      bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
 +                      bnx2x_check_half_open_conn(params, vars);
 +                      break;
 +              }
 +      }
 +
 +      if (CHIP_IS_E3(bp))
 +              bnx2x_check_over_curr(params, vars);
 +}
 +
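 +/*
 + * Return 1 if any phy described by the given shmem bases requires the
 + * MDC/MDIO HW lock (FLAGS_HW_LOCK_REQUIRED), 0 otherwise.
 + */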
 +u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
 +{
 +      u8 phy_index;
 +      struct bnx2x_phy phy;
 +      for (phy_index = INT_PHY; phy_index < MAX_PHYS;
 +            phy_index++) {
 +              if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
 +                                     0, &phy) != 0) {
 +                      DP(NETIF_MSG_LINK, "populate phy failed\n");
 +                      return 0;
 +              }
 +
 +              if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
 +                      return 1;
 +      }
 +      return 0;
 +}
 +
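 +/*
 + * Return non-zero if any external phy on the port requests fan failure
 + * detection (FLAGS_FAN_FAILURE_DET_REQ).
 + */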
 +u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
 +                           u32 shmem_base,
 +                           u32 shmem2_base,
 +                           u8 port)
 +{
 +      u8 phy_index, fan_failure_det_req = 0;
 +      struct bnx2x_phy phy;
 +      for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
 +            phy_index++) {
 +              if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
 +                                     port, &phy)
 +                  != 0) {
 +                      DP(NETIF_MSG_LINK, "populate phy failed\n");
 +                      return 0;
 +              }
 +              fan_failure_det_req |= (phy.flags &
 +                                      FLAGS_FAN_FAILURE_DET_REQ);
 +      }
 +      return fan_failure_det_req;
 +}
 +
 +void bnx2x_hw_reset_phy(struct link_params *params)
 +{
 +      u8 phy_index;
 +      struct bnx2x *bp = params->bp;
 +      bnx2x_update_mng(params, 0);
 +      bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
 +                     (NIG_MASK_XGXS0_LINK_STATUS |
 +                      NIG_MASK_XGXS0_LINK10G |
 +                      NIG_MASK_SERDES0_LINK_STATUS |
 +                      NIG_MASK_MI_INT));
 +
 +      for (phy_index = INT_PHY; phy_index < MAX_PHYS;
 +            phy_index++) {
 +              if (params->phy[phy_index].hw_reset) {
 +                      params->phy[phy_index].hw_reset(
 +                              &params->phy[phy_index],
 +                              params);
 +                      params->phy[phy_index] = phy_null;
 +              }
 +      }
 +}
 +
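 +/* Configure the GPIO and AEU mask that signal SFP+ module insertion/removal
 + * (MOD_ABS) interrupts for this port.
 + */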
 +void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
 +                          u32 chip_id, u32 shmem_base, u32 shmem2_base,
 +                          u8 port)
 +{
 +      u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
 +      u32 val;
 +      u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
 +      if (CHIP_IS_E3(bp)) {
 +              if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
 +                                            shmem_base,
 +                                            port,
 +                                            &gpio_num,
 +                                            &gpio_port) != 0)
 +                      return;
 +      } else {
 +              struct bnx2x_phy phy;
 +              for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
 +                    phy_index++) {
 +                      if (bnx2x_populate_phy(bp, phy_index, shmem_base,
 +                                             shmem2_base, port, &phy)
 +                          != 0) {
 +                              DP(NETIF_MSG_LINK, "populate phy failed\n");
 +                              return;
 +                      }
 +                      if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
 +                              gpio_num = MISC_REGISTERS_GPIO_3;
 +                              gpio_port = port;
 +                              break;
 +                      }
 +              }
 +      }
 +
 +      if (gpio_num == 0xff)
 +              return;
 +
 +      /* Set the GPIO to trigger on SFP+ module insertion/removal */
 +      bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
 +
 +      swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
 +      swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 +      gpio_port ^= (swap_val && swap_override);
 +
 +      vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
 +              (gpio_num + (gpio_port << 2));
 +
 +      sync_offset = shmem_base +
 +              offsetof(struct shmem_region,
 +                       dev_info.port_hw_config[port].aeu_int_mask);
 +      REG_WR(bp, sync_offset, vars->aeu_int_mask);
 +
 +      DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
 +                     gpio_num, gpio_port, vars->aeu_int_mask);
 +
 +      if (port == 0)
 +              offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
 +      else
 +              offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
 +
 +      /* Open appropriate AEU for interrupts */
 +      aeu_mask = REG_RD(bp, offset);
 +      aeu_mask |= vars->aeu_int_mask;
 +      REG_WR(bp, offset, aeu_mask);
 +
 +      /* Enable the GPIO to trigger interrupt */
 +      val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
 +      val |= 1 << (gpio_num + (gpio_port << 2));
 +      REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
 +}
index 85dd294,0000000..621ab28
mode 100644,000000..100644
--- /dev/null
@@@ -1,11541 -1,0 +1,11609 @@@
-       opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
-                  (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
 +/* bnx2x_main.c: Broadcom Everest network driver.
 + *
 + * Copyright (c) 2007-2011 Broadcom Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation.
 + *
 + * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 + * Written by: Eliezer Tamir
 + * Based on code from Michael Chan's bnx2 driver
 + * UDP CSUM errata workaround by Arik Gendelman
 + * Slowpath and fastpath rework by Vladislav Zolotarov
 + * Statistics and Link management by Yitchak Gertner
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/module.h>
 +#include <linux/moduleparam.h>
 +#include <linux/kernel.h>
 +#include <linux/device.h>  /* for dev_info() */
 +#include <linux/timer.h>
 +#include <linux/errno.h>
 +#include <linux/ioport.h>
 +#include <linux/slab.h>
 +#include <linux/interrupt.h>
 +#include <linux/pci.h>
 +#include <linux/init.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/skbuff.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/bitops.h>
 +#include <linux/irq.h>
 +#include <linux/delay.h>
 +#include <asm/byteorder.h>
 +#include <linux/time.h>
 +#include <linux/ethtool.h>
 +#include <linux/mii.h>
 +#include <linux/if.h>
 +#include <linux/if_vlan.h>
 +#include <net/ip.h>
 +#include <net/ipv6.h>
 +#include <net/tcp.h>
 +#include <net/checksum.h>
 +#include <net/ip6_checksum.h>
 +#include <linux/workqueue.h>
 +#include <linux/crc32.h>
 +#include <linux/crc32c.h>
 +#include <linux/prefetch.h>
 +#include <linux/zlib.h>
 +#include <linux/io.h>
 +#include <linux/stringify.h>
 +#include <linux/vmalloc.h>
 +
 +#include "bnx2x.h"
 +#include "bnx2x_init.h"
 +#include "bnx2x_init_ops.h"
 +#include "bnx2x_cmn.h"
 +#include "bnx2x_dcb.h"
 +#include "bnx2x_sp.h"
 +
 +#include <linux/firmware.h>
 +#include "bnx2x_fw_file_hdr.h"
 +/* FW files */
 +#define FW_FILE_VERSION                                       \
 +      __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
 +      __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
 +      __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
 +      __stringify(BCM_5710_FW_ENGINEERING_VERSION)
 +#define FW_FILE_NAME_E1               "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
 +#define FW_FILE_NAME_E1H      "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
 +#define FW_FILE_NAME_E2               "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
 +
 +/* Time in jiffies before concluding the transmitter is hung */
 +#define TX_TIMEOUT            (5*HZ)
 +
 +static char version[] __devinitdata =
 +      "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
 +      DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 +
 +MODULE_AUTHOR("Eliezer Tamir");
 +MODULE_DESCRIPTION("Broadcom NetXtreme II "
 +                 "BCM57710/57711/57711E/"
 +                 "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
 +                 "57840/57840_MF Driver");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(DRV_MODULE_VERSION);
 +MODULE_FIRMWARE(FW_FILE_NAME_E1);
 +MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 +MODULE_FIRMWARE(FW_FILE_NAME_E2);
 +
 +static int multi_mode = 1;
 +module_param(multi_mode, int, 0);
 +MODULE_PARM_DESC(multi_mode, " Multi queue mode "
 +                           "(0 Disable; 1 Enable (default))");
 +
 +int num_queues;
 +module_param(num_queues, int, 0);
 +MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
 +                              " (default is as a number of CPUs)");
 +
 +static int disable_tpa;
 +module_param(disable_tpa, int, 0);
 +MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 +
 +#define INT_MODE_INTx                 1
 +#define INT_MODE_MSI                  2
 +static int int_mode;
 +module_param(int_mode, int, 0);
 +MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
 +                              "(1 INT#x; 2 MSI)");
 +
 +static int dropless_fc;
 +module_param(dropless_fc, int, 0);
 +MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
 +
 +static int poll;
 +module_param(poll, int, 0);
 +MODULE_PARM_DESC(poll, " Use polling (for debug)");
 +
 +static int mrrs = -1;
 +module_param(mrrs, int, 0);
 +MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
 +
 +static int debug;
 +module_param(debug, int, 0);
 +MODULE_PARM_DESC(debug, " Default debug msglevel");
 +
 +
 +
 +struct workqueue_struct *bnx2x_wq;
 +
 +enum bnx2x_board_type {
 +      BCM57710 = 0,
 +      BCM57711,
 +      BCM57711E,
 +      BCM57712,
 +      BCM57712_MF,
 +      BCM57800,
 +      BCM57800_MF,
 +      BCM57810,
 +      BCM57810_MF,
 +      BCM57840,
 +      BCM57840_MF
 +};
 +
 +/* indexed by board_type, above */
 +static struct {
 +      char *name;
 +} board_info[] __devinitdata = {
 +      { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
 +      { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
 +      { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
 +      { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
 +      { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
 +      { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
 +      { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
 +      { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
 +      { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
 +      { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
 +      { "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
 +                                              "Ethernet Multi Function"}
 +};
 +
 +#ifndef PCI_DEVICE_ID_NX2_57710
 +#define PCI_DEVICE_ID_NX2_57710               CHIP_NUM_57710
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57711
 +#define PCI_DEVICE_ID_NX2_57711               CHIP_NUM_57711
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57711E
 +#define PCI_DEVICE_ID_NX2_57711E      CHIP_NUM_57711E
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57712
 +#define PCI_DEVICE_ID_NX2_57712               CHIP_NUM_57712
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57712_MF
 +#define PCI_DEVICE_ID_NX2_57712_MF    CHIP_NUM_57712_MF
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57800
 +#define PCI_DEVICE_ID_NX2_57800               CHIP_NUM_57800
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57800_MF
 +#define PCI_DEVICE_ID_NX2_57800_MF    CHIP_NUM_57800_MF
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57810
 +#define PCI_DEVICE_ID_NX2_57810               CHIP_NUM_57810
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57810_MF
 +#define PCI_DEVICE_ID_NX2_57810_MF    CHIP_NUM_57810_MF
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57840
 +#define PCI_DEVICE_ID_NX2_57840               CHIP_NUM_57840
 +#endif
 +#ifndef PCI_DEVICE_ID_NX2_57840_MF
 +#define PCI_DEVICE_ID_NX2_57840_MF    CHIP_NUM_57840_MF
 +#endif
 +static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
 +      { 0 }
 +};
 +
 +MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
 +
 +/****************************************************************************
 +* General service functions
 +****************************************************************************/
 +
 +static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
 +                                     u32 addr, dma_addr_t mapping)
 +{
 +      REG_WR(bp,  addr, U64_LO(mapping));
 +      REG_WR(bp,  addr + 4, U64_HI(mapping));
 +}
 +
 +static inline void storm_memset_spq_addr(struct bnx2x *bp,
 +                                       dma_addr_t mapping, u16 abs_fid)
 +{
 +      u32 addr = XSEM_REG_FAST_MEMORY +
 +                      XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
 +
 +      __storm_memset_dma_mapping(bp, addr, mapping);
 +}
 +
 +static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
 +                                       u16 pf_id)
 +{
 +      REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
 +              pf_id);
 +      REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
 +              pf_id);
 +      REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
 +              pf_id);
 +      REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
 +              pf_id);
 +}
 +
 +static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
 +                                      u8 enable)
 +{
 +      REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
 +              enable);
 +      REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
 +              enable);
 +      REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
 +              enable);
 +      REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
 +              enable);
 +}
 +
 +static inline void storm_memset_eq_data(struct bnx2x *bp,
 +                              struct event_ring_data *eq_data,
 +                              u16 pfid)
 +{
 +      size_t size = sizeof(struct event_ring_data);
 +
 +      u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
 +
 +      __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
 +}
 +
 +static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
 +                                      u16 pfid)
 +{
 +      u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
 +      REG_WR16(bp, addr, eq_prod);
 +}
 +
 +/* used only at init
 + * locking is done by mcp
 + */
 +static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
 +{
 +      pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
 +      pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
 +      pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 +                             PCICFG_VENDOR_ID_OFFSET);
 +}
 +
 +static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
 +{
 +      u32 val;
 +
 +      pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
 +      pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
 +      pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 +                             PCICFG_VENDOR_ID_OFFSET);
 +
 +      return val;
 +}
 +
 +#define DMAE_DP_SRC_GRC               "grc src_addr [%08x]"
 +#define DMAE_DP_SRC_PCI               "pci src_addr [%x:%08x]"
 +#define DMAE_DP_DST_GRC               "grc dst_addr [%08x]"
 +#define DMAE_DP_DST_PCI               "pci dst_addr [%x:%08x]"
 +#define DMAE_DP_DST_NONE      "dst_addr [none]"
 +
 +static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
 +                        int msglvl)
 +{
 +      u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
 +
 +      switch (dmae->opcode & DMAE_COMMAND_DST) {
 +      case DMAE_CMD_DST_PCI:
 +              if (src_type == DMAE_CMD_SRC_PCI)
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
 +                         "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
 +                         dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
 +                         dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              else
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src [%08x], len [%d*4], dst [%x:%08x]\n"
 +                         "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_lo >> 2,
 +                         dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
 +                         dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              break;
 +      case DMAE_CMD_DST_GRC:
 +              if (src_type == DMAE_CMD_SRC_PCI)
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
 +                         "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
 +                         dmae->len, dmae->dst_addr_lo >> 2,
 +                         dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              else
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src [%08x], len [%d*4], dst [%08x]\n"
 +                         "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_lo >> 2,
 +                         dmae->len, dmae->dst_addr_lo >> 2,
 +                         dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              break;
 +      default:
 +              if (src_type == DMAE_CMD_SRC_PCI)
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
 +                         "comp_addr [%x:%08x]  comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
 +                         dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              else
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
 +                         "comp_addr [%x:%08x]  comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_lo >> 2,
 +                         dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              break;
 +      }
 +
 +}
 +
 +/* copy command into DMAE command memory and set DMAE command go */
 +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
 +{
 +      u32 cmd_offset;
 +      int i;
 +
 +      cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
 +      for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
 +              REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
 +
 +              DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
 +                 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
 +      }
 +      REG_WR(bp, dmae_reg_go_c[idx], 1);
 +}
 +
 +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
 +{
 +      return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
 +                         DMAE_CMD_C_ENABLE);
 +}
 +
 +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
 +{
 +      return opcode & ~DMAE_CMD_SRC_RESET;
 +}
 +
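 +/* Compose a DMAE command opcode from the source/destination types, port,
 + * VN, endianness and (optionally) completion settings.
 + */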
 +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 +                           bool with_comp, u8 comp_type)
 +{
 +      u32 opcode = 0;
 +
 +      opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
 +                 (dst_type << DMAE_COMMAND_DST_SHIFT));
 +
 +      opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
 +
 +      opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
-                       val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
++      opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
++                 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
 +      opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
 +
 +#ifdef __BIG_ENDIAN
 +      opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
 +#else
 +      opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
 +#endif
 +      if (with_comp)
 +              opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
 +      return opcode;
 +}
 +
 +static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
 +                                    struct dmae_command *dmae,
 +                                    u8 src_type, u8 dst_type)
 +{
 +      memset(dmae, 0, sizeof(struct dmae_command));
 +
 +      /* set the opcode */
 +      dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
 +                                       true, DMAE_COMP_PCI);
 +
 +      /* fill in the completion parameters */
 +      dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
 +      dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
 +      dmae->comp_val = DMAE_COMP_VAL;
 +}
 +
 +/* issue a DMAE command over the init channel and wait for completion */
 +static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
 +                                    struct dmae_command *dmae)
 +{
 +      u32 *wb_comp = bnx2x_sp(bp, wb_comp);
 +      int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
 +      int rc = 0;
 +
 +      DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 +         bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
 +         bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 +
 +      /*
 +       * Lock the DMAE channel. Disable BHs to prevent a deadlock,
 +       * since this code is called both from syscall context and
 +       * from the ndo_set_rx_mode() flow, which may run in BH context.
 +       */
 +      spin_lock_bh(&bp->dmae_lock);
 +
 +      /* reset completion */
 +      *wb_comp = 0;
 +
 +      /* post the command on the channel used for initializations */
 +      bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
 +
 +      /* wait for completion */
 +      udelay(5);
 +      while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
 +              DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 +
 +              if (!cnt) {
 +                      BNX2X_ERR("DMAE timeout!\n");
 +                      rc = DMAE_TIMEOUT;
 +                      goto unlock;
 +              }
 +              cnt--;
 +              udelay(50);
 +      }
 +      if (*wb_comp & DMAE_PCI_ERR_FLAG) {
 +              BNX2X_ERR("DMAE PCI error!\n");
 +              rc = DMAE_PCI_ERROR;
 +      }
 +
 +      DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 +         bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
 +         bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 +
 +unlock:
 +      spin_unlock_bh(&bp->dmae_lock);
 +      return rc;
 +}
 +
 +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 +                    u32 len32)
 +{
 +      struct dmae_command dmae;
 +
 +      if (!bp->dmae_ready) {
 +              u32 *data = bnx2x_sp(bp, wb_data[0]);
 +
 +              DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
 +                 "  using indirect\n", dst_addr, len32);
 +              bnx2x_init_ind_wr(bp, dst_addr, data, len32);
 +              return;
 +      }
 +
 +      /* set opcode and fixed command fields */
 +      bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 +
 +      /* fill in addresses and len */
 +      dmae.src_addr_lo = U64_LO(dma_addr);
 +      dmae.src_addr_hi = U64_HI(dma_addr);
 +      dmae.dst_addr_lo = dst_addr >> 2;
 +      dmae.dst_addr_hi = 0;
 +      dmae.len = len32;
 +
 +      bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
 +
 +      /* issue the command and wait for completion */
 +      bnx2x_issue_dmae_with_comp(bp, &dmae);
 +}
 +
 +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 +{
 +      struct dmae_command dmae;
 +
 +      if (!bp->dmae_ready) {
 +              u32 *data = bnx2x_sp(bp, wb_data[0]);
 +              int i;
 +
 +              DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
 +                 "  using indirect\n", src_addr, len32);
 +              for (i = 0; i < len32; i++)
 +                      data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
 +              return;
 +      }
 +
 +      /* set opcode and fixed command fields */
 +      bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
 +
 +      /* fill in addresses and len */
 +      dmae.src_addr_lo = src_addr >> 2;
 +      dmae.src_addr_hi = 0;
 +      dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
 +      dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
 +      dmae.len = len32;
 +
 +      bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
 +
 +      /* issue the command and wait for completion */
 +      bnx2x_issue_dmae_with_comp(bp, &dmae);
 +}
 +
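 +/* Write a buffer to GRC via DMAE in chunks of at most DMAE_LEN32_WR_MAX dwords */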
 +static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
 +                                    u32 addr, u32 len)
 +{
 +      int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
 +      int offset = 0;
 +
 +      while (len > dmae_wr_max) {
 +              bnx2x_write_dmae(bp, phys_addr + offset,
 +                               addr + offset, dmae_wr_max);
 +              offset += dmae_wr_max * 4;
 +              len -= dmae_wr_max;
 +      }
 +
 +      bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
 +}
 +
 +/* used only for slowpath so not inlined */
 +static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
 +{
 +      u32 wb_write[2];
 +
 +      wb_write[0] = val_hi;
 +      wb_write[1] = val_lo;
 +      REG_WR_DMAE(bp, reg, wb_write, 2);
 +}
 +
 +#ifdef USE_WB_RD
 +static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
 +{
 +      u32 wb_data[2];
 +
 +      REG_RD_DMAE(bp, reg, wb_data, 2);
 +
 +      return HILO_U64(wb_data[0], wb_data[1]);
 +}
 +#endif
 +
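 +/* Scan the XSTORM/TSTORM/CSTORM/USTORM assert lists and print any entries
 + * found; returns the number of asserts.
 + */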
 +static int bnx2x_mc_assert(struct bnx2x *bp)
 +{
 +      char last_idx;
 +      int i, rc = 0;
 +      u32 row0, row1, row2, row3;
 +
 +      /* XSTORM */
 +      last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
 +                         XSTORM_ASSERT_LIST_INDEX_OFFSET);
 +      if (last_idx)
 +              BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 +
 +      /* print the asserts */
 +      for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 +
 +              row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 +                            XSTORM_ASSERT_LIST_OFFSET(i));
 +              row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 +                            XSTORM_ASSERT_LIST_OFFSET(i) + 4);
 +              row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 +                            XSTORM_ASSERT_LIST_OFFSET(i) + 8);
 +              row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 +                            XSTORM_ASSERT_LIST_OFFSET(i) + 12);
 +
 +              if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 +                      BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
 +                                " 0x%08x 0x%08x 0x%08x\n",
 +                                i, row3, row2, row1, row0);
 +                      rc++;
 +              } else {
 +                      break;
 +              }
 +      }
 +
 +      /* TSTORM */
 +      last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
 +                         TSTORM_ASSERT_LIST_INDEX_OFFSET);
 +      if (last_idx)
 +              BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 +
 +      /* print the asserts */
 +      for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 +
 +              row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 +                            TSTORM_ASSERT_LIST_OFFSET(i));
 +              row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 +                            TSTORM_ASSERT_LIST_OFFSET(i) + 4);
 +              row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 +                            TSTORM_ASSERT_LIST_OFFSET(i) + 8);
 +              row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 +                            TSTORM_ASSERT_LIST_OFFSET(i) + 12);
 +
 +              if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 +                      BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
 +                                " 0x%08x 0x%08x 0x%08x\n",
 +                                i, row3, row2, row1, row0);
 +                      rc++;
 +              } else {
 +                      break;
 +              }
 +      }
 +
 +      /* CSTORM */
 +      last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
 +                         CSTORM_ASSERT_LIST_INDEX_OFFSET);
 +      if (last_idx)
 +              BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 +
 +      /* print the asserts */
 +      for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 +
 +              row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +                            CSTORM_ASSERT_LIST_OFFSET(i));
 +              row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +                            CSTORM_ASSERT_LIST_OFFSET(i) + 4);
 +              row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +                            CSTORM_ASSERT_LIST_OFFSET(i) + 8);
 +              row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +                            CSTORM_ASSERT_LIST_OFFSET(i) + 12);
 +
 +              if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 +                      BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
 +                                " 0x%08x 0x%08x 0x%08x\n",
 +                                i, row3, row2, row1, row0);
 +                      rc++;
 +              } else {
 +                      break;
 +              }
 +      }
 +
 +      /* USTORM */
 +      last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
 +                         USTORM_ASSERT_LIST_INDEX_OFFSET);
 +      if (last_idx)
 +              BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 +
 +      /* print the asserts */
 +      for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 +
 +              row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
 +                            USTORM_ASSERT_LIST_OFFSET(i));
 +              row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
 +                            USTORM_ASSERT_LIST_OFFSET(i) + 4);
 +              row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
 +                            USTORM_ASSERT_LIST_OFFSET(i) + 8);
 +              row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
 +                            USTORM_ASSERT_LIST_OFFSET(i) + 12);
 +
 +              if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 +                      BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
 +                                " 0x%08x 0x%08x 0x%08x\n",
 +                                i, row3, row2, row1, row0);
 +                      rc++;
 +              } else {
 +                      break;
 +              }
 +      }
 +
 +      return rc;
 +}
 +
 +void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
 +{
 +      u32 addr, val;
 +      u32 mark, offset;
 +      __be32 data[9];
 +      int word;
 +      u32 trace_shmem_base;
 +      if (BP_NOMCP(bp)) {
 +              BNX2X_ERR("NO MCP - can not dump\n");
 +              return;
 +      }
 +      netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
 +              (bp->common.bc_ver & 0xff0000) >> 16,
 +              (bp->common.bc_ver & 0xff00) >> 8,
 +              (bp->common.bc_ver & 0xff));
 +
 +      val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
 +      if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
 +              printk("%s" "MCP PC at 0x%x\n", lvl, val);
 +
 +      if (BP_PATH(bp) == 0)
 +              trace_shmem_base = bp->common.shmem_base;
 +      else
 +              trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
 +      addr = trace_shmem_base - 0x0800 + 4;
 +      mark = REG_RD(bp, addr);
 +      mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 +                      + ((mark + 0x3) & ~0x3) - 0x08000000;
 +      printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
 +
 +      printk("%s", lvl);
 +      for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
 +              for (word = 0; word < 8; word++)
 +                      data[word] = htonl(REG_RD(bp, offset + 4*word));
 +              data[8] = 0x0;
 +              pr_cont("%s", (char *)data);
 +      }
 +      for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
 +              for (word = 0; word < 8; word++)
 +                      data[word] = htonl(REG_RD(bp, offset + 4*word));
 +              data[8] = 0x0;
 +              pr_cont("%s", (char *)data);
 +      }
 +      printk("%s" "end of fw dump\n", lvl);
 +}
 +
 +static inline void bnx2x_fw_dump(struct bnx2x *bp)
 +{
 +      bnx2x_fw_dump_lvl(bp, KERN_ERR);
 +}
 +
 +void bnx2x_panic_dump(struct bnx2x *bp)
 +{
 +      int i;
 +      u16 j;
 +      struct hc_sp_status_block_data sp_sb_data;
 +      int func = BP_FUNC(bp);
 +#ifdef BNX2X_STOP_ON_ERROR
 +      u16 start = 0, end = 0;
 +      u8 cos;
 +#endif
 +
 +      bp->stats_state = STATS_STATE_DISABLED;
 +      DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 +
 +      BNX2X_ERR("begin crash dump -----------------\n");
 +
 +      /* Indices */
 +      /* Common */
 +      BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
 +                "  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
 +                bp->def_idx, bp->def_att_idx, bp->attn_state,
 +                bp->spq_prod_idx, bp->stats_counter);
 +      BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
 +                bp->def_status_blk->atten_status_block.attn_bits,
 +                bp->def_status_blk->atten_status_block.attn_bits_ack,
 +                bp->def_status_blk->atten_status_block.status_block_id,
 +                bp->def_status_blk->atten_status_block.attn_bits_index);
 +      BNX2X_ERR("     def (");
 +      for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
 +              pr_cont("0x%x%s",
 +                      bp->def_status_blk->sp_sb.index_values[i],
 +                      (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
 +
 +      for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
 +              *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +                      CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
 +                      i*sizeof(u32));
 +
 +      pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
 +             sp_sb_data.igu_sb_id,
 +             sp_sb_data.igu_seg_id,
 +             sp_sb_data.p_func.pf_id,
 +             sp_sb_data.p_func.vnic_id,
 +             sp_sb_data.p_func.vf_id,
 +             sp_sb_data.p_func.vf_valid,
 +             sp_sb_data.state);
 +
 +
 +      for_each_eth_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              int loop;
 +              struct hc_status_block_data_e2 sb_data_e2;
 +              struct hc_status_block_data_e1x sb_data_e1x;
 +              struct hc_status_block_sm  *hc_sm_p =
 +                      CHIP_IS_E1x(bp) ?
 +                      sb_data_e1x.common.state_machine :
 +                      sb_data_e2.common.state_machine;
 +              struct hc_index_data *hc_index_p =
 +                      CHIP_IS_E1x(bp) ?
 +                      sb_data_e1x.index_data :
 +                      sb_data_e2.index_data;
 +              u8 data_size, cos;
 +              u32 *sb_data_p;
 +              struct bnx2x_fp_txdata txdata;
 +
 +              /* Rx */
 +              BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
 +                        "  rx_comp_prod(0x%x)"
 +                        "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
 +                        i, fp->rx_bd_prod, fp->rx_bd_cons,
 +                        fp->rx_comp_prod,
 +                        fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
 +              BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
 +                        "  fp_hc_idx(0x%x)\n",
 +                        fp->rx_sge_prod, fp->last_max_sge,
 +                        le16_to_cpu(fp->fp_hc_idx));
 +
 +              /* Tx */
 +              for_each_cos_in_tx_queue(fp, cos) {
 +                      txdata = fp->txdata[cos];
 +                      BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
 +                                "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
 +                                "  *tx_cons_sb(0x%x)\n",
 +                                i, txdata.tx_pkt_prod,
 +                                txdata.tx_pkt_cons, txdata.tx_bd_prod,
 +                                txdata.tx_bd_cons,
 +                                le16_to_cpu(*txdata.tx_cons_sb));
 +              }
 +
 +              loop = CHIP_IS_E1x(bp) ?
 +                      HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
 +
 +              /* host sb data */
 +
 +#ifdef BCM_CNIC
 +              if (IS_FCOE_FP(fp))
 +                      continue;
 +#endif
 +              BNX2X_ERR("     run indexes (");
 +              for (j = 0; j < HC_SB_MAX_SM; j++)
 +                      pr_cont("0x%x%s",
 +                             fp->sb_running_index[j],
 +                             (j == HC_SB_MAX_SM - 1) ? ")" : " ");
 +
 +              BNX2X_ERR("     indexes (");
 +              for (j = 0; j < loop; j++)
 +                      pr_cont("0x%x%s",
 +                             fp->sb_index_values[j],
 +                             (j == loop - 1) ? ")" : " ");
 +              /* fw sb data */
 +              data_size = CHIP_IS_E1x(bp) ?
 +                      sizeof(struct hc_status_block_data_e1x) :
 +                      sizeof(struct hc_status_block_data_e2);
 +              data_size /= sizeof(u32);
 +              sb_data_p = CHIP_IS_E1x(bp) ?
 +                      (u32 *)&sb_data_e1x :
 +                      (u32 *)&sb_data_e2;
 +              /* copy sb data in here */
 +              for (j = 0; j < data_size; j++)
 +                      *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
 +                              CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
 +                              j * sizeof(u32));
 +
 +              if (!CHIP_IS_E1x(bp)) {
 +                      pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
 +                              "vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
 +                              "state(0x%x)\n",
 +                              sb_data_e2.common.p_func.pf_id,
 +                              sb_data_e2.common.p_func.vf_id,
 +                              sb_data_e2.common.p_func.vf_valid,
 +                              sb_data_e2.common.p_func.vnic_id,
 +                              sb_data_e2.common.same_igu_sb_1b,
 +                              sb_data_e2.common.state);
 +              } else {
 +                      pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
 +                              "vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
 +                              "state(0x%x)\n",
 +                              sb_data_e1x.common.p_func.pf_id,
 +                              sb_data_e1x.common.p_func.vf_id,
 +                              sb_data_e1x.common.p_func.vf_valid,
 +                              sb_data_e1x.common.p_func.vnic_id,
 +                              sb_data_e1x.common.same_igu_sb_1b,
 +                              sb_data_e1x.common.state);
 +              }
 +
 +              /* SB_SMs data */
 +              for (j = 0; j < HC_SB_MAX_SM; j++) {
 +                      pr_cont("SM[%d] __flags (0x%x) "
 +                             "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
 +                             "time_to_expire (0x%x) "
 +                             "timer_value(0x%x)\n", j,
 +                             hc_sm_p[j].__flags,
 +                             hc_sm_p[j].igu_sb_id,
 +                             hc_sm_p[j].igu_seg_id,
 +                             hc_sm_p[j].time_to_expire,
 +                             hc_sm_p[j].timer_value);
 +              }
 +
 +              /* Indices data */
 +              for (j = 0; j < loop; j++) {
 +                      pr_cont("INDEX[%d] flags (0x%x) "
 +                                       "timeout (0x%x)\n", j,
 +                             hc_index_p[j].flags,
 +                             hc_index_p[j].timeout);
 +              }
 +      }
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      /* Rings */
 +      /* Rx */
 +      for_each_rx_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +              start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
 +              end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
 +              for (j = start; j != end; j = RX_BD(j + 1)) {
 +                      u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
 +                      struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
 +
 +                      BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
 +                                i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
 +              }
 +
 +              start = RX_SGE(fp->rx_sge_prod);
 +              end = RX_SGE(fp->last_max_sge);
 +              for (j = start; j != end; j = RX_SGE(j + 1)) {
 +                      u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
 +                      struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
 +
 +                      BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
 +                                i, j, rx_sge[1], rx_sge[0], sw_page->page);
 +              }
 +
 +              start = RCQ_BD(fp->rx_comp_cons - 10);
 +              end = RCQ_BD(fp->rx_comp_cons + 503);
 +              for (j = start; j != end; j = RCQ_BD(j + 1)) {
 +                      u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
 +
 +                      BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
 +                                i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
 +              }
 +      }
 +
 +      /* Tx */
 +      for_each_tx_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              for_each_cos_in_tx_queue(fp, cos) {
 +                      struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 +
 +                      start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
 +                      end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
 +                      for (j = start; j != end; j = TX_BD(j + 1)) {
 +                              struct sw_tx_bd *sw_bd =
 +                                      &txdata->tx_buf_ring[j];
 +
 +                              BNX2X_ERR("fp%d: txdata %d, "
 +                                        "packet[%x]=[%p,%x]\n",
 +                                        i, cos, j, sw_bd->skb,
 +                                        sw_bd->first_bd);
 +                      }
 +
 +                      start = TX_BD(txdata->tx_bd_cons - 10);
 +                      end = TX_BD(txdata->tx_bd_cons + 254);
 +                      for (j = start; j != end; j = TX_BD(j + 1)) {
 +                              u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
 +
 +                              BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
 +                                        "[%x:%x:%x:%x]\n",
 +                                        i, cos, j, tx_bd[0], tx_bd[1],
 +                                        tx_bd[2], tx_bd[3]);
 +                      }
 +              }
 +      }
 +#endif
 +      bnx2x_fw_dump(bp);
 +      bnx2x_mc_assert(bp);
 +      BNX2X_ERR("end crash dump -----------------\n");
 +}
 +
 +/*
 + * FLR Support for E2
 + *
 + * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 + * initialization.
 + */
 +#define FLR_WAIT_USEC         10000   /* 10 milliseconds */
 +#define FLR_WAIT_INTERAVAL    50      /* usec */
 +#define       FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
 +
 +struct pbf_pN_buf_regs {
 +      int pN;
 +      u32 init_crd;
 +      u32 crd;
 +      u32 crd_freed;
 +};
 +
 +struct pbf_pN_cmd_regs {
 +      int pN;
 +      u32 lines_occup;
 +      u32 lines_freed;
 +};
 +
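 +/* Poll until the PBF has returned the transmission buffer credits for the
 + * given port/queue, or until the poll count expires.
 + */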
 +static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
 +                                   struct pbf_pN_buf_regs *regs,
 +                                   u32 poll_count)
 +{
 +      u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
 +      u32 cur_cnt = poll_count;
 +
 +      crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
 +      crd = crd_start = REG_RD(bp, regs->crd);
 +      init_crd = REG_RD(bp, regs->init_crd);
 +
 +      DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
 +      DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
 +      DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
 +
 +      while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
 +             (init_crd - crd_start))) {
 +              if (cur_cnt--) {
 +                      udelay(FLR_WAIT_INTERAVAL);
 +                      crd = REG_RD(bp, regs->crd);
 +                      crd_freed = REG_RD(bp, regs->crd_freed);
 +              } else {
 +                      DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
 +                         regs->pN);
 +                      DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
 +                         regs->pN, crd);
 +                      DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
 +                         regs->pN, crd_freed);
 +                      break;
 +              }
 +      }
 +      DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
 +         poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
 +}
 +
 +static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
 +                                   struct pbf_pN_cmd_regs *regs,
 +                                   u32 poll_count)
 +{
 +      u32 occup, to_free, freed, freed_start;
 +      u32 cur_cnt = poll_count;
 +
 +      occup = to_free = REG_RD(bp, regs->lines_occup);
 +      freed = freed_start = REG_RD(bp, regs->lines_freed);
 +
 +      DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
 +      DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
 +
 +      while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
 +              if (cur_cnt--) {
 +                      udelay(FLR_WAIT_INTERAVAL);
 +                      occup = REG_RD(bp, regs->lines_occup);
 +                      freed = REG_RD(bp, regs->lines_freed);
 +              } else {
 +                      DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
 +                         regs->pN);
 +                      DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
 +                         regs->pN, occup);
 +                      DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
 +                         regs->pN, freed);
 +                      break;
 +              }
 +      }
 +      DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
 +         poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
 +}
 +
 +static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
 +                                   u32 expected, u32 poll_count)
 +{
 +      u32 cur_cnt = poll_count;
 +      u32 val;
 +
 +      while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
 +              udelay(FLR_WAIT_INTERAVAL);
 +
 +      return val;
 +}
 +
 +static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
 +                                                char *msg, u32 poll_cnt)
 +{
 +      u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
 +      if (val != 0) {
 +              BNX2X_ERR("%s usage count=%d\n", msg, val);
 +              return 1;
 +      }
 +      return 0;
 +}
 +
 +static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
 +{
 +      /* adjust polling timeout */
 +      if (CHIP_REV_IS_EMUL(bp))
 +              return FLR_POLL_CNT * 2000;
 +
 +      if (CHIP_REV_IS_FPGA(bp))
 +              return FLR_POLL_CNT * 120;
 +
 +      return FLR_POLL_CNT;
 +}
 +
 +static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
 +{
 +      struct pbf_pN_cmd_regs cmd_regs[] = {
 +              {0, (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_TQ_OCCUPANCY_Q0 :
 +                      PBF_REG_P0_TQ_OCCUPANCY,
 +                  (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_TQ_LINES_FREED_CNT_Q0 :
 +                      PBF_REG_P0_TQ_LINES_FREED_CNT},
 +              {1, (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_TQ_OCCUPANCY_Q1 :
 +                      PBF_REG_P1_TQ_OCCUPANCY,
 +                  (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_TQ_LINES_FREED_CNT_Q1 :
 +                      PBF_REG_P1_TQ_LINES_FREED_CNT},
 +              {4, (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_TQ_OCCUPANCY_LB_Q :
 +                      PBF_REG_P4_TQ_OCCUPANCY,
 +                  (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
 +                      PBF_REG_P4_TQ_LINES_FREED_CNT}
 +      };
 +
 +      struct pbf_pN_buf_regs buf_regs[] = {
 +              {0, (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_INIT_CRD_Q0 :
 +                      PBF_REG_P0_INIT_CRD,
 +                  (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_CREDIT_Q0 :
 +                      PBF_REG_P0_CREDIT,
 +                  (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
 +                      PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
 +              {1, (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_INIT_CRD_Q1 :
 +                      PBF_REG_P1_INIT_CRD,
 +                  (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_CREDIT_Q1 :
 +                      PBF_REG_P1_CREDIT,
 +                  (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
 +                      PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
 +              {4, (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_INIT_CRD_LB_Q :
 +                      PBF_REG_P4_INIT_CRD,
 +                  (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_CREDIT_LB_Q :
 +                      PBF_REG_P4_CREDIT,
 +                  (CHIP_IS_E3B0(bp)) ?
 +                      PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
 +                      PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
 +      };
 +
 +      int i;
 +
 +      /* Verify the command queues are flushed P0, P1, P4 */
 +      for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
 +              bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
 +
 +
 +      /* Verify the transmission buffers are flushed P0, P1, P4 */
 +      for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
 +              bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
 +}
 +
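 +/* Helpers for composing the SDM operation-generator command used to request
 + * the FW final cleanup.
 + */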
 +#define OP_GEN_PARAM(param) \
 +      (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
 +
 +#define OP_GEN_TYPE(type) \
 +      (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
 +
 +#define OP_GEN_AGG_VECT(index) \
 +      (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
 +
 +
 +static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
 +                                       u32 poll_cnt)
 +{
 +      struct sdm_op_gen op_gen = {0};
 +
 +      u32 comp_addr = BAR_CSTRORM_INTMEM +
 +                      CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
 +      int ret = 0;
 +
 +      if (REG_RD(bp, comp_addr)) {
 +              BNX2X_ERR("Cleanup complete is not 0\n");
 +              return 1;
 +      }
 +
 +      op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
 +      op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
 +      op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
 +      op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
 +
 +      DP(BNX2X_MSG_SP, "FW Final cleanup\n");
 +      REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
 +
 +      if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
 +              BNX2X_ERR("FW final cleanup did not succeed\n");
 +              ret = 1;
 +      }
 +      /* Zero completion for next FLR */
 +      REG_WR(bp, comp_addr, 0);
 +
 +      return ret;
 +}
 +
 +static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
 +{
 +      int pos;
 +      u16 status;
 +
 +      pos = pci_pcie_cap(dev);
 +      if (!pos)
 +              return false;
 +
 +      pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
 +      return status & PCI_EXP_DEVSTA_TRPND;
 +}
 +
 +/* PF FLR specific routines */
 +static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
 +{
 +
 +      /* wait for CFC PF usage-counter to zero (includes all the VFs) */
 +      if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +                      CFC_REG_NUM_LCIDS_INSIDE_PF,
 +                      "CFC PF usage counter timed out",
 +                      poll_cnt))
 +              return 1;
 +
 +
 +      /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
 +      if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +                      DORQ_REG_PF_USAGE_CNT,
 +                      "DQ PF usage counter timed out",
 +                      poll_cnt))
 +              return 1;
 +
 +      /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
 +      if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +                      QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
 +                      "QM PF usage counter timed out",
 +                      poll_cnt))
 +              return 1;
 +
 +      /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
 +      if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +                      TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
 +                      "Timers VNIC usage counter timed out",
 +                      poll_cnt))
 +              return 1;
 +      if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +                      TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
 +                      "Timers NUM_SCANS usage counter timed out",
 +                      poll_cnt))
 +              return 1;
 +
 +      /* Wait for the DMAE PF usage counter to reach zero */
 +      if (bnx2x_flr_clnup_poll_hw_counter(bp,
 +                      dmae_reg_go_c[INIT_DMAE_C(bp)],
 +                      "DMAE command register timed out",
 +                      poll_cnt))
 +              return 1;
 +
 +      return 0;
 +}
 +
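 +/* Debug helper: dump the per-PF enable/status registers checked after FLR */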
 +static void bnx2x_hw_enable_status(struct bnx2x *bp)
 +{
 +      u32 val;
 +
 +      val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
 +      DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
 +
 +      val = REG_RD(bp, PBF_REG_DISABLE_PF);
 +      DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
 +
 +      val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
 +      DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
 +
 +      val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
 +      DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
 +
 +      val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
 +      DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
 +
 +      val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
 +      DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
 +
 +      val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
 +      DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
 +
 +      val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
 +      DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
 +         val);
 +}
 +
 +static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
 +{
 +      u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
 +
 +      DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
 +
 +      /* Re-enable PF target read access */
 +      REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 +
 +      /* Poll HW usage counters */
 +      if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
 +              return -EBUSY;
 +
 +      /* Zero the igu 'trailing edge' and 'leading edge' */
 +
 +      /* Send the FW cleanup command */
 +      if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
 +              return -EBUSY;
 +
 +      /* ATC cleanup */
 +
 +      /* Verify TX hw is flushed */
 +      bnx2x_tx_hw_flushed(bp, poll_cnt);
 +
 +      /* Wait 100ms (not adjusted according to platform) */
 +      msleep(100);
 +
 +      /* Verify no pending pci transactions */
 +      if (bnx2x_is_pcie_pending(bp->pdev))
 +              BNX2X_ERR("PCIE Transactions still pending\n");
 +
 +      /* Debug */
 +      bnx2x_hw_enable_status(bp);
 +
 +      /*
 +       * Master enable - Due to WB DMAE writes performed before this
 +       * register is re-initialized as part of the regular function init
 +       */
 +      REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
 +
 +      return 0;
 +}
 +
 +static void bnx2x_hc_int_enable(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
 +      u32 val = REG_RD(bp, addr);
 +      int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 +      int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
 +
 +      if (msix) {
 +              val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +                       HC_CONFIG_0_REG_INT_LINE_EN_0);
 +              val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 +                      HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +      } else if (msi) {
 +              val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
 +              val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +                      HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 +                      HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +      } else {
 +              val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +                      HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 +                      HC_CONFIG_0_REG_INT_LINE_EN_0 |
 +                      HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +
 +              if (!CHIP_IS_E1(bp)) {
 +                      DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
 +                         val, port, addr);
 +
 +                      REG_WR(bp, addr, val);
 +
 +                      val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
 +              }
 +      }
 +
 +      if (CHIP_IS_E1(bp))
 +              REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
 +
 +      DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
 +         val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 +
 +      REG_WR(bp, addr, val);
 +      /*
 +       * Ensure that HC_CONFIG is written before leading/trailing edge config
 +       */
 +      mmiowb();
 +      barrier();
 +
 +      if (!CHIP_IS_E1(bp)) {
 +              /* init leading/trailing edge */
 +              if (IS_MF(bp)) {
-               val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
++                      val = (0xee0f | (1 << (BP_VN(bp) + 4)));
 +                      if (bp->port.pmf)
 +                              /* enable nig and gpio3 attention */
 +                              val |= 0x1100;
 +              } else
 +                      val = 0xffff;
 +
 +              REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
 +              REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
 +      }
 +
 +      /* Make sure that interrupts are indeed enabled from here on */
 +      mmiowb();
 +}
 +
 +static void bnx2x_igu_int_enable(struct bnx2x *bp)
 +{
 +      u32 val;
 +      int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 +      int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
 +
 +      val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 +
 +      if (msix) {
 +              val &= ~(IGU_PF_CONF_INT_LINE_EN |
 +                       IGU_PF_CONF_SINGLE_ISR_EN);
 +              val |= (IGU_PF_CONF_FUNC_EN |
 +                      IGU_PF_CONF_MSI_MSIX_EN |
 +                      IGU_PF_CONF_ATTN_BIT_EN);
 +      } else if (msi) {
 +              val &= ~IGU_PF_CONF_INT_LINE_EN;
 +              val |= (IGU_PF_CONF_FUNC_EN |
 +                      IGU_PF_CONF_MSI_MSIX_EN |
 +                      IGU_PF_CONF_ATTN_BIT_EN |
 +                      IGU_PF_CONF_SINGLE_ISR_EN);
 +      } else {
 +              val &= ~IGU_PF_CONF_MSI_MSIX_EN;
 +              val |= (IGU_PF_CONF_FUNC_EN |
 +                      IGU_PF_CONF_INT_LINE_EN |
 +                      IGU_PF_CONF_ATTN_BIT_EN |
 +                      IGU_PF_CONF_SINGLE_ISR_EN);
 +      }
 +
 +      DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
 +         val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 +
 +      REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 +
 +      barrier();
 +
 +      /* init leading/trailing edge */
 +      if (IS_MF(bp)) {
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
++              val = (0xee0f | (1 << (BP_VN(bp) + 4)));
 +              if (bp->port.pmf)
 +                      /* enable nig and gpio3 attention */
 +                      val |= 0x1100;
 +      } else
 +              val = 0xffff;
 +
 +      REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
 +      REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
 +
 +      /* Make sure that interrupts are indeed enabled from here on */
 +      mmiowb();
 +}
 +
 +void bnx2x_int_enable(struct bnx2x *bp)
 +{
 +      if (bp->common.int_block == INT_BLOCK_HC)
 +              bnx2x_hc_int_enable(bp);
 +      else
 +              bnx2x_igu_int_enable(bp);
 +}
 +
 +static void bnx2x_hc_int_disable(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
 +      u32 val = REG_RD(bp, addr);
 +
 +      /*
 +       * in E1 we must use only PCI configuration space to disable
 +       * MSI/MSIX capability
 +       * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block
 +       */
 +      if (CHIP_IS_E1(bp)) {
 +              /*  Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
 +               *  use the mask register to prevent the HC from sending interrupts
 +               *  after we exit the function
 +               */
 +              REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
 +
 +              val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +                       HC_CONFIG_0_REG_INT_LINE_EN_0 |
 +                       HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +      } else
 +              val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +                       HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 +                       HC_CONFIG_0_REG_INT_LINE_EN_0 |
 +                       HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +
 +      DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
 +         val, port, addr);
 +
 +      /* flush all outstanding writes */
 +      mmiowb();
 +
 +      REG_WR(bp, addr, val);
 +      if (REG_RD(bp, addr) != val)
 +              BNX2X_ERR("BUG! proper val not read from HC!\n");
 +}
 +
 +static void bnx2x_igu_int_disable(struct bnx2x *bp)
 +{
 +      u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 +
 +      val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
 +               IGU_PF_CONF_INT_LINE_EN |
 +               IGU_PF_CONF_ATTN_BIT_EN);
 +
 +      DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
 +
 +      /* flush all outstanding writes */
 +      mmiowb();
 +
 +      REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 +      if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
 +              BNX2X_ERR("BUG! proper val not read from IGU!\n");
 +}
 +
 +void bnx2x_int_disable(struct bnx2x *bp)
 +{
 +      if (bp->common.int_block == INT_BLOCK_HC)
 +              bnx2x_hc_int_disable(bp);
 +      else
 +              bnx2x_igu_int_disable(bp);
 +}
 +
 +void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 +{
 +      int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 +      int i, offset;
 +
 +      if (disable_hw)
 +              /* prevent the HW from sending interrupts */
 +              bnx2x_int_disable(bp);
 +
 +      /* make sure all ISRs are done */
 +      if (msix) {
 +              synchronize_irq(bp->msix_table[0].vector);
 +              offset = 1;
 +#ifdef BCM_CNIC
 +              offset++;
 +#endif
 +              for_each_eth_queue(bp, i)
 +                      synchronize_irq(bp->msix_table[offset++].vector);
 +      } else
 +              synchronize_irq(bp->pdev->irq);
 +
 +      /* make sure sp_task is not running */
 +      cancel_delayed_work(&bp->sp_task);
 +      cancel_delayed_work(&bp->period_task);
 +      flush_workqueue(bnx2x_wq);
 +}
 +
 +/* fast path */
 +
 +/*
 + * General service functions
 + */
 +
 +/* Return true if succeeded to acquire the lock */
 +static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
 +{
 +      u32 lock_status;
 +      u32 resource_bit = (1 << resource);
 +      int func = BP_FUNC(bp);
 +      u32 hw_lock_control_reg;
 +
 +      DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
 +
 +      /* Validating that the resource is within range */
 +      if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
 +              DP(NETIF_MSG_HW,
 +                 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
 +                 resource, HW_LOCK_MAX_RESOURCE_VALUE);
 +              return false;
 +      }
 +
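 +      /* Each function owns one lock-control register: functions 0-5 are
 +       * offset from MISC_REG_DRIVER_CONTROL_1, functions 6-7 from
 +       * MISC_REG_DRIVER_CONTROL_7.
 +       */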
 +      if (func <= 5)
 +              hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
 +      else
 +              hw_lock_control_reg =
 +                              (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
 +
 +      /* Try to acquire the lock */
 +      REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 +      lock_status = REG_RD(bp, hw_lock_control_reg);
 +      if (lock_status & resource_bit)
 +              return true;
 +
 +      DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
 +      return false;
 +}
 +
 +/**
 + * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 + *
 + * @bp:       driver handle
 + *
 + * Returns the recovery leader resource id according to the engine this function
 + * belongs to. Currently only 2 engines are supported.
 + */
 +static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
 +{
 +      if (BP_PATH(bp))
 +              return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
 +      else
 +              return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
 +}
 +
 +/**
 + * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 + *
 + * @bp: driver handle
 + *
 + * Tries to acquire a leader lock for the current engine.
 + */
 +static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
 +{
 +      return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
 +}
 +
 +#ifdef BCM_CNIC
 +static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
 +#endif
 +
 +void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 +      int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 +      enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
 +      struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
 +
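 +      /* Translate the ramrod completion into the matching queue
 +       * state-machine command and let the queue object complete it.
 +       */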
 +      DP(BNX2X_MSG_SP,
 +         "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
 +         fp->index, cid, command, bp->state,
 +         rr_cqe->ramrod_cqe.ramrod_type);
 +
 +      switch (command) {
 +      case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
 +              DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
 +              drv_cmd = BNX2X_Q_CMD_UPDATE;
 +              break;
 +
 +      case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
 +              DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
 +              drv_cmd = BNX2X_Q_CMD_SETUP;
 +              break;
 +
 +      case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
 +              DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
 +              drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
 +              break;
 +
 +      case (RAMROD_CMD_ID_ETH_HALT):
 +              DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
 +              drv_cmd = BNX2X_Q_CMD_HALT;
 +              break;
 +
 +      case (RAMROD_CMD_ID_ETH_TERMINATE):
 +              DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
 +              drv_cmd = BNX2X_Q_CMD_TERMINATE;
 +              break;
 +
 +      case (RAMROD_CMD_ID_ETH_EMPTY):
 +              DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
 +              drv_cmd = BNX2X_Q_CMD_EMPTY;
 +              break;
 +
 +      default:
 +              BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
 +                        command, fp->index);
 +              return;
 +      }
 +
 +      if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
 +          q_obj->complete_cmd(bp, q_obj, drv_cmd))
 +              /* q_obj->complete_cmd() failure means that this was
 +               * an unexpected completion.
 +               *
 +               * In this case we don't want to increase the bp->spq_left
 +               * because apparently we haven't sent this command in the
 +               * first place.
 +               */
 +#ifdef BNX2X_STOP_ON_ERROR
 +              bnx2x_panic();
 +#else
 +              return;
 +#endif
 +
 +      smp_mb__before_atomic_inc();
 +      atomic_inc(&bp->cq_spq_left);
 +      /* push the change in bp->cq_spq_left towards memory */
 +      smp_mb__after_atomic_inc();
 +
 +      DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 +
 +      return;
 +}
 +
 +void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +                      u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
 +{
 +      u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
 +
 +      bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
 +                               start);
 +}
 +
 +irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 +{
 +      struct bnx2x *bp = netdev_priv(dev_instance);
 +      u16 status = bnx2x_ack_int(bp);
 +      u16 mask;
 +      int i;
 +      u8 cos;
 +
 +      /* Return here if interrupt is shared and it's not for us */
 +      if (unlikely(status == 0)) {
 +              DP(NETIF_MSG_INTR, "not our interrupt!\n");
 +              return IRQ_NONE;
 +      }
 +      DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return IRQ_HANDLED;
 +#endif
 +
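 +      /* Bit 0 of the status is the default (slowpath) SB; the remaining
 +       * bits map to the CNIC SB (when present) and the fastpath queue SBs.
 +       */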
 +      for_each_eth_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +              mask = 0x2 << (fp->index + CNIC_PRESENT);
 +              if (status & mask) {
 +                      /* Handle Rx or Tx according to SB id */
 +                      prefetch(fp->rx_cons_sb);
 +                      for_each_cos_in_tx_queue(fp, cos)
 +                              prefetch(fp->txdata[cos].tx_cons_sb);
 +                      prefetch(&fp->sb_running_index[SM_RX_ID]);
 +                      napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 +                      status &= ~mask;
 +              }
 +      }
 +
 +#ifdef BCM_CNIC
 +      mask = 0x2;
 +      if (status & (mask | 0x1)) {
 +              struct cnic_ops *c_ops = NULL;
 +
 +              if (likely(bp->state == BNX2X_STATE_OPEN)) {
 +                      rcu_read_lock();
 +                      c_ops = rcu_dereference(bp->cnic_ops);
 +                      if (c_ops)
 +                              c_ops->cnic_handler(bp->cnic_data, NULL);
 +                      rcu_read_unlock();
 +              }
 +
 +              status &= ~mask;
 +      }
 +#endif
 +
 +      if (unlikely(status & 0x1)) {
 +              queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 +
 +              status &= ~0x1;
 +              if (!status)
 +                      return IRQ_HANDLED;
 +      }
 +
 +      if (unlikely(status))
 +              DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
 +                 status);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +/* Link */
 +
 +/*
 + * General service functions
 + */
 +
 +int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 +{
 +      u32 lock_status;
 +      u32 resource_bit = (1 << resource);
 +      int func = BP_FUNC(bp);
 +      u32 hw_lock_control_reg;
 +      int cnt;
 +
 +      /* Validating that the resource is within range */
 +      if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
 +              DP(NETIF_MSG_HW,
 +                 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
 +                 resource, HW_LOCK_MAX_RESOURCE_VALUE);
 +              return -EINVAL;
 +      }
 +
 +      if (func <= 5) {
 +              hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
 +      } else {
 +              hw_lock_control_reg =
 +                              (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
 +      }
 +
 +      /* Validating that the resource is not already taken */
 +      lock_status = REG_RD(bp, hw_lock_control_reg);
 +      if (lock_status & resource_bit) {
 +              DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
 +                 lock_status, resource_bit);
 +              return -EEXIST;
 +      }
 +
 +      /* Try for 5 seconds, polling every 5ms */
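 +      /* The lock is requested by writing the resource bit to the SET
 +       * register (control reg + 4) and is owned once the bit reads back
 +       * from the control register.
 +       */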
 +      for (cnt = 0; cnt < 1000; cnt++) {
 +              /* Try to acquire the lock */
 +              REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 +              lock_status = REG_RD(bp, hw_lock_control_reg);
 +              if (lock_status & resource_bit)
 +                      return 0;
 +
 +              msleep(5);
 +      }
 +      DP(NETIF_MSG_HW, "Timeout\n");
 +      return -EAGAIN;
 +}
 +
 +int bnx2x_release_leader_lock(struct bnx2x *bp)
 +{
 +      return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
 +}
 +
 +int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 +{
 +      u32 lock_status;
 +      u32 resource_bit = (1 << resource);
 +      int func = BP_FUNC(bp);
 +      u32 hw_lock_control_reg;
 +
 +      DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
 +
 +      /* Validating that the resource is within range */
 +      if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
 +              DP(NETIF_MSG_HW,
 +                 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
 +                 resource, HW_LOCK_MAX_RESOURCE_VALUE);
 +              return -EINVAL;
 +      }
 +
 +      if (func <= 5) {
 +              hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
 +      } else {
 +              hw_lock_control_reg =
 +                              (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
 +      }
 +
 +      /* Validating that the resource is currently taken */
 +      lock_status = REG_RD(bp, hw_lock_control_reg);
 +      if (!(lock_status & resource_bit)) {
 +              DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
 +                 lock_status, resource_bit);
 +              return -EFAULT;
 +      }
 +
 +      REG_WR(bp, hw_lock_control_reg, resource_bit);
 +      return 0;
 +}
 +
 +
 +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
 +{
 +      /* The GPIO should be swapped if swap register is set and active */
 +      int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
 +                       REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 +      int gpio_shift = gpio_num +
 +                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 +      u32 gpio_mask = (1 << gpio_shift);
 +      u32 gpio_reg;
 +      int value;
 +
 +      if (gpio_num > MISC_REGISTERS_GPIO_3) {
 +              BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
 +              return -EINVAL;
 +      }
 +
 +      /* read GPIO value */
 +      gpio_reg = REG_RD(bp, MISC_REG_GPIO);
 +
 +      /* get the requested pin value */
 +      if ((gpio_reg & gpio_mask) == gpio_mask)
 +              value = 1;
 +      else
 +              value = 0;
 +
 +      DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
 +
 +      return value;
 +}
 +
 +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 +{
 +      /* The GPIO should be swapped if swap register is set and active */
 +      int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
 +                       REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 +      int gpio_shift = gpio_num +
 +                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 +      u32 gpio_mask = (1 << gpio_shift);
 +      u32 gpio_reg;
 +
 +      if (gpio_num > MISC_REGISTERS_GPIO_3) {
 +              BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
 +              return -EINVAL;
 +      }
 +
 +      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +      /* read GPIO and mask except the float bits */
 +      gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
 +
 +      switch (mode) {
 +      case MISC_REGISTERS_GPIO_OUTPUT_LOW:
 +              DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
 +                 gpio_num, gpio_shift);
 +              /* clear FLOAT and set CLR */
 +              gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
 +              gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
 +              break;
 +
 +      case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
 +              DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
 +                 gpio_num, gpio_shift);
 +              /* clear FLOAT and set SET */
 +              gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
 +              gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
 +              break;
 +
 +      case MISC_REGISTERS_GPIO_INPUT_HI_Z:
 +              DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
 +                 gpio_num, gpio_shift);
 +              /* set FLOAT */
 +              gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
 +              break;
 +
 +      default:
 +              break;
 +      }
 +
 +      REG_WR(bp, MISC_REG_GPIO, gpio_reg);
 +      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +
 +      return 0;
 +}
 +
 +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
 +{
 +      u32 gpio_reg = 0;
 +      int rc = 0;
 +
 +      /* Any port swapping should be handled by caller. */
 +
 +      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +      /* read GPIO and mask except the float bits */
 +      gpio_reg = REG_RD(bp, MISC_REG_GPIO);
 +      gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
 +      gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
 +      gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
 +
 +      switch (mode) {
 +      case MISC_REGISTERS_GPIO_OUTPUT_LOW:
 +              DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
 +              /* set CLR */
 +              gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
 +              break;
 +
 +      case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
 +              DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
 +              /* set SET */
 +              gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
 +              break;
 +
 +      case MISC_REGISTERS_GPIO_INPUT_HI_Z:
 +              DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
 +              /* set FLOAT */
 +              gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
 +              break;
 +
 +      default:
 +              BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
 +              rc = -EINVAL;
 +              break;
 +      }
 +
 +      if (rc == 0)
 +              REG_WR(bp, MISC_REG_GPIO, gpio_reg);
 +
 +      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +
 +      return rc;
 +}
 +
 +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 +{
 +      /* The GPIO should be swapped if swap register is set and active */
 +      int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
 +                       REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 +      int gpio_shift = gpio_num +
 +                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 +      u32 gpio_mask = (1 << gpio_shift);
 +      u32 gpio_reg;
 +
 +      if (gpio_num > MISC_REGISTERS_GPIO_3) {
 +              BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
 +              return -EINVAL;
 +      }
 +
 +      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +      /* read GPIO int */
 +      gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
 +
 +      switch (mode) {
 +      case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
 +              DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
 +                                 "output low\n", gpio_num, gpio_shift);
 +              /* clear SET and set CLR */
 +              gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
 +              gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
 +              break;
 +
 +      case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
 +              DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
 +                                 "output high\n", gpio_num, gpio_shift);
 +              /* clear CLR and set SET */
 +              gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
 +              gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
 +              break;
 +
 +      default:
 +              break;
 +      }
 +
 +      REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
 +      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 +{
 +      u32 spio_mask = (1 << spio_num);
 +      u32 spio_reg;
 +
 +      if ((spio_num < MISC_REGISTERS_SPIO_4) ||
 +          (spio_num > MISC_REGISTERS_SPIO_7)) {
 +              BNX2X_ERR("Invalid SPIO %d\n", spio_num);
 +              return -EINVAL;
 +      }
 +
 +      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 +      /* read SPIO and mask except the float bits */
 +      spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
 +
 +      switch (mode) {
 +      case MISC_REGISTERS_SPIO_OUTPUT_LOW:
 +              DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
 +              /* clear FLOAT and set CLR */
 +              spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 +              spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
 +              break;
 +
 +      case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
 +              DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
 +              /* clear FLOAT and set SET */
 +              spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 +              spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
 +              break;
 +
 +      case MISC_REGISTERS_SPIO_INPUT_HI_Z:
 +              DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
 +              /* set FLOAT */
 +              spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 +              break;
 +
 +      default:
 +              break;
 +      }
 +
 +      REG_WR(bp, MISC_REG_SPIO, spio_reg);
 +      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 +
 +      return 0;
 +}
 +
 +void bnx2x_calc_fc_adv(struct bnx2x *bp)
 +{
 +      u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
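 +      /* Reflect the IEEE pause advertisement bits in the ethtool
 +       * advertising mask of the active link configuration.
 +       */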
 +      switch (bp->link_vars.ieee_fc &
 +              MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
 +      case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
 +              bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
 +                                                 ADVERTISED_Pause);
 +              break;
 +
 +      case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
 +              bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
 +                                                ADVERTISED_Pause);
 +              break;
 +
 +      case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
 +              bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
 +              break;
 +
 +      default:
 +              bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
 +                                                 ADVERTISED_Pause);
 +              break;
 +      }
 +}
 +
 +u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 +{
 +      if (!BP_NOMCP(bp)) {
 +              u8 rc;
 +              int cfx_idx = bnx2x_get_link_cfg_idx(bp);
 +              u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
 +              /*
 +               * Initialize link parameters structure variables
 +               * It is recommended to turn off RX FC for jumbo frames
 +               * for better performance
 +               */
 +              if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
 +                      bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
 +              else
 +                      bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
 +
 +              bnx2x_acquire_phy_lock(bp);
 +
 +              if (load_mode == LOAD_DIAG) {
 +                      struct link_params *lp = &bp->link_params;
 +                      lp->loopback_mode = LOOPBACK_XGXS;
 +                      /* do PHY loopback at 10G speed, if possible */
 +                      if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
 +                              if (lp->speed_cap_mask[cfx_idx] &
 +                                  PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 +                                      lp->req_line_speed[cfx_idx] =
 +                                      SPEED_10000;
 +                              else
 +                                      lp->req_line_speed[cfx_idx] =
 +                                      SPEED_1000;
 +                      }
 +              }
 +
 +              rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 +
 +              bnx2x_release_phy_lock(bp);
 +
 +              bnx2x_calc_fc_adv(bp);
 +
 +              if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
 +                      bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 +                      bnx2x_link_report(bp);
 +              } else
 +                      queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 +              bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
 +              return rc;
 +      }
 +      BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 +      return -EINVAL;
 +}
 +
 +void bnx2x_link_set(struct bnx2x *bp)
 +{
 +      if (!BP_NOMCP(bp)) {
 +              bnx2x_acquire_phy_lock(bp);
 +              bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
 +              bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 +              bnx2x_release_phy_lock(bp);
 +
 +              bnx2x_calc_fc_adv(bp);
 +      } else
 +              BNX2X_ERR("Bootcode is missing - can not set link\n");
 +}
 +
 +static void bnx2x__link_reset(struct bnx2x *bp)
 +{
 +      if (!BP_NOMCP(bp)) {
 +              bnx2x_acquire_phy_lock(bp);
 +              bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
 +              bnx2x_release_phy_lock(bp);
 +      } else
 +              BNX2X_ERR("Bootcode is missing - can not reset link\n");
 +}
 +
 +u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
 +{
 +      u8 rc = 0;
 +
 +      if (!BP_NOMCP(bp)) {
 +              bnx2x_acquire_phy_lock(bp);
 +              rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
 +                                   is_serdes);
 +              bnx2x_release_phy_lock(bp);
 +      } else
 +              BNX2X_ERR("Bootcode is missing - can not test link\n");
 +
 +      return rc;
 +}
 +
 +static void bnx2x_init_port_minmax(struct bnx2x *bp)
 +{
 +      u32 r_param = bp->link_vars.line_speed / 8;
 +      u32 fair_periodic_timeout_usec;
 +      u32 t_fair;
 +
 +      memset(&(bp->cmng.rs_vars), 0,
 +             sizeof(struct rate_shaping_vars_per_port));
 +      memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
 +
 +      /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
 +      bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
 +
 +      /* this is the threshold below which no timer arming will occur.
 +         The 1.25 coefficient makes the threshold a little bigger than
 +         the real time, to compensate for timer inaccuracy */
 +      bp->cmng.rs_vars.rs_threshold =
 +                              (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
 +
 +      /* resolution of fairness timer */
 +      fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
 +      /* for 10G it is 1000 usec, for 1G it is 10000 usec */
 +      t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
 +
 +      /* this is the threshold below which we won't arm the timer anymore */
 +      bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
 +
 +      /* we multiply by 1e3/8 to get bytes/msec.
 +         We don't want the credits to exceed a credit of
 +         t_fair*FAIR_MEM (the algorithm resolution) */
 +      bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
 +      /* since each tick is 4 usec */
 +      bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
 +}
 +
 +/* Calculates the sum of vn_min_rates.
 +   It's needed for further normalizing of the min_rates.
 +   Returns:
 +     sum of vn_min_rates.
 +       or
 +     0 - if all the min_rates are 0.
 +     In the latter case the fairness algorithm should be deactivated.
 +     If not all min_rates are zero then those that are zero will be set to 1.
 + */
 +static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 +{
 +      int all_zero = 1;
 +      int vn;
 +
 +      bp->vn_weight_sum = 0;
-       int func = 2*vn + BP_PORT(bp);
++      for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 +              u32 vn_cfg = bp->mf_config[vn];
 +              u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 +                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 +
 +              /* Skip hidden vns */
 +              if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
 +                      continue;
 +
 +              /* If min rate is zero - set it to 1 */
 +              if (!vn_min_rate)
 +                      vn_min_rate = DEF_MIN_RATE;
 +              else
 +                      all_zero = 0;
 +
 +              bp->vn_weight_sum += vn_min_rate;
 +      }
 +
 +      /* if ETS or all min rates are zeros - disable fairness */
 +      if (BNX2X_IS_ETS_ENABLED(bp)) {
 +              bp->cmng.flags.cmng_enables &=
 +                                      ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 +              DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
 +      } else if (all_zero) {
 +              bp->cmng.flags.cmng_enables &=
 +                                      ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 +              DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
 +                 "  fairness will be disabled\n");
 +      } else
 +              bp->cmng.flags.cmng_enables |=
 +                                      CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 +}
 +
++/* returns func by VN for current port */
++static inline int func_by_vn(struct bnx2x *bp, int vn)
++{
++      return 2 * vn + BP_PORT(bp);
++}
++
 +static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 +{
 +      struct rate_shaping_vars_per_vn m_rs_vn;
 +      struct fairness_vars_per_vn m_fair_vn;
 +      u32 vn_cfg = bp->mf_config[vn];
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
++      int func = func_by_vn(bp, vn);
 +      u16 vn_min_rate, vn_max_rate;
 +      int i;
 +
 +      /* If function is hidden - set min and max to zeroes */
 +      if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
 +              vn_min_rate = 0;
 +              vn_max_rate = 0;
 +
 +      } else {
 +              u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 +
 +              vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 +                              FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 +              /* If fairness is enabled (not all min rates are zeroes) and
 +                 if current min rate is zero - set it to 1.
 +                 This is a requirement of the algorithm. */
 +              if (bp->vn_weight_sum && (vn_min_rate == 0))
 +                      vn_min_rate = DEF_MIN_RATE;
 +
 +              if (IS_MF_SI(bp))
 +                      /* maxCfg in percent of link speed */
 +                      vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
 +              else
 +                      /* maxCfg is absolute in 100Mb units */
 +                      vn_max_rate = maxCfg * 100;
 +      }
 +
 +      DP(NETIF_MSG_IFUP,
 +         "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
 +         func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
 +
 +      memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
 +      memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
 +
 +      /* global vn counter - maximal Mbps for this vn */
 +      m_rs_vn.vn_counter.rate = vn_max_rate;
 +
 +      /* quota - number of bytes transmitted in this period */
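 +      /* (rate is in Mbps, i.e. bits/usec, so rate * period_usec / 8 = bytes) */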
 +      m_rs_vn.vn_counter.quota =
 +                              (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
 +
 +      if (bp->vn_weight_sum) {
 +              /* credit for each period of the fairness algorithm:
 +                 number of bytes in T_FAIR (the VNs share the port rate).
 +                 vn_weight_sum should not be larger than 10000, thus
 +                 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
 +                 than zero */
 +              m_fair_vn.vn_credit_delta =
 +                      max_t(u32, (vn_min_rate * (T_FAIR_COEF /
 +                                                 (8 * bp->vn_weight_sum))),
 +                            (bp->cmng.fair_vars.fair_threshold +
 +                                                      MIN_ABOVE_THRESH));
 +              DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
 +                 m_fair_vn.vn_credit_delta);
 +      }
 +
 +      /* Store it to internal memory */
 +      for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
 +              REG_WR(bp, BAR_XSTRORM_INTMEM +
 +                     XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
 +                     ((u32 *)(&m_rs_vn))[i]);
 +
 +      for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
 +              REG_WR(bp, BAR_XSTRORM_INTMEM +
 +                     XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
 +                     ((u32 *)(&m_fair_vn))[i]);
 +}
 +
 +static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
 +{
 +      if (CHIP_REV_IS_SLOW(bp))
 +              return CMNG_FNS_NONE;
 +      if (IS_MF(bp))
 +              return CMNG_FNS_MINMAX;
 +
 +      return CMNG_FNS_NONE;
 +}
 +
 +void bnx2x_read_mf_cfg(struct bnx2x *bp)
 +{
 +      int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
 +
 +      if (BP_NOMCP(bp))
 +              return; /* what should be the default value in this case */
 +
 +      /* For 2 port configuration the absolute function number formula
 +       * is:
 +       *      abs_func = 2 * vn + BP_PORT + BP_PATH
 +       *
 +       *      and there are 4 functions per port
 +       *
 +       * For 4 port configuration it is
 +       *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
 +       *
 +       *      and there are 2 functions per port
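 +       *
 +       * Both cases collapse to abs_func = n * (2 * vn + BP_PORT) + BP_PATH,
 +       * where n is 2 for the 4-port configuration and 1 otherwise.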
 +       */
-                       for (vn = VN_0; vn < E1HVN_MAX; vn++)
++      for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 +              int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
 +
 +              if (func >= E1H_FUNC_MAX)
 +                      break;
 +
 +              bp->mf_config[vn] =
 +                      MF_CFG_RD(bp, func_mf_config[func].config);
 +      }
 +}
 +
 +static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 +{
 +
 +      if (cmng_type == CMNG_FNS_MINMAX) {
 +              int vn;
 +
 +              /* clear cmng_enables */
 +              bp->cmng.flags.cmng_enables = 0;
 +
 +              /* read mf conf from shmem */
 +              if (read_cfg)
 +                      bnx2x_read_mf_cfg(bp);
 +
 +              /* Init rate shaping and fairness contexts */
 +              bnx2x_init_port_minmax(bp);
 +
 +              /* calculate vn_weight_sum and enable fairness if not 0 */
 +              bnx2x_calc_vn_weight_sum(bp);
 +
 +              /* calculate and set min-max rate for each vn */
 +              if (bp->port.pmf)
-       int port = BP_PORT(bp);
++                      for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
 +                              bnx2x_init_vn_minmax(bp, vn);
 +
 +              /* always enable rate shaping and fairness */
 +              bp->cmng.flags.cmng_enables |=
 +                                      CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
 +              if (!bp->vn_weight_sum)
 +                      DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
 +                                 "  fairness will be disabled\n");
 +              return;
 +      }
 +
 +      /* rate shaping and fairness are disabled */
 +      DP(NETIF_MSG_IFUP,
 +         "rate shaping and fairness are disabled\n");
 +}
 +
 +static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
 +{
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-               if (vn == BP_E1HVN(bp))
 +      int func;
 +      int vn;
 +
 +      /* Set the attention towards other drivers on the same port */
-               func = ((vn << 1) | port);
++      for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
++              if (vn == BP_VN(bp))
 +                      continue;
 +
-       val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
++              func = func_by_vn(bp, vn);
 +              REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
 +                     (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
 +      }
 +}
 +
 +/* This function is called upon link interrupt */
 +static void bnx2x_link_attn(struct bnx2x *bp)
 +{
 +      /* Make sure that we are synced with the current statistics */
 +      bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +
 +      bnx2x_link_update(&bp->link_params, &bp->link_vars);
 +
 +      if (bp->link_vars.link_up) {
 +
 +              /* dropless flow control */
 +              if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
 +                      int port = BP_PORT(bp);
 +                      u32 pause_enabled = 0;
 +
 +                      if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
 +                              pause_enabled = 1;
 +
 +                      REG_WR(bp, BAR_USTRORM_INTMEM +
 +                             USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
 +                             pause_enabled);
 +              }
 +
 +              if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
 +                      struct host_port_stats *pstats;
 +
 +                      pstats = bnx2x_sp(bp, port_stats);
 +                      /* reset old mac stats */
 +                      memset(&(pstats->mac_stx[0]), 0,
 +                             sizeof(struct mac_stx));
 +              }
 +              if (bp->state == BNX2X_STATE_OPEN)
 +                      bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 +      }
 +
 +      if (bp->link_vars.link_up && bp->link_vars.line_speed) {
 +              int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
 +
 +              if (cmng_fns != CMNG_FNS_NONE) {
 +                      bnx2x_cmng_fns_init(bp, false, cmng_fns);
 +                      storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 +              } else
 +                      /* rate shaping and fairness are disabled */
 +                      DP(NETIF_MSG_IFUP,
 +                         "single function mode without fairness\n");
 +      }
 +
 +      __bnx2x_link_report(bp);
 +
 +      if (IS_MF(bp))
 +              bnx2x_link_sync_notify(bp);
 +}
 +
 +void bnx2x__link_status_update(struct bnx2x *bp)
 +{
 +      if (bp->state != BNX2X_STATE_OPEN)
 +              return;
 +
 +      bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 +
 +      if (bp->link_vars.link_up)
 +              bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 +      else
 +              bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +
 +      /* indicate link status */
 +      bnx2x_link_report(bp);
 +}
 +
 +static void bnx2x_pmf_update(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      u32 val;
 +
 +      bp->port.pmf = 1;
 +      DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 +
 +      /*
 +       * We need the mb() to ensure the ordering between the writing to
 +       * bp->port.pmf here and reading it from the bnx2x_periodic_task().
 +       */
 +      smp_mb();
 +
 +      /* queue a periodic task */
 +      queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 +
 +      bnx2x_dcbx_pmf_update(bp);
 +
 +      /* enable nig attention */
-               pause->sge_th_hi = 250;
-               pause->sge_th_lo = 150;
++      val = (0xff0f | (1 << (BP_VN(bp) + 4)));
 +      if (bp->common.int_block == INT_BLOCK_HC) {
 +              REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
 +              REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
 +      } else if (!CHIP_IS_E1x(bp)) {
 +              REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
 +              REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
 +      }
 +
 +      bnx2x_stats_handle(bp, STATS_EVENT_PMF);
 +}
 +
 +/* end of Link */
 +
 +/* slow path */
 +
 +/*
 + * General service functions
 + */
 +
 +/* send the MCP a request, block until there is a reply */
 +u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
 +{
 +      int mb_idx = BP_FW_MB_IDX(bp);
 +      u32 seq;
 +      u32 rc = 0;
 +      u32 cnt = 1;
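 +      /* mailbox poll interval: 10ms normally, 100ms on slow chip revisions */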
 +      u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
 +
 +      mutex_lock(&bp->fw_mb_mutex);
 +      seq = ++bp->fw_seq;
 +      SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
 +      SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
 +
 +      DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
 +                      (command | seq), param);
 +
 +      do {
 +              /* let the FW do its magic ... */
 +              msleep(delay);
 +
 +              rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
 +
 +              /* Give the FW up to 5 seconds (500*10ms) */
 +      } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
 +
 +      DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
 +         cnt*delay, rc, seq);
 +
 +      /* is this a reply to our command? */
 +      if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
 +              rc &= FW_MSG_CODE_MASK;
 +      else {
 +              /* FW BUG! */
 +              BNX2X_ERR("FW failed to respond!\n");
 +              bnx2x_fw_dump(bp);
 +              rc = 0;
 +      }
 +      mutex_unlock(&bp->fw_mb_mutex);
 +
 +      return rc;
 +}
 +
 +static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
 +{
 +#ifdef BCM_CNIC
 +      /* Statistics are not supported for CNIC Clients at the moment */
 +      if (IS_FCOE_FP(fp))
 +              return false;
 +#endif
 +      return true;
 +}
 +
 +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 +{
 +      if (CHIP_IS_E1x(bp)) {
 +              struct tstorm_eth_function_common_config tcfg = {0};
 +
 +              storm_memset_func_cfg(bp, &tcfg, p->func_id);
 +      }
 +
 +      /* Enable the function in the FW */
 +      storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
 +      storm_memset_func_en(bp, p->func_id, 1);
 +
 +      /* spq */
 +      if (p->func_flgs & FUNC_FLG_SPQ) {
 +              storm_memset_spq_addr(bp, p->spq_map, p->func_id);
 +              REG_WR(bp, XSEM_REG_FAST_MEMORY +
 +                     XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
 +      }
 +}
 +
 +/**
 + * bnx2x_get_common_flags - Return common flags
 + *
 + * @bp:               device handle
 + * @fp:               queue handle
 + * @zero_stats:       TRUE if statistics zeroing is needed
 + *
 + * Return the flags that are common to the Tx-only and the regular connections.
 + */
 +static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
 +                                                 struct bnx2x_fastpath *fp,
 +                                                 bool zero_stats)
 +{
 +      unsigned long flags = 0;
 +
 +      /* PF driver will always initialize the Queue to an ACTIVE state */
 +      __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
 +
 +      /* tx only connections collect statistics (on the same index as the
 +       *  parent connection). The statistics are zeroed when the parent
 +       *  connection is initialized.
 +       */
 +      if (stat_counter_valid(bp, fp)) {
 +              __set_bit(BNX2X_Q_FLG_STATS, &flags);
 +              if (zero_stats)
 +                      __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
 +      }
 +
 +      return flags;
 +}
 +
 +static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
 +                                            struct bnx2x_fastpath *fp,
 +                                            bool leading)
 +{
 +      unsigned long flags = 0;
 +
 +      /* calculate other queue flags */
 +      if (IS_MF_SD(bp))
 +              __set_bit(BNX2X_Q_FLG_OV, &flags);
 +
 +      if (IS_FCOE_FP(fp))
 +              __set_bit(BNX2X_Q_FLG_FCOE, &flags);
 +
 +      if (!fp->disable_tpa) {
 +              __set_bit(BNX2X_Q_FLG_TPA, &flags);
 +              __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
 +      }
 +
 +      if (leading) {
 +              __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
 +              __set_bit(BNX2X_Q_FLG_MCAST, &flags);
 +      }
 +
 +      /* Always set HW VLAN stripping */
 +      __set_bit(BNX2X_Q_FLG_VLAN, &flags);
 +
 +
 +      return flags | bnx2x_get_common_flags(bp, fp, true);
 +}
 +
 +static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
 +      struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
 +      u8 cos)
 +{
 +      gen_init->stat_id = bnx2x_stats_id(fp);
 +      gen_init->spcl_id = fp->cl_id;
 +
 +      /* Always use mini-jumbo MTU for FCoE L2 ring */
 +      if (IS_FCOE_FP(fp))
 +              gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
 +      else
 +              gen_init->mtu = bp->dev->mtu;
 +
 +      gen_init->cos = cos;
 +}
 +
 +static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 +      struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
 +      struct bnx2x_rxq_setup_params *rxq_init)
 +{
 +      u8 max_sge = 0;
 +      u16 sge_sz = 0;
 +      u16 tpa_agg_size = 0;
 +
 +      if (!fp->disable_tpa) {
-               pause->bd_th_hi = 350;
-               pause->bd_th_lo = 250;
-               pause->rcq_th_hi = 350;
-               pause->rcq_th_lo = 250;
++              pause->sge_th_lo = SGE_TH_LO(bp);
++              pause->sge_th_hi = SGE_TH_HI(bp);
++
++              /* validate SGE ring has enough to cross high threshold */
++              WARN_ON(bp->dropless_fc &&
++                              pause->sge_th_hi + FW_PREFETCH_CNT >
++                              MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
++
 +              tpa_agg_size = min_t(u32,
 +                      (min_t(u32, 8, MAX_SKB_FRAGS) *
 +                      SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
 +              max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
 +                      SGE_PAGE_SHIFT;
 +              max_sge = ((max_sge + PAGES_PER_SGE - 1) &
 +                        (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
 +              sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
 +                                  0xffff);
 +      }
 +
 +      /* pause - not for e1 */
 +      if (!CHIP_IS_E1(bp)) {
-       rxq_init->max_tpa_queues =
-               (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-               ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
++              pause->bd_th_lo = BD_TH_LO(bp);
++              pause->bd_th_hi = BD_TH_HI(bp);
++
++              pause->rcq_th_lo = RCQ_TH_LO(bp);
++              pause->rcq_th_hi = RCQ_TH_HI(bp);
++              /*
++               * validate that rings have enough entries to cross
++               * high thresholds
++               */
++              WARN_ON(bp->dropless_fc &&
++                              pause->bd_th_hi + FW_PREFETCH_CNT >
++                              bp->rx_ring_size);
++              WARN_ON(bp->dropless_fc &&
++                              pause->rcq_th_hi + FW_PREFETCH_CNT >
++                              NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
 +
 +              pause->pri_map = 1;
 +      }
 +
 +      /* rxq setup */
 +      rxq_init->dscr_map = fp->rx_desc_mapping;
 +      rxq_init->sge_map = fp->rx_sge_mapping;
 +      rxq_init->rcq_map = fp->rx_comp_mapping;
 +      rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
 +
 +      /* This should be the maximum number of data bytes that may be
 +       * placed on the BD (not including padding).
 +       */
 +      rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
 +              IP_HEADER_ALIGNMENT_PADDING;
 +
 +      rxq_init->cl_qzone_id = fp->cl_qzone_id;
 +      rxq_init->tpa_agg_sz = tpa_agg_size;
 +      rxq_init->sge_buf_sz = sge_sz;
 +      rxq_init->max_sges_pkt = max_sge;
 +      rxq_init->rss_engine_id = BP_FUNC(bp);
 +
 +      /* Maximum number of simultaneous TPA aggregations for this Queue.
 +       *
 +       * For PF Clients it should be the maximum available number.
 +       * VF driver(s) may want to define it to a smaller value.
 +       */
-       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
++      rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
 +
 +      rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
 +      rxq_init->fw_sb_id = fp->fw_sb_id;
 +
 +      if (IS_FCOE_FP(fp))
 +              rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
 +      else
 +              rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
 +}
 +
 +static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
 +      struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
 +      u8 cos)
 +{
 +      txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
 +      txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
 +      txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
 +      txq_init->fw_sb_id = fp->fw_sb_id;
 +
 +      /*
 +       * set the tss leading client id for TX classfication ==
 +       * set the tss leading client id for TX classification ==
 +       */
 +      txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
 +
 +      if (IS_FCOE_FP(fp)) {
 +              txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
 +              txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
 +      }
 +}
 +
 +static void bnx2x_pf_init(struct bnx2x *bp)
 +{
 +      struct bnx2x_func_init_params func_init = {0};
 +      struct event_ring_data eq_data = { {0} };
 +      u16 flags;
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              /* reset IGU PF statistics: MSIX + ATTN */
 +              /* PF */
 +              REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
 +                         BNX2X_IGU_STAS_MSG_VF_CNT*4 +
 +                         (CHIP_MODE_IS_4_PORT(bp) ?
 +                              BP_FUNC(bp) : BP_VN(bp))*4, 0);
 +              /* ATTN */
 +              REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
 +                         BNX2X_IGU_STAS_MSG_VF_CNT*4 +
 +                         BNX2X_IGU_STAS_MSG_PF_CNT*4 +
 +                         (CHIP_MODE_IS_4_PORT(bp) ?
 +                              BP_FUNC(bp) : BP_VN(bp))*4, 0);
 +      }
 +
 +      /* function setup flags */
 +      flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
 +
 +      /* This flag is relevant for E1x only.
 +       * E2 doesn't have a TPA configuration at the function level.
 +       */
 +      flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
 +
 +      func_init.func_flgs = flags;
 +      func_init.pf_id = BP_FUNC(bp);
 +      func_init.func_id = BP_FUNC(bp);
 +      func_init.spq_map = bp->spq_mapping;
 +      func_init.spq_prod = bp->spq_prod_idx;
 +
 +      bnx2x_func_init(bp, &func_init);
 +
 +      memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
 +
 +      /*
 +       * Congestion management values depend on the link rate.
 +       * There is no active link so the initial link rate is set to 10 Gbps.
 +       * When the link comes up, the congestion management values are
 +       * re-calculated according to the actual link rate.
 +       */
 +      bp->link_vars.line_speed = SPEED_10000;
 +      bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
 +
 +      /* Only the PMF sets the HW */
 +      if (bp->port.pmf)
 +              storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 +
 +      /* init Event Queue */
 +      eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
 +      eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
 +      eq_data.producer = bp->eq_prod;
 +      eq_data.index_id = HC_SP_INDEX_EQ_CONS;
 +      eq_data.sb_id = DEF_SB_ID;
 +      storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
 +}
 +
 +
 +static void bnx2x_e1h_disable(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +
 +      bnx2x_tx_disable(bp);
 +
 +      REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 +}
 +
 +static void bnx2x_e1h_enable(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +
 +      REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
 +
 +      /* Tx queues should only be re-enabled */
 +      netif_tx_wake_all_queues(bp->dev);
 +
 +      /*
 +       * Should not call netif_carrier_on since it will be called if the link
 +       * is up when checking for link state
 +       */
 +}
 +
 +/* called due to MCP event (on pmf):
 + *    reread new bandwidth configuration
 + *    configure FW
 + *    notify other functions about the change
 + */
 +static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
 +{
 +      if (bp->link_vars.link_up) {
 +              bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
 +              bnx2x_link_sync_notify(bp);
 +      }
 +      storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 +}
 +
 +static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
 +{
 +      bnx2x_config_mf_bw(bp);
 +      bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
 +}
 +
 +static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 +{
 +      DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
 +
 +      if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
 +
 +              /*
 +               * This is the only place besides the function initialization
 +               * where the bp->flags can change so it is done without any
 +               * locks
 +               */
 +              if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
 +                      DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
 +                      bp->flags |= MF_FUNC_DIS;
 +
 +                      bnx2x_e1h_disable(bp);
 +              } else {
 +                      DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
 +                      bp->flags &= ~MF_FUNC_DIS;
 +
 +                      bnx2x_e1h_enable(bp);
 +              }
 +              dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
 +      }
 +      if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
 +              bnx2x_config_mf_bw(bp);
 +              dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
 +      }
 +
 +      /* Report results to MCP */
 +      if (dcc_event)
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
 +      else
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 +}
 +
 +/* must be called under the spq lock */
 +static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
 +{
 +      struct eth_spe *next_spe = bp->spq_prod_bd;
 +
 +      if (bp->spq_prod_bd == bp->spq_last_bd) {
 +              bp->spq_prod_bd = bp->spq;
 +              bp->spq_prod_idx = 0;
 +              DP(NETIF_MSG_TIMER, "end of spq\n");
 +      } else {
 +              bp->spq_prod_bd++;
 +              bp->spq_prod_idx++;
 +      }
 +      return next_spe;
 +}
 +
 +/* must be called under the spq lock */
 +static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
 +{
 +      int func = BP_FUNC(bp);
 +
 +      /*
 +       * Make sure that BD data is updated before writing the producer:
 +       * BD data is written to the memory, the producer is read from the
 +       * memory, thus we need a full memory barrier to ensure the ordering.
 +       */
 +      mb();
 +
 +      REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
 +               bp->spq_prod_idx);
 +      mmiowb();
 +}
 +
 +/**
 + * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 + *
 + * @cmd:      command to check
 + * @cmd_type: command type
 + */
 +static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
 +{
 +      if ((cmd_type == NONE_CONNECTION_TYPE) ||
 +          (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
 +          (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
 +          (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
 +          (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
 +          (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
 +          (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
 +              return true;
 +      else
 +              return false;
 +
 +}
 +
 +
 +/**
 + * bnx2x_sp_post - place a single command on an SP ring
 + *
 + * @bp:               driver handle
 + * @command:  command to place (e.g. SETUP, FILTER_RULES, etc.)
 + * @cid:      SW CID the command is related to
 + * @data_hi:  command private data address (high 32 bits)
 + * @data_lo:  command private data address (low 32 bits)
 + * @cmd_type: command type (e.g. NONE, ETH)
 + *
 + * SP data is handled as if it's always an address pair, thus data fields are
 + * not swapped to little endian in upper functions. Instead this function swaps
 + * data as if it's two u32 fields.
 + */
 +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 +                u32 data_hi, u32 data_lo, int cmd_type)
 +{
 +      struct eth_spe *spe;
 +      u16 type;
 +      bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return -EIO;
 +#endif
 +
 +      spin_lock_bh(&bp->spq_lock);
 +
 +      if (common) {
 +              if (!atomic_read(&bp->eq_spq_left)) {
 +                      BNX2X_ERR("BUG! EQ ring full!\n");
 +                      spin_unlock_bh(&bp->spq_lock);
 +                      bnx2x_panic();
 +                      return -EBUSY;
 +              }
 +      } else if (!atomic_read(&bp->cq_spq_left)) {
 +                      BNX2X_ERR("BUG! SPQ ring full!\n");
 +                      spin_unlock_bh(&bp->spq_lock);
 +                      bnx2x_panic();
 +                      return -EBUSY;
 +      }
 +
 +      spe = bnx2x_sp_get_next(bp);
 +
 +      /* CID needs port number to be encoded in it */
 +      spe->hdr.conn_and_cmd_data =
 +                      cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
 +                                  HW_CID(bp, cid));
 +
 +      type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
 +
 +      type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
 +               SPE_HDR_FUNCTION_ID);
 +
 +      spe->hdr.type = cpu_to_le16(type);
 +
 +      spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
 +      spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
 +
 +      /*
 +       * It's ok if the actual decrement is issued towards the memory
 +       * somewhere between the spin_lock and spin_unlock. Thus no
 +       * more explicit memory barrier is needed.
 +       */
 +      if (common)
 +              atomic_dec(&bp->eq_spq_left);
 +      else
 +              atomic_dec(&bp->cq_spq_left);
 +
 +
 +      DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
 +         "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) "
 +         "type(0x%x) left (CQ, EQ) (%x,%x)\n",
 +         bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
 +         (u32)(U64_LO(bp->spq_mapping) +
 +         (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
 +         HW_CID(bp, cid), data_hi, data_lo, type,
 +         atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
 +
 +      bnx2x_sp_prod_update(bp);
 +      spin_unlock_bh(&bp->spq_lock);
 +      return 0;
 +}
 +
 +/* acquire split MCP access lock register */
 +static int bnx2x_acquire_alr(struct bnx2x *bp)
 +{
 +      u32 j, val;
 +      int rc = 0;
 +
 +      might_sleep();
 +      for (j = 0; j < 1000; j++) {
 +              val = (1UL << 31);
 +              REG_WR(bp, GRCBASE_MCP + 0x9c, val);
 +              val = REG_RD(bp, GRCBASE_MCP + 0x9c);
 +              if (val & (1L << 31))
 +                      break;
 +
 +              msleep(5);
 +      }
 +      if (!(val & (1L << 31))) {
 +              BNX2X_ERR("Cannot acquire MCP access lock register\n");
 +              rc = -EBUSY;
 +      }
 +
 +      return rc;
 +}
 +
 +/* release split MCP access lock register */
 +static void bnx2x_release_alr(struct bnx2x *bp)
 +{
 +      REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
 +}
 +
 +#define BNX2X_DEF_SB_ATT_IDX  0x0001
 +#define BNX2X_DEF_SB_IDX      0x0002
 +
 +static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 +{
 +      struct host_sp_status_block *def_sb = bp->def_status_blk;
 +      u16 rc = 0;
 +
 +      barrier(); /* status block is written to by the chip */
 +      if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
 +              bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
 +              rc |= BNX2X_DEF_SB_ATT_IDX;
 +      }
 +
 +      if (bp->def_idx != def_sb->sp_sb.running_index) {
 +              bp->def_idx = def_sb->sp_sb.running_index;
 +              rc |= BNX2X_DEF_SB_IDX;
 +      }
 +
 +      /* Do not reorder: index reads should complete before handling */
 +      barrier();
 +      return rc;
 +}
 +
 +/*
 + * slow path service functions
 + */
 +
 +static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 +{
 +      int port = BP_PORT(bp);
 +      u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 +                            MISC_REG_AEU_MASK_ATTN_FUNC_0;
 +      u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
 +                                     NIG_REG_MASK_INTERRUPT_PORT0;
 +      u32 aeu_mask;
 +      u32 nig_mask = 0;
 +      u32 reg_addr;
 +
 +      if (bp->attn_state & asserted)
 +              BNX2X_ERR("IGU ERROR\n");
 +
 +      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 +      aeu_mask = REG_RD(bp, aeu_addr);
 +
 +      DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
 +         aeu_mask, asserted);
 +      aeu_mask &= ~(asserted & 0x3ff);
 +      DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 +
 +      REG_WR(bp, aeu_addr, aeu_mask);
 +      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 +
 +      DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 +      bp->attn_state |= asserted;
 +      DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 +
 +      if (asserted & ATTN_HARD_WIRED_MASK) {
 +              if (asserted & ATTN_NIG_FOR_FUNC) {
 +
 +                      bnx2x_acquire_phy_lock(bp);
 +
 +                      /* save nig interrupt mask */
 +                      nig_mask = REG_RD(bp, nig_int_mask_addr);
 +
 +                      /* If nig_mask is not set, no need to call the update
 +                       * function.
 +                       */
 +                      if (nig_mask) {
 +                              REG_WR(bp, nig_int_mask_addr, 0);
 +
 +                              bnx2x_link_attn(bp);
 +                      }
 +
 +                      /* handle unicore attn? */
 +              }
 +              if (asserted & ATTN_SW_TIMER_4_FUNC)
 +                      DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
 +
 +              if (asserted & GPIO_2_FUNC)
 +                      DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
 +
 +              if (asserted & GPIO_3_FUNC)
 +                      DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
 +
 +              if (asserted & GPIO_4_FUNC)
 +                      DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
 +
 +              if (port == 0) {
 +                      if (asserted & ATTN_GENERAL_ATTN_1) {
 +                              DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
 +                              REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
 +                      }
 +                      if (asserted & ATTN_GENERAL_ATTN_2) {
 +                              DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
 +                              REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
 +                      }
 +                      if (asserted & ATTN_GENERAL_ATTN_3) {
 +                              DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
 +                              REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
 +                      }
 +              } else {
 +                      if (asserted & ATTN_GENERAL_ATTN_4) {
 +                              DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
 +                              REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
 +                      }
 +                      if (asserted & ATTN_GENERAL_ATTN_5) {
 +                              DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
 +                              REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
 +                      }
 +                      if (asserted & ATTN_GENERAL_ATTN_6) {
 +                              DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
 +                              REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
 +                      }
 +              }
 +
 +      } /* if hardwired */
 +
 +      if (bp->common.int_block == INT_BLOCK_HC)
 +              reg_addr = (HC_REG_COMMAND_REG + port*32 +
 +                          COMMAND_REG_ATTN_BITS_SET);
 +      else
 +              reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
 +
 +      DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
 +         (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
 +      REG_WR(bp, reg_addr, asserted);
 +
 +      /* now set back the mask */
 +      if (asserted & ATTN_NIG_FOR_FUNC) {
 +              REG_WR(bp, nig_int_mask_addr, nig_mask);
 +              bnx2x_release_phy_lock(bp);
 +      }
 +}
 +
 +static inline void bnx2x_fan_failure(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      u32 ext_phy_config;
 +      /* mark the failure */
 +      ext_phy_config =
 +              SHMEM_RD(bp,
 +                       dev_info.port_hw_config[port].external_phy_config);
 +
 +      ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
 +      ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
 +      SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
 +               ext_phy_config);
 +
 +      /* log the failure */
 +      netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
 +             " the driver to shut down the card to prevent permanent"
 +             " damage.  Please contact OEM Support for assistance\n");
 +}
 +
 +static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 +{
 +      int port = BP_PORT(bp);
 +      int reg_offset;
 +      u32 val;
 +
 +      reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 +                           MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 +
 +      if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
 +
 +              val = REG_RD(bp, reg_offset);
 +              val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
 +              REG_WR(bp, reg_offset, val);
 +
 +              BNX2X_ERR("SPIO5 hw attention\n");
 +
 +              /* Fan failure attention */
 +              bnx2x_hw_reset_phy(&bp->link_params);
 +              bnx2x_fan_failure(bp);
 +      }
 +
 +      if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
 +              bnx2x_acquire_phy_lock(bp);
 +              bnx2x_handle_module_detect_int(&bp->link_params);
 +              bnx2x_release_phy_lock(bp);
 +      }
 +
 +      if (attn & HW_INTERRUT_ASSERT_SET_0) {
 +
 +              val = REG_RD(bp, reg_offset);
 +              val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
 +              REG_WR(bp, reg_offset, val);
 +
 +              BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
 +                        (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
 +              bnx2x_panic();
 +      }
 +}
 +
 +static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 +{
 +      u32 val;
 +
 +      if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
 +
 +              val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
 +              BNX2X_ERR("DB hw attention 0x%x\n", val);
 +              /* DORQ discard attention */
 +              if (val & 0x2)
 +                      BNX2X_ERR("FATAL error from DORQ\n");
 +      }
 +
 +      if (attn & HW_INTERRUT_ASSERT_SET_1) {
 +
 +              int port = BP_PORT(bp);
 +              int reg_offset;
 +
 +              reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
 +                                   MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
 +
 +              val = REG_RD(bp, reg_offset);
 +              val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
 +              REG_WR(bp, reg_offset, val);
 +
 +              BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
 +                        (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
 +              bnx2x_panic();
 +      }
 +}
 +
 +static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 +{
 +      u32 val;
 +
 +      if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
 +
 +              val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
 +              BNX2X_ERR("CFC hw attention 0x%x\n", val);
 +              /* CFC error attention */
 +              if (val & 0x2)
 +                      BNX2X_ERR("FATAL error from CFC\n");
 +      }
 +
 +      if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
 +              val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
 +              BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
 +              /* RQ_USDMDP_FIFO_OVERFLOW */
 +              if (val & 0x18000)
 +                      BNX2X_ERR("FATAL error from PXP\n");
 +
 +              if (!CHIP_IS_E1x(bp)) {
 +                      val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
 +                      BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
 +              }
 +      }
 +
 +      if (attn & HW_INTERRUT_ASSERT_SET_2) {
 +
 +              int port = BP_PORT(bp);
 +              int reg_offset;
 +
 +              reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
 +                                   MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
 +
 +              val = REG_RD(bp, reg_offset);
 +              val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
 +              REG_WR(bp, reg_offset, val);
 +
 +              BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
 +                        (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
 +              bnx2x_panic();
 +      }
 +}
 +
 +static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 +{
 +      u32 val;
 +
 +      if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
 +
 +              if (attn & BNX2X_PMF_LINK_ASSERT) {
 +                      int func = BP_FUNC(bp);
 +
 +                      REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
 +                      bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
 +                                      func_mf_config[BP_ABS_FUNC(bp)].config);
 +                      val = SHMEM_RD(bp,
 +                                     func_mb[BP_FW_MB_IDX(bp)].drv_status);
 +                      if (val & DRV_STATUS_DCC_EVENT_MASK)
 +                              bnx2x_dcc_event(bp,
 +                                          (val & DRV_STATUS_DCC_EVENT_MASK));
 +
 +                      if (val & DRV_STATUS_SET_MF_BW)
 +                              bnx2x_set_mf_bw(bp);
 +
 +                      if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
 +                              bnx2x_pmf_update(bp);
 +
 +                      if (bp->port.pmf &&
 +                          (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
 +                              bp->dcbx_enabled > 0)
 +                              /* start dcbx state machine */
 +                              bnx2x_dcbx_set_params(bp,
 +                                      BNX2X_DCBX_STATE_NEG_RECEIVED);
 +                      if (bp->link_vars.periodic_flags &
 +                          PERIODIC_FLAGS_LINK_EVENT) {
 +                              /*  sync with link */
 +                              bnx2x_acquire_phy_lock(bp);
 +                              bp->link_vars.periodic_flags &=
 +                                      ~PERIODIC_FLAGS_LINK_EVENT;
 +                              bnx2x_release_phy_lock(bp);
 +                              if (IS_MF(bp))
 +                                      bnx2x_link_sync_notify(bp);
 +                              bnx2x_link_report(bp);
 +                      }
 +                      /* Always call it here: bnx2x_link_report() will
 +                       * prevent duplicate link indications.
 +                       */
 +                      bnx2x__link_status_update(bp);
 +              } else if (attn & BNX2X_MC_ASSERT_BITS) {
 +
 +                      BNX2X_ERR("MC assert!\n");
 +                      bnx2x_mc_assert(bp);
 +                      REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
 +                      REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
 +                      REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
 +                      REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
 +                      bnx2x_panic();
 +
 +              } else if (attn & BNX2X_MCP_ASSERT) {
 +
 +                      BNX2X_ERR("MCP assert!\n");
 +                      REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
 +                      bnx2x_fw_dump(bp);
 +
 +              } else
 +                      BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
 +      }
 +
 +      if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
 +              BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
 +              if (attn & BNX2X_GRC_TIMEOUT) {
 +                      val = CHIP_IS_E1(bp) ? 0 :
 +                                      REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
 +                      BNX2X_ERR("GRC time-out 0x%08x\n", val);
 +              }
 +              if (attn & BNX2X_GRC_RSV) {
 +                      val = CHIP_IS_E1(bp) ? 0 :
 +                                      REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
 +                      BNX2X_ERR("GRC reserved 0x%08x\n", val);
 +              }
 +              REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
 +      }
 +}
 +
 +/*
 + * Bits map:
 + * 0-7   - Engine0 load counter.
 + * 8-15  - Engine1 load counter.
 + * 16    - Engine0 RESET_IN_PROGRESS bit.
 + * 17    - Engine1 RESET_IN_PROGRESS bit.
 + * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
 + *         on the engine
 + * 19    - Engine1 ONE_IS_LOADED.
 + * 20    - Chip reset flow bit. When set, a non-leader must wait for the leaders
 + *         of both engines to complete (check both RESET_IN_PROGRESS bits, not
 + *         just the one belonging to its engine).
 + *
 + */
 +#define BNX2X_RECOVERY_GLOB_REG               MISC_REG_GENERIC_POR_1
 +
 +#define BNX2X_PATH0_LOAD_CNT_MASK     0x000000ff
 +#define BNX2X_PATH0_LOAD_CNT_SHIFT    0
 +#define BNX2X_PATH1_LOAD_CNT_MASK     0x0000ff00
 +#define BNX2X_PATH1_LOAD_CNT_SHIFT    8
 +#define BNX2X_PATH0_RST_IN_PROG_BIT   0x00010000
 +#define BNX2X_PATH1_RST_IN_PROG_BIT   0x00020000
 +#define BNX2X_GLOBAL_RESET_BIT                0x00040000
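 +
 +/* Illustrative example (not part of the original code): with
 + * val = 0x00040a03 read from BNX2X_RECOVERY_GLOB_REG, the engine0 load
 + * counter is (val & BNX2X_PATH0_LOAD_CNT_MASK) >> BNX2X_PATH0_LOAD_CNT_SHIFT
 + * = 3, the engine1 load counter is 0x0a, neither RESET_IN_PROGRESS bit is
 + * set and the GLOBAL_RESET bit is set.
 + */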
 +
 +/*
 + * Set the GLOBAL_RESET bit.
 + *
 + * Should be run under rtnl lock
 + */
 +void bnx2x_set_reset_global(struct bnx2x *bp)
 +{
 +      u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +
 +      REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
 +      barrier();
 +      mmiowb();
 +}
 +
 +/*
 + * Clear the GLOBAL_RESET bit.
 + *
 + * Should be run under rtnl lock
 + */
 +static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
 +{
 +      u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +
 +      REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
 +      barrier();
 +      mmiowb();
 +}
 +
 +/*
 + * Checks the GLOBAL_RESET bit.
 + *
 + * should be run under rtnl lock
 + */
 +static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
 +{
 +      u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +
 +      DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
 +      return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
 +}
 +
 +/*
 + * Clear RESET_IN_PROGRESS bit for the current engine.
 + *
 + * Should be run under rtnl lock
 + */
 +static inline void bnx2x_set_reset_done(struct bnx2x *bp)
 +{
 +      u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +      u32 bit = BP_PATH(bp) ?
 +              BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
 +
 +      /* Clear the bit */
 +      val &= ~bit;
 +      REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 +      barrier();
 +      mmiowb();
 +}
 +
 +/*
 + * Set RESET_IN_PROGRESS for the current engine.
 + *
 + * should be run under rtnl lock
 + */
 +void bnx2x_set_reset_in_progress(struct bnx2x *bp)
 +{
 +      u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +      u32 bit = BP_PATH(bp) ?
 +              BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
 +
 +      /* Set the bit */
 +      val |= bit;
 +      REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 +      barrier();
 +      mmiowb();
 +}
 +
 +/*
 + * Checks the RESET_IN_PROGRESS bit for the given engine.
 + * should be run under rtnl lock
 + */
 +bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
 +{
 +      u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +      u32 bit = engine ?
 +              BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
 +
 +      /* return false if bit is set */
 +      return (val & bit) ? false : true;
 +}
 +
 +/*
 + * Increment the load counter for the current engine.
 + *
 + * should be run under rtnl lock
 + */
 +void bnx2x_inc_load_cnt(struct bnx2x *bp)
 +{
 +      u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +      u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
 +                           BNX2X_PATH0_LOAD_CNT_MASK;
 +      u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
 +                           BNX2X_PATH0_LOAD_CNT_SHIFT;
 +
 +      DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 +
 +      /* get the current counter value */
 +      val1 = (val & mask) >> shift;
 +
 +      /* increment... */
 +      val1++;
 +
 +      /* clear the old value */
 +      val &= ~mask;
 +
 +      /* set the new one */
 +      val |= ((val1 << shift) & mask);
 +
 +      REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 +      barrier();
 +      mmiowb();
 +}
 +
 +/**
 + * bnx2x_dec_load_cnt - decrement the load counter
 + *
 + * @bp:               driver handle
 + *
 + * Should be run under rtnl lock.
 + * Decrements the load counter for the current engine. Returns
 + * the new counter value.
 + */
 +u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
 +{
 +      u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +      u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
 +                           BNX2X_PATH0_LOAD_CNT_MASK;
 +      u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
 +                           BNX2X_PATH0_LOAD_CNT_SHIFT;
 +
 +      DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 +
 +      /* get the current counter value */
 +      val1 = (val & mask) >> shift;
 +
 +      /* decrement... */
 +      val1--;
 +
 +      /* clear the old value */
 +      val &= ~mask;
 +
 +      /* set the new one */
 +      val |= ((val1 << shift) & mask);
 +
 +      REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 +      barrier();
 +      mmiowb();
 +
 +      return val1;
 +}
 +
 +/*
 + * Read the load counter for the current engine.
 + *
 + * should be run under rtnl lock
 + */
 +static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
 +{
 +      u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
 +                           BNX2X_PATH0_LOAD_CNT_MASK);
 +      u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
 +                           BNX2X_PATH0_LOAD_CNT_SHIFT);
 +      u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +
 +      DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
 +
 +      val = (val & mask) >> shift;
 +
 +      DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
 +
 +      return val;
 +}
 +
 +/*
 + * Reset the load counter for the current engine.
 + *
 + * should be run under rtnl lock
 + */
 +static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
 +{
 +      u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 +      u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
 +                           BNX2X_PATH0_LOAD_CNT_MASK);
 +
 +      REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
 +}
 +
 +static inline void _print_next_block(int idx, const char *blk)
 +{
 +      pr_cont("%s%s", idx ? ", " : "", blk);
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
 +                                                bool print)
 +{
 +      int i = 0;
 +      u32 cur_bit = 0;
 +      for (i = 0; sig; i++) {
 +              cur_bit = ((u32)0x1 << i);
 +              if (sig & cur_bit) {
 +                      switch (cur_bit) {
 +                      case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "BRB");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "PARSER");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "TSDM");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++,
 +                                                        "SEARCHER");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "TCM");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "TSEMI");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "XPB");
 +                              break;
 +                      }
 +
 +                      /* Clear the bit */
 +                      sig &= ~cur_bit;
 +              }
 +      }
 +
 +      return par_num;
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
 +                                                bool *global, bool print)
 +{
 +      int i = 0;
 +      u32 cur_bit = 0;
 +      for (i = 0; sig; i++) {
 +              cur_bit = ((u32)0x1 << i);
 +              if (sig & cur_bit) {
 +                      switch (cur_bit) {
 +                      case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "PBF");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "QM");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "TM");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "XSDM");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "XCM");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "XSEMI");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++,
 +                                                        "DOORBELLQ");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "NIG");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++,
 +                                                        "VAUX PCI CORE");
 +                              *global = true;
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "DEBUG");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "USDM");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "UCM");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "USEMI");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "UPB");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "CSDM");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "CCM");
 +                              break;
 +                      }
 +
 +                      /* Clear the bit */
 +                      sig &= ~cur_bit;
 +              }
 +      }
 +
 +      return par_num;
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
 +                                                bool print)
 +{
 +      int i = 0;
 +      u32 cur_bit = 0;
 +      for (i = 0; sig; i++) {
 +              cur_bit = ((u32)0x1 << i);
 +              if (sig & cur_bit) {
 +                      switch (cur_bit) {
 +                      case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "CSEMI");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "PXP");
 +                              break;
 +                      case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++,
 +                                      "PXPPCICLOCKCLIENT");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "CFC");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "CDU");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "DMAE");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "IGU");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "MISC");
 +                              break;
 +                      }
 +
 +                      /* Clear the bit */
 +                      sig &= ~cur_bit;
 +              }
 +      }
 +
 +      return par_num;
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
 +                                                bool *global, bool print)
 +{
 +      int i = 0;
 +      u32 cur_bit = 0;
 +      for (i = 0; sig; i++) {
 +              cur_bit = ((u32)0x1 << i);
 +              if (sig & cur_bit) {
 +                      switch (cur_bit) {
 +                      case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
 +                              if (print)
 +                                      _print_next_block(par_num++, "MCP ROM");
 +                              *global = true;
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
 +                              if (print)
 +                                      _print_next_block(par_num++,
 +                                                        "MCP UMP RX");
 +                              *global = true;
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
 +                              if (print)
 +                                      _print_next_block(par_num++,
 +                                                        "MCP UMP TX");
 +                              *global = true;
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
 +                              if (print)
 +                                      _print_next_block(par_num++,
 +                                                        "MCP SCPAD");
 +                              *global = true;
 +                              break;
 +                      }
 +
 +                      /* Clear the bit */
 +                      sig &= ~cur_bit;
 +              }
 +      }
 +
 +      return par_num;
 +}
 +
 +static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
 +                                                bool print)
 +{
 +      int i = 0;
 +      u32 cur_bit = 0;
 +      for (i = 0; sig; i++) {
 +              cur_bit = ((u32)0x1 << i);
 +              if (sig & cur_bit) {
 +                      switch (cur_bit) {
 +                      case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "PGLUE_B");
 +                              break;
 +                      case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
 +                              if (print)
 +                                      _print_next_block(par_num++, "ATC");
 +                              break;
 +                      }
 +
 +                      /* Clear the bit */
 +                      sig &= ~cur_bit;
 +              }
 +      }
 +
 +      return par_num;
 +}
 +
 +static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
 +                                   u32 *sig)
 +{
 +      if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
 +          (sig[1] & HW_PRTY_ASSERT_SET_1) ||
 +          (sig[2] & HW_PRTY_ASSERT_SET_2) ||
 +          (sig[3] & HW_PRTY_ASSERT_SET_3) ||
 +          (sig[4] & HW_PRTY_ASSERT_SET_4)) {
 +              int par_num = 0;
 +              DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
 +                      "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
 +                      "[4]:0x%08x\n",
 +                        sig[0] & HW_PRTY_ASSERT_SET_0,
 +                        sig[1] & HW_PRTY_ASSERT_SET_1,
 +                        sig[2] & HW_PRTY_ASSERT_SET_2,
 +                        sig[3] & HW_PRTY_ASSERT_SET_3,
 +                        sig[4] & HW_PRTY_ASSERT_SET_4);
 +              if (print)
 +                      netdev_err(bp->dev,
 +                                 "Parity errors detected in blocks: ");
 +              par_num = bnx2x_check_blocks_with_parity0(
 +                      sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
 +              par_num = bnx2x_check_blocks_with_parity1(
 +                      sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
 +              par_num = bnx2x_check_blocks_with_parity2(
 +                      sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
 +              par_num = bnx2x_check_blocks_with_parity3(
 +                      sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
 +              par_num = bnx2x_check_blocks_with_parity4(
 +                      sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
 +
 +              if (print)
 +                      pr_cont("\n");
 +
 +              return true;
 +      } else
 +              return false;
 +}
 +
 +/**
 + * bnx2x_chk_parity_attn - checks for parity attentions.
 + *
 + * @bp:               driver handle
 + * @global:   true if there was a global attention
 + * @print:    show parity attention in syslog
 + */
 +bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
 +{
 +      struct attn_route attn = { {0} };
 +      int port = BP_PORT(bp);
 +
 +      attn.sig[0] = REG_RD(bp,
 +              MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
 +                           port*4);
 +      attn.sig[1] = REG_RD(bp,
 +              MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
 +                           port*4);
 +      attn.sig[2] = REG_RD(bp,
 +              MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
 +                           port*4);
 +      attn.sig[3] = REG_RD(bp,
 +              MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
 +                           port*4);
 +
 +      if (!CHIP_IS_E1x(bp))
 +              attn.sig[4] = REG_RD(bp,
 +                      MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
 +                                   port*4);
 +
 +      return bnx2x_parity_attn(bp, global, print, attn.sig);
 +}
 +
 +
 +static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
 +{
 +      u32 val;
 +      if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
 +
 +              val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
 +              BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
 +              if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
 +                      BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +                                "ADDRESS_ERROR\n");
 +              if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
 +                      BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +                                "INCORRECT_RCV_BEHAVIOR\n");
 +              if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
 +                      BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +                                "WAS_ERROR_ATTN\n");
 +              if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
 +                      BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +                                "VF_LENGTH_VIOLATION_ATTN\n");
 +              if (val &
 +                  PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
 +                      BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +                                "VF_GRC_SPACE_VIOLATION_ATTN\n");
 +              if (val &
 +                  PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
 +                      BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +                                "VF_MSIX_BAR_VIOLATION_ATTN\n");
 +              if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
 +                      BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +                                "TCPL_ERROR_ATTN\n");
 +              if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
 +                      BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +                                "TCPL_IN_TWO_RCBS_ATTN\n");
 +              if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
 +                      BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
 +                                "CSSNOOP_FIFO_OVERFLOW\n");
 +      }
 +      if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
 +              val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
 +              BNX2X_ERR("ATC hw attention 0x%x\n", val);
 +              if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
 +                      BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
 +              if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
 +                      BNX2X_ERR("ATC_ATC_INT_STS_REG"
 +                                "_ATC_TCPL_TO_NOT_PEND\n");
 +              if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
 +                      BNX2X_ERR("ATC_ATC_INT_STS_REG_"
 +                                "ATC_GPA_MULTIPLE_HITS\n");
 +              if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
 +                      BNX2X_ERR("ATC_ATC_INT_STS_REG_"
 +                                "ATC_RCPL_TO_EMPTY_CNT\n");
 +              if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
 +                      BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
 +              if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
 +                      BNX2X_ERR("ATC_ATC_INT_STS_REG_"
 +                                "ATC_IREQ_LESS_THAN_STU\n");
 +      }
 +
 +      if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
 +                  AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
 +              BNX2X_ERR("FATAL parity attention set4 0x%x\n",
 +              (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
 +                  AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
 +      }
 +
 +}
 +
 +static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 +{
 +      struct attn_route attn, *group_mask;
 +      int port = BP_PORT(bp);
 +      int index;
 +      u32 reg_addr;
 +      u32 val;
 +      u32 aeu_mask;
 +      bool global = false;
 +
 +      /* need to take HW lock because MCP or other port might also
 +         try to handle this event */
 +      bnx2x_acquire_alr(bp);
 +
 +      if (bnx2x_chk_parity_attn(bp, &global, true)) {
 +#ifndef BNX2X_STOP_ON_ERROR
 +              bp->recovery_state = BNX2X_RECOVERY_INIT;
 +              schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +              /* Disable HW interrupts */
 +              bnx2x_int_disable(bp);
 +              /* In case of parity errors don't handle attentions so that
 +               * the other function would "see" the parity errors.
 +               */
 +#else
 +              bnx2x_panic();
 +#endif
 +              bnx2x_release_alr(bp);
 +              return;
 +      }
 +
 +      attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 +      attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
 +      attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
 +      attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
 +      if (!CHIP_IS_E1x(bp))
 +              attn.sig[4] =
 +                    REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
 +      else
 +              attn.sig[4] = 0;
 +
 +      DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
 +         attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
 +
 +      for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
 +              if (deasserted & (1 << index)) {
 +                      group_mask = &bp->attn_group[index];
 +
 +                      DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
 +                                       "%08x %08x %08x\n",
 +                         index,
 +                         group_mask->sig[0], group_mask->sig[1],
 +                         group_mask->sig[2], group_mask->sig[3],
 +                         group_mask->sig[4]);
 +
 +                      bnx2x_attn_int_deasserted4(bp,
 +                                      attn.sig[4] & group_mask->sig[4]);
 +                      bnx2x_attn_int_deasserted3(bp,
 +                                      attn.sig[3] & group_mask->sig[3]);
 +                      bnx2x_attn_int_deasserted1(bp,
 +                                      attn.sig[1] & group_mask->sig[1]);
 +                      bnx2x_attn_int_deasserted2(bp,
 +                                      attn.sig[2] & group_mask->sig[2]);
 +                      bnx2x_attn_int_deasserted0(bp,
 +                                      attn.sig[0] & group_mask->sig[0]);
 +              }
 +      }
 +
 +      bnx2x_release_alr(bp);
 +
 +      if (bp->common.int_block == INT_BLOCK_HC)
 +              reg_addr = (HC_REG_COMMAND_REG + port*32 +
 +                          COMMAND_REG_ATTN_BITS_CLR);
 +      else
 +              reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
 +
 +      val = ~deasserted;
 +      DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
 +         (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
 +      REG_WR(bp, reg_addr, val);
 +
 +      if (~bp->attn_state & deasserted)
 +              BNX2X_ERR("IGU ERROR\n");
 +
 +      reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 +                        MISC_REG_AEU_MASK_ATTN_FUNC_0;
 +
 +      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 +      aeu_mask = REG_RD(bp, reg_addr);
 +
 +      DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
 +         aeu_mask, deasserted);
 +      aeu_mask |= (deasserted & 0x3ff);
 +      DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 +
 +      REG_WR(bp, reg_addr, aeu_mask);
 +      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 +
 +      DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 +      bp->attn_state &= ~deasserted;
 +      DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 +}
 +
 +static void bnx2x_attn_int(struct bnx2x *bp)
 +{
 +      /* read local copy of bits */
 +      u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
 +                                                              attn_bits);
 +      u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
 +                                                              attn_bits_ack);
 +      u32 attn_state = bp->attn_state;
 +
 +      /* look for changed bits */
 +      u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
 +      u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
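 +      /* Illustrative example (not part of the original code): if attn_bits =
 +       * 0x6, attn_ack = 0x3 and attn_state = 0x3, then bit 2 has just been
 +       * raised (asserted = 0x4) while bit 0 was previously raised and has
 +       * now dropped (deasserted = 0x1).
 +       */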
 +
 +      DP(NETIF_MSG_HW,
 +         "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
 +         attn_bits, attn_ack, asserted, deasserted);
 +
 +      if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
 +              BNX2X_ERR("BAD attention state\n");
 +
 +      /* handle bits that were raised */
 +      if (asserted)
 +              bnx2x_attn_int_asserted(bp, asserted);
 +
 +      if (deasserted)
 +              bnx2x_attn_int_deasserted(bp, deasserted);
 +}
 +
 +void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
 +                    u16 index, u8 op, u8 update)
 +{
 +      u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
 +
 +      bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
 +                           igu_addr);
 +}
 +
 +static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
 +{
 +      /* No memory barriers */
 +      storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
 +      mmiowb(); /* keep prod updates ordered */
 +}
 +
 +#ifdef BCM_CNIC
 +static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
 +                                    union event_ring_elem *elem)
 +{
 +      u8 err = elem->message.error;
 +
 +      if (!bp->cnic_eth_dev.starting_cid  ||
 +          (cid < bp->cnic_eth_dev.starting_cid &&
 +          cid != bp->cnic_eth_dev.iscsi_l2_cid))
 +              return 1;
 +
 +      DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
 +
 +      if (unlikely(err)) {
 +
 +              BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
 +                        cid);
 +              bnx2x_panic_dump(bp);
 +      }
 +      bnx2x_cnic_cfc_comp(bp, cid, err);
 +      return 0;
 +}
 +#endif
 +
 +static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
 +{
 +      struct bnx2x_mcast_ramrod_params rparam;
 +      int rc;
 +
 +      memset(&rparam, 0, sizeof(rparam));
 +
 +      rparam.mcast_obj = &bp->mcast_obj;
 +
 +      netif_addr_lock_bh(bp->dev);
 +
 +      /* Clear pending state for the last command */
 +      bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
 +
 +      /* If there are pending mcast commands - send them */
 +      if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
 +              rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
 +              if (rc < 0)
 +                      BNX2X_ERR("Failed to send pending mcast commands: %d\n",
 +                                rc);
 +      }
 +
 +      netif_addr_unlock_bh(bp->dev);
 +}
 +
 +static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
 +                                                 union event_ring_elem *elem)
 +{
 +      unsigned long ramrod_flags = 0;
 +      int rc = 0;
 +      u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
 +      struct bnx2x_vlan_mac_obj *vlan_mac_obj;
 +
 +      /* Always push next commands out, don't wait here */
 +      __set_bit(RAMROD_CONT, &ramrod_flags);
 +
 +      switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
 +      case BNX2X_FILTER_MAC_PENDING:
 +#ifdef BCM_CNIC
 +              if (cid == BNX2X_ISCSI_ETH_CID)
 +                      vlan_mac_obj = &bp->iscsi_l2_mac_obj;
 +              else
 +#endif
 +                      vlan_mac_obj = &bp->fp[cid].mac_obj;
 +
 +              break;
 +      case BNX2X_FILTER_MCAST_PENDING:
 +              /* This is only relevant for 57710 where multicast MACs are
 +               * configured as unicast MACs using the same ramrod.
 +               */
 +              bnx2x_handle_mcast_eqe(bp);
 +              return;
 +      default:
 +              BNX2X_ERR("Unsupported classification command: %d\n",
 +                        elem->message.data.eth_event.echo);
 +              return;
 +      }
 +
 +      rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
 +
 +      if (rc < 0)
 +              BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
 +      else if (rc > 0)
 +              DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
 +
 +}
 +
 +#ifdef BCM_CNIC
 +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
 +#endif
 +
 +static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
 +{
 +      netif_addr_lock_bh(bp->dev);
 +
 +      clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
 +
 +      /* Send rx_mode command again if it was requested */
 +      if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
 +              bnx2x_set_storm_rx_mode(bp);
 +#ifdef BCM_CNIC
 +      else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
 +                                  &bp->sp_state))
 +              bnx2x_set_iscsi_eth_rx_mode(bp, true);
 +      else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
 +                                  &bp->sp_state))
 +              bnx2x_set_iscsi_eth_rx_mode(bp, false);
 +#endif
 +
 +      netif_addr_unlock_bh(bp->dev);
 +}
 +
 +static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
 +      struct bnx2x *bp, u32 cid)
 +{
 +      DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
 +#ifdef BCM_CNIC
 +      if (cid == BNX2X_FCOE_ETH_CID)
 +              return &bnx2x_fcoe(bp, q_obj);
 +      else
 +#endif
 +              return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
 +}
 +
 +static void bnx2x_eq_int(struct bnx2x *bp)
 +{
 +      u16 hw_cons, sw_cons, sw_prod;
 +      union event_ring_elem *elem;
 +      u32 cid;
 +      u8 opcode;
 +      int spqe_cnt = 0;
 +      struct bnx2x_queue_sp_obj *q_obj;
 +      struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
 +      struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
 +
 +      hw_cons = le16_to_cpu(*bp->eq_cons_sb);
 +
 +      /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
 +       * When we get to the next page we need to adjust so the loop
 +       * condition below will be met. The next element is the size of a
 +       * regular element and hence we increment by 1.
 +       */
 +      if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
 +              hw_cons++;
 +
 +      /* This function may never run in parallel with itself for a
 +       * specific bp, thus there is no need for a "paired" read memory
 +       * barrier here.
 +       */
 +      sw_cons = bp->eq_cons;
 +      sw_prod = bp->eq_prod;
 +
 +      DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
 +                      hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
 +
 +      for (; sw_cons != hw_cons;
 +            sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
 +
 +
 +              elem = &bp->eq_ring[EQ_DESC(sw_cons)];
 +
 +              cid = SW_CID(elem->message.data.cfc_del_event.cid);
 +              opcode = elem->message.opcode;
 +
 +
 +              /* handle eq element */
 +              switch (opcode) {
 +              case EVENT_RING_OPCODE_STAT_QUERY:
 +                      DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
 +                         bp->stats_comp++);
 +                      /* nothing to do with stats comp */
 +                      goto next_spqe;
 +
 +              case EVENT_RING_OPCODE_CFC_DEL:
 +                      /* handle according to cid range */
 +                      /*
 +                       * we may want to verify here that the bp state is
 +                       * HALTING
 +                       */
 +                      DP(BNX2X_MSG_SP,
 +                         "got delete ramrod for MULTI[%d]\n", cid);
 +#ifdef BCM_CNIC
 +                      if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
 +                              goto next_spqe;
 +#endif
 +                      q_obj = bnx2x_cid_to_q_obj(bp, cid);
 +
 +                      if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
 +                              break;
 +
 +
 +
 +                      goto next_spqe;
 +
 +              case EVENT_RING_OPCODE_STOP_TRAFFIC:
 +                      DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
 +                      if (f_obj->complete_cmd(bp, f_obj,
 +                                              BNX2X_F_CMD_TX_STOP))
 +                              break;
 +                      bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
 +                      goto next_spqe;
 +
 +              case EVENT_RING_OPCODE_START_TRAFFIC:
 +                      DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
 +                      if (f_obj->complete_cmd(bp, f_obj,
 +                                              BNX2X_F_CMD_TX_START))
 +                              break;
 +                      bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
 +                      goto next_spqe;
 +              case EVENT_RING_OPCODE_FUNCTION_START:
 +                      DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
 +                      if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
 +                              break;
 +
 +                      goto next_spqe;
 +
 +              case EVENT_RING_OPCODE_FUNCTION_STOP:
 +                      DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
 +                      if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
 +                              break;
 +
 +                      goto next_spqe;
 +              }
 +
 +              switch (opcode | bp->state) {
 +              case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
 +                    BNX2X_STATE_OPEN):
 +              case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
 +                    BNX2X_STATE_OPENING_WAIT4_PORT):
 +                      cid = elem->message.data.eth_event.echo &
 +                              BNX2X_SWCID_MASK;
 +                      DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
 +                         cid);
 +                      rss_raw->clear_pending(rss_raw);
 +                      break;
 +
 +              case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
 +              case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
 +              case (EVENT_RING_OPCODE_SET_MAC |
 +                    BNX2X_STATE_CLOSING_WAIT4_HALT):
 +              case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
 +                    BNX2X_STATE_OPEN):
 +              case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
 +                    BNX2X_STATE_DIAG):
 +              case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
 +                    BNX2X_STATE_CLOSING_WAIT4_HALT):
 +                      DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
 +                      bnx2x_handle_classification_eqe(bp, elem);
 +                      break;
 +
 +              case (EVENT_RING_OPCODE_MULTICAST_RULES |
 +                    BNX2X_STATE_OPEN):
 +              case (EVENT_RING_OPCODE_MULTICAST_RULES |
 +                    BNX2X_STATE_DIAG):
 +              case (EVENT_RING_OPCODE_MULTICAST_RULES |
 +                    BNX2X_STATE_CLOSING_WAIT4_HALT):
 +                      DP(BNX2X_MSG_SP, "got mcast ramrod\n");
 +                      bnx2x_handle_mcast_eqe(bp);
 +                      break;
 +
 +              case (EVENT_RING_OPCODE_FILTERS_RULES |
 +                    BNX2X_STATE_OPEN):
 +              case (EVENT_RING_OPCODE_FILTERS_RULES |
 +                    BNX2X_STATE_DIAG):
 +              case (EVENT_RING_OPCODE_FILTERS_RULES |
 +                    BNX2X_STATE_CLOSING_WAIT4_HALT):
 +                      DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
 +                      bnx2x_handle_rx_mode_eqe(bp);
 +                      break;
 +              default:
 +                      /* unknown event: log error and continue */
 +                      BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
 +                                elem->message.opcode, bp->state);
 +              }
 +next_spqe:
 +              spqe_cnt++;
 +      } /* for */
 +
 +      smp_mb__before_atomic_inc();
 +      atomic_add(spqe_cnt, &bp->eq_spq_left);
 +
 +      bp->eq_cons = sw_cons;
 +      bp->eq_prod = sw_prod;
 +      /* Make sure that above mem writes were issued towards the memory */
 +      smp_wmb();
 +
 +      /* update producer */
 +      bnx2x_update_eq_prod(bp, bp->eq_prod);
 +}
 +
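 +/* Slow path work item: refresh the default status block indices, handle HW
 + * attentions, kick the FCoE queue when it has work, process EQ completions
 + * and finally ack the default status block back to the IGU.
 + */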
 +static void bnx2x_sp_task(struct work_struct *work)
 +{
 +      struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
 +      u16 status;
 +
 +      status = bnx2x_update_dsb_idx(bp);
 +/*    if (status == 0)                                     */
 +/*            BNX2X_ERR("spurious slowpath interrupt!\n"); */
 +
 +      DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
 +
 +      /* HW attentions */
 +      if (status & BNX2X_DEF_SB_ATT_IDX) {
 +              bnx2x_attn_int(bp);
 +              status &= ~BNX2X_DEF_SB_ATT_IDX;
 +      }
 +
 +      /* SP events: STAT_QUERY and others */
 +      if (status & BNX2X_DEF_SB_IDX) {
 +#ifdef BCM_CNIC
 +              struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
 +
 +              if ((!NO_FCOE(bp)) &&
 +                      (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 +                      /*
 +                       * Prevent local bottom-halves from running as
 +                       * we are going to change the local NAPI list.
 +                       */
 +                      local_bh_disable();
 +                      napi_schedule(&bnx2x_fcoe(bp, napi));
 +                      local_bh_enable();
 +              }
 +#endif
 +              /* Handle EQ completions */
 +              bnx2x_eq_int(bp);
 +
 +              bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
 +                      le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
 +
 +              status &= ~BNX2X_DEF_SB_IDX;
 +      }
 +
 +      if (unlikely(status))
 +              DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
 +                 status);
 +
 +      bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
 +           le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
 +}
 +
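 +/* MSI-X slow path interrupt handler: mask further slow path interrupts,
 + * give the CNIC handler (if registered) a chance to run and defer the real
 + * work to the sp_task work item.
 + */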
 +irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 +{
 +      struct net_device *dev = dev_instance;
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
 +                   IGU_INT_DISABLE, 0);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return IRQ_HANDLED;
 +#endif
 +
 +#ifdef BCM_CNIC
 +      {
 +              struct cnic_ops *c_ops;
 +
 +              rcu_read_lock();
 +              c_ops = rcu_dereference(bp->cnic_ops);
 +              if (c_ops)
 +                      c_ops->cnic_handler(bp->cnic_data, NULL);
 +              rcu_read_unlock();
 +      }
 +#endif
 +      queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +/* end of slow path */
 +
 +
 +void bnx2x_drv_pulse(struct bnx2x *bp)
 +{
 +      SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
 +               bp->fw_drv_pulse_wr_seq);
 +}
 +
 +
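 +/* Periodic driver timer: service the rings when running in poll mode, keep
 + * the driver<->MCP heartbeat pulse alive, trigger a statistics update while
 + * the device is open and re-arm itself.
 + */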
 +static void bnx2x_timer(unsigned long data)
 +{
 +      u8 cos;
 +      struct bnx2x *bp = (struct bnx2x *) data;
 +
 +      if (!netif_running(bp->dev))
 +              return;
 +
 +      if (poll) {
 +              struct bnx2x_fastpath *fp = &bp->fp[0];
 +
 +              for_each_cos_in_tx_queue(fp, cos)
 +                      bnx2x_tx_int(bp, &fp->txdata[cos]);
 +              bnx2x_rx_int(fp, 1000);
 +      }
 +
 +      if (!BP_NOMCP(bp)) {
 +              int mb_idx = BP_FW_MB_IDX(bp);
 +              u32 drv_pulse;
 +              u32 mcp_pulse;
 +
 +              ++bp->fw_drv_pulse_wr_seq;
 +              bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
 +              /* TBD - add SYSTEM_TIME */
 +              drv_pulse = bp->fw_drv_pulse_wr_seq;
 +              bnx2x_drv_pulse(bp);
 +
 +              mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
 +                           MCP_PULSE_SEQ_MASK);
 +              /* The delta between driver pulse and mcp response
 +               * should be 1 (before mcp response) or 0 (after mcp response)
 +               */
 +              if ((drv_pulse != mcp_pulse) &&
 +                  (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
 +                      /* someone lost a heartbeat... */
 +                      BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
 +                                drv_pulse, mcp_pulse);
 +              }
 +      }
 +
 +      if (bp->state == BNX2X_STATE_OPEN)
 +              bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 +
 +      mod_timer(&bp->timer, jiffies + bp->current_interval);
 +}
 +
 +/* end of Statistics */
 +
 +/* nic init */
 +
 +/*
 + * nic init service functions
 + */
 +
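 +/* Fill 'len' bytes of device memory at 'addr' with 'fill', using 32-bit
 + * writes when both address and length are dword-aligned and byte writes
 + * otherwise.
 + */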
 +static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
 +{
 +      u32 i;
 +      if (!(len%4) && !(addr%4))
 +              for (i = 0; i < len; i += 4)
 +                      REG_WR(bp, addr + i, fill);
 +      else
 +              for (i = 0; i < len; i++)
 +                      REG_WR8(bp, addr + i, fill);
 +
 +}
 +
 +/* helper: writes FP SP data to FW - data_size in dwords */
 +static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
 +                                     int fw_sb_id,
 +                                     u32 *sb_data_p,
 +                                     u32 data_size)
 +{
 +      int index;
 +      for (index = 0; index < data_size; index++)
 +              REG_WR(bp, BAR_CSTRORM_INTMEM +
 +                      CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
 +                      sizeof(u32)*index,
 +                      *(sb_data_p + index));
 +}
 +
 +static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
 +{
 +      u32 *sb_data_p;
 +      u32 data_size = 0;
 +      struct hc_status_block_data_e2 sb_data_e2;
 +      struct hc_status_block_data_e1x sb_data_e1x;
 +
 +      /* disable the function first */
 +      if (!CHIP_IS_E1x(bp)) {
 +              memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
 +              sb_data_e2.common.state = SB_DISABLED;
 +              sb_data_e2.common.p_func.vf_valid = false;
 +              sb_data_p = (u32 *)&sb_data_e2;
 +              data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
 +      } else {
 +              memset(&sb_data_e1x, 0,
 +                     sizeof(struct hc_status_block_data_e1x));
 +              sb_data_e1x.common.state = SB_DISABLED;
 +              sb_data_e1x.common.p_func.vf_valid = false;
 +              sb_data_p = (u32 *)&sb_data_e1x;
 +              data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
 +      }
 +      bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
 +
 +      bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 +                      CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
 +                      CSTORM_STATUS_BLOCK_SIZE);
 +      bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 +                      CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
 +                      CSTORM_SYNC_BLOCK_SIZE);
 +}
 +
 +/* helper:  writes SP SB data to FW */
 +static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
 +              struct hc_sp_status_block_data *sp_sb_data)
 +{
 +      int func = BP_FUNC(bp);
 +      int i;
 +      for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
 +              REG_WR(bp, BAR_CSTRORM_INTMEM +
 +                      CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
 +                      i*sizeof(u32),
 +                      *((u32 *)sp_sb_data + i));
 +}
 +
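 +/* Disable the slow path status block: write an SB_DISABLED copy of the SP
 + * SB data to the FW and zero the SP status and sync blocks in CSTORM.
 + */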
 +static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
 +{
 +      int func = BP_FUNC(bp);
 +      struct hc_sp_status_block_data sp_sb_data;
 +      memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
 +
 +      sp_sb_data.state = SB_DISABLED;
 +      sp_sb_data.p_func.vf_valid = false;
 +
 +      bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
 +
 +      bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 +                      CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
 +                      CSTORM_SP_STATUS_BLOCK_SIZE);
 +      bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 +                      CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
 +                      CSTORM_SP_SYNC_BLOCK_SIZE);
 +
 +}
 +
 +
 +static inline
 +void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
 +                                         int igu_sb_id, int igu_seg_id)
 +{
 +      hc_sm->igu_sb_id = igu_sb_id;
 +      hc_sm->igu_seg_id = igu_seg_id;
 +      hc_sm->timer_value = 0xFF;
 +      hc_sm->time_to_expire = 0xFFFFFFFF;
 +}
 +
++
++/* Map the status block indices to their RX/TX state machine ids. */
++static inline
++void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
++{
++      /* zero out state machine indices */
++      /* rx indices */
++      index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
++
++      /* tx indices */
++      index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
++      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
++      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
++      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
++
++      /* map indices */
++      /* rx indices */
++      index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
++              SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
++
++      /* tx indices */
++      index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
++              SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
++      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
++              SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
++      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
++              SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
++      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
++              SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
++}
++
 +static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 +                        u8 vf_valid, int fw_sb_id, int igu_sb_id)
 +{
 +      int igu_seg_id;
 +
 +      struct hc_status_block_data_e2 sb_data_e2;
 +      struct hc_status_block_data_e1x sb_data_e1x;
 +      struct hc_status_block_sm  *hc_sm_p;
 +      int data_size;
 +      u32 *sb_data_p;
 +
 +      if (CHIP_INT_MODE_IS_BC(bp))
 +              igu_seg_id = HC_SEG_ACCESS_NORM;
 +      else
 +              igu_seg_id = IGU_SEG_ACCESS_NORM;
 +
 +      bnx2x_zero_fp_sb(bp, fw_sb_id);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
 +              sb_data_e2.common.state = SB_ENABLED;
 +              sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
 +              sb_data_e2.common.p_func.vf_id = vfid;
 +              sb_data_e2.common.p_func.vf_valid = vf_valid;
 +              sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
 +              sb_data_e2.common.same_igu_sb_1b = true;
 +              sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
 +              sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
 +              hc_sm_p = sb_data_e2.common.state_machine;
 +              sb_data_p = (u32 *)&sb_data_e2;
 +              data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
++              bnx2x_map_sb_state_machines(sb_data_e2.index_data);
 +      } else {
 +              memset(&sb_data_e1x, 0,
 +                     sizeof(struct hc_status_block_data_e1x));
 +              sb_data_e1x.common.state = SB_ENABLED;
 +              sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
 +              sb_data_e1x.common.p_func.vf_id = 0xff;
 +              sb_data_e1x.common.p_func.vf_valid = false;
 +              sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
 +              sb_data_e1x.common.same_igu_sb_1b = true;
 +              sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
 +              sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
 +              hc_sm_p = sb_data_e1x.common.state_machine;
 +              sb_data_p = (u32 *)&sb_data_e1x;
 +              data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
++              bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
 +      }
 +
 +      bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
 +                                     igu_sb_id, igu_seg_id);
 +      bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
 +                                     igu_sb_id, igu_seg_id);
 +
 +      DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
 +
 +      /* write indices to HW */
 +      bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
 +}
 +
 +static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
 +                                   u16 tx_usec, u16 rx_usec)
 +{
 +      bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
 +                                  false, rx_usec);
 +      bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
 +                                     HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
 +                                     tx_usec);
 +      bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
 +                                     HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
 +                                     tx_usec);
 +      bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
 +                                     HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
 +                                     tx_usec);
 +}
 +
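 +/* Initialize the default (slow path) status block: latch the attention
 + * group signals, program the attention message address, write the SP SB
 + * data to the FW and enable the default SB in the IGU.
 + */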
 +static void bnx2x_init_def_sb(struct bnx2x *bp)
 +{
 +      struct host_sp_status_block *def_sb = bp->def_status_blk;
 +      dma_addr_t mapping = bp->def_status_blk_mapping;
 +      int igu_sp_sb_index;
 +      int igu_seg_id;
 +      int port = BP_PORT(bp);
 +      int func = BP_FUNC(bp);
 +      int reg_offset;
 +      u64 section;
 +      int index;
 +      struct hc_sp_status_block_data sp_sb_data;
 +      memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
 +
 +      if (CHIP_INT_MODE_IS_BC(bp)) {
 +              igu_sp_sb_index = DEF_SB_IGU_ID;
 +              igu_seg_id = HC_SEG_ACCESS_DEF;
 +      } else {
 +              igu_sp_sb_index = bp->igu_dsb_id;
 +              igu_seg_id = IGU_SEG_ACCESS_DEF;
 +      }
 +
 +      /* ATTN */
 +      section = ((u64)mapping) + offsetof(struct host_sp_status_block,
 +                                          atten_status_block);
 +      def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
 +
 +      bp->attn_state = 0;
 +
 +      reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 +                           MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 +      for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
 +              int sindex;
 +              /* take care of sig[0]..sig[4] */
 +              for (sindex = 0; sindex < 4; sindex++)
 +                      bp->attn_group[index].sig[sindex] =
 +                         REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
 +
 +              if (!CHIP_IS_E1x(bp))
 +                      /*
 +                       * enable5 is separate from the rest of the registers,
 +                       * and therefore the address skip is 4
 +                       * and not 16 between the different groups
 +                       */
 +                      bp->attn_group[index].sig[4] = REG_RD(bp,
 +                                      reg_offset + 0x10 + 0x4*index);
 +              else
 +                      bp->attn_group[index].sig[4] = 0;
 +      }
 +
 +      if (bp->common.int_block == INT_BLOCK_HC) {
 +              reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
 +                                   HC_REG_ATTN_MSG0_ADDR_L);
 +
 +              REG_WR(bp, reg_offset, U64_LO(section));
 +              REG_WR(bp, reg_offset + 4, U64_HI(section));
 +      } else if (!CHIP_IS_E1x(bp)) {
 +              REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
 +              REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
 +      }
 +
 +      section = ((u64)mapping) + offsetof(struct host_sp_status_block,
 +                                          sp_sb);
 +
 +      bnx2x_zero_sp_sb(bp);
 +
 +      sp_sb_data.state                = SB_ENABLED;
 +      sp_sb_data.host_sb_addr.lo      = U64_LO(section);
 +      sp_sb_data.host_sb_addr.hi      = U64_HI(section);
 +      sp_sb_data.igu_sb_id            = igu_sp_sb_index;
 +      sp_sb_data.igu_seg_id           = igu_seg_id;
 +      sp_sb_data.p_func.pf_id         = func;
 +      sp_sb_data.p_func.vnic_id       = BP_VN(bp);
 +      sp_sb_data.p_func.vf_id         = 0xff;
 +
 +      bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
 +
 +      bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
 +}
 +
 +void bnx2x_update_coalesce(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for_each_eth_queue(bp, i)
 +              bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
 +                                       bp->tx_ticks, bp->rx_ticks);
 +}
 +
 +static void bnx2x_init_sp_ring(struct bnx2x *bp)
 +{
 +      spin_lock_init(&bp->spq_lock);
 +      atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
 +
 +      bp->spq_prod_idx = 0;
 +      bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
 +      bp->spq_prod_bd = bp->spq;
 +      bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
 +}
 +
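 +/* Initialize the event queue ring: chain the next-page pointers of the EQ
 + * pages, reset the consumer/producer indices and set the initial
 + * eq_spq_left credit.
 + */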
 +static void bnx2x_init_eq_ring(struct bnx2x *bp)
 +{
 +      int i;
 +      for (i = 1; i <= NUM_EQ_PAGES; i++) {
 +              union event_ring_elem *elem =
 +                      &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
 +
 +              elem->next_page.addr.hi =
 +                      cpu_to_le32(U64_HI(bp->eq_mapping +
 +                                 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
 +              elem->next_page.addr.lo =
 +                      cpu_to_le32(U64_LO(bp->eq_mapping +
 +                                 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
 +      }
 +      bp->eq_cons = 0;
 +      bp->eq_prod = NUM_EQ_DESC;
 +      bp->eq_cons_sb = BNX2X_EQ_INDEX;
 +      /* we want a warning message before it gets rough... */
 +      atomic_set(&bp->eq_spq_left,
 +              min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 +}
 +
 +
 +/* called with netif_addr_lock_bh() */
 +void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
 +                       unsigned long rx_mode_flags,
 +                       unsigned long rx_accept_flags,
 +                       unsigned long tx_accept_flags,
 +                       unsigned long ramrod_flags)
 +{
 +      struct bnx2x_rx_mode_ramrod_params ramrod_param;
 +      int rc;
 +
 +      memset(&ramrod_param, 0, sizeof(ramrod_param));
 +
 +      /* Prepare ramrod parameters */
 +      ramrod_param.cid = 0;
 +      ramrod_param.cl_id = cl_id;
 +      ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
 +      ramrod_param.func_id = BP_FUNC(bp);
 +
 +      ramrod_param.pstate = &bp->sp_state;
 +      ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
 +
 +      ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
 +      ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
 +
 +      set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
 +
 +      ramrod_param.ramrod_flags = ramrod_flags;
 +      ramrod_param.rx_mode_flags = rx_mode_flags;
 +
 +      ramrod_param.rx_accept_flags = rx_accept_flags;
 +      ramrod_param.tx_accept_flags = tx_accept_flags;
 +
 +      rc = bnx2x_config_rx_mode(bp, &ramrod_param);
 +      if (rc < 0) {
 +              BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
 +              return;
 +      }
 +}
 +
 +/* called with netif_addr_lock_bh() */
 +void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 +{
 +      unsigned long rx_mode_flags = 0, ramrod_flags = 0;
 +      unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
 +
 +#ifdef BCM_CNIC
 +      if (!NO_FCOE(bp))
 +
 +              /* Configure rx_mode of FCoE Queue */
 +              __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
 +#endif
 +
 +      switch (bp->rx_mode) {
 +      case BNX2X_RX_MODE_NONE:
 +              /*
 +               * 'drop all' supersedes any accept flags that may have been
 +               * passed to the function.
 +               */
 +              break;
 +      case BNX2X_RX_MODE_NORMAL:
 +              __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 +
 +              /* internal switching mode */
 +              __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
 +
 +              break;
 +      case BNX2X_RX_MODE_ALLMULTI:
 +              __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 +
 +              /* internal switching mode */
 +              __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
 +
 +              break;
 +      case BNX2X_RX_MODE_PROMISC:
 +              /* According to the definition of SI mode, an iface in promisc mode
 +               * should receive matched and unmatched (in resolution of port)
 +               * unicast packets.
 +               */
 +              __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 +
 +              /* internal switching mode */
 +              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
 +
 +              if (IS_MF_SI(bp))
 +                      __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
 +              else
 +                      __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
 +
 +              break;
 +      default:
 +              BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
 +              return;
 +      }
 +
 +      if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
 +              __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
 +      }
 +
 +      __set_bit(RAMROD_RX, &ramrod_flags);
 +      __set_bit(RAMROD_TX, &ramrod_flags);
 +
 +      bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
 +                          tx_accept_flags, ramrod_flags);
 +}
 +
 +static void bnx2x_init_internal_common(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      if (IS_MF_SI(bp))
 +              /*
 +               * In switch independent mode, the TSTORM needs to accept
 +               * packets that failed classification, since approximate match
 +               * mac addresses aren't written to NIG LLH
 +               */
 +              REG_WR8(bp, BAR_TSTRORM_INTMEM +
 +                          TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
 +      else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
 +              REG_WR8(bp, BAR_TSTRORM_INTMEM +
 +                          TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
 +
 +      /* Zero this manually as its initialization is
 +         currently missing in the initTool */
 +      for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
 +              REG_WR(bp, BAR_USTRORM_INTMEM +
 +                     USTORM_AGG_DATA_OFFSET + i * 4, 0);
 +      if (!CHIP_IS_E1x(bp)) {
 +              REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
 +                      CHIP_INT_MODE_IS_BC(bp) ?
 +                      HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
 +      }
 +}
 +
 +static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
 +{
 +      switch (load_code) {
 +      case FW_MSG_CODE_DRV_LOAD_COMMON:
 +      case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
 +              bnx2x_init_internal_common(bp);
 +              /* no break */
 +
 +      case FW_MSG_CODE_DRV_LOAD_PORT:
 +              /* nothing to do */
 +              /* no break */
 +
 +      case FW_MSG_CODE_DRV_LOAD_FUNCTION:
 +              /* internal memory per function is
 +                 initialized inside bnx2x_pf_init */
 +              break;
 +
 +      default:
 +              BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
 +              break;
 +      }
 +}
 +
 +static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
 +{
 +      return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
 +}
 +
 +static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
 +{
 +      return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
 +}
 +
 +static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
 +{
 +      if (CHIP_IS_E1x(fp->bp))
 +              return BP_L_ID(fp->bp) + fp->index;
 +      else    /* We want Client ID to be the same as IGU SB ID for 57712 */
 +              return bnx2x_fp_igu_sb_id(fp);
 +}
 +
 +static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
 +{
 +      struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
 +      u8 cos;
 +      unsigned long q_type = 0;
 +      u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
 +
 +      fp->cid = fp_idx;
 +      fp->cl_id = bnx2x_fp_cl_id(fp);
 +      fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
 +      fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
 +      /* qZone id equals to FW (per path) client id */
 +      fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
 +
 +      /* init shortcut */
 +      fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
 +      /* Setup SB indices */
 +      fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
 +
 +      /* Configure Queue State object */
 +      __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 +      __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 +
 +      BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
 +
 +      /* init tx data */
 +      for_each_cos_in_tx_queue(fp, cos) {
 +              bnx2x_init_txdata(bp, &fp->txdata[cos],
 +                                CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
 +                                FP_COS_TO_TXQ(fp, cos),
 +                                BNX2X_TX_SB_INDEX_BASE + cos);
 +              cids[cos] = fp->txdata[cos].cid;
 +      }
 +
 +      bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
 +                           BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
 +                           bnx2x_sp_mapping(bp, q_rdata), q_type);
 +
 +      /**
 +       * Configure classification DBs: Always enable Tx switching
 +       */
 +      bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
 +
 +      DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
 +                                 "cl_id %d  fw_sb %d  igu_sb %d\n",
 +                 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
 +                 fp->igu_sb_id);
 +      bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
 +                    fp->fw_sb_id, fp->igu_sb_id);
 +
 +      bnx2x_update_fpsb_idx(fp);
 +}
 +
 +void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 +{
 +      int i;
 +
 +      for_each_eth_queue(bp, i)
 +              bnx2x_init_eth_fp(bp, i);
 +#ifdef BCM_CNIC
 +      if (!NO_FCOE(bp))
 +              bnx2x_init_fcoe_fp(bp);
 +
 +      bnx2x_init_sb(bp, bp->cnic_sb_mapping,
 +                    BNX2X_VF_ID_INVALID, false,
 +                    bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
 +
 +#endif
 +
 +      /* Initialize MOD_ABS interrupts */
 +      bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
 +                             bp->common.shmem_base, bp->common.shmem2_base,
 +                             BP_PORT(bp));
 +      /* ensure status block indices were read */
 +      rmb();
 +
 +      bnx2x_init_def_sb(bp);
 +      bnx2x_update_dsb_idx(bp);
 +      bnx2x_init_rx_rings(bp);
 +      bnx2x_init_tx_rings(bp);
 +      bnx2x_init_sp_ring(bp);
 +      bnx2x_init_eq_ring(bp);
 +      bnx2x_init_internal(bp, load_code);
 +      bnx2x_pf_init(bp);
 +      bnx2x_stats_init(bp);
 +
 +      /* flush all before enabling interrupts */
 +      mb();
 +      mmiowb();
 +
 +      bnx2x_int_enable(bp);
 +
 +      /* Check for SPIO5 */
 +      bnx2x_attn_int_deasserted0(bp,
 +              REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
 +                                 AEU_INPUTS_ATTN_BITS_SPIO5);
 +}
 +
 +/* end of nic init */
 +
 +/*
 + * gzip service functions
 + */
 +
 +static int bnx2x_gunzip_init(struct bnx2x *bp)
 +{
 +      bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
 +                                          &bp->gunzip_mapping, GFP_KERNEL);
 +      if (bp->gunzip_buf  == NULL)
 +              goto gunzip_nomem1;
 +
 +      bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
 +      if (bp->strm  == NULL)
 +              goto gunzip_nomem2;
 +
 +      bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
 +      if (bp->strm->workspace == NULL)
 +              goto gunzip_nomem3;
 +
 +      return 0;
 +
 +gunzip_nomem3:
 +      kfree(bp->strm);
 +      bp->strm = NULL;
 +
 +gunzip_nomem2:
 +      dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
 +                        bp->gunzip_mapping);
 +      bp->gunzip_buf = NULL;
 +
 +gunzip_nomem1:
 +      netdev_err(bp->dev, "Cannot allocate firmware buffer for"
 +             " un-compression\n");
 +      return -ENOMEM;
 +}
 +
 +static void bnx2x_gunzip_end(struct bnx2x *bp)
 +{
 +      if (bp->strm) {
 +              vfree(bp->strm->workspace);
 +              kfree(bp->strm);
 +              bp->strm = NULL;
 +      }
 +
 +      if (bp->gunzip_buf) {
 +              dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
 +                                bp->gunzip_mapping);
 +              bp->gunzip_buf = NULL;
 +      }
 +}
 +
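 +/* Decompress a gzipped firmware blob into bp->gunzip_buf: validate the gzip
 + * header, skip the optional file name field, inflate the payload and store
 + * the output length (in dwords) in bp->gunzip_outlen.
 + */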
 +static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
 +{
 +      int n, rc;
 +
 +      /* check gzip header */
 +      if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
 +              BNX2X_ERR("Bad gzip header\n");
 +              return -EINVAL;
 +      }
 +
 +      n = 10;
 +
 +#define FNAME                         0x8
 +
 +      if (zbuf[3] & FNAME)
 +              while ((zbuf[n++] != 0) && (n < len));
 +
 +      bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
 +      bp->strm->avail_in = len - n;
 +      bp->strm->next_out = bp->gunzip_buf;
 +      bp->strm->avail_out = FW_BUF_SIZE;
 +
 +      rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
 +      if (rc != Z_OK)
 +              return rc;
 +
 +      rc = zlib_inflate(bp->strm, Z_FINISH);
 +      if ((rc != Z_OK) && (rc != Z_STREAM_END))
 +              netdev_err(bp->dev, "Firmware decompression error: %s\n",
 +                         bp->strm->msg);
 +
 +      bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
 +      if (bp->gunzip_outlen & 0x3)
 +              netdev_err(bp->dev, "Firmware decompression error:"
 +                                  " gunzip_outlen (%d) not aligned\n",
 +                              bp->gunzip_outlen);
 +      bp->gunzip_outlen >>= 2;
 +
 +      zlib_inflateEnd(bp->strm);
 +
 +      if (rc == Z_STREAM_END)
 +              return 0;
 +
 +      return rc;
 +}
 +
 +/* nic load/unload */
 +
 +/*
 + * General service functions
 + */
 +
 +/* send a NIG loopback debug packet */
 +static void bnx2x_lb_pckt(struct bnx2x *bp)
 +{
 +      u32 wb_write[3];
 +
 +      /* Ethernet source and destination addresses */
 +      wb_write[0] = 0x55555555;
 +      wb_write[1] = 0x55555555;
 +      wb_write[2] = 0x20;             /* SOP */
 +      REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
 +
 +      /* NON-IP protocol */
 +      wb_write[0] = 0x09000000;
 +      wb_write[1] = 0x55555555;
 +      wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
 +      REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
 +}
 +
 +/* Some of the internal memories
 + * are not directly readable from the driver.
 + * To test them we send debug packets.
 + */
 +static int bnx2x_int_mem_test(struct bnx2x *bp)
 +{
 +      int factor;
 +      int count, i;
 +      u32 val = 0;
 +
 +      if (CHIP_REV_IS_FPGA(bp))
 +              factor = 120;
 +      else if (CHIP_REV_IS_EMUL(bp))
 +              factor = 200;
 +      else
 +              factor = 1;
 +
 +      /* Disable inputs of parser neighbor blocks */
 +      REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
 +      REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
 +      REG_WR(bp, CFC_REG_DEBUG0, 0x1);
 +      REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 +
 +      /*  Write 0 to parser credits for CFC search request */
 +      REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
 +
 +      /* send Ethernet packet */
 +      bnx2x_lb_pckt(bp);
 +
 +      /* TODO: do I reset the NIG statistics? */
 +      /* Wait until NIG register shows 1 packet of size 0x10 */
 +      count = 1000 * factor;
 +      while (count) {
 +
 +              bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
 +              val = *bnx2x_sp(bp, wb_data[0]);
 +              if (val == 0x10)
 +                      break;
 +
 +              msleep(10);
 +              count--;
 +      }
 +      if (val != 0x10) {
 +              BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
 +              return -1;
 +      }
 +
 +      /* Wait until PRS register shows 1 packet */
 +      count = 1000 * factor;
 +      while (count) {
 +              val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
 +              if (val == 1)
 +                      break;
 +
 +              msleep(10);
 +              count--;
 +      }
 +      if (val != 0x1) {
 +              BNX2X_ERR("PRS timeout val = 0x%x\n", val);
 +              return -2;
 +      }
 +
 +      /* Reset and init BRB, PRS */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
 +      msleep(50);
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
 +      msleep(50);
 +      bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
 +
 +      DP(NETIF_MSG_HW, "part2\n");
 +
 +      /* Disable inputs of parser neighbor blocks */
 +      REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
 +      REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
 +      REG_WR(bp, CFC_REG_DEBUG0, 0x1);
 +      REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 +
 +      /* Write 0 to parser credits for CFC search request */
 +      REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
 +
 +      /* send 10 Ethernet packets */
 +      for (i = 0; i < 10; i++)
 +              bnx2x_lb_pckt(bp);
 +
 +      /* Wait until NIG register shows 10 + 1
 +         packets of size 11*0x10 = 0xb0 */
 +      count = 1000 * factor;
 +      while (count) {
 +
 +              bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
 +              val = *bnx2x_sp(bp, wb_data[0]);
 +              if (val == 0xb0)
 +                      break;
 +
 +              msleep(10);
 +              count--;
 +      }
 +      if (val != 0xb0) {
 +              BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
 +              return -3;
 +      }
 +
 +      /* Wait until PRS register shows 2 packets */
 +      val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
 +      if (val != 2)
 +              BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
 +
 +      /* Write 1 to parser credits for CFC search request */
 +      REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
 +
 +      /* Wait until PRS register shows 3 packets */
 +      msleep(10 * factor);
 +      /* Wait until NIG register shows 1 packet of size 0x10 */
 +      val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
 +      if (val != 3)
 +              BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
 +
 +      /* clear NIG EOP FIFO */
 +      for (i = 0; i < 11; i++)
 +              REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
 +      val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
 +      if (val != 1) {
 +              BNX2X_ERR("clear of NIG failed\n");
 +              return -4;
 +      }
 +
 +      /* Reset and init BRB, PRS, NIG */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
 +      msleep(50);
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
 +      msleep(50);
 +      bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
 +#ifndef BCM_CNIC
 +      /* set NIC mode */
 +      REG_WR(bp, PRS_REG_NIC_MODE, 1);
 +#endif
 +
 +      /* Enable inputs of parser neighbor blocks */
 +      REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
 +      REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
 +      REG_WR(bp, CFC_REG_DEBUG0, 0x0);
 +      REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
 +
 +      DP(NETIF_MSG_HW, "done\n");
 +
 +      return 0; /* OK */
 +}
 +
 +static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
 +{
 +      REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
 +      if (!CHIP_IS_E1x(bp))
 +              REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
 +      else
 +              REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
 +      REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
 +      REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
 +      /*
 +       * mask read length error interrupts in brb for parser
 +       * (parsing unit and 'checksum and crc' unit)
 +       * these errors are legal (PU reads fixed length and CAC can cause
 +       * read length error on truncated packets)
 +       */
 +      REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
 +      REG_WR(bp, QM_REG_QM_INT_MASK, 0);
 +      REG_WR(bp, TM_REG_TM_INT_MASK, 0);
 +      REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
 +      REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
 +      REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
 +/*    REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
 +/*    REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
 +      REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
 +      REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
 +      REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
 +/*    REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
 +/*    REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
 +      REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
 +      REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
 +      REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
 +      REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
 +/*    REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
 +/*    REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
 +
 +      if (CHIP_REV_IS_FPGA(bp))
 +              REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
 +      else if (!CHIP_IS_E1x(bp))
 +              REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
 +                         (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
 +                              | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
 +                              | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
 +                              | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
 +                              | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
 +      else
 +              REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
 +      REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
 +      REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
 +      REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
 +/*    REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
 +
 +      if (!CHIP_IS_E1x(bp))
 +              /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
 +              REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
 +
 +      REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
 +      REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
 +/*    REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
 +      REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
 +}
 +
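 +/* Assert the common reset: clear the relevant bits in reset registers 1
 + * and 2 (including the MSTAT blocks on E3 chips).
 + */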
 +static void bnx2x_reset_common(struct bnx2x *bp)
 +{
 +      u32 val = 0x1400;
 +
 +      /* reset_common */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 +             0xd3ffff7f);
 +
 +      if (CHIP_IS_E3(bp)) {
 +              val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
 +              val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
 +      }
 +
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
 +}
 +
 +static void bnx2x_setup_dmae(struct bnx2x *bp)
 +{
 +      bp->dmae_ready = 0;
 +      spin_lock_init(&bp->dmae_lock);
 +}
 +
 +static void bnx2x_init_pxp(struct bnx2x *bp)
 +{
 +      u16 devctl;
 +      int r_order, w_order;
 +
 +      pci_read_config_word(bp->pdev,
 +                           pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
 +      DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
 +      w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
 +      if (bp->mrrs == -1)
 +              r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
 +      else {
 +              DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
 +              r_order = bp->mrrs;
 +      }
 +
 +      bnx2x_init_pxp_arb(bp, r_order, w_order);
 +}
 +
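 +/* Check the shared HW configuration (and, if configured per PHY type, each
 + * port's PHY) to decide whether fan failure detection is required; if so,
 + * set up SPIO5 as the fan failure input and enable its interrupt.
 + */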
 +static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
 +{
 +      int is_required;
 +      u32 val;
 +      int port;
 +
 +      if (BP_NOMCP(bp))
 +              return;
 +
 +      is_required = 0;
 +      val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
 +            SHARED_HW_CFG_FAN_FAILURE_MASK;
 +
 +      if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
 +              is_required = 1;
 +
 +      /*
 +       * The fan failure mechanism is usually related to the PHY type since
 +       * the power consumption of the board is affected by the PHY. Currently,
 +       * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
 +       */
 +      else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
 +              for (port = PORT_0; port < PORT_MAX; port++) {
 +                      is_required |=
 +                              bnx2x_fan_failure_det_req(
 +                                      bp,
 +                                      bp->common.shmem_base,
 +                                      bp->common.shmem2_base,
 +                                      port);
 +              }
 +
 +      DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
 +
 +      if (is_required == 0)
 +              return;
 +
 +      /* Fan failure is indicated by SPIO 5 */
 +      bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
 +                     MISC_REGISTERS_SPIO_INPUT_HI_Z);
 +
 +      /* set to active low mode */
 +      val = REG_RD(bp, MISC_REG_SPIO_INT);
 +      val |= ((1 << MISC_REGISTERS_SPIO_5) <<
 +                                      MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
 +      REG_WR(bp, MISC_REG_SPIO_INT, val);
 +
 +      /* enable interrupt to signal the IGU */
 +      val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
 +      val |= (1 << MISC_REGISTERS_SPIO_5);
 +      REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
 +}
 +
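 +/* Program this function's PGL pretend register so that subsequent accesses
 + * are issued on behalf of 'pretend_func_num' (no-op on E1, and on E1H for
 + * function numbers beyond E1H_FUNC_MAX).
 + */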
 +static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
 +{
 +      u32 offset = 0;
 +
 +      if (CHIP_IS_E1(bp))
 +              return;
 +      if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
 +              return;
 +
 +      switch (BP_ABS_FUNC(bp)) {
 +      case 0:
 +              offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
 +              break;
 +      case 1:
 +              offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
 +              break;
 +      case 2:
 +              offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
 +              break;
 +      case 3:
 +              offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
 +              break;
 +      case 4:
 +              offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
 +              break;
 +      case 5:
 +              offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
 +              break;
 +      case 6:
 +              offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
 +              break;
 +      case 7:
 +              offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
 +              break;
 +      default:
 +              return;
 +      }
 +
 +      REG_WR(bp, offset, pretend_func_num);
 +      REG_RD(bp, offset);
 +      DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
 +}
 +
 +void bnx2x_pf_disable(struct bnx2x *bp)
 +{
 +      u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 +      val &= ~IGU_PF_CONF_FUNC_EN;
 +
 +      REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 +      REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 +      REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
 +}
 +
 +static inline void bnx2x__common_init_phy(struct bnx2x *bp)
 +{
 +      u32 shmem_base[2], shmem2_base[2];
 +      shmem_base[0] =  bp->common.shmem_base;
 +      shmem2_base[0] = bp->common.shmem2_base;
 +      if (!CHIP_IS_E1x(bp)) {
 +              shmem_base[1] =
 +                      SHMEM2_RD(bp, other_shmem_base_addr);
 +              shmem2_base[1] =
 +                      SHMEM2_RD(bp, other_shmem2_base_addr);
 +      }
 +      bnx2x_acquire_phy_lock(bp);
 +      bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
 +                            bp->common.chip_id);
 +      bnx2x_release_phy_lock(bp);
 +}
 +
 +/**
 + * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 + *
 + * @bp:               driver handle
 + */
 +static int bnx2x_init_hw_common(struct bnx2x *bp)
 +{
 +      u32 val;
 +
 +      DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
 +
 +      /*
 +       * take the UNDI lock to protect undi_unload flow from accessing
 +       * registers while we're resetting the chip
 +       */
-       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
++      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +
 +      bnx2x_reset_common(bp);
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
 +
 +      val = 0xfffc;
 +      if (CHIP_IS_E3(bp)) {
 +              val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
 +              val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
 +      }
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
 +
-                               dsb_idx = BP_E1HVN(bp);
++      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +
 +      bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              u8 abs_func_id;
 +
 +              /*
 +               * In 4-port or 2-port mode we need to turn off master-enable
 +               * for everyone; after that, turn it back on for self. So we
 +               * disregard multi-function or not, and always disable it for
 +               * all functions on the given path; this means 0,2,4,6 for
 +               * path 0 and 1,3,5,7 for path 1.
 +               */
 +              for (abs_func_id = BP_PATH(bp);
 +                   abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
 +                      if (abs_func_id == BP_ABS_FUNC(bp)) {
 +                              REG_WR(bp,
 +                                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
 +                                  1);
 +                              continue;
 +                      }
 +
 +                      bnx2x_pretend_func(bp, abs_func_id);
 +                      /* clear pf enable */
 +                      bnx2x_pf_disable(bp);
 +                      bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 +              }
 +      }
 +
 +      bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
 +      if (CHIP_IS_E1(bp)) {
 +              /* enable HW interrupt from PXP on USDM overflow
 +                 bit 16 on INT_MASK_0 */
 +              REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
 +      }
 +
 +      bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
 +      bnx2x_init_pxp(bp);
 +
 +#ifdef __BIG_ENDIAN
 +      REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
 +      REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
 +      REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
 +      REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
 +      REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
 +      /* make sure this value is 0 */
 +      REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
 +
 +/*    REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
 +      REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
 +      REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
 +      REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
 +      REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
 +#endif
 +
 +      bnx2x_ilt_init_page_size(bp, INITOP_SET);
 +
 +      if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
 +              REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
 +
 +      /* let the HW do its magic ... */
 +      msleep(100);
 +      /* finish PXP init */
 +      val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
 +      if (val != 1) {
 +              BNX2X_ERR("PXP2 CFG failed\n");
 +              return -EBUSY;
 +      }
 +      val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
 +      if (val != 1) {
 +              BNX2X_ERR("PXP2 RD_INIT failed\n");
 +              return -EBUSY;
 +      }
 +
 +      /* Timers bug workaround E2 only. We need to set the entire ILT to
 +       * have entries with value "0" and valid bit on.
 +       * This needs to be done by the first PF that is loaded in a path
 +       * (i.e. common phase)
 +       */
 +      if (!CHIP_IS_E1x(bp)) {
 +/* In E2 there is a bug in the timers block that can cause function 6 / 7
 + * (i.e. vnic3) to start even if it is marked as "scan-off".
 + * This occurs when a different function (func2,3) is being marked
 + * as "scan-off". Real-life scenario for example: if a driver is being
 + * load-unloaded while func6,7 are down. This will cause the timer to access
 + * the ilt, translate to a logical address and send a request to read/write.
 + * Since the ilt for the function that is down is not valid, this will cause
 + * a translation error which is unrecoverable.
 + * The Workaround is intended to make sure that when this happens nothing fatal
 + * will occur. The workaround:
 + *    1.  First PF driver which loads on a path will:
 + *            a.  After taking the chip out of reset, by using pretend,
 + *                it will write "0" to the following registers of
 + *                the other vnics.
 + *                REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 + *                REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
 + *                REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
 + *                And for itself it will write '1' to
 + *                PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
 + *                dmae-operations (writing to pram for example.)
 + *                note: can be done for only function 6,7 but cleaner this
 + *                      way.
 + *            b.  Write zero+valid to the entire ILT.
 + *            c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
 + *                VNIC3 (of that port). The range allocated will be the
 + *                entire ILT. This is needed to prevent an ILT range error.
 + *    2.  Any PF driver load flow:
 + *            a.  ILT update with the physical addresses of the allocated
 + *                logical pages.
 + *            b.  Wait 20msec. - note that this timeout is needed to make
 + *                sure there are no requests in one of the PXP internal
 + *                queues with "old" ILT addresses.
 + *            c.  PF enable in the PGLC.
 + *            d.  Clear the was_error of the PF in the PGLC. (could have
 + *                occurred while the driver was down)
 + *            e.  PF enable in the CFC (WEAK + STRONG)
 + *            f.  Timers scan enable
 + *    3.  PF driver unload flow:
 + *            a.  Clear the Timers scan_en.
 + *            b.  Polling for scan_on=0 for that PF.
 + *            c.  Clear the PF enable bit in the PXP.
 + *            d.  Clear the PF enable in the CFC (WEAK + STRONG)
 + *            e.  Write zero+valid to all ILT entries (The valid bit must
 + *                stay set)
 + *            f.  If this is VNIC 3 of a port then also init
 + *                first_timers_ilt_entry to zero and last_timers_ilt_entry
 + *                to the last entry in the ILT.
 + *
 + *    Notes:
 + *    Currently the PF error in the PGLC is non-recoverable.
 + *    In the future there will be a recovery routine for this error.
 + *    Currently attention is masked.
 + *    Having an MCP lock on the load/unload process does not guarantee that
 + *    there is no Timer disable during Func6/7 enable. This is because the
 + *    Timers scan is currently being cleared by the MCP on FLR.
 + *    Step 2.d can be done only for PF6/7, and the driver can also check if
 + *    there is an error before clearing it. But the flow above is simpler and
 + *    more general.
 + *    All ILT entries are written by zero+valid and not just PF6/7
 + *    ILT entries since in the future the ILT entries allocation for
 + *    PF-s might be dynamic.
 + */
 +              struct ilt_client_info ilt_cli;
 +              struct bnx2x_ilt ilt;
 +              memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
 +              memset(&ilt, 0, sizeof(struct bnx2x_ilt));
 +
 +              /* initialize dummy TM client */
 +              ilt_cli.start = 0;
 +              ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
 +              ilt_cli.client_num = ILT_CLIENT_TM;
 +
 +              /* Step 1: set zeroes to all ilt page entries with valid bit on
 +               * Step 2: set the timers first/last ilt entry to point
 +               * to the entire range to prevent an ILT range error for the 3rd/4th
 +               * vnic (this code assumes existence of the vnic)
 +               *
 +               * both steps performed by call to bnx2x_ilt_client_init_op()
 +               * with dummy TM client
 +               *
 +               * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
 +               * and its sibling are split registers
 +               */
 +              bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
 +              bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
 +              bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 +
 +              REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
 +              REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
 +              REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
 +      }
 +
 +
 +      REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
 +      REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
 +                              (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
 +              bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
 +
 +              bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
 +
 +              /* let the HW do its magic ... */
 +              do {
 +                      msleep(200);
 +                      val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
 +              } while (factor-- && (val != 1));
 +
 +              if (val != 1) {
 +                      BNX2X_ERR("ATC_INIT failed\n");
 +                      return -EBUSY;
 +              }
 +      }
 +
 +      bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
 +
 +      /* clean the DMAE memory */
 +      bp->dmae_ready = 1;
 +      bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
 +
 +      bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
 +
 +      bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
 +
 +      bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
 +
 +      bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
 +
 +      bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
 +      bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
 +      bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
 +      bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
 +
 +      bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
 +
 +
 +      /* QM queues pointers table */
 +      bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
 +
 +      /* soft reset pulse */
 +      REG_WR(bp, QM_REG_SOFT_RESET, 1);
 +      REG_WR(bp, QM_REG_SOFT_RESET, 0);
 +
 +#ifdef BCM_CNIC
 +      bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
 +#endif
 +
 +      bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
 +      REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
 +      if (!CHIP_REV_IS_SLOW(bp))
 +              /* enable hw interrupt from doorbell Q */
 +              REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
 +
 +      bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
 +
 +      bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
 +      REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
 +
 +      if (!CHIP_IS_E1(bp))
 +              REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
 +
 +      if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
 +              /* Bit-map indicating which L2 hdrs may appear
 +               * after the basic Ethernet header
 +               */
 +              REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
 +                     bp->path_has_ovlan ? 7 : 6);
 +
 +      bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              /* reset VFC memories */
 +              REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
 +                         VFC_MEMORIES_RST_REG_CAM_RST |
 +                         VFC_MEMORIES_RST_REG_RAM_RST);
 +              REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
 +                         VFC_MEMORIES_RST_REG_CAM_RST |
 +                         VFC_MEMORIES_RST_REG_RAM_RST);
 +
 +              msleep(20);
 +      }
 +
 +      bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
 +
 +      /* sync semi rtc */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 +             0x80000000);
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
 +             0x80000000);
 +
 +      bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
 +
 +      if (!CHIP_IS_E1x(bp))
 +              REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
 +                     bp->path_has_ovlan ? 7 : 6);
 +
 +      REG_WR(bp, SRC_REG_SOFT_RST, 1);
 +
 +      bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
 +
 +#ifdef BCM_CNIC
 +      REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
 +      REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
 +      REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
 +      REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
 +      REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
 +      REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
 +      REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
 +      REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
 +      REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
 +      REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
 +#endif
 +      REG_WR(bp, SRC_REG_SOFT_RST, 0);
 +
 +      if (sizeof(union cdu_context) != 1024)
 +              /* we currently assume that a context is 1024 bytes */
 +              dev_alert(&bp->pdev->dev, "please adjust the size "
 +                                        "of cdu_context(%ld)\n",
 +                       (long)sizeof(union cdu_context));
 +
 +      bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
 +      val = (4 << 24) + (0 << 12) + 1024;
 +      REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
 +
 +      bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
 +      REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
 +      /* enable context validation interrupt from CFC */
 +      REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
 +
 +      /* set the thresholds to prevent CFC/CDU race */
 +      REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
 +
 +      bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
 +
 +      if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
 +              REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
 +
 +      bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
 +      bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
 +
 +      /* Reset PCIE errors for debug */
 +      REG_WR(bp, 0x2814, 0xffffffff);
 +      REG_WR(bp, 0x3820, 0xffffffff);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
 +                         (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
 +                              PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
 +              REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
 +                         (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
 +                              PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
 +                              PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
 +              REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
 +                         (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
 +                              PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
 +                              PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
 +      }
 +
 +      bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
 +      if (!CHIP_IS_E1(bp)) {
 +              /* in E3 this is done in the per-port section */
 +              if (!CHIP_IS_E3(bp))
 +                      REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
 +      }
 +      if (CHIP_IS_E1H(bp))
 +              /* not applicable for E2 (and above ...) */
 +              REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
 +
 +      if (CHIP_REV_IS_SLOW(bp))
 +              msleep(200);
 +
 +      /* finish CFC init */
 +      val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
 +      if (val != 1) {
 +              BNX2X_ERR("CFC LL_INIT failed\n");
 +              return -EBUSY;
 +      }
 +      val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
 +      if (val != 1) {
 +              BNX2X_ERR("CFC AC_INIT failed\n");
 +              return -EBUSY;
 +      }
 +      val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
 +      if (val != 1) {
 +              BNX2X_ERR("CFC CAM_INIT failed\n");
 +              return -EBUSY;
 +      }
 +      REG_WR(bp, CFC_REG_DEBUG0, 0);
 +
 +      if (CHIP_IS_E1(bp)) {
 +              /* read NIG statistic to see if this is
 +               * our first up since powerup */
 +              bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
 +              val = *bnx2x_sp(bp, wb_data[0]);
 +
 +              /* do internal memory self test */
 +              if ((val == 0) && bnx2x_int_mem_test(bp)) {
 +                      BNX2X_ERR("internal mem self test failed\n");
 +                      return -EBUSY;
 +              }
 +      }
 +
 +      bnx2x_setup_fan_failure_detection(bp);
 +
 +      /* clear PXP2 attentions */
 +      REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
 +
 +      bnx2x_enable_blocks_attention(bp);
 +      bnx2x_enable_blocks_parity(bp);
 +
 +      if (!BP_NOMCP(bp)) {
 +              if (CHIP_IS_E1x(bp))
 +                      bnx2x__common_init_phy(bp);
 +      } else
 +              BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 +
 +      return 0;
 +}
 +
 +/**
 + * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 + *
 + * @bp:               driver handle
 + */
 +static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
 +{
 +      int rc = bnx2x_init_hw_common(bp);
 +
 +      if (rc)
 +              return rc;
 +
 +      /* In E2 2-PORT mode, same ext phy is used for the two paths */
 +      if (!BP_NOMCP(bp))
 +              bnx2x__common_init_phy(bp);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_init_hw_port(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
 +      u32 low, high;
 +      u32 val;
 +
 +      bnx2x__link_reset(bp);
 +
 +      DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
 +
 +      REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 +
 +      bnx2x_init_block(bp, BLOCK_MISC, init_phase);
 +      bnx2x_init_block(bp, BLOCK_PXP, init_phase);
 +      bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
 +
 +      /* Timers bug workaround: the pf_master bit in pglue is disabled at the
 +       * common phase, so we need to enable it here before any dmae accesses
 +       * are attempted. Therefore we manually add the enable-master to the
 +       * port phase (it also happens in the function phase)
 +       */
 +      if (!CHIP_IS_E1x(bp))
 +              REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
 +
 +      bnx2x_init_block(bp, BLOCK_ATC, init_phase);
 +      bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
 +      bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 +      bnx2x_init_block(bp, BLOCK_QM, init_phase);
 +
 +      bnx2x_init_block(bp, BLOCK_TCM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_UCM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_CCM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_XCM, init_phase);
 +
 +      /* QM cid (connection) count */
 +      bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
 +
 +#ifdef BCM_CNIC
 +      bnx2x_init_block(bp, BLOCK_TM, init_phase);
 +      REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
 +      REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
 +#endif
 +
 +      bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 +
 +      if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
 +              bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
 +
 +              if (IS_MF(bp))
 +                      low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
 +              else if (bp->dev->mtu > 4096) {
 +                      if (bp->flags & ONE_PORT_FLAG)
 +                              low = 160;
 +                      else {
 +                              val = bp->dev->mtu;
 +                              /* (24*1024 + val*4)/256 */
 +                              low = 96 + (val/64) +
 +                                              ((val % 64) ? 1 : 0);
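 +                              /* i.e. DIV_ROUND_UP(24*1024 + val*4, 256) */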
 +                      }
 +              } else
 +                      low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
 +              high = low + 56;        /* 14*1024/256 */
 +              REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
 +              REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
 +      }
 +
 +      if (CHIP_MODE_IS_4_PORT(bp))
 +              REG_WR(bp, (BP_PORT(bp) ?
 +                          BRB1_REG_MAC_GUARANTIED_1 :
 +                          BRB1_REG_MAC_GUARANTIED_0), 40);
 +
 +
 +      bnx2x_init_block(bp, BLOCK_PRS, init_phase);
 +      if (CHIP_IS_E3B0(bp))
 +              /* Ovlan exists only if we are in multi-function +
 +               * switch-dependent mode; in switch-independent mode there
 +               * are no ovlan headers
 +               */
 +              REG_WR(bp, BP_PORT(bp) ?
 +                     PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
 +                     PRS_REG_HDRS_AFTER_BASIC_PORT_0,
 +                     (bp->path_has_ovlan ? 7 : 6));
 +
 +      bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_USDM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
 +
 +      bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_USEM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
 +
 +      bnx2x_init_block(bp, BLOCK_UPB, init_phase);
 +      bnx2x_init_block(bp, BLOCK_XPB, init_phase);
 +
 +      bnx2x_init_block(bp, BLOCK_PBF, init_phase);
 +
 +      if (CHIP_IS_E1x(bp)) {
 +              /* configure PBF to work without PAUSE for MTU 9000 */
 +              REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
 +
 +              /* update threshold */
 +              REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
 +              /* update init credit */
 +              REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
 +
 +              /* probe changes */
 +              REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
 +              udelay(50);
 +              REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
 +      }
 +
 +#ifdef BCM_CNIC
 +      bnx2x_init_block(bp, BLOCK_SRC, init_phase);
 +#endif
 +      bnx2x_init_block(bp, BLOCK_CDU, init_phase);
 +      bnx2x_init_block(bp, BLOCK_CFC, init_phase);
 +
 +      if (CHIP_IS_E1(bp)) {
 +              REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 +              REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 +      }
 +      bnx2x_init_block(bp, BLOCK_HC, init_phase);
 +
 +      bnx2x_init_block(bp, BLOCK_IGU, init_phase);
 +
 +      bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
 +      /* init aeu_mask_attn_func_0/1:
 +       *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
 +       *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
 +       *             bits 4-7 are used for "per vn group attention" */
 +      val = IS_MF(bp) ? 0xF7 : 0x7;
 +      /* Enable DCBX attention for all but E1 */
 +      val |= CHIP_IS_E1(bp) ? 0 : 0x10;
 +      REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
 +
 +      bnx2x_init_block(bp, BLOCK_NIG, init_phase);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              /* Bit-map indicating which L2 hdrs may appear after the
 +               * basic Ethernet header
 +               */
 +              REG_WR(bp, BP_PORT(bp) ?
 +                         NIG_REG_P1_HDRS_AFTER_BASIC :
 +                         NIG_REG_P0_HDRS_AFTER_BASIC,
 +                         IS_MF_SD(bp) ? 7 : 6);
 +
 +              if (CHIP_IS_E3(bp))
 +                      REG_WR(bp, BP_PORT(bp) ?
 +                                 NIG_REG_LLH1_MF_MODE :
 +                                 NIG_REG_LLH_MF_MODE, IS_MF(bp));
 +      }
 +      if (!CHIP_IS_E3(bp))
 +              REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
 +
 +      if (!CHIP_IS_E1(bp)) {
 +              /* 0x2 disable mf_ov, 0x1 enable */
 +              REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
 +                     (IS_MF_SD(bp) ? 0x1 : 0x2));
 +
 +              if (!CHIP_IS_E1x(bp)) {
 +                      val = 0;
 +                      switch (bp->mf_mode) {
 +                      case MULTI_FUNCTION_SD:
 +                              val = 1;
 +                              break;
 +                      case MULTI_FUNCTION_SI:
 +                              val = 2;
 +                              break;
 +                      }
 +
 +                      REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
 +                                                NIG_REG_LLH0_CLS_TYPE), val);
 +              }
 +              {
 +                      REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
 +                      REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
 +                      REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
 +              }
 +      }
 +
 +
 +      /* If SPIO5 is set to generate interrupts, enable it for this port */
 +      val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
 +      if (val & (1 << MISC_REGISTERS_SPIO_5)) {
 +              u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 +                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 +              val = REG_RD(bp, reg_addr);
 +              val |= AEU_INPUTS_ATTN_BITS_SPIO5;
 +              REG_WR(bp, reg_addr, val);
 +      }
 +
 +      return 0;
 +}
 +
 +static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
 +{
 +      int reg;
 +
 +      if (CHIP_IS_E1(bp))
 +              reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
 +      else
 +              reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
 +
 +      bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
 +}
 +
 +static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
 +{
 +      bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
 +}
 +
 +static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
 +{
 +      u32 i, base = FUNC_ILT_BASE(func);
 +      for (i = base; i < base + ILT_PER_FUNC; i++)
 +              bnx2x_ilt_wr(bp, i, 0);
 +}
 +
 +static int bnx2x_init_hw_func(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      int func = BP_FUNC(bp);
 +      int init_phase = PHASE_PF0 + func;
 +      struct bnx2x_ilt *ilt = BP_ILT(bp);
 +      u16 cdu_ilt_start;
 +      u32 addr, val;
 +      u32 main_mem_base, main_mem_size, main_mem_prty_clr;
 +      int i, main_mem_width;
 +
 +      DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
 +
 +      /* FLR cleanup - hmmm */
 +      if (!CHIP_IS_E1x(bp))
 +              bnx2x_pf_flr_clnup(bp);
 +
 +      /* set MSI reconfigure capability */
 +      if (bp->common.int_block == INT_BLOCK_HC) {
 +              addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
 +              val = REG_RD(bp, addr);
 +              val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
 +              REG_WR(bp, addr, val);
 +      }
 +
 +      bnx2x_init_block(bp, BLOCK_PXP, init_phase);
 +      bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
 +
 +      ilt = BP_ILT(bp);
 +      cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
 +
 +      for (i = 0; i < L2_ILT_LINES(bp); i++) {
 +              ilt->lines[cdu_ilt_start + i].page =
 +                      bp->context.vcxt + (ILT_PAGE_CIDS * i);
 +              ilt->lines[cdu_ilt_start + i].page_mapping =
 +                      bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
 +              /* cdu ilt pages are allocated manually so there's
 +               * no need to set the size */
 +      }
 +      bnx2x_ilt_init_op(bp, INITOP_SET);
 +
 +#ifdef BCM_CNIC
 +      bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
 +
 +      /* T1 hash bits value determines the T1 number of entries */
 +      REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
 +#endif
 +
 +#ifndef BCM_CNIC
 +      /* set NIC mode */
 +      REG_WR(bp, PRS_REG_NIC_MODE, 1);
 +#endif  /* BCM_CNIC */
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              u32 pf_conf = IGU_PF_CONF_FUNC_EN;
 +
 +              /* Turn on a single ISR mode in IGU if driver is going to use
 +               * INT#x or MSI
 +               */
 +              if (!(bp->flags & USING_MSIX_FLAG))
 +                      pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
 +              /*
 +               * Timers bug workaround: function init part.
 +               * We need to wait 20msec after initializing the ILT
 +               * to make sure there are no requests in
 +               * one of the PXP internal queues with "old" ILT addresses
 +               */
 +              msleep(20);
 +              /*
 +               * Master enable - Due to WB DMAE writes performed before this
 +               * register is re-initialized as part of the regular function
 +               * init
 +               */
 +              REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
 +              /* Enable the function in IGU */
 +              REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
 +      }
 +
 +      bp->dmae_ready = 1;
 +
 +      bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 +
 +      if (!CHIP_IS_E1x(bp))
 +              REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
 +
 +      bnx2x_init_block(bp, BLOCK_ATC, init_phase);
 +      bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
 +      bnx2x_init_block(bp, BLOCK_NIG, init_phase);
 +      bnx2x_init_block(bp, BLOCK_SRC, init_phase);
 +      bnx2x_init_block(bp, BLOCK_MISC, init_phase);
 +      bnx2x_init_block(bp, BLOCK_TCM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_UCM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_CCM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_XCM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_USEM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
 +
 +      if (!CHIP_IS_E1x(bp))
 +              REG_WR(bp, QM_REG_PF_EN, 1);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
 +              REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
 +              REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
 +              REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
 +      }
 +      bnx2x_init_block(bp, BLOCK_QM, init_phase);
 +
 +      bnx2x_init_block(bp, BLOCK_TM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 +      bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
 +      bnx2x_init_block(bp, BLOCK_PRS, init_phase);
 +      bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_USDM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
 +      bnx2x_init_block(bp, BLOCK_UPB, init_phase);
 +      bnx2x_init_block(bp, BLOCK_XPB, init_phase);
 +      bnx2x_init_block(bp, BLOCK_PBF, init_phase);
 +      if (!CHIP_IS_E1x(bp))
 +              REG_WR(bp, PBF_REG_DISABLE_PF, 0);
 +
 +      bnx2x_init_block(bp, BLOCK_CDU, init_phase);
 +
 +      bnx2x_init_block(bp, BLOCK_CFC, init_phase);
 +
 +      if (!CHIP_IS_E1x(bp))
 +              REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
 +
 +      if (IS_MF(bp)) {
 +              REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
 +              REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
 +      }
 +
 +      bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
 +
 +      /* HC init per function */
 +      if (bp->common.int_block == INT_BLOCK_HC) {
 +              if (CHIP_IS_E1H(bp)) {
 +                      REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
 +
 +                      REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 +                      REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 +              }
 +              bnx2x_init_block(bp, BLOCK_HC, init_phase);
 +
 +      } else {
 +              int num_segs, sb_idx, prod_offset;
 +
 +              REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
 +
 +              if (!CHIP_IS_E1x(bp)) {
 +                      REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
 +                      REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
 +              }
 +
 +              bnx2x_init_block(bp, BLOCK_IGU, init_phase);
 +
 +              if (!CHIP_IS_E1x(bp)) {
 +                      int dsb_idx = 0;
 +                      /**
 +                       * Producer memory:
 +                       * E2 mode: addresses 0-135 map to the mapping memory;
 +                       * 136 - PF0 default prod; 137 - PF1 default prod;
 +                       * 138 - PF2 default prod; 139 - PF3 default prod;
 +                       * 140 - PF0 attn prod;    141 - PF1 attn prod;
 +                       * 142 - PF2 attn prod;    143 - PF3 attn prod;
 +                       * 144-147 reserved.
 +                       *
 +                       * E1.5 mode - in backward compatible mode:
 +                       * for a non-default SB, each even line in the memory
 +                       * holds the U producer and each odd line holds
 +                       * the C producer. The first 128 producers are for
 +                       * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
 +                       * producers are for the DSB for each PF.
 +                       * Each PF has five segments: (the order inside each
 +                       * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
 +                       * 132-135 C prods; 136-139 X prods; 140-143 T prods;
 +                       * 144-147 attn prods;
 +                       */
 +                      /* non-default-status-blocks */
 +                      num_segs = CHIP_INT_MODE_IS_BC(bp) ?
 +                              IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
 +                      for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
 +                              prod_offset = (bp->igu_base_sb + sb_idx) *
 +                                      num_segs;
 +
 +                              for (i = 0; i < num_segs; i++) {
 +                                      addr = IGU_REG_PROD_CONS_MEMORY +
 +                                                      (prod_offset + i) * 4;
 +                                      REG_WR(bp, addr, 0);
 +                              }
 +                              /* send consumer update with value 0 */
 +                              bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
 +                                           USTORM_ID, 0, IGU_INT_NOP, 1);
 +                              bnx2x_igu_clear_sb(bp,
 +                                                 bp->igu_base_sb + sb_idx);
 +                      }
 +
 +                      /* default-status-blocks */
 +                      num_segs = CHIP_INT_MODE_IS_BC(bp) ?
 +                              IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
 +
 +                      if (CHIP_MODE_IS_4_PORT(bp))
 +                              dsb_idx = BP_FUNC(bp);
 +                      else
-               u8 entry = (BP_E1HVN(bp) + 1)*8;
++                              dsb_idx = BP_VN(bp);
 +
 +                      prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
 +                                     IGU_BC_BASE_DSB_PROD + dsb_idx :
 +                                     IGU_NORM_BASE_DSB_PROD + dsb_idx);
 +
++                      /*
++                       * igu prods come in chunks of E1HVN_MAX (4);
++                       * it does not matter what the current chip mode is
++                       */
 +                      for (i = 0; i < (num_segs * E1HVN_MAX);
 +                           i += E1HVN_MAX) {
 +                              addr = IGU_REG_PROD_CONS_MEMORY +
 +                                                      (prod_offset + i)*4;
 +                              REG_WR(bp, addr, 0);
 +                      }
 +                      /* send consumer update with 0 */
 +                      if (CHIP_INT_MODE_IS_BC(bp)) {
 +                              bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +                                           USTORM_ID, 0, IGU_INT_NOP, 1);
 +                              bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +                                           CSTORM_ID, 0, IGU_INT_NOP, 1);
 +                              bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +                                           XSTORM_ID, 0, IGU_INT_NOP, 1);
 +                              bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +                                           TSTORM_ID, 0, IGU_INT_NOP, 1);
 +                              bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +                                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
 +                      } else {
 +                              bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +                                           USTORM_ID, 0, IGU_INT_NOP, 1);
 +                              bnx2x_ack_sb(bp, bp->igu_dsb_id,
 +                                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
 +                      }
 +                      bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
 +
 +                      /* !!! these should become driver const once
 +                         rf-tool supports split-68 const */
 +                      REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
 +                      REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
 +                      REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
 +                      REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
 +                      REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
 +                      REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
 +              }
 +      }
 +
 +      /* Reset PCIE errors for debug */
 +      REG_WR(bp, 0x2114, 0xffffffff);
 +      REG_WR(bp, 0x2120, 0xffffffff);
 +
 +      if (CHIP_IS_E1x(bp)) {
 +              main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
 +              main_mem_base = HC_REG_MAIN_MEMORY +
 +                              BP_PORT(bp) * (main_mem_size * 4);
 +              main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
 +              main_mem_width = 8;
 +
 +              val = REG_RD(bp, main_mem_prty_clr);
 +              if (val)
 +                      DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
 +                                        "block during "
 +                                        "function init (0x%x)!\n", val);
 +
 +              /* Clear "false" parity errors in MSI-X table */
 +              for (i = main_mem_base;
 +                   i < main_mem_base + main_mem_size * 4;
 +                   i += main_mem_width) {
 +                      bnx2x_read_dmae(bp, i, main_mem_width / 4);
 +                      bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 +                                       i, main_mem_width / 4);
 +              }
 +              /* Clear HC parity attention */
 +              REG_RD(bp, main_mem_prty_clr);
 +      }
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      /* Enable STORMs SP logging */
 +      REG_WR8(bp, BAR_USTRORM_INTMEM +
 +             USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
 +      REG_WR8(bp, BAR_TSTRORM_INTMEM +
 +             TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
 +      REG_WR8(bp, BAR_CSTRORM_INTMEM +
 +             CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
 +      REG_WR8(bp, BAR_XSTRORM_INTMEM +
 +             XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
 +#endif
 +
 +      bnx2x_phy_probe(&bp->link_params);
 +
 +      return 0;
 +}
 +
 +
 +void bnx2x_free_mem(struct bnx2x *bp)
 +{
 +      /* fastpath */
 +      bnx2x_free_fp_mem(bp);
 +      /* end of fastpath */
 +
 +      BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
 +                     sizeof(struct host_sp_status_block));
 +
 +      BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 +                     bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 +
 +      BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 +                     sizeof(struct bnx2x_slowpath));
 +
 +      BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
 +                     bp->context.size);
 +
 +      bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
 +
 +      BNX2X_FREE(bp->ilt->lines);
 +
 +#ifdef BCM_CNIC
 +      if (!CHIP_IS_E1x(bp))
 +              BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
 +                             sizeof(struct host_hc_status_block_e2));
 +      else
 +              BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
 +                             sizeof(struct host_hc_status_block_e1x));
 +
 +      BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
 +#endif
 +
 +      BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 +
 +      BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
 +                     BCM_PAGE_SIZE * NUM_EQ_PAGES);
 +}
 +
 +static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 +{
 +      int num_groups;
 +
 +      /* number of eth_queues */
 +      u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
 +
 +      /* Total number of FW statistics requests =
 +       * 1 for port stats + 1 for PF stats + num_eth_queues */
 +      bp->fw_stats_num = 2 + num_queue_stats;
 +
 +
 +      /* Request is built from stats_query_header and an array of
 +       * stats_query_cmd_group each of which contains
 +       * STATS_QUERY_CMD_COUNT rules. The real number of requests is
 +       * configured in the stats_query_header.
 +       */
 +      num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
 +              (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
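 +      /* equivalently: DIV_ROUND_UP(bp->fw_stats_num, STATS_QUERY_CMD_COUNT) */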
 +
 +      bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
 +                      num_groups * sizeof(struct stats_query_cmd_group);
 +
 +      /* Data for statistics requests + stats_counter
 +       *
 +       * stats_counter holds per-STORM counters that are incremented
 +       * when STORM has finished with the current request.
 +       */
 +      bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
 +              sizeof(struct per_pf_stats) +
 +              sizeof(struct per_queue_stats) * num_queue_stats +
 +              sizeof(struct stats_counter);
 +
 +      BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
 +                      bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 +
 +      /* Set shortcuts */
 +      bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
 +      bp->fw_stats_req_mapping = bp->fw_stats_mapping;
 +
 +      bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
 +              ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
 +
 +      bp->fw_stats_data_mapping = bp->fw_stats_mapping +
 +                                 bp->fw_stats_req_sz;
 +      return 0;
 +
 +alloc_mem_err:
 +      BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 +                     bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 +      return -ENOMEM;
 +}
 +
 +
 +int bnx2x_alloc_mem(struct bnx2x *bp)
 +{
 +#ifdef BCM_CNIC
 +      if (!CHIP_IS_E1x(bp))
 +              /* size = the status block + ramrod buffers */
 +              BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
 +                              sizeof(struct host_hc_status_block_e2));
 +      else
 +              BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
 +                              sizeof(struct host_hc_status_block_e1x));
 +
 +      /* allocate searcher T2 table */
 +      BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
 +#endif
 +
 +
 +      BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
 +                      sizeof(struct host_sp_status_block));
 +
 +      BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
 +                      sizeof(struct bnx2x_slowpath));
 +
 +      /* Allocate memory for FW statistics */
 +      if (bnx2x_alloc_fw_stats_mem(bp))
 +              goto alloc_mem_err;
 +
 +      bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
 +
 +      BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
 +                      bp->context.size);
 +
 +      BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
 +
 +      if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
 +              goto alloc_mem_err;
 +
 +      /* Slow path ring */
 +      BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
 +
 +      /* EQ */
 +      BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
 +                      BCM_PAGE_SIZE * NUM_EQ_PAGES);
 +
 +
 +      /* fastpath */
 +      /* needs to be done at the end, since it's self-adjusting to the amount
 +       * of memory available for RSS queues
 +       */
 +      if (bnx2x_alloc_fp_mem(bp))
 +              goto alloc_mem_err;
 +      return 0;
 +
 +alloc_mem_err:
 +      bnx2x_free_mem(bp);
 +      return -ENOMEM;
 +}
 +
 +/*
 + * Init service functions
 + */
 +
 +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
 +                    struct bnx2x_vlan_mac_obj *obj, bool set,
 +                    int mac_type, unsigned long *ramrod_flags)
 +{
 +      int rc;
 +      struct bnx2x_vlan_mac_ramrod_params ramrod_param;
 +
 +      memset(&ramrod_param, 0, sizeof(ramrod_param));
 +
 +      /* Fill general parameters */
 +      ramrod_param.vlan_mac_obj = obj;
 +      ramrod_param.ramrod_flags = *ramrod_flags;
 +
 +      /* Fill a user request section if needed */
 +      if (!test_bit(RAMROD_CONT, ramrod_flags)) {
 +              memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
 +
 +              __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
 +
 +              /* Set the command: ADD or DEL */
 +              if (set)
 +                      ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 +              else
 +                      ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
 +      }
 +
 +      rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
 +      if (rc < 0)
 +              BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
 +      return rc;
 +}
 +
 +int bnx2x_del_all_macs(struct bnx2x *bp,
 +                     struct bnx2x_vlan_mac_obj *mac_obj,
 +                     int mac_type, bool wait_for_comp)
 +{
 +      int rc;
 +      unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
 +
 +      /* Wait for completion of the request */
 +      if (wait_for_comp)
 +              __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 +
 +      /* Set the mac type of addresses we want to clear */
 +      __set_bit(mac_type, &vlan_mac_flags);
 +
 +      rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
 +      if (rc < 0)
 +              BNX2X_ERR("Failed to delete MACs: %d\n", rc);
 +
 +      return rc;
 +}
 +
 +int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 +{
 +      unsigned long ramrod_flags = 0;
 +
 +      DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 +
 +      __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 +      /* Eth MAC is set on RSS leading client (fp[0]) */
 +      return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
 +                               BNX2X_ETH_MAC, &ramrod_flags);
 +}
 +
 +int bnx2x_setup_leading(struct bnx2x *bp)
 +{
 +      return bnx2x_setup_queue(bp, &bp->fp[0], 1);
 +}
 +
 +/**
 + * bnx2x_set_int_mode - configure interrupt mode
 + *
 + * @bp:               driver handle
 + *
 + * In case of MSI-X it will also try to enable MSI-X.
 + */
 +static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
 +{
 +      switch (int_mode) {
 +      case INT_MODE_MSI:
 +              bnx2x_enable_msi(bp);
 +              /* falling through... */
 +      case INT_MODE_INTx:
 +              bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
 +              DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
 +              break;
 +      default:
 +              /* Set number of queues according to bp->multi_mode value */
 +              bnx2x_set_num_queues(bp);
 +
 +              DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
 +                 bp->num_queues);
 +
 +              /* if we can't use MSI-X we only need one fp,
 +               * so try to enable MSI-X with the requested number of fp's
 +               * and fall back to MSI or legacy INTx with one fp
 +               */
 +              if (bnx2x_enable_msix(bp)) {
 +                      /* failed to enable MSI-X */
 +                      if (bp->multi_mode)
 +                              DP(NETIF_MSG_IFUP,
 +                                        "Multi requested but failed to "
 +                                        "enable MSI-X (%d), "
 +                                        "set number of queues to %d\n",
 +                                 bp->num_queues,
 +                                 1 + NON_ETH_CONTEXT_USE);
 +                      bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
 +
 +                      /* Try to enable MSI */
 +                      if (!(bp->flags & DISABLE_MSI_FLAG))
 +                              bnx2x_enable_msi(bp);
 +              }
 +              break;
 +      }
 +}
 +
 +/* must be called prior to any HW initializations */
 +static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
 +{
 +      return L2_ILT_LINES(bp);
 +}
 +
 +void bnx2x_ilt_set_info(struct bnx2x *bp)
 +{
 +      struct ilt_client_info *ilt_client;
 +      struct bnx2x_ilt *ilt = BP_ILT(bp);
 +      u16 line = 0;
 +
 +      ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
 +      DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
 +
 +      /* CDU */
 +      ilt_client = &ilt->clients[ILT_CLIENT_CDU];
 +      ilt_client->client_num = ILT_CLIENT_CDU;
 +      ilt_client->page_size = CDU_ILT_PAGE_SZ;
 +      ilt_client->flags = ILT_CLIENT_SKIP_MEM;
 +      ilt_client->start = line;
 +      line += bnx2x_cid_ilt_lines(bp);
 +#ifdef BCM_CNIC
 +      line += CNIC_ILT_LINES;
 +#endif
 +      ilt_client->end = line - 1;
 +
 +      DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
 +                                       "flags 0x%x, hw psz %d\n",
 +         ilt_client->start,
 +         ilt_client->end,
 +         ilt_client->page_size,
 +         ilt_client->flags,
 +         ilog2(ilt_client->page_size >> 12));
 +
 +      /* QM */
 +      if (QM_INIT(bp->qm_cid_count)) {
 +              ilt_client = &ilt->clients[ILT_CLIENT_QM];
 +              ilt_client->client_num = ILT_CLIENT_QM;
 +              ilt_client->page_size = QM_ILT_PAGE_SZ;
 +              ilt_client->flags = 0;
 +              ilt_client->start = line;
 +
 +              /* 4 bytes for each cid */
 +              line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
 +                                                       QM_ILT_PAGE_SZ);
 +
 +              ilt_client->end = line - 1;
 +
 +              DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
 +                                               "flags 0x%x, hw psz %d\n",
 +                 ilt_client->start,
 +                 ilt_client->end,
 +                 ilt_client->page_size,
 +                 ilt_client->flags,
 +                 ilog2(ilt_client->page_size >> 12));
 +
 +      }
 +      /* SRC */
 +      ilt_client = &ilt->clients[ILT_CLIENT_SRC];
 +#ifdef BCM_CNIC
 +      ilt_client->client_num = ILT_CLIENT_SRC;
 +      ilt_client->page_size = SRC_ILT_PAGE_SZ;
 +      ilt_client->flags = 0;
 +      ilt_client->start = line;
 +      line += SRC_ILT_LINES;
 +      ilt_client->end = line - 1;
 +
 +      DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
 +                                       "flags 0x%x, hw psz %d\n",
 +         ilt_client->start,
 +         ilt_client->end,
 +         ilt_client->page_size,
 +         ilt_client->flags,
 +         ilog2(ilt_client->page_size >> 12));
 +
 +#else
 +      ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
 +#endif
 +
 +      /* TM */
 +      ilt_client = &ilt->clients[ILT_CLIENT_TM];
 +#ifdef BCM_CNIC
 +      ilt_client->client_num = ILT_CLIENT_TM;
 +      ilt_client->page_size = TM_ILT_PAGE_SZ;
 +      ilt_client->flags = 0;
 +      ilt_client->start = line;
 +      line += TM_ILT_LINES;
 +      ilt_client->end = line - 1;
 +
 +      DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
 +                                       "flags 0x%x, hw psz %d\n",
 +         ilt_client->start,
 +         ilt_client->end,
 +         ilt_client->page_size,
 +         ilt_client->flags,
 +         ilog2(ilt_client->page_size >> 12));
 +
 +#else
 +      ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
 +#endif
 +      BUG_ON(line > ILT_MAX_LINES);
 +}
 +
 +/**
 + * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 + *
 + * @bp:                       driver handle
 + * @fp:                       pointer to fastpath
 + * @init_params:      pointer to parameters structure
 + *
 + * parameters configured:
 + *      - HC configuration
 + *      - Queue's CDU context
 + */
 +static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
 +      struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
 +{
 +
 +      u8 cos;
 +      /* FCoE Queue uses Default SB, thus has no HC capabilities */
 +      if (!IS_FCOE_FP(fp)) {
 +              __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
 +              __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
 +
 +              /* If HC is supported, enable host coalescing in the transition
 +               * to the INIT state.
 +               */
 +              __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
 +              __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
 +
 +              /* HC rate */
 +              init_params->rx.hc_rate = bp->rx_ticks ?
 +                      (1000000 / bp->rx_ticks) : 0;
 +              init_params->tx.hc_rate = bp->tx_ticks ?
 +                      (1000000 / bp->tx_ticks) : 0;
 +
 +              /* FW SB ID */
 +              init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
 +                      fp->fw_sb_id;
 +
 +              /*
 +               * CQ index among the SB indices: the FCoE client uses the default
 +               * SB, therefore it's different.
 +               */
 +              init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
 +              init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
 +      }
 +
 +      /* set maximum number of COSs supported by this queue */
 +      init_params->max_cos = fp->max_cos;
 +
 +      DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
 +          fp->index, init_params->max_cos);
 +
 +      /* set the context pointers queue object */
 +      for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
 +              init_params->cxts[cos] =
 +                      &bp->context.vcxt[fp->txdata[cos].cid].eth;
 +}
 +
 +int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +                      struct bnx2x_queue_state_params *q_params,
 +                      struct bnx2x_queue_setup_tx_only_params *tx_only_params,
 +                      int tx_index, bool leading)
 +{
 +      memset(tx_only_params, 0, sizeof(*tx_only_params));
 +
 +      /* Set the command */
 +      q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
 +
 +      /* Set tx-only QUEUE flags: don't zero statistics */
 +      tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
 +
 +      /* choose the index of the cid to send the slow path on */
 +      tx_only_params->cid_index = tx_index;
 +
 +      /* Set general TX_ONLY_SETUP parameters */
 +      bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
 +
 +      /* Set Tx TX_ONLY_SETUP parameters */
 +      bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
 +
 +      DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
 +                       "cos %d, primary cid %d, cid %d, "
 +                       "client id %d, sp-client id %d, flags %lx\n",
 +         tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
 +         q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
 +         tx_only_params->gen_params.spcl_id, tx_only_params->flags);
 +
 +      /* send the ramrod */
 +      return bnx2x_queue_state_change(bp, q_params);
 +}
 +
 +
 +/**
 + * bnx2x_setup_queue - setup queue
 + *
 + * @bp:               driver handle
 + * @fp:               pointer to fastpath
 + * @leading:  is leading
 + *
 + * This function performs 2 steps in the Queue state machine:
 + *      1) RESET->INIT, 2) INIT->SETUP
 + */
 +
 +int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +                     bool leading)
 +{
 +      struct bnx2x_queue_state_params q_params = {0};
 +      struct bnx2x_queue_setup_params *setup_params =
 +                                              &q_params.params.setup;
 +      struct bnx2x_queue_setup_tx_only_params *tx_only_params =
 +                                              &q_params.params.tx_only;
 +      int rc;
 +      u8 tx_index;
 +
 +      DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
 +
 +      /* reset IGU state; skip the FCoE L2 queue */
 +      if (!IS_FCOE_FP(fp))
 +              bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
 +                           IGU_INT_ENABLE, 0);
 +
 +      q_params.q_obj = &fp->q_obj;
 +      /* We want to wait for completion in this context */
 +      __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 +
 +      /* Prepare the INIT parameters */
 +      bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
 +
 +      /* Set the command */
 +      q_params.cmd = BNX2X_Q_CMD_INIT;
 +
 +      /* Change the state to INIT */
 +      rc = bnx2x_queue_state_change(bp, &q_params);
 +      if (rc) {
 +              BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
 +              return rc;
 +      }
 +
 +      DP(BNX2X_MSG_SP, "init complete\n");
 +
 +
 +      /* Now move the Queue to the SETUP state... */
 +      memset(setup_params, 0, sizeof(*setup_params));
 +
 +      /* Set QUEUE flags */
 +      setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
 +
 +      /* Set general SETUP parameters */
 +      bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
 +                              FIRST_TX_COS_INDEX);
 +
 +      bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
 +                          &setup_params->rxq_params);
 +
 +      bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
 +                         FIRST_TX_COS_INDEX);
 +
 +      /* Set the command */
 +      q_params.cmd = BNX2X_Q_CMD_SETUP;
 +
 +      /* Change the state to SETUP */
 +      rc = bnx2x_queue_state_change(bp, &q_params);
 +      if (rc) {
 +              BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
 +              return rc;
 +      }
 +
 +      /* loop through the relevant tx-only indices */
 +      for (tx_index = FIRST_TX_ONLY_COS_INDEX;
 +            tx_index < fp->max_cos;
 +            tx_index++) {
 +
 +              /* prepare and send tx-only ramrod */
 +              rc = bnx2x_setup_tx_only(bp, fp, &q_params,
 +                                        tx_only_params, tx_index, leading);
 +              if (rc) {
 +                      BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
 +                                fp->index, tx_index);
 +                      return rc;
 +              }
 +      }
 +
 +      return rc;
 +}
 +
 +static int bnx2x_stop_queue(struct bnx2x *bp, int index)
 +{
 +      struct bnx2x_fastpath *fp = &bp->fp[index];
 +      struct bnx2x_fp_txdata *txdata;
 +      struct bnx2x_queue_state_params q_params = {0};
 +      int rc, tx_index;
 +
 +      DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
 +
 +      q_params.q_obj = &fp->q_obj;
 +      /* We want to wait for completion in this context */
 +      __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 +
 +
 +      /* close tx-only connections */
 +      for (tx_index = FIRST_TX_ONLY_COS_INDEX;
 +           tx_index < fp->max_cos;
 +           tx_index++) {
 +
 +              /* ascertain this is a normal queue */
 +              txdata = &fp->txdata[tx_index];
 +
 +              DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
 +                                                      txdata->txq_index);
 +
 +              /* send halt terminate on tx-only connection */
 +              q_params.cmd = BNX2X_Q_CMD_TERMINATE;
 +              memset(&q_params.params.terminate, 0,
 +                     sizeof(q_params.params.terminate));
 +              q_params.params.terminate.cid_index = tx_index;
 +
 +              rc = bnx2x_queue_state_change(bp, &q_params);
 +              if (rc)
 +                      return rc;
 +
 +              /* delete the cfc entry on the tx-only connection */
 +              q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
 +              memset(&q_params.params.cfc_del, 0,
 +                     sizeof(q_params.params.cfc_del));
 +              q_params.params.cfc_del.cid_index = tx_index;
 +              rc = bnx2x_queue_state_change(bp, &q_params);
 +              if (rc)
 +                      return rc;
 +      }
 +      /* Stop the primary connection: */
 +      /* ...halt the connection */
 +      q_params.cmd = BNX2X_Q_CMD_HALT;
 +      rc = bnx2x_queue_state_change(bp, &q_params);
 +      if (rc)
 +              return rc;
 +
 +      /* ...terminate the connection */
 +      q_params.cmd = BNX2X_Q_CMD_TERMINATE;
 +      memset(&q_params.params.terminate, 0,
 +             sizeof(q_params.params.terminate));
 +      q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
 +      rc = bnx2x_queue_state_change(bp, &q_params);
 +      if (rc)
 +              return rc;
 +      /* ...delete cfc entry */
 +      q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
 +      memset(&q_params.params.cfc_del, 0,
 +             sizeof(q_params.params.cfc_del));
 +      q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
 +      return bnx2x_queue_state_change(bp, &q_params);
 +}
 +
 +
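 +/*
 + * bnx2x_reset_func - reset the per-function HW state.
 + *
 + * Disables the function in the FW storms, marks its status blocks as
 + * disabled, clears the SPQ data and the function's ILT, waits for the CNIC
 + * timer scan to stop (when CNIC is built in) and finally disables the PF on
 + * non-E1x chips.
 + */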
 +static void bnx2x_reset_func(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      int func = BP_FUNC(bp);
 +      int i;
 +
 +      /* Disable the function in the FW */
 +      REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
 +      REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
 +      REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
 +      REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
 +
 +      /* FP SBs */
 +      for_each_eth_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              REG_WR8(bp, BAR_CSTRORM_INTMEM +
 +                         CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
 +                         SB_DISABLED);
 +      }
 +
 +#ifdef BCM_CNIC
 +      /* CNIC SB */
 +      REG_WR8(bp, BAR_CSTRORM_INTMEM +
 +              CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
 +              SB_DISABLED);
 +#endif
 +      /* SP SB */
 +      REG_WR8(bp, BAR_CSTRORM_INTMEM +
 +                 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
 +                 SB_DISABLED);
 +
 +      for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
 +              REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
 +                     0);
 +
 +      /* Configure IGU */
 +      if (bp->common.int_block == INT_BLOCK_HC) {
 +              REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 +              REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 +      } else {
 +              REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
 +              REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
 +      }
 +
 +#ifdef BCM_CNIC
 +      /* Disable Timer scan */
 +      REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
 +      /*
 +       * Wait for at least 10ms and up to 2 second for the timers scan to
 +       * complete
 +       */
 +      for (i = 0; i < 200; i++) {
 +              msleep(10);
 +              if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
 +                      break;
 +      }
 +#endif
 +      /* Clear ILT */
 +      bnx2x_clear_func_ilt(bp, func);
 +
 +      /* Timers workaround bug for E2: if this is vnic-3,
 +       * we need to set the entire ILT range for these timers.
 +       */
 +      if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
 +              struct ilt_client_info ilt_cli;
 +              /* use dummy TM client */
 +              memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
 +              ilt_cli.start = 0;
 +              ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
 +              ilt_cli.client_num = ILT_CLIENT_TM;
 +
 +              bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
 +      }
 +
 +      /* this assumes that reset_port() called before reset_func()*/
 +      if (!CHIP_IS_E1x(bp))
 +              bnx2x_pf_disable(bp);
 +
 +      bp->dmae_ready = 0;
 +}
 +
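 +/*
 + * bnx2x_reset_port - reset the per-port HW state.
 + *
 + * Brings the physical link down, masks the port's NIG and AEU attentions,
 + * blocks Rx traffic towards the BRB and checks that the BRB has drained.
 + */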
 +static void bnx2x_reset_port(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      u32 val;
 +
 +      /* Reset physical Link */
 +      bnx2x__link_reset(bp);
 +
 +      REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 +
 +      /* Do not rcv packets to BRB */
 +      REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
 +      /* Do not direct rcv packets that are not for MCP to the BRB */
 +      REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
 +                         NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
 +
 +      /* Configure AEU */
 +      REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
 +
 +      msleep(100);
 +      /* Check for BRB port occupancy */
 +      val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
 +      if (val)
 +              DP(NETIF_MSG_IFDOWN,
 +                 "BRB1 is not empty  %d blocks are occupied\n", val);
 +
 +      /* TODO: Close Doorbell port? */
 +}
 +
 +static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
 +{
 +      struct bnx2x_func_state_params func_params = {0};
 +
 +      /* Prepare parameters for function state transitions */
 +      __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 +
 +      func_params.f_obj = &bp->func_obj;
 +      func_params.cmd = BNX2X_F_CMD_HW_RESET;
 +
 +      func_params.params.hw_init.load_phase = load_code;
 +
 +      return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +static inline int bnx2x_func_stop(struct bnx2x *bp)
 +{
 +      struct bnx2x_func_state_params func_params = {0};
 +      int rc;
 +
 +      /* Prepare parameters for function state transitions */
 +      __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 +      func_params.f_obj = &bp->func_obj;
 +      func_params.cmd = BNX2X_F_CMD_STOP;
 +
 +      /*
 +       * Try to stop the function the 'good way'. If it fails (in case
 +       * of a parity error during bnx2x_chip_cleanup()) and we are
 +       * not in a debug mode, perform a state transaction in order to
 +       * enable further HW_RESET transaction.
 +       */
 +      rc = bnx2x_func_state_change(bp, &func_params);
 +      if (rc) {
 +#ifdef BNX2X_STOP_ON_ERROR
 +              return rc;
 +#else
 +              BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
 +                        "transaction\n");
 +              __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
 +              return bnx2x_func_state_change(bp, &func_params);
 +#endif
 +      }
 +
 +      return 0;
 +}
 +
 +/**
 + * bnx2x_send_unload_req - request unload mode from the MCP.
 + *
 + * @bp:                       driver handle
 + * @unload_mode:      requested function's unload mode
 + *
 + * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 + */
 +u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
 +{
 +      u32 reset_code = 0;
 +      int port = BP_PORT(bp);
 +
 +      /* Select the UNLOAD request mode */
 +      if (unload_mode == UNLOAD_NORMAL)
 +              reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +
 +      else if (bp->flags & NO_WOL_FLAG)
 +              reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
 +
 +      else if (bp->wol) {
 +              u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 +              u8 *mac_addr = bp->dev->dev_addr;
 +              u32 val;
 +              /* The mac address is written to entries 1-4 to
 +                 preserve entry 0 which is used by the PMF */
-               /* Check if it is the UNDI driver
++              u8 entry = (BP_VN(bp) + 1)*8;
 +
 +              val = (mac_addr[0] << 8) | mac_addr[1];
 +              EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
 +
 +              val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 +                    (mac_addr[4] << 8) | mac_addr[5];
 +              EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
 +
 +              reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
 +
 +      } else
 +              reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +
 +      /* Send the request to the MCP */
 +      if (!BP_NOMCP(bp))
 +              reset_code = bnx2x_fw_command(bp, reset_code, 0);
 +      else {
 +              int path = BP_PATH(bp);
 +
 +              DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
 +                                   "%d, %d, %d\n",
 +                 path, load_count[path][0], load_count[path][1],
 +                 load_count[path][2]);
 +              load_count[path][0]--;
 +              load_count[path][1 + port]--;
 +              DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
 +                                   "%d, %d, %d\n",
 +                 path, load_count[path][0], load_count[path][1],
 +                 load_count[path][2]);
 +              if (load_count[path][0] == 0)
 +                      reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
 +              else if (load_count[path][1 + port] == 0)
 +                      reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
 +              else
 +                      reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
 +      }
 +
 +      return reset_code;
 +}
 +
 +/**
 + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 + *
 + * @bp:               driver handle
 + */
 +void bnx2x_send_unload_done(struct bnx2x *bp)
 +{
 +      /* Report UNLOAD_DONE to MCP */
 +      if (!BP_NOMCP(bp))
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 +}
 +
 +static inline int bnx2x_func_wait_started(struct bnx2x *bp)
 +{
 +      int tout = 50;
 +      int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 +
 +      if (!bp->port.pmf)
 +              return 0;
 +
 +      /*
 +       * (assumption: No Attention from MCP at this stage)
 +       * PMF probably in the middle of TXdisable/enable transaction
 +       * 1. Sync ISR for default SB
 +       * 2. Sync SP queue - this guarantees us that attention handling started
 +       * 3. Wait until the TXdisable/enable transaction completes
 +       *
 +       * 1+2 guarantee that if a DCBx attention was scheduled it already
 +       * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
 +       * if we already received the completion for the transaction the state
 +       * is TX_STOPPED.
 +       * State will return to STARTED after completion of TX_STOPPED-->STARTED
 +       * transaction.
 +       */
 +
 +      /* make sure default SB ISR is done */
 +      if (msix)
 +              synchronize_irq(bp->msix_table[0].vector);
 +      else
 +              synchronize_irq(bp->pdev->irq);
 +
 +      flush_workqueue(bnx2x_wq);
 +
 +      while (bnx2x_func_get_state(bp, &bp->func_obj) !=
 +                              BNX2X_F_STATE_STARTED && tout--)
 +              msleep(20);
 +
 +      if (bnx2x_func_get_state(bp, &bp->func_obj) !=
 +                                              BNX2X_F_STATE_STARTED) {
 +#ifdef BNX2X_STOP_ON_ERROR
 +              return -EBUSY;
 +#else
 +              /*
 +               * Failed to complete the transaction in a "good way".
 +               * Force both transactions with the CLR bit.
 +               */
 +              struct bnx2x_func_state_params func_params = {0};
 +
 +              DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
 +                        "Forcing STARTED-->TX_STOPPED-->STARTED\n");
 +
 +              func_params.f_obj = &bp->func_obj;
 +              __set_bit(RAMROD_DRV_CLR_ONLY,
 +                                      &func_params.ramrod_flags);
 +
 +              /* STARTED-->TX_STOPPED */
 +              func_params.cmd = BNX2X_F_CMD_TX_STOP;
 +              bnx2x_func_state_change(bp, &func_params);
 +
 +              /* TX_STOPPED-->STARTED */
 +              func_params.cmd = BNX2X_F_CMD_TX_START;
 +              return bnx2x_func_state_change(bp, &func_params);
 +#endif
 +      }
 +
 +      return 0;
 +}
 +
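 +/*
 + * bnx2x_chip_cleanup - graceful HW shutdown for unload.
 + *
 + * Drains the tx queues, removes all MAC/UC/multicast configuration, stops
 + * Rx, negotiates the unload mode with the MCP, closes all queues, stops the
 + * function, releases IRQs, resets the HW and reports UNLOAD_DONE.
 + */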
 +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
 +{
 +      int port = BP_PORT(bp);
 +      int i, rc = 0;
 +      u8 cos;
 +      struct bnx2x_mcast_ramrod_params rparam = {0};
 +      u32 reset_code;
 +
 +      /* Wait until tx fastpath tasks complete */
 +      for_each_tx_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +              for_each_cos_in_tx_queue(fp, cos)
 +                      rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
 +#ifdef BNX2X_STOP_ON_ERROR
 +              if (rc)
 +                      return;
 +#endif
 +      }
 +
 +      /* Give HW time to discard old tx messages */
 +      usleep_range(1000, 1000);
 +
 +      /* Clean all ETH MACs */
 +      rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
 +      if (rc < 0)
 +              BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
 +
 +      /* Clean up UC list  */
 +      rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
 +                              true);
 +      if (rc < 0)
 +              BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
 +                        "%d\n", rc);
 +
 +      /* Disable LLH */
 +      if (!CHIP_IS_E1(bp))
 +              REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 +
 +      /* Set "drop all" (stop Rx).
 +       * We need to take a netif_addr_lock() here in order to prevent
 +       * a race between the completion code and this code.
 +       */
 +      netif_addr_lock_bh(bp->dev);
 +      /* Schedule the rx_mode command */
 +      if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
 +              set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
 +      else
 +              bnx2x_set_storm_rx_mode(bp);
 +
 +      /* Cleanup multicast configuration */
 +      rparam.mcast_obj = &bp->mcast_obj;
 +      rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 +      if (rc < 0)
 +              BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
 +
 +      netif_addr_unlock_bh(bp->dev);
 +
 +
 +
 +      /*
 +       * Send the UNLOAD_REQUEST to the MCP. This will return if
 +       * this function should perform FUNC, PORT or COMMON HW
 +       * reset.
 +       */
 +      reset_code = bnx2x_send_unload_req(bp, unload_mode);
 +
 +      /*
 +       * (assumption: No Attention from MCP at this stage)
 +       * PMF probably in the middle of TXdisable/enable transaction
 +       */
 +      rc = bnx2x_func_wait_started(bp);
 +      if (rc) {
 +              BNX2X_ERR("bnx2x_func_wait_started failed\n");
 +#ifdef BNX2X_STOP_ON_ERROR
 +              return;
 +#endif
 +      }
 +
 +      /* Close multi and leading connections
 +       * Completions for ramrods are collected in a synchronous way
 +       */
 +      for_each_queue(bp, i)
 +              if (bnx2x_stop_queue(bp, i))
 +#ifdef BNX2X_STOP_ON_ERROR
 +                      return;
 +#else
 +                      goto unload_error;
 +#endif
 +      /* If SP settings didn't get completed so far - something
 +       * very wrong has happened.
 +       */
 +      if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
 +              BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
 +
 +#ifndef BNX2X_STOP_ON_ERROR
 +unload_error:
 +#endif
 +      rc = bnx2x_func_stop(bp);
 +      if (rc) {
 +              BNX2X_ERR("Function stop failed!\n");
 +#ifdef BNX2X_STOP_ON_ERROR
 +              return;
 +#endif
 +      }
 +
 +      /* Disable HW interrupts, NAPI */
 +      bnx2x_netif_stop(bp, 1);
 +
 +      /* Release IRQs */
 +      bnx2x_free_irq(bp);
 +
 +      /* Reset the chip */
 +      rc = bnx2x_reset_hw(bp, reset_code);
 +      if (rc)
 +              BNX2X_ERR("HW_RESET failed\n");
 +
 +
 +      /* Report UNLOAD_DONE to MCP */
 +      bnx2x_send_unload_done(bp);
 +}
 +
 +void bnx2x_disable_close_the_gate(struct bnx2x *bp)
 +{
 +      u32 val;
 +
 +      DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
 +
 +      if (CHIP_IS_E1(bp)) {
 +              int port = BP_PORT(bp);
 +              u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 +                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
 +
 +              val = REG_RD(bp, addr);
 +              val &= ~(0x300);
 +              REG_WR(bp, addr, val);
 +      } else {
 +              val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
 +              val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
 +                       MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
 +              REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
 +      }
 +}
 +
 +/* Close gates #2, #3 and #4: */
 +static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
 +{
 +      u32 val;
 +
 +      /* Gates #2 and #4a are closed/opened for "not E1" only */
 +      if (!CHIP_IS_E1(bp)) {
 +              /* #4 */
 +              REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
 +              /* #2 */
 +              REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
 +      }
 +
 +      /* #3 */
 +      if (CHIP_IS_E1x(bp)) {
 +              /* Prevent interrupts from HC on both ports */
 +              val = REG_RD(bp, HC_REG_CONFIG_1);
 +              REG_WR(bp, HC_REG_CONFIG_1,
 +                     (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
 +                     (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
 +
 +              val = REG_RD(bp, HC_REG_CONFIG_0);
 +              REG_WR(bp, HC_REG_CONFIG_0,
 +                     (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
 +                     (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
 +      } else {
 +              /* Prevent incoming interrupts in IGU */
 +              val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
 +
 +              REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
 +                     (!close) ?
 +                     (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
 +                     (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
 +      }
 +
 +      DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
 +              close ? "closing" : "opening");
 +      mmiowb();
 +}
 +
 +#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
 +
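 +/**
 + * bnx2x_clp_reset_prep - save and set the `magic' bit.
 + *
 + * @bp:               driver handle
 + * @magic_val:        where to return the old value of the `magic' bit.
 + *
 + * Sets the `magic' bit in the shared MF config so the MF configuration is
 + * preserved across the upcoming MCP reset.
 + */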
 +static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
 +{
 +      /* Do some magic... */
 +      u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
 +      *magic_val = val & SHARED_MF_CLP_MAGIC;
 +      MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
 +}
 +
 +/**
 + * bnx2x_clp_reset_done - restore the value of the `magic' bit.
 + *
 + * @bp:               driver handle
 + * @magic_val:        old value of the `magic' bit.
 + */
 +static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
 +{
 +      /* Restore the `magic' bit value... */
 +      u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
 +      MF_CFG_WR(bp, shared_mf_config.clp_mb,
 +              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
 +}
 +
 +/**
 + * bnx2x_reset_mcp_prep - prepare for MCP reset.
 + *
 + * @bp:               driver handle
 + * @magic_val:        old value of 'magic' bit.
 + *
 + * Takes care of CLP configurations.
 + */
 +static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
 +{
 +      u32 shmem;
 +      u32 validity_offset;
 +
 +      DP(NETIF_MSG_HW, "Starting\n");
 +
 +      /* Set `magic' bit in order to save MF config */
 +      if (!CHIP_IS_E1(bp))
 +              bnx2x_clp_reset_prep(bp, magic_val);
 +
 +      /* Get shmem offset */
 +      shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
 +      validity_offset = offsetof(struct shmem_region, validity_map[0]);
 +
 +      /* Clear validity map flags */
 +      if (shmem > 0)
 +              REG_WR(bp, shmem + validity_offset, 0);
 +}
 +
 +#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
 +#define MCP_ONE_TIMEOUT  100    /* 100 ms */
 +
 +/**
 + * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
 + *
 + * @bp:       driver handle
 + */
 +static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
 +{
 +      /* special handling for emulation and FPGA,
 +         wait 10 times longer */
 +      if (CHIP_REV_IS_SLOW(bp))
 +              msleep(MCP_ONE_TIMEOUT*10);
 +      else
 +              msleep(MCP_ONE_TIMEOUT);
 +}
 +
 +/*
 + * initializes bp->common.shmem_base and waits for validity signature to appear
 + */
 +static int bnx2x_init_shmem(struct bnx2x *bp)
 +{
 +      int cnt = 0;
 +      u32 val = 0;
 +
 +      do {
 +              bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
 +              if (bp->common.shmem_base) {
 +                      val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
 +                      if (val & SHR_MEM_VALIDITY_MB)
 +                              return 0;
 +              }
 +
 +              bnx2x_mcp_wait_one(bp);
 +
 +      } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
 +
 +      BNX2X_ERR("BAD MCP validity signature\n");
 +
 +      return -ENODEV;
 +}
 +
 +static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
 +{
 +      int rc = bnx2x_init_shmem(bp);
 +
 +      /* Restore the `magic' bit value */
 +      if (!CHIP_IS_E1(bp))
 +              bnx2x_clp_reset_done(bp, magic_val);
 +
 +      return rc;
 +}
 +
 +static void bnx2x_pxp_prep(struct bnx2x *bp)
 +{
 +      if (!CHIP_IS_E1(bp)) {
 +              REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
 +              REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
 +              mmiowb();
 +      }
 +}
 +
 +/*
 + * Reset the whole chip except for:
 + *      - PCIE core
 + *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 + *              one reset bit)
 + *      - IGU
 + *      - MISC (including AEU)
 + *      - GRC
 + *      - RBCN, RBCP
 + */
 +static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
 +{
 +      u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
 +      u32 global_bits2, stay_reset2;
 +
 +      /*
 +       * Bits that have to be set in reset_mask2 if we want to reset 'global'
 +       * (per chip) blocks.
 +       */
 +      global_bits2 =
 +              MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
 +              MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
 +
 +      /* Don't reset the following blocks */
 +      not_reset_mask1 =
 +              MISC_REGISTERS_RESET_REG_1_RST_HC |
 +              MISC_REGISTERS_RESET_REG_1_RST_PXPV |
 +              MISC_REGISTERS_RESET_REG_1_RST_PXP;
 +
 +      not_reset_mask2 =
 +              MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
 +              MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
 +              MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
 +              MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
 +              MISC_REGISTERS_RESET_REG_2_RST_RBCN |
 +              MISC_REGISTERS_RESET_REG_2_RST_GRC  |
 +              MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
 +              MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
 +              MISC_REGISTERS_RESET_REG_2_RST_ATC |
 +              MISC_REGISTERS_RESET_REG_2_PGLC;
 +
 +      /*
 +       * Keep the following blocks in reset:
 +       *  - all xxMACs are handled by the bnx2x_link code.
 +       */
 +      stay_reset2 =
 +              MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
 +              MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
 +              MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
 +              MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
 +              MISC_REGISTERS_RESET_REG_2_UMAC0 |
 +              MISC_REGISTERS_RESET_REG_2_UMAC1 |
 +              MISC_REGISTERS_RESET_REG_2_XMAC |
 +              MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
 +
 +      /* Full reset masks according to the chip */
 +      reset_mask1 = 0xffffffff;
 +
 +      if (CHIP_IS_E1(bp))
 +              reset_mask2 = 0xffff;
 +      else if (CHIP_IS_E1H(bp))
 +              reset_mask2 = 0x1ffff;
 +      else if (CHIP_IS_E2(bp))
 +              reset_mask2 = 0xfffff;
 +      else /* CHIP_IS_E3 */
 +              reset_mask2 = 0x3ffffff;
 +
 +      /* Don't reset global blocks unless we need to */
 +      if (!global)
 +              reset_mask2 &= ~global_bits2;
 +
 +      /*
 +       * In case of attention in the QM, we need to reset PXP
 +       * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
 +       * because otherwise QM reset would release 'close the gates' shortly
 +       * before resetting the PXP, then the PSWRQ would send a write
 +       * request to PGLUE. Then when PXP is reset, PGLUE would try to
 +       * read the payload data from PSWWR, but PSWWR would not
 +       * respond. The write queue in PGLUE would get stuck, and DMAE commands
 +       * would not return. Therefore it's important to reset the second
 +       * reset register (containing the
 +       * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
 +       * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
 +       * bit).
 +       */
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +             reset_mask2 & (~not_reset_mask2));
 +
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 +             reset_mask1 & (~not_reset_mask1));
 +
 +      barrier();
 +      mmiowb();
 +
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 +             reset_mask2 & (~stay_reset2));
 +
 +      barrier();
 +      mmiowb();
 +
 +      REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
 +      mmiowb();
 +}
 +
 +/**
 + * bnx2x_er_poll_igu_vq - poll for the IGU pending writes bit.
 + *
 + * @bp:       driver handle
 + *
 + * The bit should get cleared in no more than 1s. Returns 0 if the
 + * pending writes bit gets cleared.
 + */
 +static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
 +{
 +      u32 cnt = 1000;
 +      u32 pend_bits = 0;
 +
 +      do {
 +              pend_bits  = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
 +
 +              if (pend_bits == 0)
 +                      break;
 +
 +              usleep_range(1000, 1000);
 +      } while (cnt-- > 0);
 +
 +      if (cnt <= 0) {
 +              BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
 +                        pend_bits);
 +              return -EBUSY;
 +      }
 +
 +      return 0;
 +}
 +
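 +/*
 + * bnx2x_process_kill - perform the "process kill" chip recovery flow.
 + *
 + * Waits for the Tetris buffer to drain, closes gates #2, #3 and #4, polls
 + * the IGU VQs (57712 and newer), resets the chip (optionally including the
 + * global blocks and the MCP) and re-opens the gates.
 + */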
 +static int bnx2x_process_kill(struct bnx2x *bp, bool global)
 +{
 +      int cnt = 1000;
 +      u32 val = 0;
 +      u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
 +
 +
 +      /* Empty the Tetris buffer, wait for 1s */
 +      do {
 +              sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
 +              blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
 +              port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
 +              port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
 +              pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
 +              if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
 +                  ((port_is_idle_0 & 0x1) == 0x1) &&
 +                  ((port_is_idle_1 & 0x1) == 0x1) &&
 +                  (pgl_exp_rom2 == 0xffffffff))
 +                      break;
 +              usleep_range(1000, 1000);
 +      } while (cnt-- > 0);
 +
 +      if (cnt <= 0) {
 +              DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
 +                        " are still"
 +                        " outstanding read requests after 1s!\n");
 +              DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
 +                        " port_is_idle_0=0x%08x,"
 +                        " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
 +                        sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
 +                        pgl_exp_rom2);
 +              return -EAGAIN;
 +      }
 +
 +      barrier();
 +
 +      /* Close gates #2, #3 and #4 */
 +      bnx2x_set_234_gates(bp, true);
 +
 +      /* Poll for IGU VQs for 57712 and newer chips */
 +      if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
 +              return -EAGAIN;
 +
 +
 +      /* TBD: Indicate that "process kill" is in progress to MCP */
 +
 +      /* Clear "unprepared" bit */
 +      REG_WR(bp, MISC_REG_UNPREPARED, 0);
 +      barrier();
 +
 +      /* Make sure all is written to the chip before the reset */
 +      mmiowb();
 +
 +      /* Wait for 1ms to empty GLUE and PCI-E core queues,
 +       * PSWHST, GRC and PSWRD Tetris buffer.
 +       */
 +      usleep_range(1000, 1000);
 +
 +      /* Prepare for chip reset: */
 +      /* MCP */
 +      if (global)
 +              bnx2x_reset_mcp_prep(bp, &val);
 +
 +      /* PXP */
 +      bnx2x_pxp_prep(bp);
 +      barrier();
 +
 +      /* reset the chip */
 +      bnx2x_process_kill_chip_reset(bp, global);
 +      barrier();
 +
 +      /* Recover after reset: */
 +      /* MCP */
 +      if (global && bnx2x_reset_mcp_comp(bp, val))
 +              return -EAGAIN;
 +
 +      /* TBD: Add resetting the NO_MCP mode DB here */
 +
 +      /* PXP */
 +      bnx2x_pxp_prep(bp);
 +
 +      /* Open the gates #2, #3 and #4 */
 +      bnx2x_set_234_gates(bp, false);
 +
 +      /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
 +       * reset state, re-enable attentions. */
 +
 +      return 0;
 +}
 +
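 +/*
 + * bnx2x_leader_reset - recovery flow executed by the leader function.
 + *
 + * Runs the "process kill" sequence and, on success, clears the
 + * RESET_IN_PROGRESS (and, if needed, RESET_GLOBAL) bits before releasing
 + * the leadership lock.
 + */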
 +int bnx2x_leader_reset(struct bnx2x *bp)
 +{
 +      int rc = 0;
 +      bool global = bnx2x_reset_is_global(bp);
 +
 +      /* Try to recover after the failure */
 +      if (bnx2x_process_kill(bp, global)) {
 +              netdev_err(bp->dev, "Something bad has happened on engine %d! "
 +                                  "Aii!\n", BP_PATH(bp));
 +              rc = -EAGAIN;
 +              goto exit_leader_reset;
 +      }
 +
 +      /*
 +       * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver
 +       * state.
 +       */
 +      bnx2x_set_reset_done(bp);
 +      if (global)
 +              bnx2x_clear_reset_global(bp);
 +
 +exit_leader_reset:
 +      bp->is_leader = 0;
 +      bnx2x_release_leader_lock(bp);
 +      smp_mb();
 +      return rc;
 +}
 +
 +static inline void bnx2x_recovery_failed(struct bnx2x *bp)
 +{
 +      netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
 +
 +      /* Disconnect this device */
 +      netif_device_detach(bp->dev);
 +
 +      /*
 +       * Block ifup for all functions on this engine until "process kill"
 +       * or power cycle.
 +       */
 +      bnx2x_set_reset_in_progress(bp);
 +
 +      /* Shut down the power */
 +      bnx2x_set_power_state(bp, PCI_D3hot);
 +
 +      bp->recovery_state = BNX2X_RECOVERY_FAILED;
 +
 +      smp_mb();
 +}
 +
 +/*
 + * Assumption: runs under rtnl lock. This together with the fact
 + * that it's called only from bnx2x_sp_rtnl() ensures that it
 + * will never be called when netif_running(bp->dev) is false.
 + */
 +static void bnx2x_parity_recover(struct bnx2x *bp)
 +{
 +      bool global = false;
 +
 +      DP(NETIF_MSG_HW, "Handling parity\n");
 +      while (1) {
 +              switch (bp->recovery_state) {
 +              case BNX2X_RECOVERY_INIT:
 +                      DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
 +                      bnx2x_chk_parity_attn(bp, &global, false);
 +
 +                      /* Try to get a LEADER_LOCK HW lock */
 +                      if (bnx2x_trylock_leader_lock(bp)) {
 +                              bnx2x_set_reset_in_progress(bp);
 +                              /*
 +                               * Check if there is a global attention and if
 +                               * there was a global attention, set the global
 +                               * reset bit.
 +                               */
 +
 +                              if (global)
 +                                      bnx2x_set_reset_global(bp);
 +
 +                              bp->is_leader = 1;
 +                      }
 +
 +                      /* Stop the driver */
 +                      /* If interface has been removed - break */
 +                      if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
 +                              return;
 +
 +                      bp->recovery_state = BNX2X_RECOVERY_WAIT;
 +
 +                      /*
 +                       * Reset MCP command sequence number and MCP mail box
 +                       * sequence as we are going to reset the MCP.
 +                       */
 +                      if (global) {
 +                              bp->fw_seq = 0;
 +                              bp->fw_drv_pulse_wr_seq = 0;
 +                      }
 +
 +                      /* Ensure "is_leader", MCP command sequence and
 +                       * "recovery_state" update values are seen on other
 +                       * CPUs.
 +                       */
 +                      smp_mb();
 +                      break;
 +
 +              case BNX2X_RECOVERY_WAIT:
 +                      DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
 +                      if (bp->is_leader) {
 +                              int other_engine = BP_PATH(bp) ? 0 : 1;
 +                              u32 other_load_counter =
 +                                      bnx2x_get_load_cnt(bp, other_engine);
 +                              u32 load_counter =
 +                                      bnx2x_get_load_cnt(bp, BP_PATH(bp));
 +                              global = bnx2x_reset_is_global(bp);
 +
 +                              /*
 +                               * In case of a parity in a global block, let
 +                               * the first leader that performs a
 +                               * leader_reset() reset the global blocks in
 +                               * order to clear global attentions. Otherwise
 +                               * the gates will remain closed for that
 +                               * engine.
 +                               */
 +                              if (load_counter ||
 +                                  (global && other_load_counter)) {
 +                                      /* Wait until all other functions get
 +                                       * down.
 +                                       */
 +                                      schedule_delayed_work(&bp->sp_rtnl_task,
 +                                                              HZ/10);
 +                                      return;
 +                              } else {
 +                                      /* If all other functions got down -
 +                                       * try to bring the chip back to
 +                                       * normal. In any case it's an exit
 +                                       * point for a leader.
 +                                       */
 +                                      if (bnx2x_leader_reset(bp)) {
 +                                              bnx2x_recovery_failed(bp);
 +                                              return;
 +                                      }
 +
 +                                      /* If we are here, means that the
 +                                       * leader has succeeded and doesn't
 +                                       * want to be a leader any more. Try
 +                                       * to continue as a non-leader.
 +                                       */
 +                                      break;
 +                              }
 +                      } else { /* non-leader */
 +                              if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
 +                                      /* Try to get a LEADER_LOCK HW lock as
 +                                       * long as a former leader may have
 +                                       * been unloaded by the user or
 +                                       * released leadership for some
 +                                       * other reason.
 +                                       */
 +                                      if (bnx2x_trylock_leader_lock(bp)) {
 +                                              /* I'm a leader now! Restart a
 +                                               * switch case.
 +                                               */
 +                                              bp->is_leader = 1;
 +                                              break;
 +                                      }
 +
 +                                      schedule_delayed_work(&bp->sp_rtnl_task,
 +                                                              HZ/10);
 +                                      return;
 +
 +                              } else {
 +                                      /*
 +                                       * If there was a global attention, wait
 +                                       * for it to be cleared.
 +                                       */
 +                                      if (bnx2x_reset_is_global(bp)) {
 +                                              schedule_delayed_work(
 +                                                      &bp->sp_rtnl_task,
 +                                                      HZ/10);
 +                                              return;
 +                                      }
 +
 +                                      if (bnx2x_nic_load(bp, LOAD_NORMAL))
 +                                              bnx2x_recovery_failed(bp);
 +                                      else {
 +                                              bp->recovery_state =
 +                                                      BNX2X_RECOVERY_DONE;
 +                                              smp_mb();
 +                                      }
 +
 +                                      return;
 +                              }
 +                      }
 +              default:
 +                      return;
 +              }
 +      }
 +}
 +
 +/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 + * scheduled on a general queue in order to prevent a deadlock.
 + */
 +static void bnx2x_sp_rtnl_task(struct work_struct *work)
 +{
 +      struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
 +
 +      rtnl_lock();
 +
 +      if (!netif_running(bp->dev))
 +              goto sp_rtnl_exit;
 +
 +      /* if stop on error is defined no recovery flows should be executed */
 +#ifdef BNX2X_STOP_ON_ERROR
 +      BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
 +                "so reset not done to allow debug dump,\n"
 +                "you will need to reboot when done\n");
 +      goto sp_rtnl_not_reset;
 +#endif
 +
 +      if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
 +              /*
 +               * Clear all pending SP commands as we are going to reset the
 +               * function anyway.
 +               */
 +              bp->sp_rtnl_state = 0;
 +              smp_mb();
 +
 +              bnx2x_parity_recover(bp);
 +
 +              goto sp_rtnl_exit;
 +      }
 +
 +      if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
 +              /*
 +               * Clear all pending SP commands as we are going to reset the
 +               * function anyway.
 +               */
 +              bp->sp_rtnl_state = 0;
 +              smp_mb();
 +
 +              bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 +              bnx2x_nic_load(bp, LOAD_NORMAL);
 +
 +              goto sp_rtnl_exit;
 +      }
 +#ifdef BNX2X_STOP_ON_ERROR
 +sp_rtnl_not_reset:
 +#endif
 +      if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
 +              bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
 +
 +sp_rtnl_exit:
 +      rtnl_unlock();
 +}
 +
 +/* end of nic load/unload */
 +
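 +/*
 + * bnx2x_period_task - periodic link maintenance.
 + *
 + * Runs bnx2x_period_func() under the PHY lock on the PMF and re-queues
 + * itself every second while the interface is running.
 + */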
 +static void bnx2x_period_task(struct work_struct *work)
 +{
 +      struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
 +
 +      if (!netif_running(bp->dev))
 +              goto period_task_exit;
 +
 +      if (CHIP_REV_IS_SLOW(bp)) {
 +              BNX2X_ERR("period task called on emulation, ignoring\n");
 +              goto period_task_exit;
 +      }
 +
 +      bnx2x_acquire_phy_lock(bp);
 +      /*
 +       * The barrier is needed to ensure the ordering between the writing to
 +       * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
 +       * the reading here.
 +       */
 +      smp_mb();
 +      if (bp->port.pmf) {
 +              bnx2x_period_func(&bp->link_params, &bp->link_vars);
 +
 +              /* Re-queue task in 1 sec */
 +              queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
 +      }
 +
 +      bnx2x_release_phy_lock(bp);
 +period_task_exit:
 +      return;
 +}
 +
 +/*
 + * Init service functions
 + */
 +
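 +/*
 + * bnx2x_get_pretend_reg - return the PGL "pretend" register address for
 + * this function (base + absolute function number * stride).
 + */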
 +static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 +{
 +      u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
 +      u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
 +      return base + (BP_ABS_FUNC(bp)) * stride;
 +}
 +
 +static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
 +{
 +      u32 reg = bnx2x_get_pretend_reg(bp);
 +
 +      /* Flush all outstanding writes */
 +      mmiowb();
 +
 +      /* Pretend to be function 0 */
 +      REG_WR(bp, reg, 0);
 +      REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */
 +
 +      /* From now we are in the "like-E1" mode */
 +      bnx2x_int_disable(bp);
 +
 +      /* Flush all outstanding writes */
 +      mmiowb();
 +
 +      /* Restore the original function */
 +      REG_WR(bp, reg, BP_ABS_FUNC(bp));
 +      REG_RD(bp, reg);
 +}
 +
 +static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
 +{
 +      if (CHIP_IS_E1(bp))
 +              bnx2x_int_disable(bp);
 +      else
 +              bnx2x_undi_int_disable_e1h(bp);
 +}
 +
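 +/*
 + * bnx2x_undi_unload - reset the device if a pre-boot (UNDI) driver left it
 + * initialized.
 + *
 + * Detects a loaded UNDI driver via the DORQ normal-bell CID offset, requests
 + * unload on both ports from the MCP, closes input traffic and resets the
 + * chip while preserving the NIG port swap configuration.
 + */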
 +static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 +{
 +      u32 val;
 +
 +      /* Check if there is any driver already loaded */
 +      val = REG_RD(bp, MISC_REG_UNPREPARED);
 +      if (val == 0x1) {
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
++
++              bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
++              /*
++               * Check if it is the UNDI driver
 +               * UNDI driver initializes CID offset for normal bell to 0x7
 +               */
-                       /* now it's safe to release the lock */
-                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 +              val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
 +              if (val == 0x7) {
 +                      u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +                      /* save our pf_num */
 +                      int orig_pf_num = bp->pf_num;
 +                      int port;
 +                      u32 swap_en, swap_val, value;
 +
 +                      /* clear the UNDI indication */
 +                      REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
 +
 +                      BNX2X_DEV_INFO("UNDI is active! reset device\n");
 +
 +                      /* try unload UNDI on port 0 */
 +                      bp->pf_num = 0;
 +                      bp->fw_seq =
 +                            (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
 +                              DRV_MSG_SEQ_NUMBER_MASK);
 +                      reset_code = bnx2x_fw_command(bp, reset_code, 0);
 +
 +                      /* if UNDI is loaded on the other port */
 +                      if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
 +
 +                              /* send "DONE" for previous unload */
 +                              bnx2x_fw_command(bp,
 +                                               DRV_MSG_CODE_UNLOAD_DONE, 0);
 +
 +                              /* unload UNDI on port 1 */
 +                              bp->pf_num = 1;
 +                              bp->fw_seq =
 +                            (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
 +                                      DRV_MSG_SEQ_NUMBER_MASK);
 +                              reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +
 +                              bnx2x_fw_command(bp, reset_code, 0);
 +                      }
 +
-               } else
-                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 +                      bnx2x_undi_int_disable(bp);
 +                      port = BP_PORT(bp);
 +
 +                      /* close input traffic and wait for it */
 +                      /* Do not rcv packets to BRB */
 +                      REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
 +                                         NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
 +                      /* Do not direct rcv packets that are not for MCP to
 +                       * the BRB */
 +                      REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
 +                                         NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
 +                      /* clear AEU */
 +                      REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 +                                         MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
 +                      msleep(10);
 +
 +                      /* save NIG port swap info */
 +                      swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
 +                      swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 +                      /* reset device */
 +                      REG_WR(bp,
 +                             GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 +                             0xd3ffffff);
 +
 +                      value = 0x1400;
 +                      if (CHIP_IS_E3(bp)) {
 +                              value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
 +                              value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
 +                      }
 +
 +                      REG_WR(bp,
 +                             GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 +                             value);
 +
 +                      /* take the NIG out of reset and restore swap values */
 +                      REG_WR(bp,
 +                             GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
 +                             MISC_REGISTERS_RESET_REG_1_RST_NIG);
 +                      REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
 +                      REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
 +
 +                      /* send unload done to the MCP */
 +                      bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 +
 +                      /* restore our func and fw_seq */
 +                      bp->pf_num = orig_pf_num;
 +                      bp->fw_seq =
 +                            (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
 +                              DRV_MSG_SEQ_NUMBER_MASK);
-       int vn = BP_E1HVN(bp);
++              }
++
++              /* now it's safe to release the lock */
++              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +      }
 +}
 +
 +static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 +{
 +      u32 val, val2, val3, val4, id;
 +      u16 pmc;
 +
 +      /* Get the chip revision id and number. */
 +      /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
 +      val = REG_RD(bp, MISC_REG_CHIP_NUM);
 +      id = ((val & 0xffff) << 16);
 +      val = REG_RD(bp, MISC_REG_CHIP_REV);
 +      id |= ((val & 0xf) << 12);
 +      val = REG_RD(bp, MISC_REG_CHIP_METAL);
 +      id |= ((val & 0xff) << 4);
 +      val = REG_RD(bp, MISC_REG_BOND_ID);
 +      id |= (val & 0xf);
 +      bp->common.chip_id = id;
 +
 +      /* Set doorbell size */
 +      bp->db_size = (1 << BNX2X_DB_SHIFT);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
 +              if ((val & 1) == 0)
 +                      val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
 +              else
 +                      val = (val >> 1) & 1;
 +              BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
 +                                                     "2_PORT_MODE");
 +              bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
 +                                               CHIP_2_PORT_MODE;
 +
 +              if (CHIP_MODE_IS_4_PORT(bp))
 +                      bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
 +              else
 +                      bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
 +      } else {
 +              bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
 +              bp->pfid = bp->pf_num;                  /* 0..7 */
 +      }
 +
 +      bp->link_params.chip_id = bp->common.chip_id;
 +      BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
 +
 +      val = (REG_RD(bp, 0x2874) & 0x55);
 +      if ((bp->common.chip_id & 0x1) ||
 +          (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
 +              bp->flags |= ONE_PORT_FLAG;
 +              BNX2X_DEV_INFO("single port device\n");
 +      }
 +
 +      val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
 +      bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
 +                               (val & MCPR_NVM_CFG4_FLASH_SIZE));
 +      BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
 +                     bp->common.flash_size, bp->common.flash_size);
 +
 +      bnx2x_init_shmem(bp);
 +
 +
 +
 +      bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
 +                                      MISC_REG_GENERIC_CR_1 :
 +                                      MISC_REG_GENERIC_CR_0));
 +
 +      bp->link_params.shmem_base = bp->common.shmem_base;
 +      bp->link_params.shmem2_base = bp->common.shmem2_base;
 +      BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
 +                     bp->common.shmem_base, bp->common.shmem2_base);
 +
 +      if (!bp->common.shmem_base) {
 +              BNX2X_DEV_INFO("MCP not active\n");
 +              bp->flags |= NO_MCP_FLAG;
 +              return;
 +      }
 +
 +      bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
 +      BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
 +
 +      bp->link_params.hw_led_mode = ((bp->common.hw_config &
 +                                      SHARED_HW_CFG_LED_MODE_MASK) >>
 +                                     SHARED_HW_CFG_LED_MODE_SHIFT);
 +
 +      bp->link_params.feature_config_flags = 0;
 +      val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
 +      if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
 +              bp->link_params.feature_config_flags |=
 +                              FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
 +      else
 +              bp->link_params.feature_config_flags &=
 +                              ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
 +
 +      val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
 +      bp->common.bc_ver = val;
 +      BNX2X_DEV_INFO("bc_ver %X\n", val);
 +      if (val < BNX2X_BC_VER) {
 +              /* for now only warn
 +               * later we might need to enforce this */
 +              BNX2X_ERR("This driver needs bc_ver %X but found %X, "
 +                        "please upgrade BC\n", BNX2X_BC_VER, val);
 +      }
 +      bp->link_params.feature_config_flags |=
 +                              (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
 +                              FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
 +
 +      bp->link_params.feature_config_flags |=
 +              (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
 +              FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
 +
 +      bp->link_params.feature_config_flags |=
 +              (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
 +              FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
 +
 +      pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
 +      bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
 +
 +      BNX2X_DEV_INFO("%sWoL capable\n",
 +                     (bp->flags & NO_WOL_FLAG) ? "not " : "");
 +
 +      val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
 +      val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
 +      val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
 +      val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
 +
 +      dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
 +               val, val2, val3, val4);
 +}
 +
 +#define IGU_FID(val)  GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
 +#define IGU_VEC(val)  GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
 +
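 +/*
 + * bnx2x_get_igu_cam_info - discover the IGU status block layout.
 + *
 + * In backward-compatible interrupt mode the layout is computed from the
 + * function/vn number; otherwise the IGU CAM is scanned for the entries that
 + * belong to this PF, recording the default and the first fastpath SB ids.
 + */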
 +static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
 +{
 +      int pfid = BP_FUNC(bp);
-       vn = BP_E1HVN(bp);
 +      int igu_sb_id;
 +      u32 val;
 +      u8 fid, igu_sb_cnt = 0;
 +
 +      bp->igu_base_sb = 0xff;
 +      if (CHIP_INT_MODE_IS_BC(bp)) {
++              int vn = BP_VN(bp);
 +              igu_sb_cnt = bp->igu_sb_cnt;
 +              bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
 +                      FP_SB_MAX_E1x;
 +
 +              bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
 +                      (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
 +
 +              return;
 +      }
 +
 +      /* IGU in normal mode - read CAM */
 +      for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
 +           igu_sb_id++) {
 +              val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
 +              if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
 +                      continue;
 +              fid = IGU_FID(val);
 +              if ((fid & IGU_FID_ENCODE_IS_PF)) {
 +                      if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
 +                              continue;
 +                      if (IGU_VEC(val) == 0)
 +                              /* default status block */
 +                              bp->igu_dsb_id = igu_sb_id;
 +                      else {
 +                              if (bp->igu_base_sb == 0xff)
 +                                      bp->igu_base_sb = igu_sb_id;
 +                              igu_sb_cnt++;
 +                      }
 +              }
 +      }
 +
 +#ifdef CONFIG_PCI_MSI
 +      /*
 +       * It's expected that the number of CAM entries for this function is equal
 +       * to the number evaluated based on the MSI-X table size. We want a
 +       * harsh warning if these values are different!
 +       */
 +      WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
 +#endif
 +
 +      if (igu_sb_cnt == 0)
 +              BNX2X_ERR("CAM configuration error\n");
 +}
 +
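 +/*
 + * bnx2x_link_settings_supported - build the supported link-mode masks.
 + *
 + * Aggregates the supported modes of the external PHY(s), reads the PHY
 + * address for the configured switch type and masks out speeds that are not
 + * allowed by speed_cap_mask for each configuration.
 + */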
 +static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
 +                                                  u32 switch_cfg)
 +{
 +      int cfg_size = 0, idx, port = BP_PORT(bp);
 +
 +      /* Aggregation of supported attributes of all external phys */
 +      bp->port.supported[0] = 0;
 +      bp->port.supported[1] = 0;
 +      switch (bp->link_params.num_phys) {
 +      case 1:
 +              bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
 +              cfg_size = 1;
 +              break;
 +      case 2:
 +              bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
 +              cfg_size = 1;
 +              break;
 +      case 3:
 +              if (bp->link_params.multi_phy_config &
 +                  PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
 +                      bp->port.supported[1] =
 +                              bp->link_params.phy[EXT_PHY1].supported;
 +                      bp->port.supported[0] =
 +                              bp->link_params.phy[EXT_PHY2].supported;
 +              } else {
 +                      bp->port.supported[0] =
 +                              bp->link_params.phy[EXT_PHY1].supported;
 +                      bp->port.supported[1] =
 +                              bp->link_params.phy[EXT_PHY2].supported;
 +              }
 +              cfg_size = 2;
 +              break;
 +      }
 +
 +      if (!(bp->port.supported[0] || bp->port.supported[1])) {
 +              BNX2X_ERR("NVRAM config error. BAD phy config. "
 +                        "PHY1 config 0x%x, PHY2 config 0x%x\n",
 +                         SHMEM_RD(bp,
 +                         dev_info.port_hw_config[port].external_phy_config),
 +                         SHMEM_RD(bp,
 +                         dev_info.port_hw_config[port].external_phy_config2));
 +                      return;
 +      }
 +
 +      if (CHIP_IS_E3(bp))
 +              bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
 +      else {
 +              switch (switch_cfg) {
 +              case SWITCH_CFG_1G:
 +                      bp->port.phy_addr = REG_RD(
 +                              bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
 +                      break;
 +              case SWITCH_CFG_10G:
 +                      bp->port.phy_addr = REG_RD(
 +                              bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
 +                      break;
 +              default:
 +                      BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
 +                                bp->port.link_config[0]);
 +                      return;
 +              }
 +      }
 +      BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
 +      /* mask what we support according to speed_cap_mask per configuration */
 +      for (idx = 0; idx < cfg_size; idx++) {
 +              if (!(bp->link_params.speed_cap_mask[idx] &
 +                              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
 +                      bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
 +
 +              if (!(bp->link_params.speed_cap_mask[idx] &
 +                              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
 +                      bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
 +
 +              if (!(bp->link_params.speed_cap_mask[idx] &
 +                              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
 +                      bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
 +
 +              if (!(bp->link_params.speed_cap_mask[idx] &
 +                              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
 +                      bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
 +
 +              if (!(bp->link_params.speed_cap_mask[idx] &
 +                                      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
 +                      bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
 +                                                   SUPPORTED_1000baseT_Full);
 +
 +              if (!(bp->link_params.speed_cap_mask[idx] &
 +                                      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
 +                      bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
 +
 +              if (!(bp->link_params.speed_cap_mask[idx] &
 +                                      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
 +                      bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
 +
 +      }
 +
 +      BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
 +                     bp->port.supported[1]);
 +}
 +
 +static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
 +{
 +      u32 link_config, idx, cfg_size = 0;
 +      bp->port.advertising[0] = 0;
 +      bp->port.advertising[1] = 0;
 +      switch (bp->link_params.num_phys) {
 +      case 1:
 +      case 2:
 +              cfg_size = 1;
 +              break;
 +      case 3:
 +              cfg_size = 2;
 +              break;
 +      }
 +      for (idx = 0; idx < cfg_size; idx++) {
 +              bp->link_params.req_duplex[idx] = DUPLEX_FULL;
 +              link_config = bp->port.link_config[idx];
 +              switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
 +              case PORT_FEATURE_LINK_SPEED_AUTO:
 +                      if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
 +                              bp->link_params.req_line_speed[idx] =
 +                                      SPEED_AUTO_NEG;
 +                              bp->port.advertising[idx] |=
 +                                      bp->port.supported[idx];
 +                      } else {
 +                              /* force 10G, no AN */
 +                              bp->link_params.req_line_speed[idx] =
 +                                      SPEED_10000;
 +                              bp->port.advertising[idx] |=
 +                                      (ADVERTISED_10000baseT_Full |
 +                                       ADVERTISED_FIBRE);
 +                              continue;
 +                      }
 +                      break;
 +
 +              case PORT_FEATURE_LINK_SPEED_10M_FULL:
 +                      if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
 +                              bp->link_params.req_line_speed[idx] =
 +                                      SPEED_10;
 +                              bp->port.advertising[idx] |=
 +                                      (ADVERTISED_10baseT_Full |
 +                                       ADVERTISED_TP);
 +                      } else {
 +                              BNX2X_ERR("NVRAM config error. "
 +                                          "Invalid link_config 0x%x"
 +                                          "  speed_cap_mask 0x%x\n",
 +                                          link_config,
 +                                  bp->link_params.speed_cap_mask[idx]);
 +                              return;
 +                      }
 +                      break;
 +
 +              case PORT_FEATURE_LINK_SPEED_10M_HALF:
 +                      if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
 +                              bp->link_params.req_line_speed[idx] =
 +                                      SPEED_10;
 +                              bp->link_params.req_duplex[idx] =
 +                                      DUPLEX_HALF;
 +                              bp->port.advertising[idx] |=
 +                                      (ADVERTISED_10baseT_Half |
 +                                       ADVERTISED_TP);
 +                      } else {
 +                              BNX2X_ERR("NVRAM config error. "
 +                                          "Invalid link_config 0x%x"
 +                                          "  speed_cap_mask 0x%x\n",
 +                                          link_config,
 +                                        bp->link_params.speed_cap_mask[idx]);
 +                              return;
 +                      }
 +                      break;
 +
 +              case PORT_FEATURE_LINK_SPEED_100M_FULL:
 +                      if (bp->port.supported[idx] &
 +                          SUPPORTED_100baseT_Full) {
 +                              bp->link_params.req_line_speed[idx] =
 +                                      SPEED_100;
 +                              bp->port.advertising[idx] |=
 +                                      (ADVERTISED_100baseT_Full |
 +                                       ADVERTISED_TP);
 +                      } else {
 +                              BNX2X_ERR("NVRAM config error. "
 +                                          "Invalid link_config 0x%x"
 +                                          "  speed_cap_mask 0x%x\n",
 +                                          link_config,
 +                                        bp->link_params.speed_cap_mask[idx]);
 +                              return;
 +                      }
 +                      break;
 +
 +              case PORT_FEATURE_LINK_SPEED_100M_HALF:
 +                      if (bp->port.supported[idx] &
 +                          SUPPORTED_100baseT_Half) {
 +                              bp->link_params.req_line_speed[idx] =
 +                                                              SPEED_100;
 +                              bp->link_params.req_duplex[idx] =
 +                                                              DUPLEX_HALF;
 +                              bp->port.advertising[idx] |=
 +                                      (ADVERTISED_100baseT_Half |
 +                                       ADVERTISED_TP);
 +                      } else {
 +                              BNX2X_ERR("NVRAM config error. "
 +                                  "Invalid link_config 0x%x"
 +                                  "  speed_cap_mask 0x%x\n",
 +                                  link_config,
 +                                  bp->link_params.speed_cap_mask[idx]);
 +                              return;
 +                      }
 +                      break;
 +
 +              case PORT_FEATURE_LINK_SPEED_1G:
 +                      if (bp->port.supported[idx] &
 +                          SUPPORTED_1000baseT_Full) {
 +                              bp->link_params.req_line_speed[idx] =
 +                                      SPEED_1000;
 +                              bp->port.advertising[idx] |=
 +                                      (ADVERTISED_1000baseT_Full |
 +                                       ADVERTISED_TP);
 +                      } else {
 +                              BNX2X_ERR("NVRAM config error. "
 +                                  "Invalid link_config 0x%x"
 +                                  "  speed_cap_mask 0x%x\n",
 +                                  link_config,
 +                                  bp->link_params.speed_cap_mask[idx]);
 +                              return;
 +                      }
 +                      break;
 +
 +              case PORT_FEATURE_LINK_SPEED_2_5G:
 +                      if (bp->port.supported[idx] &
 +                          SUPPORTED_2500baseX_Full) {
 +                              bp->link_params.req_line_speed[idx] =
 +                                      SPEED_2500;
 +                              bp->port.advertising[idx] |=
 +                                      (ADVERTISED_2500baseX_Full |
 +                                              ADVERTISED_TP);
 +                      } else {
 +                              BNX2X_ERR("NVRAM config error. "
 +                                  "Invalid link_config 0x%x"
 +                                  "  speed_cap_mask 0x%x\n",
 +                                  link_config,
 +                                  bp->link_params.speed_cap_mask[idx]);
 +                              return;
 +                      }
 +                      break;
 +
 +              case PORT_FEATURE_LINK_SPEED_10G_CX4:
 +                      if (bp->port.supported[idx] &
 +                          SUPPORTED_10000baseT_Full) {
 +                              bp->link_params.req_line_speed[idx] =
 +                                      SPEED_10000;
 +                              bp->port.advertising[idx] |=
 +                                      (ADVERTISED_10000baseT_Full |
 +                                              ADVERTISED_FIBRE);
 +                      } else {
 +                              BNX2X_ERR("NVRAM config error. "
 +                                  "Invalid link_config 0x%x"
 +                                  "  speed_cap_mask 0x%x\n",
 +                                  link_config,
 +                                  bp->link_params.speed_cap_mask[idx]);
 +                              return;
 +                      }
 +                      break;
 +              case PORT_FEATURE_LINK_SPEED_20G:
 +                      bp->link_params.req_line_speed[idx] = SPEED_20000;
 +
 +                      break;
 +              default:
 +                      BNX2X_ERR("NVRAM config error. "
 +                                "BAD link speed link_config 0x%x\n",
 +                                link_config);
 +                              bp->link_params.req_line_speed[idx] =
 +                                                      SPEED_AUTO_NEG;
 +                              bp->port.advertising[idx] =
 +                                              bp->port.supported[idx];
 +                      break;
 +              }
 +
 +              bp->link_params.req_flow_ctrl[idx] = (link_config &
 +                                       PORT_FEATURE_FLOW_CONTROL_MASK);
 +              if ((bp->link_params.req_flow_ctrl[idx] ==
 +                   BNX2X_FLOW_CTRL_AUTO) &&
 +                  !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
 +                      bp->link_params.req_flow_ctrl[idx] =
 +                              BNX2X_FLOW_CTRL_NONE;
 +              }
 +
 +              BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
 +                             " 0x%x advertising 0x%x\n",
 +                             bp->link_params.req_line_speed[idx],
 +                             bp->link_params.req_duplex[idx],
 +                             bp->link_params.req_flow_ctrl[idx],
 +                             bp->port.advertising[idx]);
 +      }
 +}
 +
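 +/* Assemble a 6-byte MAC address from the 16-bit upper and 32-bit lower
 + * words read from shmem/MF config, converting to big-endian byte order.
 + */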
 +static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
 +{
 +      mac_hi = cpu_to_be16(mac_hi);
 +      mac_lo = cpu_to_be32(mac_lo);
 +      memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
 +      memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
 +}
 +
 +static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      u32 config;
 +      u32 ext_phy_type, ext_phy_config;
 +
 +      bp->link_params.bp = bp;
 +      bp->link_params.port = port;
 +
 +      bp->link_params.lane_config =
 +              SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
 +
 +      bp->link_params.speed_cap_mask[0] =
 +              SHMEM_RD(bp,
 +                       dev_info.port_hw_config[port].speed_capability_mask);
 +      bp->link_params.speed_cap_mask[1] =
 +              SHMEM_RD(bp,
 +                       dev_info.port_hw_config[port].speed_capability_mask2);
 +      bp->port.link_config[0] =
 +              SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
 +
 +      bp->port.link_config[1] =
 +              SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
 +
 +      bp->link_params.multi_phy_config =
 +              SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
 +      /* If the device is capable of WoL, set the default state according
 +       * to the HW
 +       */
 +      config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
 +      bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
 +                 (config & PORT_FEATURE_WOL_ENABLED));
 +
 +      BNX2X_DEV_INFO("lane_config 0x%08x  "
 +                     "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
 +                     bp->link_params.lane_config,
 +                     bp->link_params.speed_cap_mask[0],
 +                     bp->port.link_config[0]);
 +
 +      bp->link_params.switch_cfg = (bp->port.link_config[0] &
 +                                    PORT_FEATURE_CONNECTED_SWITCH_MASK);
 +      bnx2x_phy_probe(&bp->link_params);
 +      bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
 +
 +      bnx2x_link_settings_requested(bp);
 +
 +      /*
 +       * If connected directly, work with the internal PHY, otherwise, work
 +       * with the external PHY
 +       */
 +      ext_phy_config =
 +              SHMEM_RD(bp,
 +                       dev_info.port_hw_config[port].external_phy_config);
 +      ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
 +      if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
 +              bp->mdio.prtad = bp->port.phy_addr;
 +
 +      else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
 +               (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
 +              bp->mdio.prtad =
 +                      XGXS_EXT_PHY_ADDR(ext_phy_config);
 +
 +      /*
 +       * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
 +       * Check if a HW lock is required to access the MDC/MDIO bus to the PHY(s).
 +       * In MF mode, it is set to cover self-test cases.
 +      if (IS_MF(bp))
 +              bp->port.need_hw_lock = 1;
 +      else
 +              bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
 +                                                      bp->common.shmem_base,
 +                                                      bp->common.shmem2_base);
 +}
 +
 +#ifdef BCM_CNIC
 +static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      int func = BP_ABS_FUNC(bp);
 +
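 +      /* The licensed connection limits in shmem are stored XORed with
 +       * FW_ENCODE_32BIT_PATTERN; decode them here.
 +       */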
 +      u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 +                              drv_lic_key[port].max_iscsi_conn);
 +      u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 +                              drv_lic_key[port].max_fcoe_conn);
 +
 +      /* Get the maximum allowed number of iSCSI and FCoE connections */
 +      bp->cnic_eth_dev.max_iscsi_conn =
 +              (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
 +              BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
 +
 +      bp->cnic_eth_dev.max_fcoe_conn =
 +              (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
 +              BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
 +
 +      /* Read the WWN: */
 +      if (!IS_MF(bp)) {
 +              /* Port info */
 +              bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
 +                      SHMEM_RD(bp,
 +                              dev_info.port_hw_config[port].
 +                               fcoe_wwn_port_name_upper);
 +              bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
 +                      SHMEM_RD(bp,
 +                              dev_info.port_hw_config[port].
 +                               fcoe_wwn_port_name_lower);
 +
 +              /* Node info */
 +              bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
 +                      SHMEM_RD(bp,
 +                              dev_info.port_hw_config[port].
 +                               fcoe_wwn_node_name_upper);
 +              bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
 +                      SHMEM_RD(bp,
 +                              dev_info.port_hw_config[port].
 +                               fcoe_wwn_node_name_lower);
 +      } else if (!IS_MF_SD(bp)) {
 +              u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
 +
 +              /*
 +               * Read the WWN info only if the FCoE feature is enabled for
 +               * this function.
 +               */
 +              if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
 +                      /* Port info */
 +                      bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
 +                              MF_CFG_RD(bp, func_ext_config[func].
 +                                              fcoe_wwn_port_name_upper);
 +                      bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
 +                              MF_CFG_RD(bp, func_ext_config[func].
 +                                              fcoe_wwn_port_name_lower);
 +
 +                      /* Node info */
 +                      bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
 +                              MF_CFG_RD(bp, func_ext_config[func].
 +                                              fcoe_wwn_node_name_upper);
 +                      bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
 +                              MF_CFG_RD(bp, func_ext_config[func].
 +                                              fcoe_wwn_node_name_lower);
 +              }
 +      }
 +
 +      BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
 +                     bp->cnic_eth_dev.max_iscsi_conn,
 +                     bp->cnic_eth_dev.max_fcoe_conn);
 +
 +      /*
 +       * If the maximum allowed number of connections is zero,
 +       * disable the feature.
 +       */
 +      if (!bp->cnic_eth_dev.max_iscsi_conn)
 +              bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
 +
 +      if (!bp->cnic_eth_dev.max_fcoe_conn)
 +              bp->flags |= NO_FCOE_FLAG;
 +}
 +#endif
 +
 +static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 +{
 +      u32 val, val2;
 +      int func = BP_ABS_FUNC(bp);
 +      int port = BP_PORT(bp);
 +#ifdef BCM_CNIC
 +      u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
 +      u8 *fip_mac = bp->fip_mac;
 +#endif
 +
 +      /* Zero primary MAC configuration */
 +      memset(bp->dev->dev_addr, 0, ETH_ALEN);
 +
 +      if (BP_NOMCP(bp)) {
 +              BNX2X_ERROR("warning: random MAC workaround active\n");
 +              random_ether_addr(bp->dev->dev_addr);
 +      } else if (IS_MF(bp)) {
 +              val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
 +              val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
 +              if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
 +                  (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
 +                      bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 +
 +#ifdef BCM_CNIC
 +              /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor
 +               * an FCoE MAC then the appropriate feature should be disabled.
 +               */
 +              if (IS_MF_SI(bp)) {
 +                      u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
 +                      if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
 +                              val2 = MF_CFG_RD(bp, func_ext_config[func].
 +                                                   iscsi_mac_addr_upper);
 +                              val = MF_CFG_RD(bp, func_ext_config[func].
 +                                                  iscsi_mac_addr_lower);
 +                              bnx2x_set_mac_buf(iscsi_mac, val, val2);
 +                              BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
 +                                             iscsi_mac);
 +                      } else
 +                              bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
 +
 +                      if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
 +                              val2 = MF_CFG_RD(bp, func_ext_config[func].
 +                                                   fcoe_mac_addr_upper);
 +                              val = MF_CFG_RD(bp, func_ext_config[func].
 +                                                  fcoe_mac_addr_lower);
 +                              bnx2x_set_mac_buf(fip_mac, val, val2);
 +                              BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
 +                                             fip_mac);
 +
 +                      } else
 +                              bp->flags |= NO_FCOE_FLAG;
 +              }
 +#endif
 +      } else {
 +              /* in SF read MACs from port configuration */
 +              val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
 +              val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
 +              bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 +
 +#ifdef BCM_CNIC
 +              val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +                                  iscsi_mac_upper);
 +              val = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +                                 iscsi_mac_lower);
 +              bnx2x_set_mac_buf(iscsi_mac, val, val2);
 +
 +              val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +                                  fcoe_fip_mac_upper);
 +              val = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +                                 fcoe_fip_mac_lower);
 +              bnx2x_set_mac_buf(fip_mac, val, val2);
 +#endif
 +      }
 +
 +      memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
 +      memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 +
 +#ifdef BCM_CNIC
 +      /* Set the FCoE MAC in MF_SD mode */
 +      if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
 +              memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
 +
 +      /* Disable iSCSI if MAC configuration is
 +       * invalid.
 +       */
 +      if (!is_valid_ether_addr(iscsi_mac)) {
 +              bp->flags |= NO_ISCSI_FLAG;
 +              memset(iscsi_mac, 0, ETH_ALEN);
 +      }
 +
 +      /* Disable FCoE if MAC configuration is
 +       * invalid.
 +       */
 +      if (!is_valid_ether_addr(fip_mac)) {
 +              bp->flags |= NO_FCOE_FLAG;
 +              memset(bp->fip_mac, 0, ETH_ALEN);
 +      }
 +#endif
 +
 +      if (!is_valid_ether_addr(bp->dev->dev_addr))
 +              dev_err(&bp->pdev->dev,
 +                      "bad Ethernet MAC address configuration: "
 +                      "%pM, change it manually before bringing up "
 +                      "the appropriate network interface\n",
 +                      bp->dev->dev_addr);
 +}
 +
 +static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 +{
 +      int /*abs*/func = BP_ABS_FUNC(bp);
 +      int vn;
 +      u32 val = 0;
 +      int rc = 0;
 +
 +      bnx2x_get_common_hwinfo(bp);
 +
 +      /*
 +       * initialize IGU parameters
 +       */
 +      if (CHIP_IS_E1x(bp)) {
 +              bp->common.int_block = INT_BLOCK_HC;
 +
 +              bp->igu_dsb_id = DEF_SB_IGU_ID;
 +              bp->igu_base_sb = 0;
 +      } else {
 +              bp->common.int_block = INT_BLOCK_IGU;
++
++              /* do not allow device reset during IGU info processing */
++              bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
++
 +              val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
 +
 +              if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
 +                      int tout = 5000;
 +
 +                      BNX2X_DEV_INFO("FORCING Normal Mode\n");
 +
 +                      val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
 +                      REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
 +                      REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
 +
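 +                      /* wait up to tout * 1ms for the IGU memories
 +                       * to finish resetting
 +                       */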
 +                      while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
 +                              tout--;
 +                              usleep_range(1000, 1000);
 +                      }
 +
 +                      if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
 +                              dev_err(&bp->pdev->dev,
 +                                      "FORCING Normal Mode failed!!!\n");
 +                              return -EPERM;
 +                      }
 +              }
 +
 +              if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
 +                      BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
 +                      bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
 +              } else
 +                      BNX2X_DEV_INFO("IGU Normal Mode\n");
 +
 +              bnx2x_get_igu_cam_info(bp);
 +
++              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 +      }
 +
 +      /*
 +       * Set the base FW non-default (fast path) status block id; this value is
 +       * used to initialize the fw_sb_id saved on the fp/queue structure to
 +       * determine the id used by the FW.
 +       */
 +      if (CHIP_IS_E1x(bp))
 +              bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
 +      else /*
 +            * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
 +            * the same queue are indicated on the same IGU SB). So we prefer
 +            * FW and IGU SBs to be the same value.
 +            */
 +              bp->base_fw_ndsb = bp->igu_base_sb;
 +
 +      BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
 +                     "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
 +                     bp->igu_sb_cnt, bp->base_fw_ndsb);
 +
 +      /*
 +       * Initialize MF configuration
 +       */
 +
 +      bp->mf_ov = 0;
 +      bp->mf_mode = 0;
-       if (!BP_NOMCP(bp)) {
-               bp->fw_seq =
-                       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-                        DRV_MSG_SEQ_NUMBER_MASK);
-               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-       }
++      vn = BP_VN(bp);
 +
 +      if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
 +              BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
 +                             bp->common.shmem2_base, SHMEM2_RD(bp, size),
 +                            (u32)offsetof(struct shmem2_region, mf_cfg_addr));
 +
 +              if (SHMEM2_HAS(bp, mf_cfg_addr))
 +                      bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
 +              else
 +                      bp->common.mf_cfg_base = bp->common.shmem_base +
 +                              offsetof(struct shmem_region, func_mb) +
 +                              E1H_FUNC_MAX * sizeof(struct drv_func_mb);
 +              /*
 +               * get mf configuration:
 +               * 1. existence of MF configuration
 +               * 2. MAC address must be legal (check only upper bytes)
 +               *    for  Switch-Independent mode;
 +               *    OVLAN must be legal for Switch-Dependent mode
 +               * 3. SF_MODE configures specific MF mode
 +               */
 +              if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
 +                      /* get mf configuration */
 +                      val = SHMEM_RD(bp,
 +                                     dev_info.shared_feature_config.config);
 +                      val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
 +
 +                      switch (val) {
 +                      case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
 +                              val = MF_CFG_RD(bp, func_mf_config[func].
 +                                              mac_upper);
 +                              /* check for legal mac (upper bytes)*/
 +                              if (val != 0xffff) {
 +                                      bp->mf_mode = MULTI_FUNCTION_SI;
 +                                      bp->mf_config[vn] = MF_CFG_RD(bp,
 +                                                 func_mf_config[func].config);
 +                              } else
 +                                      BNX2X_DEV_INFO("illegal MAC address "
 +                                                     "for SI\n");
 +                              break;
 +                      case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
 +                              /* get OV configuration */
 +                              val = MF_CFG_RD(bp,
 +                                      func_mf_config[FUNC_0].e1hov_tag);
 +                              val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
 +
 +                              if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 +                                      bp->mf_mode = MULTI_FUNCTION_SD;
 +                                      bp->mf_config[vn] = MF_CFG_RD(bp,
 +                                              func_mf_config[func].config);
 +                              } else
 +                                      BNX2X_DEV_INFO("illegal OV for SD\n");
 +                              break;
 +                      default:
 +                              /* Unknown configuration: reset mf_config */
 +                              bp->mf_config[vn] = 0;
 +                              BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
 +                      }
 +              }
 +
 +              BNX2X_DEV_INFO("%s function mode\n",
 +                             IS_MF(bp) ? "multi" : "single");
 +
 +              switch (bp->mf_mode) {
 +              case MULTI_FUNCTION_SD:
 +                      val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
 +                            FUNC_MF_CFG_E1HOV_TAG_MASK;
 +                      if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 +                              bp->mf_ov = val;
 +                              bp->path_has_ovlan = true;
 +
 +                              BNX2X_DEV_INFO("MF OV for func %d is %d "
 +                                             "(0x%04x)\n", func, bp->mf_ov,
 +                                             bp->mf_ov);
 +                      } else {
 +                              dev_err(&bp->pdev->dev,
 +                                      "No valid MF OV for func %d, "
 +                                      "aborting\n", func);
 +                              return -EPERM;
 +                      }
 +                      break;
 +              case MULTI_FUNCTION_SI:
 +                      BNX2X_DEV_INFO("func %d is in MF "
 +                                     "switch-independent mode\n", func);
 +                      break;
 +              default:
 +                      if (vn) {
 +                              dev_err(&bp->pdev->dev,
 +                                      "VN %d is in a single function mode, "
 +                                      "aborting\n", vn);
 +                              return -EPERM;
 +                      }
 +                      break;
 +              }
 +
 +              /* Check if the other port on the path needs ovlan:
 +               * since MF configuration is shared between ports, the only
 +               * possible mixed modes are
 +               * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
 +               */
 +              if (CHIP_MODE_IS_4_PORT(bp) &&
 +                  !bp->path_has_ovlan &&
 +                  !IS_MF(bp) &&
 +                  bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
 +                      u8 other_port = !BP_PORT(bp);
 +                      u8 other_func = BP_PATH(bp) + 2*other_port;
 +                      val = MF_CFG_RD(bp,
 +                                      func_mf_config[other_func].e1hov_tag);
 +                      if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
 +                              bp->path_has_ovlan = true;
 +              }
 +      }
 +
 +      /* adjust igu_sb_cnt to MF for E1x */
 +      if (CHIP_IS_E1x(bp) && IS_MF(bp))
 +              bp->igu_sb_cnt /= E1HVN_MAX;
 +
 +      /* port info */
 +      bnx2x_get_port_hwinfo(bp);
 +
-       /* Clean the following indirect addresses for all functions since it
 +      /* Get MAC addresses */
 +      bnx2x_get_mac_hwinfo(bp);
 +
 +#ifdef BCM_CNIC
 +      bnx2x_get_cnic_info(bp);
 +#endif
 +
 +      /* Get current FW pulse sequence */
 +      if (!BP_NOMCP(bp)) {
 +              int mb_idx = BP_FW_MB_IDX(bp);
 +
 +              bp->fw_drv_pulse_wr_seq =
 +                              (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
 +                               DRV_PULSE_SEQ_MASK);
 +              BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
 +      }
 +
 +      return rc;
 +}
 +
 +static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
 +{
 +      int cnt, i, block_end, rodi;
 +      char vpd_data[BNX2X_VPD_LEN+1];
 +      char str_id_reg[VENDOR_ID_LEN+1];
 +      char str_id_cap[VENDOR_ID_LEN+1];
 +      u8 len;
 +
 +      cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
 +      memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
 +
 +      if (cnt < BNX2X_VPD_LEN)
 +              goto out_not_found;
 +
 +      i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
 +                           PCI_VPD_LRDT_RO_DATA);
 +      if (i < 0)
 +              goto out_not_found;
 +
 +
 +      block_end = i + PCI_VPD_LRDT_TAG_SIZE +
 +                  pci_vpd_lrdt_size(&vpd_data[i]);
 +
 +      i += PCI_VPD_LRDT_TAG_SIZE;
 +
 +      if (block_end > BNX2X_VPD_LEN)
 +              goto out_not_found;
 +
 +      rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
 +                                 PCI_VPD_RO_KEYWORD_MFR_ID);
 +      if (rodi < 0)
 +              goto out_not_found;
 +
 +      len = pci_vpd_info_field_size(&vpd_data[rodi]);
 +
 +      if (len != VENDOR_ID_LEN)
 +              goto out_not_found;
 +
 +      rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
 +
 +      /* vendor specific info */
 +      snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
 +      snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
 +      if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
 +          !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
 +
 +              rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
 +                                              PCI_VPD_RO_KEYWORD_VENDOR0);
 +              if (rodi >= 0) {
 +                      len = pci_vpd_info_field_size(&vpd_data[rodi]);
 +
 +                      rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
 +
 +                      if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
 +                              memcpy(bp->fw_ver, &vpd_data[rodi], len);
 +                              bp->fw_ver[len] = ' ';
 +                      }
 +              }
 +              return;
 +      }
 +out_not_found:
 +      return;
 +}
 +
 +static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
 +{
 +      u32 flags = 0;
 +
 +      if (CHIP_REV_IS_FPGA(bp))
 +              SET_FLAGS(flags, MODE_FPGA);
 +      else if (CHIP_REV_IS_EMUL(bp))
 +              SET_FLAGS(flags, MODE_EMUL);
 +      else
 +              SET_FLAGS(flags, MODE_ASIC);
 +
 +      if (CHIP_MODE_IS_4_PORT(bp))
 +              SET_FLAGS(flags, MODE_PORT4);
 +      else
 +              SET_FLAGS(flags, MODE_PORT2);
 +
 +      if (CHIP_IS_E2(bp))
 +              SET_FLAGS(flags, MODE_E2);
 +      else if (CHIP_IS_E3(bp)) {
 +              SET_FLAGS(flags, MODE_E3);
 +              if (CHIP_REV(bp) == CHIP_REV_Ax)
 +                      SET_FLAGS(flags, MODE_E3_A0);
 +              else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
 +                      SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
 +      }
 +
 +      if (IS_MF(bp)) {
 +              SET_FLAGS(flags, MODE_MF);
 +              switch (bp->mf_mode) {
 +              case MULTI_FUNCTION_SD:
 +                      SET_FLAGS(flags, MODE_MF_SD);
 +                      break;
 +              case MULTI_FUNCTION_SI:
 +                      SET_FLAGS(flags, MODE_MF_SI);
 +                      break;
 +              }
 +      } else
 +              SET_FLAGS(flags, MODE_SF);
 +
 +#if defined(__LITTLE_ENDIAN)
 +      SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
 +#else /*(__BIG_ENDIAN)*/
 +      SET_FLAGS(flags, MODE_BIG_ENDIAN);
 +#endif
 +      INIT_MODE_FLAGS(bp) = flags;
 +}
 +
 +static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 +{
 +      int func;
 +      int timer_interval;
 +      int rc;
 +
 +      mutex_init(&bp->port.phy_mutex);
 +      mutex_init(&bp->fw_mb_mutex);
 +      spin_lock_init(&bp->stats_lock);
 +#ifdef BCM_CNIC
 +      mutex_init(&bp->cnic_mutex);
 +#endif
 +
 +      INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 +      INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
 +      INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
 +      rc = bnx2x_get_hwinfo(bp);
 +      if (rc)
 +              return rc;
 +
 +      bnx2x_set_modes_bitmap(bp);
 +
 +      rc = bnx2x_alloc_mem_bp(bp);
 +      if (rc)
 +              return rc;
 +
 +      bnx2x_read_fwinfo(bp);
 +
 +      func = BP_FUNC(bp);
 +
 +      /* need to reset chip if undi was active */
 +      if (!BP_NOMCP(bp))
 +              bnx2x_undi_unload(bp);
 +
++      /* init fw_seq after undi_unload! */
++      if (!BP_NOMCP(bp)) {
++              bp->fw_seq =
++                      (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
++                       DRV_MSG_SEQ_NUMBER_MASK);
++              BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
++      }
++
 +      if (CHIP_REV_IS_FPGA(bp))
 +              dev_err(&bp->pdev->dev, "FPGA detected\n");
 +
 +      if (BP_NOMCP(bp) && (func == 0))
 +              dev_err(&bp->pdev->dev, "MCP disabled, "
 +                                      "must load devices in order!\n");
 +
 +      bp->multi_mode = multi_mode;
 +
 +      /* Set TPA flags */
 +      if (disable_tpa) {
 +              bp->flags &= ~TPA_ENABLE_FLAG;
 +              bp->dev->features &= ~NETIF_F_LRO;
 +      } else {
 +              bp->flags |= TPA_ENABLE_FLAG;
 +              bp->dev->features |= NETIF_F_LRO;
 +      }
 +      bp->disable_tpa = disable_tpa;
 +
 +      if (CHIP_IS_E1(bp))
 +              bp->dropless_fc = 0;
 +      else
 +              bp->dropless_fc = dropless_fc;
 +
 +      bp->mrrs = mrrs;
 +
 +      bp->tx_ring_size = MAX_TX_AVAIL;
 +
 +      /* make sure that the numbers are in the right granularity */
 +      bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
 +      bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
 +
 +      timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
 +      bp->current_interval = (poll ? poll : timer_interval);
 +
 +      init_timer(&bp->timer);
 +      bp->timer.expires = jiffies + bp->current_interval;
 +      bp->timer.data = (unsigned long) bp;
 +      bp->timer.function = bnx2x_timer;
 +
 +      bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
 +      bnx2x_dcbx_init_params(bp);
 +
 +#ifdef BCM_CNIC
 +      if (CHIP_IS_E1x(bp))
 +              bp->cnic_base_cl_id = FP_SB_MAX_E1x;
 +      else
 +              bp->cnic_base_cl_id = FP_SB_MAX_E2;
 +#endif
 +
 +      /* multiple tx priority */
 +      if (CHIP_IS_E1x(bp))
 +              bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
 +      if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
 +              bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
 +      if (CHIP_IS_E3B0(bp))
 +              bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
 +
 +      return rc;
 +}
 +
 +
 +/****************************************************************************
 +* General service functions
 +****************************************************************************/
 +
 +/*
 + * net_device service functions
 + */
 +
 +/* called with rtnl_lock */
 +static int bnx2x_open(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      bool global = false;
 +      int other_engine = BP_PATH(bp) ? 0 : 1;
 +      u32 other_load_counter, load_counter;
 +
 +      netif_carrier_off(dev);
 +
 +      bnx2x_set_power_state(bp, PCI_D0);
 +
 +      other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
 +      load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
 +
 +      /*
 +       * If parity had happen during the unload, then attentions
 +       * If a parity error happened during the unload, then attentions
 +       * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
 +       * complete the recovery.
 +       */
 +      if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
 +          bnx2x_chk_parity_attn(bp, &global, true))
 +              do {
 +                      /*
 +                       * If there are attentions and they are in global
 +                       * blocks, set the GLOBAL_RESET bit regardless of
 +                       * whether it will be this function that completes
 +                       * the recovery.
 +                       */
 +                      if (global)
 +                              bnx2x_set_reset_global(bp);
 +
 +                      /*
 +                       * Only the first function on the current engine should
 +                       * try to recover in open. In case of attentions in
 +                       * global blocks only the first in the chip should try
 +                       * to recover.
 +                       */
 +                      if ((!load_counter &&
 +                           (!global || !other_load_counter)) &&
 +                          bnx2x_trylock_leader_lock(bp) &&
 +                          !bnx2x_leader_reset(bp)) {
 +                              netdev_info(bp->dev, "Recovered in open\n");
 +                              break;
 +                      }
 +
 +                      /* recovery has failed... */
 +                      bnx2x_set_power_state(bp, PCI_D3hot);
 +                      bp->recovery_state = BNX2X_RECOVERY_FAILED;
 +
 +                      netdev_err(bp->dev, "Recovery flow hasn't been properly"
 +                      " completed yet. Try again later. If you still see this"
 +                      " message after a few retries then a power cycle is"
 +                      " required.\n");
 +
 +                      return -EAGAIN;
 +              } while (0);
 +
 +      bp->recovery_state = BNX2X_RECOVERY_DONE;
 +      return bnx2x_nic_load(bp, LOAD_OPEN);
 +}
 +
 +/* called with rtnl_lock */
 +static int bnx2x_close(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      /* Unload the driver, release IRQs */
 +      bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 +
 +      /* Power off */
 +      bnx2x_set_power_state(bp, PCI_D3hot);
 +
 +      return 0;
 +}
 +
 +static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
 +                                       struct bnx2x_mcast_ramrod_params *p)
 +{
 +      int mc_count = netdev_mc_count(bp->dev);
 +      struct bnx2x_mcast_list_elem *mc_mac =
 +              kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
 +      struct netdev_hw_addr *ha;
 +
 +      if (!mc_mac)
 +              return -ENOMEM;
 +
 +      INIT_LIST_HEAD(&p->mcast_list);
 +
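 +      /* mc_mac walks the array allocated above; each element is
 +       * chained onto the ramrod's mcast list.
 +       */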
 +      netdev_for_each_mc_addr(ha, bp->dev) {
 +              mc_mac->mac = bnx2x_mc_addr(ha);
 +              list_add_tail(&mc_mac->link, &p->mcast_list);
 +              mc_mac++;
 +      }
 +
 +      p->mcast_list_len = mc_count;
 +
 +      return 0;
 +}
 +
 +static inline void bnx2x_free_mcast_macs_list(
 +      struct bnx2x_mcast_ramrod_params *p)
 +{
 +      struct bnx2x_mcast_list_elem *mc_mac =
 +              list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
 +                               link);
 +
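 +      /* The list elements were allocated as a single array in
 +       * bnx2x_init_mcast_macs_list(), so freeing the first entry
 +       * releases the whole array.
 +       */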
 +      WARN_ON(!mc_mac);
 +      kfree(mc_mac);
 +}
 +
 +/**
 + * bnx2x_set_uc_list - configure a new list of unicast MACs.
 + *
 + * @bp: driver handle
 + *
 + * We will use zero (0) as a MAC type for these MACs.
 + */
 +static inline int bnx2x_set_uc_list(struct bnx2x *bp)
 +{
 +      int rc;
 +      struct net_device *dev = bp->dev;
 +      struct netdev_hw_addr *ha;
 +      struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
 +      unsigned long ramrod_flags = 0;
 +
 +      /* First schedule a clean up of the old configuration */
 +      rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
 +      if (rc < 0) {
 +              BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
 +              return rc;
 +      }
 +
 +      netdev_for_each_uc_addr(ha, dev) {
 +              rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
 +                                     BNX2X_UC_LIST_MAC, &ramrod_flags);
 +              if (rc < 0) {
 +                      BNX2X_ERR("Failed to schedule ADD operations: %d\n",
 +                                rc);
 +                      return rc;
 +              }
 +      }
 +
 +      /* Execute the pending commands */
 +      __set_bit(RAMROD_CONT, &ramrod_flags);
 +      return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
 +                               BNX2X_UC_LIST_MAC, &ramrod_flags);
 +}
 +
 +static inline int bnx2x_set_mc_list(struct bnx2x *bp)
 +{
 +      struct net_device *dev = bp->dev;
 +      struct bnx2x_mcast_ramrod_params rparam = {0};
 +      int rc = 0;
 +
 +      rparam.mcast_obj = &bp->mcast_obj;
 +
 +      /* first, clear all configured multicast MACs */
 +      rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 +      if (rc < 0) {
 +              BNX2X_ERR("Failed to clear multicast "
 +                        "configuration: %d\n", rc);
 +              return rc;
 +      }
 +
 +      /* then, configure a new MACs list */
 +      if (netdev_mc_count(dev)) {
 +              rc = bnx2x_init_mcast_macs_list(bp, &rparam);
 +              if (rc) {
 +                      BNX2X_ERR("Failed to create multicast MACs "
 +                                "list: %d\n", rc);
 +                      return rc;
 +              }
 +
 +              /* Now add the new MACs */
 +              rc = bnx2x_config_mcast(bp, &rparam,
 +                                      BNX2X_MCAST_CMD_ADD);
 +              if (rc < 0)
 +                      BNX2X_ERR("Failed to set a new multicast "
 +                                "configuration: %d\n", rc);
 +
 +              bnx2x_free_mcast_macs_list(&rparam);
 +      }
 +
 +      return rc;
 +}
 +
 +
 +/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
 +void bnx2x_set_rx_mode(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      u32 rx_mode = BNX2X_RX_MODE_NORMAL;
 +
 +      if (bp->state != BNX2X_STATE_OPEN) {
 +              DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
 +              return;
 +      }
 +
 +      DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
 +
 +      if (dev->flags & IFF_PROMISC)
 +              rx_mode = BNX2X_RX_MODE_PROMISC;
 +      else if ((dev->flags & IFF_ALLMULTI) ||
 +               ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
 +                CHIP_IS_E1(bp)))
 +              rx_mode = BNX2X_RX_MODE_ALLMULTI;
 +      else {
 +              /* some multicasts */
 +              if (bnx2x_set_mc_list(bp) < 0)
 +                      rx_mode = BNX2X_RX_MODE_ALLMULTI;
 +
 +              if (bnx2x_set_uc_list(bp) < 0)
 +                      rx_mode = BNX2X_RX_MODE_PROMISC;
 +      }
 +
 +      bp->rx_mode = rx_mode;
 +
 +      /* Schedule the rx_mode command */
 +      if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
 +              set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
 +              return;
 +      }
 +
 +      bnx2x_set_storm_rx_mode(bp);
 +}
 +
 +/* called with rtnl_lock */
 +static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
 +                         int devad, u16 addr)
 +{
 +      struct bnx2x *bp = netdev_priv(netdev);
 +      u16 value;
 +      int rc;
 +
 +      DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
 +         prtad, devad, addr);
 +
 +      /* The HW expects different devad if CL22 is used */
 +      devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
 +
 +      bnx2x_acquire_phy_lock(bp);
 +      rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
 +      bnx2x_release_phy_lock(bp);
 +      DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
 +
 +      if (!rc)
 +              rc = value;
 +      return rc;
 +}
 +
 +/* called with rtnl_lock */
 +static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
 +                          u16 addr, u16 value)
 +{
 +      struct bnx2x *bp = netdev_priv(netdev);
 +      int rc;
 +
 +      DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
 +                         " value 0x%x\n", prtad, devad, addr, value);
 +
 +      /* The HW expects different devad if CL22 is used */
 +      devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
 +
 +      bnx2x_acquire_phy_lock(bp);
 +      rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
 +      bnx2x_release_phy_lock(bp);
 +      return rc;
 +}
 +
 +/* called with rtnl_lock */
 +static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      struct mii_ioctl_data *mdio = if_mii(ifr);
 +
 +      DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
 +         mdio->phy_id, mdio->reg_num, mdio->val_in);
 +
 +      if (!netif_running(dev))
 +              return -EAGAIN;
 +
 +      return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void poll_bnx2x(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      disable_irq(bp->pdev->irq);
 +      bnx2x_interrupt(bp->pdev->irq, dev);
 +      enable_irq(bp->pdev->irq);
 +}
 +#endif
 +
 +static const struct net_device_ops bnx2x_netdev_ops = {
 +      .ndo_open               = bnx2x_open,
 +      .ndo_stop               = bnx2x_close,
 +      .ndo_start_xmit         = bnx2x_start_xmit,
 +      .ndo_select_queue       = bnx2x_select_queue,
 +      .ndo_set_rx_mode        = bnx2x_set_rx_mode,
 +      .ndo_set_mac_address    = bnx2x_change_mac_addr,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_do_ioctl           = bnx2x_ioctl,
 +      .ndo_change_mtu         = bnx2x_change_mtu,
 +      .ndo_fix_features       = bnx2x_fix_features,
 +      .ndo_set_features       = bnx2x_set_features,
 +      .ndo_tx_timeout         = bnx2x_tx_timeout,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = poll_bnx2x,
 +#endif
 +      .ndo_setup_tc           = bnx2x_setup_tc,
 +
 +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
 +      .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
 +#endif
 +};
 +
 +static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
 +{
 +      struct device *dev = &bp->pdev->dev;
 +
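 +      /* Prefer a 64-bit DMA mask and fall back to 32-bit if unsupported. */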
 +      if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
 +              bp->flags |= USING_DAC_FLAG;
 +              if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
 +                      dev_err(dev, "dma_set_coherent_mask failed, "
 +                                   "aborting\n");
 +                      return -EIO;
 +              }
 +      } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
 +              dev_err(dev, "System does not support DMA, aborting\n");
 +              return -EIO;
 +      }
 +
 +      return 0;
 +}
 +
 +static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 +                                  struct net_device *dev,
 +                                  unsigned long board_type)
 +{
 +      struct bnx2x *bp;
 +      int rc;
 +
 +      SET_NETDEV_DEV(dev, &pdev->dev);
 +      bp = netdev_priv(dev);
 +
 +      bp->dev = dev;
 +      bp->pdev = pdev;
 +      bp->flags = 0;
 +      bp->pf_num = PCI_FUNC(pdev->devfn);
 +
 +      rc = pci_enable_device(pdev);
 +      if (rc) {
 +              dev_err(&bp->pdev->dev,
 +                      "Cannot enable PCI device, aborting\n");
 +              goto err_out;
 +      }
 +
 +      if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 +              dev_err(&bp->pdev->dev,
 +                      "Cannot find PCI device base address, aborting\n");
 +              rc = -ENODEV;
 +              goto err_out_disable;
 +      }
 +
 +      if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 +              dev_err(&bp->pdev->dev, "Cannot find second PCI device"
 +                     " base address, aborting\n");
 +              rc = -ENODEV;
 +              goto err_out_disable;
 +      }
 +
 +      if (atomic_read(&pdev->enable_cnt) == 1) {
 +              rc = pci_request_regions(pdev, DRV_MODULE_NAME);
 +              if (rc) {
 +                      dev_err(&bp->pdev->dev,
 +                              "Cannot obtain PCI resources, aborting\n");
 +                      goto err_out_disable;
 +              }
 +
 +              pci_set_master(pdev);
 +              pci_save_state(pdev);
 +      }
 +
 +      bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 +      if (bp->pm_cap == 0) {
 +              dev_err(&bp->pdev->dev,
 +                      "Cannot find power management capability, aborting\n");
 +              rc = -EIO;
 +              goto err_out_release;
 +      }
 +
 +      if (!pci_is_pcie(pdev)) {
 +              dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
 +              rc = -EIO;
 +              goto err_out_release;
 +      }
 +
 +      rc = bnx2x_set_coherency_mask(bp);
 +      if (rc)
 +              goto err_out_release;
 +
 +      dev->mem_start = pci_resource_start(pdev, 0);
 +      dev->base_addr = dev->mem_start;
 +      dev->mem_end = pci_resource_end(pdev, 0);
 +
 +      dev->irq = pdev->irq;
 +
 +      bp->regview = pci_ioremap_bar(pdev, 0);
 +      if (!bp->regview) {
 +              dev_err(&bp->pdev->dev,
 +                      "Cannot map register space, aborting\n");
 +              rc = -ENOMEM;
 +              goto err_out_release;
 +      }
 +
 +      bnx2x_set_power_state(bp, PCI_D0);
 +
 +      /* clean indirect addresses */
 +      pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 +                             PCICFG_VENDOR_ID_OFFSET);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
++      /*
++       * Clean the following indirect addresses for all functions since it
 +       * is not used by the driver.
 +       */
 +      REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
 +      REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
 +      REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
 +      REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
++
++      if (CHIP_IS_E1x(bp)) {
++              REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
++              REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
++              REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
++              REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
++      }
 +
 +      /*
 +       * Enable internal target-read (in case we are probed after PF FLR).
 +       * Must be done prior to any BAR read access. Only for 57712 and up
 +       */
 +      if (board_type != BCM57710 &&
 +          board_type != BCM57711 &&
 +          board_type != BCM57711E)
 +              REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 +
 +      /* Reset the load counter */
 +      bnx2x_clear_load_cnt(bp);
 +
 +      dev->watchdog_timeo = TX_TIMEOUT;
 +
 +      dev->netdev_ops = &bnx2x_netdev_ops;
 +      bnx2x_set_ethtool_ops(dev);
 +
 +      dev->priv_flags |= IFF_UNICAST_FLT;
 +
 +      dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 +              NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO |
 +              NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
 +
 +      dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 +              NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 +
 +      dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
 +      if (bp->flags & USING_DAC_FLAG)
 +              dev->features |= NETIF_F_HIGHDMA;
 +
 +      /* Add Loopback capability to the device */
 +      dev->hw_features |= NETIF_F_LOOPBACK;
 +
 +#ifdef BCM_DCBNL
 +      dev->dcbnl_ops = &bnx2x_dcbnl_ops;
 +#endif
 +
 +      /* get_port_hwinfo() will set prtad and mmds properly */
 +      bp->mdio.prtad = MDIO_PRTAD_NONE;
 +      bp->mdio.mmds = 0;
 +      bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
 +      bp->mdio.dev = dev;
 +      bp->mdio.mdio_read = bnx2x_mdio_read;
 +      bp->mdio.mdio_write = bnx2x_mdio_write;
 +
 +      return 0;
 +
 +err_out_release:
 +      if (atomic_read(&pdev->enable_cnt) == 1)
 +              pci_release_regions(pdev);
 +
 +err_out_disable:
 +      pci_disable_device(pdev);
 +      pci_set_drvdata(pdev, NULL);
 +
 +err_out:
 +      return rc;
 +}
 +
 +static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
 +                                               int *width, int *speed)
 +{
 +      u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
 +
 +      *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
 +
 +      /* return value of 1=2.5GHz 2=5GHz */
 +      *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
 +}
 +
 +static int bnx2x_check_firmware(struct bnx2x *bp)
 +{
 +      const struct firmware *firmware = bp->firmware;
 +      struct bnx2x_fw_file_hdr *fw_hdr;
 +      struct bnx2x_fw_file_section *sections;
 +      u32 offset, len, num_ops;
 +      u16 *ops_offsets;
 +      int i;
 +      const u8 *fw_ver;
 +
 +      if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
 +              return -EINVAL;
 +
 +      fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
 +      sections = (struct bnx2x_fw_file_section *)fw_hdr;
 +
 +      /* Make sure none of the offsets and sizes make us read beyond
 +       * the end of the firmware data */
 +      for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
 +              offset = be32_to_cpu(sections[i].offset);
 +              len = be32_to_cpu(sections[i].len);
 +              if (offset + len > firmware->size) {
 +                      dev_err(&bp->pdev->dev,
 +                              "Section %d length is out of bounds\n", i);
 +                      return -EINVAL;
 +              }
 +      }
 +
 +      /* Likewise for the init_ops offsets */
 +      offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
 +      ops_offsets = (u16 *)(firmware->data + offset);
 +      num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
 +
 +      for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
 +              if (be16_to_cpu(ops_offsets[i]) > num_ops) {
 +                      dev_err(&bp->pdev->dev,
 +                              "Section offset %d is out of bounds\n", i);
 +                      return -EINVAL;
 +              }
 +      }
 +
 +      /* Check FW version */
 +      offset = be32_to_cpu(fw_hdr->fw_version.offset);
 +      fw_ver = firmware->data + offset;
 +      if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
 +          (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
 +          (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
 +          (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
 +              dev_err(&bp->pdev->dev,
 +                      "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
 +                     fw_ver[0], fw_ver[1], fw_ver[2],
 +                     fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
 +                     BCM_5710_FW_MINOR_VERSION,
 +                     BCM_5710_FW_REVISION_VERSION,
 +                     BCM_5710_FW_ENGINEERING_VERSION);
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +
 +static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 +{
 +      const __be32 *source = (const __be32 *)_source;
 +      u32 *target = (u32 *)_target;
 +      u32 i;
 +
 +      for (i = 0; i < n/4; i++)
 +              target[i] = be32_to_cpu(source[i]);
 +}
 +
 +/*
 +   Ops array is stored in the following format:
 +   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 + */
 +static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
 +{
 +      const __be32 *source = (const __be32 *)_source;
 +      struct raw_op *target = (struct raw_op *)_target;
 +      u32 i, j, tmp;
 +
 +      for (i = 0, j = 0; i < n/8; i++, j += 2) {
 +              tmp = be32_to_cpu(source[j]);
 +              target[i].op = (tmp >> 24) & 0xff;
 +              target[i].offset = tmp & 0xffffff;
 +              target[i].raw_data = be32_to_cpu(source[j + 1]);
 +      }
 +}
 +
 +/**
 + * IRO array is stored in the following format:
 + * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 + */
 +static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
 +{
 +      const __be32 *source = (const __be32 *)_source;
 +      struct iro *target = (struct iro *)_target;
 +      u32 i, j, tmp;
 +
 +      for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
 +              target[i].base = be32_to_cpu(source[j]);
 +              j++;
 +              tmp = be32_to_cpu(source[j]);
 +              target[i].m1 = (tmp >> 16) & 0xffff;
 +              target[i].m2 = tmp & 0xffff;
 +              j++;
 +              tmp = be32_to_cpu(source[j]);
 +              target[i].m3 = (tmp >> 16) & 0xffff;
 +              target[i].size = tmp & 0xffff;
 +              j++;
 +      }
 +}
 +
 +static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 +{
 +      const __be16 *source = (const __be16 *)_source;
 +      u16 *target = (u16 *)_target;
 +      u32 i;
 +
 +      for (i = 0; i < n/2; i++)
 +              target[i] = be16_to_cpu(source[i]);
 +}
 +
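 +/* Allocate bp->arr from the firmware section described by fw_hdr->arr and
 + * convert it from the firmware's big-endian layout via 'func'; jumps to
 + * 'lbl' on allocation failure.
 + */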
 +#define BNX2X_ALLOC_AND_SET(arr, lbl, func)                           \
 +do {                                                                  \
 +      u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
 +      bp->arr = kmalloc(len, GFP_KERNEL);                             \
 +      if (!bp->arr) {                                                 \
 +              pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
 +              goto lbl;                                               \
 +      }                                                               \
 +      func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
 +           (u8 *)bp->arr, len);                                       \
 +} while (0)
 +
 +int bnx2x_init_firmware(struct bnx2x *bp)
 +{
 +      const char *fw_file_name;
 +      struct bnx2x_fw_file_hdr *fw_hdr;
 +      int rc;
 +
 +      if (CHIP_IS_E1(bp))
 +              fw_file_name = FW_FILE_NAME_E1;
 +      else if (CHIP_IS_E1H(bp))
 +              fw_file_name = FW_FILE_NAME_E1H;
 +      else if (!CHIP_IS_E1x(bp))
 +              fw_file_name = FW_FILE_NAME_E2;
 +      else {
 +              BNX2X_ERR("Unsupported chip revision\n");
 +              return -EINVAL;
 +      }
 +
 +      BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
 +
 +      rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
 +      if (rc) {
 +              BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
 +              goto request_firmware_exit;
 +      }
 +
 +      rc = bnx2x_check_firmware(bp);
 +      if (rc) {
 +              BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
 +              goto request_firmware_exit;
 +      }
 +
 +      fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
 +
 +      /* Initialize the pointers to the init arrays */
 +      /* Blob */
 +      BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 +
 +      /* Opcodes */
 +      BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
 +
 +      /* Offsets */
 +      BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
 +                          be16_to_cpu_n);
 +
 +      /* STORMs firmware */
 +      INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
 +                      be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
 +      INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
 +                      be32_to_cpu(fw_hdr->tsem_pram_data.offset);
 +      INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
 +                      be32_to_cpu(fw_hdr->usem_int_table_data.offset);
 +      INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
 +                      be32_to_cpu(fw_hdr->usem_pram_data.offset);
 +      INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
 +                      be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
 +      INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
 +                      be32_to_cpu(fw_hdr->xsem_pram_data.offset);
 +      INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
 +                      be32_to_cpu(fw_hdr->csem_int_table_data.offset);
 +      INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
 +                      be32_to_cpu(fw_hdr->csem_pram_data.offset);
 +      /* IRO */
 +      BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
 +
 +      return 0;
 +
 +iro_alloc_err:
 +      kfree(bp->init_ops_offsets);
 +init_offsets_alloc_err:
 +      kfree(bp->init_ops);
 +init_ops_alloc_err:
 +      kfree(bp->init_data);
 +request_firmware_exit:
 +      release_firmware(bp->firmware);
 +
 +      return rc;
 +}
 +
 +static void bnx2x_release_firmware(struct bnx2x *bp)
 +{
 +      kfree(bp->init_ops_offsets);
 +      kfree(bp->init_ops);
 +      kfree(bp->init_data);
 +      release_firmware(bp->firmware);
 +}
 +
 +
 +static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
 +      .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
 +      .init_hw_cmn      = bnx2x_init_hw_common,
 +      .init_hw_port     = bnx2x_init_hw_port,
 +      .init_hw_func     = bnx2x_init_hw_func,
 +
 +      .reset_hw_cmn     = bnx2x_reset_common,
 +      .reset_hw_port    = bnx2x_reset_port,
 +      .reset_hw_func    = bnx2x_reset_func,
 +
 +      .gunzip_init      = bnx2x_gunzip_init,
 +      .gunzip_end       = bnx2x_gunzip_end,
 +
 +      .init_fw          = bnx2x_init_firmware,
 +      .release_fw       = bnx2x_release_firmware,
 +};
 +
 +void bnx2x__init_func_obj(struct bnx2x *bp)
 +{
 +      /* Prepare DMAE related driver resources */
 +      bnx2x_setup_dmae(bp);
 +
 +      bnx2x_init_func_obj(bp, &bp->func_obj,
 +                          bnx2x_sp(bp, func_rdata),
 +                          bnx2x_sp_mapping(bp, func_rdata),
 +                          &bnx2x_func_sp_drv);
 +}
 +
 +/* must be called after sriov-enable */
 +static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
 +{
 +      int cid_count = BNX2X_L2_CID_COUNT(bp);
 +
 +#ifdef BCM_CNIC
 +      cid_count += CNIC_CID_MAX;
 +#endif
 +      return roundup(cid_count, QM_CID_ROUND);
 +}
 +
 +/**
 + * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 + *
 + * @pdev:     pci device
 + *
 + */
 +static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
 +{
 +      int pos;
 +      u16 control;
 +
 +      pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
 +
 +      /*
 +       * If MSI-X is not supported - return number of SBs needed to support
 +       * one fast path queue: one FP queue + SB for CNIC
 +       */
 +      if (!pos)
 +              return 1 + CNIC_PRESENT;
 +
 +      /*
 +       * The value in the PCI configuration space is the index of the last
 +       * entry, namely one less than the actual size of the table, which is
 +       * exactly what we want to return from this function: number of all SBs
 +       * without the default SB.
 +       */
 +      pci_read_config_word(pdev, pos  + PCI_MSI_FLAGS, &control);
 +      return control & PCI_MSIX_FLAGS_QSIZE;
 +}
 +
 +static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 +                                  const struct pci_device_id *ent)
 +{
 +      struct net_device *dev = NULL;
 +      struct bnx2x *bp;
 +      int pcie_width, pcie_speed;
 +      int rc, max_non_def_sbs;
 +      int rx_count, tx_count, rss_count;
 +      /*
 +       * An estimated maximum supported CoS number according to the chip
 +       * version.
 +       * We will try to roughly estimate the maximum number of CoSes this chip
 +       * may support in order to minimize the memory allocated for Tx
 +       * netdev_queue's. This number will be accurately calculated during the
 +       * initialization of bp->max_cos based on the chip version AND chip
 +       * revision in bnx2x_init_bp().
 +       */
 +      u8 max_cos_est = 0;
 +
 +      switch (ent->driver_data) {
 +      case BCM57710:
 +      case BCM57711:
 +      case BCM57711E:
 +              max_cos_est = BNX2X_MULTI_TX_COS_E1X;
 +              break;
 +
 +      case BCM57712:
 +      case BCM57712_MF:
 +              max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
 +              break;
 +
 +      case BCM57800:
 +      case BCM57800_MF:
 +      case BCM57810:
 +      case BCM57810_MF:
 +      case BCM57840:
 +      case BCM57840_MF:
 +              max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
 +              break;
 +
 +      default:
 +              pr_err("Unknown board_type (%ld), aborting\n",
 +                         ent->driver_data);
 +              return -ENODEV;
 +      }
 +
 +      max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
 +
 +      /* !!! FIXME !!!
 +       * Do not allow the maximum SB count to grow above 16
 +       * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
 +       * We will use the FP_SB_MAX_E1x macro for this matter.
 +       */
 +      max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
 +
 +      WARN_ON(!max_non_def_sbs);
 +
 +      /* Maximum number of RSS queues: one IGU SB goes to CNIC */
 +      rss_count = max_non_def_sbs - CNIC_PRESENT;
 +
 +      /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
 +      rx_count = rss_count + FCOE_PRESENT;
 +
 +      /*
 +       * Maximum number of netdev Tx queues:
 +       *      Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
 +       */
 +      tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
 +
 +      /* dev zeroed in init_etherdev */
 +      dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
 +      if (!dev) {
 +              dev_err(&pdev->dev, "Cannot allocate net device\n");
 +              return -ENOMEM;
 +      }
 +
 +      bp = netdev_priv(dev);
 +
 +      DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
 +                        tx_count, rx_count);
 +
 +      bp->igu_sb_cnt = max_non_def_sbs;
 +      bp->msg_enable = debug;
 +      pci_set_drvdata(pdev, dev);
 +
 +      rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
 +      if (rc < 0) {
 +              free_netdev(dev);
 +              return rc;
 +      }
 +
 +      DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
 +
 +      rc = bnx2x_init_bp(bp);
 +      if (rc)
 +              goto init_one_exit;
 +
 +      /*
 +       * Map doorbells here as we need the real value of bp->max_cos, which
 +       * is initialized in bnx2x_init_bp().
 +       */
 +      bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
 +                                      min_t(u64, BNX2X_DB_SIZE(bp),
 +                                            pci_resource_len(pdev, 2)));
 +      if (!bp->doorbells) {
 +              dev_err(&bp->pdev->dev,
 +                      "Cannot map doorbell space, aborting\n");
 +              rc = -ENOMEM;
 +              goto init_one_exit;
 +      }
 +
 +      /* calc qm_cid_count */
 +      bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 +
 +#ifdef BCM_CNIC
 +      /* disable FCOE L2 queue for E1x and E3 */
 +      if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
 +              bp->flags |= NO_FCOE_FLAG;
 +
 +#endif
 +
 +      /* Configure interrupt mode: try to enable MSI-X/MSI if
 +       * needed, set bp->num_queues appropriately.
 +       */
 +      bnx2x_set_int_mode(bp);
 +
 +      /* Add all NAPI objects */
 +      bnx2x_add_all_napi(bp);
 +
 +      rc = register_netdev(dev);
 +      if (rc) {
 +              dev_err(&pdev->dev, "Cannot register net device\n");
 +              goto init_one_exit;
 +      }
 +
 +#ifdef BCM_CNIC
 +      if (!NO_FCOE(bp)) {
 +              /* Add storage MAC address */
 +              rtnl_lock();
 +              dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 +              rtnl_unlock();
 +      }
 +#endif
 +
 +      bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
 +
 +      netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
 +                  board_info[ent->driver_data].name,
 +                  (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
 +                  pcie_width,
 +                  ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
 +                   (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
 +                  "5GHz (Gen2)" : "2.5GHz",
 +                  dev->base_addr, bp->pdev->irq, dev->dev_addr);
 +
 +      return 0;
 +
 +init_one_exit:
 +      if (bp->regview)
 +              iounmap(bp->regview);
 +
 +      if (bp->doorbells)
 +              iounmap(bp->doorbells);
 +
 +      free_netdev(dev);
 +
 +      if (atomic_read(&pdev->enable_cnt) == 1)
 +              pci_release_regions(pdev);
 +
 +      pci_disable_device(pdev);
 +      pci_set_drvdata(pdev, NULL);
 +
 +      return rc;
 +}
 +
 +static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct bnx2x *bp;
 +
 +      if (!dev) {
 +              dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
 +              return;
 +      }
 +      bp = netdev_priv(dev);
 +
 +#ifdef BCM_CNIC
 +      /* Delete storage MAC address */
 +      if (!NO_FCOE(bp)) {
 +              rtnl_lock();
 +              dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 +              rtnl_unlock();
 +      }
 +#endif
 +
 +#ifdef BCM_DCBNL
 +      /* Delete app tlvs from dcbnl */
 +      bnx2x_dcbnl_update_applist(bp, true);
 +#endif
 +
 +      unregister_netdev(dev);
 +
 +      /* Delete all NAPI objects */
 +      bnx2x_del_all_napi(bp);
 +
 +      /* Power on: we can't let PCI layer write to us while we are in D3 */
 +      bnx2x_set_power_state(bp, PCI_D0);
 +
 +      /* Disable MSI/MSI-X */
 +      bnx2x_disable_msi(bp);
 +
 +      /* Power off */
 +      bnx2x_set_power_state(bp, PCI_D3hot);
 +
 +      /* Make sure RESET task is not scheduled before continuing */
 +      cancel_delayed_work_sync(&bp->sp_rtnl_task);
 +
 +      if (bp->regview)
 +              iounmap(bp->regview);
 +
 +      if (bp->doorbells)
 +              iounmap(bp->doorbells);
 +
 +      bnx2x_free_mem_bp(bp);
 +
 +      free_netdev(dev);
 +
 +      if (atomic_read(&pdev->enable_cnt) == 1)
 +              pci_release_regions(pdev);
 +
 +      pci_disable_device(pdev);
 +      pci_set_drvdata(pdev, NULL);
 +}
 +
 +static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      bp->state = BNX2X_STATE_ERROR;
 +
 +      bp->rx_mode = BNX2X_RX_MODE_NONE;
 +
 +#ifdef BCM_CNIC
 +      bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
 +#endif
 +      /* Stop Tx */
 +      bnx2x_tx_disable(bp);
 +
 +      bnx2x_netif_stop(bp, 0);
 +
 +      del_timer_sync(&bp->timer);
 +
 +      bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +
 +      /* Release IRQs */
 +      bnx2x_free_irq(bp);
 +
 +      /* Free SKBs, SGEs, TPA pool and driver internals */
 +      bnx2x_free_skbs(bp);
 +
 +      for_each_rx_queue(bp, i)
 +              bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 +
 +      bnx2x_free_mem(bp);
 +
 +      bp->state = BNX2X_STATE_CLOSED;
 +
 +      netif_carrier_off(bp->dev);
 +
 +      return 0;
 +}
 +
 +static void bnx2x_eeh_recover(struct bnx2x *bp)
 +{
 +      u32 val;
 +
 +      mutex_init(&bp->port.phy_mutex);
 +
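 +      /* Re-read the shared memory base after the PCI reset and re-validate
 +       * the MCP signature before using it.
 +       */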
 +      bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
 +      bp->link_params.shmem_base = bp->common.shmem_base;
 +      BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
 +
 +      if (!bp->common.shmem_base ||
 +          (bp->common.shmem_base < 0xA0000) ||
 +          (bp->common.shmem_base >= 0xC0000)) {
 +              BNX2X_DEV_INFO("MCP not active\n");
 +              bp->flags |= NO_MCP_FLAG;
 +              return;
 +      }
 +
 +      val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
 +      if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
 +              != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
 +              BNX2X_ERR("BAD MCP validity signature\n");
 +
 +      if (!BP_NOMCP(bp)) {
 +              bp->fw_seq =
 +                  (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
 +                  DRV_MSG_SEQ_NUMBER_MASK);
 +              BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
 +      }
 +}
 +
 +/**
 + * bnx2x_io_error_detected - called when PCI error is detected
 + * @pdev: Pointer to PCI device
 + * @state: The current pci connection state
 + *
 + * This function is called after a PCI bus error affecting
 + * this device has been detected.
 + */
 +static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
 +                                              pci_channel_state_t state)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      rtnl_lock();
 +
 +      netif_device_detach(dev);
 +
 +      if (state == pci_channel_io_perm_failure) {
 +              rtnl_unlock();
 +              return PCI_ERS_RESULT_DISCONNECT;
 +      }
 +
 +      if (netif_running(dev))
 +              bnx2x_eeh_nic_unload(bp);
 +
 +      pci_disable_device(pdev);
 +
 +      rtnl_unlock();
 +
 +      /* Request a slot reset */
 +      return PCI_ERS_RESULT_NEED_RESET;
 +}
 +
 +/**
 + * bnx2x_io_slot_reset - called after the PCI bus has been reset
 + * @pdev: Pointer to PCI device
 + *
 + * Restart the card from scratch, as if from a cold-boot.
 + */
 +static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      rtnl_lock();
 +
 +      if (pci_enable_device(pdev)) {
 +              dev_err(&pdev->dev,
 +                      "Cannot re-enable PCI device after reset\n");
 +              rtnl_unlock();
 +              return PCI_ERS_RESULT_DISCONNECT;
 +      }
 +
 +      pci_set_master(pdev);
 +      pci_restore_state(pdev);
 +
 +      if (netif_running(dev))
 +              bnx2x_set_power_state(bp, PCI_D0);
 +
 +      rtnl_unlock();
 +
 +      return PCI_ERS_RESULT_RECOVERED;
 +}
 +
 +/**
 + * bnx2x_io_resume - called when traffic can start flowing again
 + * @pdev: Pointer to PCI device
 + *
 + * This callback is called when the error recovery driver tells us that
 + * it's OK to resume normal operation.
 + */
 +static void bnx2x_io_resume(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 +              netdev_err(bp->dev, "Handling parity error recovery. "
 +                                  "Try again later\n");
 +              return;
 +      }
 +
 +      rtnl_lock();
 +
 +      bnx2x_eeh_recover(bp);
 +
 +      if (netif_running(dev))
 +              bnx2x_nic_load(bp, LOAD_NORMAL);
 +
 +      netif_device_attach(dev);
 +
 +      rtnl_unlock();
 +}
 +
 +static struct pci_error_handlers bnx2x_err_handler = {
 +      .error_detected = bnx2x_io_error_detected,
 +      .slot_reset     = bnx2x_io_slot_reset,
 +      .resume         = bnx2x_io_resume,
 +};
 +
 +static struct pci_driver bnx2x_pci_driver = {
 +      .name        = DRV_MODULE_NAME,
 +      .id_table    = bnx2x_pci_tbl,
 +      .probe       = bnx2x_init_one,
 +      .remove      = __devexit_p(bnx2x_remove_one),
 +      .suspend     = bnx2x_suspend,
 +      .resume      = bnx2x_resume,
 +      .err_handler = &bnx2x_err_handler,
 +};
 +
 +static int __init bnx2x_init(void)
 +{
 +      int ret;
 +
 +      pr_info("%s", version);
 +
 +      bnx2x_wq = create_singlethread_workqueue("bnx2x");
 +      if (bnx2x_wq == NULL) {
 +              pr_err("Cannot create workqueue\n");
 +              return -ENOMEM;
 +      }
 +
 +      ret = pci_register_driver(&bnx2x_pci_driver);
 +      if (ret) {
 +              pr_err("Cannot register driver\n");
 +              destroy_workqueue(bnx2x_wq);
 +      }
 +      return ret;
 +}
 +
 +static void __exit bnx2x_cleanup(void)
 +{
 +      pci_unregister_driver(&bnx2x_pci_driver);
 +
 +      destroy_workqueue(bnx2x_wq);
 +}
 +
 +void bnx2x_notify_link_changed(struct bnx2x *bp)
 +{
 +      REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
 +}
 +
 +module_init(bnx2x_init);
 +module_exit(bnx2x_cleanup);
 +
 +#ifdef BCM_CNIC
 +/**
 + * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 + *
 + * @bp:               driver handle
 + *
 + * This function will wait until the ramrod completion returns.
 + * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
 + */
 +static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
 +{
 +      unsigned long ramrod_flags = 0;
 +
 +      __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 +      return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
 +                               &bp->iscsi_l2_mac_obj, true,
 +                               BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
 +}
 +
 +/* count denotes the number of new completions we have seen */
 +static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
 +{
 +      struct eth_spe *spe;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return;
 +#endif
 +
 +      spin_lock_bh(&bp->spq_lock);
 +      BUG_ON(bp->cnic_spq_pending < count);
 +      bp->cnic_spq_pending -= count;
 +
 +
 +      for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
 +              u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
 +                              & SPE_HDR_CONN_TYPE) >>
 +                              SPE_HDR_CONN_TYPE_SHIFT;
 +              u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
 +                              >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
 +
 +              /* Set validation for iSCSI L2 client before sending SETUP
 +               *  ramrod
 +               */
 +              if (type == ETH_CONNECTION_TYPE) {
 +                      if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
 +                              bnx2x_set_ctx_validation(bp, &bp->context.
 +                                      vcxt[BNX2X_ISCSI_ETH_CID].eth,
 +                                      BNX2X_ISCSI_ETH_CID);
 +              }
 +
 +              /*
 +               * There may be no more than 8 L2 and no more than 8 L5 SPEs
 +               * in the air at once. We also check that the number of
 +               * outstanding COMMON ramrods does not exceed what the EQ and
 +               * SPQ can accommodate.
 +               */
 +              if (type == ETH_CONNECTION_TYPE) {
 +                      if (!atomic_read(&bp->cq_spq_left))
 +                              break;
 +                      else
 +                              atomic_dec(&bp->cq_spq_left);
 +              } else if (type == NONE_CONNECTION_TYPE) {
 +                      if (!atomic_read(&bp->eq_spq_left))
 +                              break;
 +                      else
 +                              atomic_dec(&bp->eq_spq_left);
 +              } else if ((type == ISCSI_CONNECTION_TYPE) ||
 +                         (type == FCOE_CONNECTION_TYPE)) {
 +                      if (bp->cnic_spq_pending >=
 +                          bp->cnic_eth_dev.max_kwqe_pending)
 +                              break;
 +                      else
 +                              bp->cnic_spq_pending++;
 +              } else {
 +                      BNX2X_ERR("Unknown SPE type: %d\n", type);
 +                      bnx2x_panic();
 +                      break;
 +              }
 +
 +              spe = bnx2x_sp_get_next(bp);
 +              *spe = *bp->cnic_kwq_cons;
 +
 +              DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
 +                 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
 +
 +              if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
 +                      bp->cnic_kwq_cons = bp->cnic_kwq;
 +              else
 +                      bp->cnic_kwq_cons++;
 +      }
 +      bnx2x_sp_prod_update(bp);
 +      spin_unlock_bh(&bp->spq_lock);
 +}
 +
 +static int bnx2x_cnic_sp_queue(struct net_device *dev,
 +                             struct kwqe_16 *kwqes[], u32 count)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int i;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return -EIO;
 +#endif
 +
 +      spin_lock_bh(&bp->spq_lock);
 +
 +      for (i = 0; i < count; i++) {
 +              struct eth_spe *spe = (struct eth_spe *)kwqes[i];
 +
 +              if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
 +                      break;
 +
 +              *bp->cnic_kwq_prod = *spe;
 +
 +              bp->cnic_kwq_pending++;
 +
 +              DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
 +                 spe->hdr.conn_and_cmd_data, spe->hdr.type,
 +                 spe->data.update_data_addr.hi,
 +                 spe->data.update_data_addr.lo,
 +                 bp->cnic_kwq_pending);
 +
 +              if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
 +                      bp->cnic_kwq_prod = bp->cnic_kwq;
 +              else
 +                      bp->cnic_kwq_prod++;
 +      }
 +
 +      spin_unlock_bh(&bp->spq_lock);
 +
 +      if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
 +              bnx2x_cnic_sp_post(bp, 0);
 +
 +      return i;
 +}
 +
 +static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
 +{
 +      struct cnic_ops *c_ops;
 +      int rc = 0;
 +
 +      mutex_lock(&bp->cnic_mutex);
 +      c_ops = rcu_dereference_protected(bp->cnic_ops,
 +                                        lockdep_is_held(&bp->cnic_mutex));
 +      if (c_ops)
 +              rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
 +      mutex_unlock(&bp->cnic_mutex);
 +
 +      return rc;
 +}
 +
 +static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
 +{
 +      struct cnic_ops *c_ops;
 +      int rc = 0;
 +
 +      rcu_read_lock();
 +      c_ops = rcu_dereference(bp->cnic_ops);
 +      if (c_ops)
 +              rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
 +      rcu_read_unlock();
 +
 +      return rc;
 +}
 +
 +/*
 + * for commands that have no data
 + */
 +int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
 +{
 +      struct cnic_ctl_info ctl = {0};
 +
 +      ctl.cmd = cmd;
 +
 +      return bnx2x_cnic_ctl_send(bp, &ctl);
 +}
 +
 +static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
 +{
 +      struct cnic_ctl_info ctl = {0};
 +
 +      /* first we tell CNIC and only then we count this as a completion */
 +      ctl.cmd = CNIC_CTL_COMPLETION_CMD;
 +      ctl.data.comp.cid = cid;
 +      ctl.data.comp.error = err;
 +
 +      bnx2x_cnic_ctl_send_bh(bp, &ctl);
 +      bnx2x_cnic_sp_post(bp, 0);
 +}
 +
 +
 +/* Called with netif_addr_lock_bh() taken.
 + * Sets an rx_mode config for an iSCSI ETH client.
 + * Doesn't block.
 + * Completion should be checked outside.
 + */
 +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
 +{
 +      unsigned long accept_flags = 0, ramrod_flags = 0;
 +      u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
 +      int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
 +
 +      if (start) {
 +              /* Start accepting on the iSCSI L2 ring. Accept all multicasts
 +               * because that is the only way for the UIO queue to receive
 +               * them: in non-promiscuous mode only one queue per function
 +               * (the leading queue, in our case) receives multicast packets.
 +               */
 +              __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
 +
 +              /* Clear STOP_PENDING bit if START is requested */
 +              clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
 +
 +              sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
 +      } else
 +              /* Clear START_PENDING bit if STOP is requested */
 +              clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
 +
 +      if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
 +              set_bit(sched_state, &bp->sp_state);
 +      else {
 +              __set_bit(RAMROD_RX, &ramrod_flags);
 +              bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
 +                                  ramrod_flags);
 +      }
 +}
 +
 +
 +static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int rc = 0;
 +
 +      switch (ctl->cmd) {
 +      case DRV_CTL_CTXTBL_WR_CMD: {
 +              u32 index = ctl->data.io.offset;
 +              dma_addr_t addr = ctl->data.io.dma_addr;
 +
 +              bnx2x_ilt_wr(bp, index, addr);
 +              break;
 +      }
 +
 +      case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
 +              int count = ctl->data.credit.credit_count;
 +
 +              bnx2x_cnic_sp_post(bp, count);
 +              break;
 +      }
 +
 +      /* rtnl_lock is held.  */
 +      case DRV_CTL_START_L2_CMD: {
 +              struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +              unsigned long sp_bits = 0;
 +
 +              /* Configure the iSCSI classification object */
 +              bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
 +                                 cp->iscsi_l2_client_id,
 +                                 cp->iscsi_l2_cid, BP_FUNC(bp),
 +                                 bnx2x_sp(bp, mac_rdata),
 +                                 bnx2x_sp_mapping(bp, mac_rdata),
 +                                 BNX2X_FILTER_MAC_PENDING,
 +                                 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
 +                                 &bp->macs_pool);
 +
 +              /* Set iSCSI MAC address */
 +              rc = bnx2x_set_iscsi_eth_mac_addr(bp);
 +              if (rc)
 +                      break;
 +
 +              mmiowb();
 +              barrier();
 +
 +              /* Start accepting on iSCSI L2 ring */
 +
 +              netif_addr_lock_bh(dev);
 +              bnx2x_set_iscsi_eth_rx_mode(bp, true);
 +              netif_addr_unlock_bh(dev);
 +
 +              /* bits to wait on */
 +              __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
 +              __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
 +
 +              if (!bnx2x_wait_sp_comp(bp, sp_bits))
 +                      BNX2X_ERR("rx_mode completion timed out!\n");
 +
 +              break;
 +      }
 +
 +      /* rtnl_lock is held.  */
 +      case DRV_CTL_STOP_L2_CMD: {
 +              unsigned long sp_bits = 0;
 +
 +              /* Stop accepting on iSCSI L2 ring */
 +              netif_addr_lock_bh(dev);
 +              bnx2x_set_iscsi_eth_rx_mode(bp, false);
 +              netif_addr_unlock_bh(dev);
 +
 +              /* bits to wait on */
 +              __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
 +              __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
 +
 +              if (!bnx2x_wait_sp_comp(bp, sp_bits))
 +                      BNX2X_ERR("rx_mode completion timed out!\n");
 +
 +              mmiowb();
 +              barrier();
 +
 +              /* Unset iSCSI L2 MAC */
 +              rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
 +                                      BNX2X_ISCSI_ETH_MAC, true);
 +              break;
 +      }
 +      case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
 +              int count = ctl->data.credit.credit_count;
 +
 +              smp_mb__before_atomic_inc();
 +              atomic_add(count, &bp->cq_spq_left);
 +              smp_mb__after_atomic_inc();
 +              break;
 +      }
 +
 +      default:
 +              BNX2X_ERR("unknown command %x\n", ctl->cmd);
 +              rc = -EINVAL;
 +      }
 +
 +      return rc;
 +}
 +
 +void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
 +{
 +      struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
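 +      /* irq_arr[0] describes the CNIC fast-path status block,
 +       * irq_arr[1] the default status block.
 +       */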
 +      if (bp->flags & USING_MSIX_FLAG) {
 +              cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
 +              cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
 +              cp->irq_arr[0].vector = bp->msix_table[1].vector;
 +      } else {
 +              cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
 +              cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
 +      }
 +      if (!CHIP_IS_E1x(bp))
 +              cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
 +      else
 +              cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
 +
 +      cp->irq_arr[0].status_blk_num =  bnx2x_cnic_fw_sb_id(bp);
 +      cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
 +      cp->irq_arr[1].status_blk = bp->def_status_blk;
 +      cp->irq_arr[1].status_blk_num = DEF_SB_ID;
 +      cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
 +
 +      cp->num_irq = 2;
 +}
 +
 +static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 +                             void *data)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
 +      if (ops == NULL)
 +              return -EINVAL;
 +
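 +      /* The CNIC kernel work queue is a single page of SPEs managed as a
 +       * ring via cnic_kwq_cons/prod/last.
 +       */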
 +      bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
 +      if (!bp->cnic_kwq)
 +              return -ENOMEM;
 +
 +      bp->cnic_kwq_cons = bp->cnic_kwq;
 +      bp->cnic_kwq_prod = bp->cnic_kwq;
 +      bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
 +
 +      bp->cnic_spq_pending = 0;
 +      bp->cnic_kwq_pending = 0;
 +
 +      bp->cnic_data = data;
 +
 +      cp->num_irq = 0;
 +      cp->drv_state |= CNIC_DRV_STATE_REGD;
 +      cp->iro_arr = bp->iro_arr;
 +
 +      bnx2x_setup_cnic_irq_info(bp);
 +
 +      rcu_assign_pointer(bp->cnic_ops, ops);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_unregister_cnic(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
 +      mutex_lock(&bp->cnic_mutex);
 +      cp->drv_state = 0;
 +      rcu_assign_pointer(bp->cnic_ops, NULL);
 +      mutex_unlock(&bp->cnic_mutex);
 +      synchronize_rcu();
 +      kfree(bp->cnic_kwq);
 +      bp->cnic_kwq = NULL;
 +
 +      return 0;
 +}
 +
 +struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
 +      /* If both iSCSI and FCoE are disabled, return NULL in
 +       * order to indicate to CNIC that it should not try to work
 +       * with this device.
 +       */
 +      if (NO_ISCSI(bp) && NO_FCOE(bp))
 +              return NULL;
 +
 +      cp->drv_owner = THIS_MODULE;
 +      cp->chip_id = CHIP_ID(bp);
 +      cp->pdev = bp->pdev;
 +      cp->io_base = bp->regview;
 +      cp->io_base2 = bp->doorbells;
 +      cp->max_kwqe_pending = 8;
 +      cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
 +      cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
 +                           bnx2x_cid_ilt_lines(bp);
 +      cp->ctx_tbl_len = CNIC_ILT_LINES;
 +      cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
 +      cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
 +      cp->drv_ctl = bnx2x_drv_ctl;
 +      cp->drv_register_cnic = bnx2x_register_cnic;
 +      cp->drv_unregister_cnic = bnx2x_unregister_cnic;
 +      cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
 +      cp->iscsi_l2_client_id =
 +              bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
 +      cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
 +
 +      if (NO_ISCSI_OOO(bp))
 +              cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
 +
 +      if (NO_ISCSI(bp))
 +              cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
 +
 +      if (NO_FCOE(bp))
 +              cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
 +
 +      DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
 +                       "starting cid %d\n",
 +         cp->ctx_blk_size,
 +         cp->ctx_tbl_offset,
 +         cp->ctx_tbl_len,
 +         cp->starting_cid);
 +      return cp;
 +}
 +EXPORT_SYMBOL(bnx2x_cnic_probe);
 +
 +#endif /* BCM_CNIC */
 +
index 628f7b9,0000000..02ac6a7
mode 100644,000000..100644
--- /dev/null
@@@ -1,1598 -1,0 +1,1599 @@@
-               BNX2X_ERR("stats updated by DMAE but no MAC active\n");
 +/* bnx2x_stats.c: Broadcom Everest network driver.
 + *
 + * Copyright (c) 2007-2011 Broadcom Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation.
 + *
 + * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 + * Written by: Eliezer Tamir
 + * Based on code from Michael Chan's bnx2 driver
 + * UDP CSUM errata workaround by Arik Gendelman
 + * Slowpath and fastpath rework by Vladislav Zolotarov
 + * Statistics and Link management by Yitchak Gertner
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include "bnx2x_stats.h"
 +#include "bnx2x_cmn.h"
 +
 +
 +/* Statistics */
 +
 +/*
 + * General service functions
 + */
 +
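 +/* Combine the hi/lo halves of a 64-bit statistics counter; on 32-bit
 + * builds only the low 32 bits are returned.
 + */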
 +static inline long bnx2x_hilo(u32 *hiref)
 +{
 +      u32 lo = *(hiref + 1);
 +#if (BITS_PER_LONG == 64)
 +      u32 hi = *hiref;
 +
 +      return HILO_U64(hi, lo);
 +#else
 +      return lo;
 +#endif
 +}
 +
 +/*
 + * Init service functions
 + */
 +
 +/* Post the next statistics ramrod. Protect it with the spin lock in
 + * order to ensure the strict ordering between statistics ramrods
 + * (each ramrod has a sequence number passed in a
 + * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 + * sent in order).
 + */
 +static void bnx2x_storm_stats_post(struct bnx2x *bp)
 +{
 +      if (!bp->stats_pending) {
 +              int rc;
 +
 +              spin_lock_bh(&bp->stats_lock);
 +
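 +              /* Re-check under the lock: another context may have posted a
 +               * stats ramrod since the unlocked test above.
 +               */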
 +              if (bp->stats_pending) {
 +                      spin_unlock_bh(&bp->stats_lock);
 +                      return;
 +              }
 +
 +              bp->fw_stats_req->hdr.drv_stats_counter =
 +                      cpu_to_le16(bp->stats_counter++);
 +
 +              DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
 +                      bp->fw_stats_req->hdr.drv_stats_counter);
 +
 +
 +
 +              /* send FW stats ramrod */
 +              rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
 +                                 U64_HI(bp->fw_stats_req_mapping),
 +                                 U64_LO(bp->fw_stats_req_mapping),
 +                                 NONE_CONNECTION_TYPE);
 +              if (rc == 0)
 +                      bp->stats_pending = 1;
 +
 +              spin_unlock_bh(&bp->stats_lock);
 +      }
 +}
 +
 +static void bnx2x_hw_stats_post(struct bnx2x *bp)
 +{
 +      struct dmae_command *dmae = &bp->stats_dmae;
 +      u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 +
 +      *stats_comp = DMAE_COMP_VAL;
 +      if (CHIP_REV_IS_SLOW(bp))
 +              return;
 +
 +      /* loader */
 +      if (bp->executer_idx) {
 +              int loader_idx = PMF_DMAE_C(bp);
 +              u32 opcode =  bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 +                                               true, DMAE_COMP_GRC);
 +              opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
 +
 +              memset(dmae, 0, sizeof(struct dmae_command));
 +              dmae->opcode = opcode;
 +              dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
 +              dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
 +              dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
 +                                   sizeof(struct dmae_command) *
 +                                   (loader_idx + 1)) >> 2;
 +              dmae->dst_addr_hi = 0;
 +              dmae->len = sizeof(struct dmae_command) >> 2;
 +              if (CHIP_IS_E1(bp))
 +                      dmae->len--;
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +
 +              *stats_comp = 0;
 +              bnx2x_post_dmae(bp, dmae, loader_idx);
 +
 +      } else if (bp->func_stx) {
 +              *stats_comp = 0;
 +              bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
 +      }
 +}
 +
 +static int bnx2x_stats_comp(struct bnx2x *bp)
 +{
 +      u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 +      int cnt = 10;
 +
 +      might_sleep();
 +      while (*stats_comp != DMAE_COMP_VAL) {
 +              if (!cnt) {
 +                      BNX2X_ERR("timeout waiting for stats finished\n");
 +                      break;
 +              }
 +              cnt--;
 +              usleep_range(1000, 1000);
 +      }
 +      return 1;
 +}
 +
 +/*
 + * Statistics service functions
 + */
 +
 +static void bnx2x_stats_pmf_update(struct bnx2x *bp)
 +{
 +      struct dmae_command *dmae;
 +      u32 opcode;
 +      int loader_idx = PMF_DMAE_C(bp);
 +      u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 +
 +      /* sanity */
 +      if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
 +              BNX2X_ERR("BUG!\n");
 +              return;
 +      }
 +
 +      bp->executer_idx = 0;
 +
 +      opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
 +
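 +      /* The port stats block is larger than a single DMAE transfer, so it
 +       * is read in two chunks: DMAE_LEN32_RD_MAX dwords and the remainder.
 +       */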
 +      dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +      dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
 +      dmae->src_addr_lo = bp->port.port_stx >> 2;
 +      dmae->src_addr_hi = 0;
 +      dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
 +      dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
 +      dmae->len = DMAE_LEN32_RD_MAX;
 +      dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +      dmae->comp_addr_hi = 0;
 +      dmae->comp_val = 1;
 +
 +      dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +      dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
 +      dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
 +      dmae->src_addr_hi = 0;
 +      dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
 +                                 DMAE_LEN32_RD_MAX * 4);
 +      dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
 +                                 DMAE_LEN32_RD_MAX * 4);
 +      dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
 +      dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_val = DMAE_COMP_VAL;
 +
 +      *stats_comp = 0;
 +      bnx2x_hw_stats_post(bp);
 +      bnx2x_stats_comp(bp);
 +}
 +
 +static void bnx2x_port_stats_init(struct bnx2x *bp)
 +{
 +      struct dmae_command *dmae;
 +      int port = BP_PORT(bp);
 +      u32 opcode;
 +      int loader_idx = PMF_DMAE_C(bp);
 +      u32 mac_addr;
 +      u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 +
 +      /* sanity */
 +      if (!bp->link_vars.link_up || !bp->port.pmf) {
 +              BNX2X_ERR("BUG!\n");
 +              return;
 +      }
 +
 +      bp->executer_idx = 0;
 +
 +      /* MCP */
 +      opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 +                                  true, DMAE_COMP_GRC);
 +
 +      if (bp->port.port_stx) {
 +
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode = opcode;
 +              dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
 +              dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
 +              dmae->dst_addr_lo = bp->port.port_stx >> 2;
 +              dmae->dst_addr_hi = 0;
 +              dmae->len = sizeof(struct host_port_stats) >> 2;
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +      }
 +
 +      if (bp->func_stx) {
 +
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode = opcode;
 +              dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
 +              dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
 +              dmae->dst_addr_lo = bp->func_stx >> 2;
 +              dmae->dst_addr_hi = 0;
 +              dmae->len = sizeof(struct host_func_stats) >> 2;
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +      }
 +
 +      /* MAC */
 +      opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
 +                                 true, DMAE_COMP_GRC);
 +
 +      /* EMAC is special */
 +      if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
 +              mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
 +
 +              /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode = opcode;
 +              dmae->src_addr_lo = (mac_addr +
 +                                   EMAC_REG_EMAC_RX_STAT_AC) >> 2;
 +              dmae->src_addr_hi = 0;
 +              dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
 +              dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
 +              dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +
 +              /* EMAC_REG_EMAC_RX_STAT_AC_28 */
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode = opcode;
 +              dmae->src_addr_lo = (mac_addr +
 +                                   EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
 +              dmae->src_addr_hi = 0;
 +              dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
 +                   offsetof(struct emac_stats, rx_stat_falsecarriererrors));
 +              dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
 +                   offsetof(struct emac_stats, rx_stat_falsecarriererrors));
 +              dmae->len = 1;
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +
 +              /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode = opcode;
 +              dmae->src_addr_lo = (mac_addr +
 +                                   EMAC_REG_EMAC_TX_STAT_AC) >> 2;
 +              dmae->src_addr_hi = 0;
 +              dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
 +                      offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
 +              dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
 +                      offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
 +              dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +      } else {
 +              u32 tx_src_addr_lo, rx_src_addr_lo;
 +              u16 rx_len, tx_len;
 +
 +              /* configure the params according to MAC type */
 +              switch (bp->link_vars.mac_type) {
 +              case MAC_TYPE_BMAC:
 +                      mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
 +                                         NIG_REG_INGRESS_BMAC0_MEM);
 +
 +                      /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
 +                         BIGMAC_REGISTER_TX_STAT_GTBYT */
 +                      if (CHIP_IS_E1x(bp)) {
 +                              tx_src_addr_lo = (mac_addr +
 +                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
 +                              tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
 +                                        BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
 +                              rx_src_addr_lo = (mac_addr +
 +                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
 +                              rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
 +                                        BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
 +                      } else {
 +                              tx_src_addr_lo = (mac_addr +
 +                                      BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
 +                              tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
 +                                        BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
 +                              rx_src_addr_lo = (mac_addr +
 +                                      BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
 +                              rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
 +                                        BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
 +                      }
 +                      break;
 +
 +              case MAC_TYPE_UMAC: /* handled by MSTAT */
 +              case MAC_TYPE_XMAC: /* handled by MSTAT */
 +              default:
 +                      mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
 +                      tx_src_addr_lo = (mac_addr +
 +                                        MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
 +                      rx_src_addr_lo = (mac_addr +
 +                                        MSTAT_REG_RX_STAT_GR64_LO) >> 2;
 +                      tx_len = sizeof(bp->slowpath->
 +                                      mac_stats.mstat_stats.stats_tx) >> 2;
 +                      rx_len = sizeof(bp->slowpath->
 +                                      mac_stats.mstat_stats.stats_rx) >> 2;
 +                      break;
 +              }
 +
 +              /* TX stats */
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode = opcode;
 +              dmae->src_addr_lo = tx_src_addr_lo;
 +              dmae->src_addr_hi = 0;
 +              dmae->len = tx_len;
 +              dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
 +              dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +
 +              /* RX stats */
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode = opcode;
 +              dmae->src_addr_hi = 0;
 +              dmae->src_addr_lo = rx_src_addr_lo;
 +              dmae->dst_addr_lo =
 +                      U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
 +              dmae->dst_addr_hi =
 +                      U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
 +              dmae->len = rx_len;
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +      }
 +
 +      /* NIG */
 +      if (!CHIP_IS_E3(bp)) {
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode = opcode;
 +              dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
 +                                          NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
 +              dmae->src_addr_hi = 0;
 +              dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
 +                              offsetof(struct nig_stats, egress_mac_pkt0_lo));
 +              dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
 +                              offsetof(struct nig_stats, egress_mac_pkt0_lo));
 +              dmae->len = (2*sizeof(u32)) >> 2;
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode = opcode;
 +              dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
 +                                          NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
 +              dmae->src_addr_hi = 0;
 +              dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
 +                              offsetof(struct nig_stats, egress_mac_pkt1_lo));
 +              dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
 +                              offsetof(struct nig_stats, egress_mac_pkt1_lo));
 +              dmae->len = (2*sizeof(u32)) >> 2;
 +              dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +              dmae->comp_addr_hi = 0;
 +              dmae->comp_val = 1;
 +      }
 +
 +      dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +      dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
 +                                               true, DMAE_COMP_PCI);
 +      dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
 +                                  NIG_REG_STAT0_BRB_DISCARD) >> 2;
 +      dmae->src_addr_hi = 0;
 +      dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
 +      dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
 +      dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
 +
 +      dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_val = DMAE_COMP_VAL;
 +
 +      *stats_comp = 0;
 +}
 +
 +static void bnx2x_func_stats_init(struct bnx2x *bp)
 +{
 +      struct dmae_command *dmae = &bp->stats_dmae;
 +      u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 +
 +      /* sanity */
 +      if (!bp->func_stx) {
 +              BNX2X_ERR("BUG!\n");
 +              return;
 +      }
 +
 +      bp->executer_idx = 0;
 +      memset(dmae, 0, sizeof(struct dmae_command));
 +
 +      dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 +                                       true, DMAE_COMP_PCI);
 +      dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
 +      dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
 +      dmae->dst_addr_lo = bp->func_stx >> 2;
 +      dmae->dst_addr_hi = 0;
 +      dmae->len = sizeof(struct host_func_stats) >> 2;
 +      dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_val = DMAE_COMP_VAL;
 +
 +      *stats_comp = 0;
 +}
 +
 +static void bnx2x_stats_start(struct bnx2x *bp)
 +{
 +      if (bp->port.pmf)
 +              bnx2x_port_stats_init(bp);
 +
 +      else if (bp->func_stx)
 +              bnx2x_func_stats_init(bp);
 +
 +      bnx2x_hw_stats_post(bp);
 +      bnx2x_storm_stats_post(bp);
 +}
 +
 +static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 +{
 +      bnx2x_stats_comp(bp);
 +      bnx2x_stats_pmf_update(bp);
 +      bnx2x_stats_start(bp);
 +}
 +
 +static void bnx2x_stats_restart(struct bnx2x *bp)
 +{
 +      bnx2x_stats_comp(bp);
 +      bnx2x_stats_start(bp);
 +}
 +
 +static void bnx2x_bmac_stats_update(struct bnx2x *bp)
 +{
 +      struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
 +      struct bnx2x_eth_stats *estats = &bp->eth_stats;
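 +      /* 64-bit hi/lo scratch delta used by the UPDATE_STAT64() macros below */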
 +      struct {
 +              u32 lo;
 +              u32 hi;
 +      } diff;
 +
 +      if (CHIP_IS_E1x(bp)) {
 +              struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
 +
 +              /* the macros below will use "bmac1_stats" type */
 +              UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
 +              UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
 +              UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
 +              UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
 +              UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
 +              UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
 +              UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
 +              UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
 +              UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
 +
 +              UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
 +              UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
 +              UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
 +              UPDATE_STAT64(tx_stat_gt127,
 +                              tx_stat_etherstatspkts65octetsto127octets);
 +              UPDATE_STAT64(tx_stat_gt255,
 +                              tx_stat_etherstatspkts128octetsto255octets);
 +              UPDATE_STAT64(tx_stat_gt511,
 +                              tx_stat_etherstatspkts256octetsto511octets);
 +              UPDATE_STAT64(tx_stat_gt1023,
 +                              tx_stat_etherstatspkts512octetsto1023octets);
 +              UPDATE_STAT64(tx_stat_gt1518,
 +                              tx_stat_etherstatspkts1024octetsto1522octets);
 +              UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
 +              UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
 +              UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
 +              UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
 +              UPDATE_STAT64(tx_stat_gterr,
 +                              tx_stat_dot3statsinternalmactransmiterrors);
 +              UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
 +
 +      } else {
 +              struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
 +
 +              /* the macros below will use "bmac2_stats" type */
 +              UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
 +              UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
 +              UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
 +              UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
 +              UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
 +              UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
 +              UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
 +              UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
 +              UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
 +              UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
 +              UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
 +              UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
 +              UPDATE_STAT64(tx_stat_gt127,
 +                              tx_stat_etherstatspkts65octetsto127octets);
 +              UPDATE_STAT64(tx_stat_gt255,
 +                              tx_stat_etherstatspkts128octetsto255octets);
 +              UPDATE_STAT64(tx_stat_gt511,
 +                              tx_stat_etherstatspkts256octetsto511octets);
 +              UPDATE_STAT64(tx_stat_gt1023,
 +                              tx_stat_etherstatspkts512octetsto1023octets);
 +              UPDATE_STAT64(tx_stat_gt1518,
 +                              tx_stat_etherstatspkts1024octetsto1522octets);
 +              UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
 +              UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
 +              UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
 +              UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
 +              UPDATE_STAT64(tx_stat_gterr,
 +                              tx_stat_dot3statsinternalmactransmiterrors);
 +              UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
 +      }
 +
 +      estats->pause_frames_received_hi =
 +                              pstats->mac_stx[1].rx_stat_mac_xpf_hi;
 +      estats->pause_frames_received_lo =
 +                              pstats->mac_stx[1].rx_stat_mac_xpf_lo;
 +
 +      estats->pause_frames_sent_hi =
 +                              pstats->mac_stx[1].tx_stat_outxoffsent_hi;
 +      estats->pause_frames_sent_lo =
 +                              pstats->mac_stx[1].tx_stat_outxoffsent_lo;
 +}
 +
 +static void bnx2x_mstat_stats_update(struct bnx2x *bp)
 +{
 +      struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
 +      struct bnx2x_eth_stats *estats = &bp->eth_stats;
 +
 +      struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
 +
 +      ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
 +      ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
 +      ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
 +      ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
 +      ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
 +      ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
 +      ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
 +      ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
 +      ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
 +      ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
 +
 +
 +      ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
 +      ADD_STAT64(stats_tx.tx_gt127,
 +                      tx_stat_etherstatspkts65octetsto127octets);
 +      ADD_STAT64(stats_tx.tx_gt255,
 +                      tx_stat_etherstatspkts128octetsto255octets);
 +      ADD_STAT64(stats_tx.tx_gt511,
 +                      tx_stat_etherstatspkts256octetsto511octets);
 +      ADD_STAT64(stats_tx.tx_gt1023,
 +                      tx_stat_etherstatspkts512octetsto1023octets);
 +      ADD_STAT64(stats_tx.tx_gt1518,
 +                      tx_stat_etherstatspkts1024octetsto1522octets);
 +      ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
 +
 +      ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
 +      ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
 +      ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
 +
 +      ADD_STAT64(stats_tx.tx_gterr,
 +                      tx_stat_dot3statsinternalmactransmiterrors);
 +      ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
 +
 +      ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
 +             new->stats_tx.tx_gt1518_hi,
 +             estats->etherstatspkts1024octetsto1522octets_lo,
 +             new->stats_tx.tx_gt1518_lo);
 +
 +      ADD_64(estats->etherstatspktsover1522octets_hi,
 +             new->stats_tx.tx_gt2047_hi,
 +             estats->etherstatspktsover1522octets_lo,
 +             new->stats_tx.tx_gt2047_lo);
 +
 +      ADD_64(estats->etherstatspktsover1522octets_hi,
 +             new->stats_tx.tx_gt4095_hi,
 +             estats->etherstatspktsover1522octets_lo,
 +             new->stats_tx.tx_gt4095_lo);
 +
 +      ADD_64(estats->etherstatspktsover1522octets_hi,
 +             new->stats_tx.tx_gt9216_hi,
 +             estats->etherstatspktsover1522octets_lo,
 +             new->stats_tx.tx_gt9216_lo);
 +
 +
 +      ADD_64(estats->etherstatspktsover1522octets_hi,
 +             new->stats_tx.tx_gt16383_hi,
 +             estats->etherstatspktsover1522octets_lo,
 +             new->stats_tx.tx_gt16383_lo);
 +
 +      estats->pause_frames_received_hi =
 +                              pstats->mac_stx[1].rx_stat_mac_xpf_hi;
 +      estats->pause_frames_received_lo =
 +                              pstats->mac_stx[1].rx_stat_mac_xpf_lo;
 +
 +      estats->pause_frames_sent_hi =
 +                              pstats->mac_stx[1].tx_stat_outxoffsent_hi;
 +      estats->pause_frames_sent_lo =
 +                              pstats->mac_stx[1].tx_stat_outxoffsent_lo;
 +}
 +
 +static void bnx2x_emac_stats_update(struct bnx2x *bp)
 +{
 +      struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
 +      struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
 +      struct bnx2x_eth_stats *estats = &bp->eth_stats;
 +
 +      UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
 +      UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
 +      UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
 +      UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
 +      UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
 +      UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
 +      UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
 +      UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
 +      UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
 +      UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
 +      UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
 +      UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
 +      UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
 +      UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
 +      UPDATE_EXTEND_STAT(tx_stat_outxonsent);
 +      UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
 +      UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
 +      UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
 +      UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
 +      UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
 +      UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
 +      UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
 +      UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
 +      UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
 +      UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
 +      UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
 +      UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
 +      UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
 +      UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
 +      UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
 +      UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
 +
 +      estats->pause_frames_received_hi =
 +                      pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
 +      estats->pause_frames_received_lo =
 +                      pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
 +      ADD_64(estats->pause_frames_received_hi,
 +             pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
 +             estats->pause_frames_received_lo,
 +             pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
 +
 +      estats->pause_frames_sent_hi =
 +                      pstats->mac_stx[1].tx_stat_outxonsent_hi;
 +      estats->pause_frames_sent_lo =
 +                      pstats->mac_stx[1].tx_stat_outxonsent_lo;
 +      ADD_64(estats->pause_frames_sent_hi,
 +             pstats->mac_stx[1].tx_stat_outxoffsent_hi,
 +             estats->pause_frames_sent_lo,
 +             pstats->mac_stx[1].tx_stat_outxoffsent_lo);
 +}
 +
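 +/* Fold the MAC statistics (BMAC, EMAC or MSTAT, depending on the active MAC)
 + * and the NIG counters gathered by DMAE into the host port and ethernet stats.
 + */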
 +static int bnx2x_hw_stats_update(struct bnx2x *bp)
 +{
 +      struct nig_stats *new = bnx2x_sp(bp, nig_stats);
 +      struct nig_stats *old = &(bp->port.old_nig_stats);
 +      struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
 +      struct bnx2x_eth_stats *estats = &bp->eth_stats;
 +      struct {
 +              u32 lo;
 +              u32 hi;
 +      } diff;
 +
 +      switch (bp->link_vars.mac_type) {
 +      case MAC_TYPE_BMAC:
 +              bnx2x_bmac_stats_update(bp);
 +              break;
 +
 +      case MAC_TYPE_EMAC:
 +              bnx2x_emac_stats_update(bp);
 +              break;
 +
 +      case MAC_TYPE_UMAC:
 +      case MAC_TYPE_XMAC:
 +              bnx2x_mstat_stats_update(bp);
 +              break;
 +
 +      case MAC_TYPE_NONE: /* unreached */
-       int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
++              DP(BNX2X_MSG_STATS,
++                 "stats updated by DMAE but no MAC active\n");
 +              return -1;
 +
 +      default: /* unreached */
 +              BNX2X_ERR("Unknown MAC type\n");
 +      }
 +
 +      ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
 +                    new->brb_discard - old->brb_discard);
 +      ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
 +                    new->brb_truncate - old->brb_truncate);
 +
 +      if (!CHIP_IS_E3(bp)) {
 +              UPDATE_STAT64_NIG(egress_mac_pkt0,
 +                                      etherstatspkts1024octetsto1522octets);
 +              UPDATE_STAT64_NIG(egress_mac_pkt1,
 +                                      etherstatspktsover1522octets);
 +      }
 +
 +      memcpy(old, new, sizeof(struct nig_stats));
 +
 +      memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
 +             sizeof(struct mac_stx));
 +      estats->brb_drop_hi = pstats->brb_drop_hi;
 +      estats->brb_drop_lo = pstats->brb_drop_lo;
 +
 +      pstats->host_port_stats_start = ++pstats->host_port_stats_end;
 +
 +      if (!BP_NOMCP(bp)) {
 +              u32 nig_timer_max =
 +                      SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
 +              if (nig_timer_max != estats->nig_timer_max) {
 +                      estats->nig_timer_max = nig_timer_max;
 +                      BNX2X_ERR("NIG timer max (%u)\n",
 +                                estats->nig_timer_max);
 +              }
 +      }
 +
 +      return 0;
 +}
 +
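 +/* Pull the per-storm FW statistics delivered by the last completed statistics
 + * ramrod into the per-queue, per-function and ethernet stats.  Returns -EAGAIN
 + * if any storm has not yet completed the most recently sent ramrod.
 + */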
 +static int bnx2x_storm_stats_update(struct bnx2x *bp)
 +{
 +      struct tstorm_per_port_stats *tport =
 +                              &bp->fw_stats_data->port.tstorm_port_statistics;
 +      struct tstorm_per_pf_stats *tfunc =
 +                              &bp->fw_stats_data->pf.tstorm_pf_statistics;
 +      struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
 +      struct bnx2x_eth_stats *estats = &bp->eth_stats;
 +      struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
 +      int i;
 +      u16 cur_stats_counter;
 +
 +      /* Make sure we use the value of the counter
 +       * used for sending the last stats ramrod.
 +       */
 +      spin_lock_bh(&bp->stats_lock);
 +      cur_stats_counter = bp->stats_counter - 1;
 +      spin_unlock_bh(&bp->stats_lock);
 +
 +      /* are storm stats valid? */
 +      if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
 +              DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
 +                 "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
 +                 le16_to_cpu(counters->xstats_counter), bp->stats_counter);
 +              return -EAGAIN;
 +      }
 +
 +      if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
 +              DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
 +                 "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
 +                 le16_to_cpu(counters->ustats_counter), bp->stats_counter);
 +              return -EAGAIN;
 +      }
 +
 +      if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
 +              DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
 +                 "  cstorm counter (0x%x) != stats_counter (0x%x)\n",
 +                 le16_to_cpu(counters->cstats_counter), bp->stats_counter);
 +              return -EAGAIN;
 +      }
 +
 +      if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
 +              DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
 +                 "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
 +                 le16_to_cpu(counters->tstats_counter), bp->stats_counter);
 +              return -EAGAIN;
 +      }
 +
 +      memcpy(&(fstats->total_bytes_received_hi),
 +             &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
 +             sizeof(struct host_func_stats) - 2*sizeof(u32));
 +      estats->error_bytes_received_hi = 0;
 +      estats->error_bytes_received_lo = 0;
 +      estats->etherstatsoverrsizepkts_hi = 0;
 +      estats->etherstatsoverrsizepkts_lo = 0;
 +      estats->no_buff_discard_hi = 0;
 +      estats->no_buff_discard_lo = 0;
 +      estats->total_tpa_aggregations_hi = 0;
 +      estats->total_tpa_aggregations_lo = 0;
 +      estats->total_tpa_aggregated_frames_hi = 0;
 +      estats->total_tpa_aggregated_frames_lo = 0;
 +      estats->total_tpa_bytes_hi = 0;
 +      estats->total_tpa_bytes_lo = 0;
 +
 +      for_each_eth_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              struct tstorm_per_queue_stats *tclient =
 +                      &bp->fw_stats_data->queue_stats[i].
 +                      tstorm_queue_statistics;
 +              struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
 +              struct ustorm_per_queue_stats *uclient =
 +                      &bp->fw_stats_data->queue_stats[i].
 +                      ustorm_queue_statistics;
 +              struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
 +              struct xstorm_per_queue_stats *xclient =
 +                      &bp->fw_stats_data->queue_stats[i].
 +                      xstorm_queue_statistics;
 +              struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
 +              struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
 +              u32 diff;
 +
 +              DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
 +                                  "bcast_sent 0x%x mcast_sent 0x%x\n",
 +                 i, xclient->ucast_pkts_sent,
 +                 xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
 +
 +              DP(BNX2X_MSG_STATS, "---------------\n");
 +
 +              qstats->total_broadcast_bytes_received_hi =
 +                      le32_to_cpu(tclient->rcv_bcast_bytes.hi);
 +              qstats->total_broadcast_bytes_received_lo =
 +                      le32_to_cpu(tclient->rcv_bcast_bytes.lo);
 +
 +              qstats->total_multicast_bytes_received_hi =
 +                      le32_to_cpu(tclient->rcv_mcast_bytes.hi);
 +              qstats->total_multicast_bytes_received_lo =
 +                      le32_to_cpu(tclient->rcv_mcast_bytes.lo);
 +
 +              qstats->total_unicast_bytes_received_hi =
 +                      le32_to_cpu(tclient->rcv_ucast_bytes.hi);
 +              qstats->total_unicast_bytes_received_lo =
 +                      le32_to_cpu(tclient->rcv_ucast_bytes.lo);
 +
 +              /*
 +               * sum all unicast/multicast/broadcast bytes
 +               * into total_bytes_received
 +               */
 +              qstats->total_bytes_received_hi =
 +                      qstats->total_broadcast_bytes_received_hi;
 +              qstats->total_bytes_received_lo =
 +                      qstats->total_broadcast_bytes_received_lo;
 +
 +              ADD_64(qstats->total_bytes_received_hi,
 +                     qstats->total_multicast_bytes_received_hi,
 +                     qstats->total_bytes_received_lo,
 +                     qstats->total_multicast_bytes_received_lo);
 +
 +              ADD_64(qstats->total_bytes_received_hi,
 +                     qstats->total_unicast_bytes_received_hi,
 +                     qstats->total_bytes_received_lo,
 +                     qstats->total_unicast_bytes_received_lo);
 +
 +              qstats->valid_bytes_received_hi =
 +                                      qstats->total_bytes_received_hi;
 +              qstats->valid_bytes_received_lo =
 +                                      qstats->total_bytes_received_lo;
 +
 +
 +              UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
 +                                      total_unicast_packets_received);
 +              UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
 +                                      total_multicast_packets_received);
 +              UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
 +                                      total_broadcast_packets_received);
 +              UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
 +                                      etherstatsoverrsizepkts);
 +              UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
 +
 +              SUB_EXTEND_USTAT(ucast_no_buff_pkts,
 +                                      total_unicast_packets_received);
 +              SUB_EXTEND_USTAT(mcast_no_buff_pkts,
 +                                      total_multicast_packets_received);
 +              SUB_EXTEND_USTAT(bcast_no_buff_pkts,
 +                                      total_broadcast_packets_received);
 +              UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
 +              UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
 +              UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
 +
 +              qstats->total_broadcast_bytes_transmitted_hi =
 +                      le32_to_cpu(xclient->bcast_bytes_sent.hi);
 +              qstats->total_broadcast_bytes_transmitted_lo =
 +                      le32_to_cpu(xclient->bcast_bytes_sent.lo);
 +
 +              qstats->total_multicast_bytes_transmitted_hi =
 +                      le32_to_cpu(xclient->mcast_bytes_sent.hi);
 +              qstats->total_multicast_bytes_transmitted_lo =
 +                      le32_to_cpu(xclient->mcast_bytes_sent.lo);
 +
 +              qstats->total_unicast_bytes_transmitted_hi =
 +                      le32_to_cpu(xclient->ucast_bytes_sent.hi);
 +              qstats->total_unicast_bytes_transmitted_lo =
 +                      le32_to_cpu(xclient->ucast_bytes_sent.lo);
 +              /*
 +               * sum all unicast/multicast/broadcast bytes
 +               * into total_bytes_transmitted
 +               */
 +              qstats->total_bytes_transmitted_hi =
 +                              qstats->total_unicast_bytes_transmitted_hi;
 +              qstats->total_bytes_transmitted_lo =
 +                              qstats->total_unicast_bytes_transmitted_lo;
 +
 +              ADD_64(qstats->total_bytes_transmitted_hi,
 +                     qstats->total_broadcast_bytes_transmitted_hi,
 +                     qstats->total_bytes_transmitted_lo,
 +                     qstats->total_broadcast_bytes_transmitted_lo);
 +
 +              ADD_64(qstats->total_bytes_transmitted_hi,
 +                     qstats->total_multicast_bytes_transmitted_hi,
 +                     qstats->total_bytes_transmitted_lo,
 +                     qstats->total_multicast_bytes_transmitted_lo);
 +
 +              UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
 +                                      total_unicast_packets_transmitted);
 +              UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
 +                                      total_multicast_packets_transmitted);
 +              UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
 +                                      total_broadcast_packets_transmitted);
 +
 +              UPDATE_EXTEND_TSTAT(checksum_discard,
 +                                  total_packets_received_checksum_discarded);
 +              UPDATE_EXTEND_TSTAT(ttl0_discard,
 +                                  total_packets_received_ttl0_discarded);
 +
 +              UPDATE_EXTEND_XSTAT(error_drop_pkts,
 +                                  total_transmitted_dropped_packets_error);
 +
 +              /* TPA aggregations completed */
 +              UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
 +              /* Number of network frames aggregated by TPA */
 +              UPDATE_EXTEND_USTAT(coalesced_pkts,
 +                                  total_tpa_aggregated_frames);
 +              /* Total number of bytes in completed TPA aggregations */
 +              qstats->total_tpa_bytes_lo =
 +                      le32_to_cpu(uclient->coalesced_bytes.lo);
 +              qstats->total_tpa_bytes_hi =
 +                      le32_to_cpu(uclient->coalesced_bytes.hi);
 +
 +              /* TPA stats per-function */
 +              ADD_64(estats->total_tpa_aggregations_hi,
 +                     qstats->total_tpa_aggregations_hi,
 +                     estats->total_tpa_aggregations_lo,
 +                     qstats->total_tpa_aggregations_lo);
 +              ADD_64(estats->total_tpa_aggregated_frames_hi,
 +                     qstats->total_tpa_aggregated_frames_hi,
 +                     estats->total_tpa_aggregated_frames_lo,
 +                     qstats->total_tpa_aggregated_frames_lo);
 +              ADD_64(estats->total_tpa_bytes_hi,
 +                     qstats->total_tpa_bytes_hi,
 +                     estats->total_tpa_bytes_lo,
 +                     qstats->total_tpa_bytes_lo);
 +
 +              ADD_64(fstats->total_bytes_received_hi,
 +                     qstats->total_bytes_received_hi,
 +                     fstats->total_bytes_received_lo,
 +                     qstats->total_bytes_received_lo);
 +              ADD_64(fstats->total_bytes_transmitted_hi,
 +                     qstats->total_bytes_transmitted_hi,
 +                     fstats->total_bytes_transmitted_lo,
 +                     qstats->total_bytes_transmitted_lo);
 +              ADD_64(fstats->total_unicast_packets_received_hi,
 +                     qstats->total_unicast_packets_received_hi,
 +                     fstats->total_unicast_packets_received_lo,
 +                     qstats->total_unicast_packets_received_lo);
 +              ADD_64(fstats->total_multicast_packets_received_hi,
 +                     qstats->total_multicast_packets_received_hi,
 +                     fstats->total_multicast_packets_received_lo,
 +                     qstats->total_multicast_packets_received_lo);
 +              ADD_64(fstats->total_broadcast_packets_received_hi,
 +                     qstats->total_broadcast_packets_received_hi,
 +                     fstats->total_broadcast_packets_received_lo,
 +                     qstats->total_broadcast_packets_received_lo);
 +              ADD_64(fstats->total_unicast_packets_transmitted_hi,
 +                     qstats->total_unicast_packets_transmitted_hi,
 +                     fstats->total_unicast_packets_transmitted_lo,
 +                     qstats->total_unicast_packets_transmitted_lo);
 +              ADD_64(fstats->total_multicast_packets_transmitted_hi,
 +                     qstats->total_multicast_packets_transmitted_hi,
 +                     fstats->total_multicast_packets_transmitted_lo,
 +                     qstats->total_multicast_packets_transmitted_lo);
 +              ADD_64(fstats->total_broadcast_packets_transmitted_hi,
 +                     qstats->total_broadcast_packets_transmitted_hi,
 +                     fstats->total_broadcast_packets_transmitted_lo,
 +                     qstats->total_broadcast_packets_transmitted_lo);
 +              ADD_64(fstats->valid_bytes_received_hi,
 +                     qstats->valid_bytes_received_hi,
 +                     fstats->valid_bytes_received_lo,
 +                     qstats->valid_bytes_received_lo);
 +
 +              ADD_64(estats->etherstatsoverrsizepkts_hi,
 +                     qstats->etherstatsoverrsizepkts_hi,
 +                     estats->etherstatsoverrsizepkts_lo,
 +                     qstats->etherstatsoverrsizepkts_lo);
 +              ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
 +                     estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
 +      }
 +
 +      ADD_64(fstats->total_bytes_received_hi,
 +             estats->rx_stat_ifhcinbadoctets_hi,
 +             fstats->total_bytes_received_lo,
 +             estats->rx_stat_ifhcinbadoctets_lo);
 +
 +      ADD_64(fstats->total_bytes_received_hi,
 +             tfunc->rcv_error_bytes.hi,
 +             fstats->total_bytes_received_lo,
 +             tfunc->rcv_error_bytes.lo);
 +
 +      memcpy(estats, &(fstats->total_bytes_received_hi),
 +             sizeof(struct host_func_stats) - 2*sizeof(u32));
 +
 +      ADD_64(estats->error_bytes_received_hi,
 +             tfunc->rcv_error_bytes.hi,
 +             estats->error_bytes_received_lo,
 +             tfunc->rcv_error_bytes.lo);
 +
 +      ADD_64(estats->etherstatsoverrsizepkts_hi,
 +             estats->rx_stat_dot3statsframestoolong_hi,
 +             estats->etherstatsoverrsizepkts_lo,
 +             estats->rx_stat_dot3statsframestoolong_lo);
 +      ADD_64(estats->error_bytes_received_hi,
 +             estats->rx_stat_ifhcinbadoctets_hi,
 +             estats->error_bytes_received_lo,
 +             estats->rx_stat_ifhcinbadoctets_lo);
 +
 +      if (bp->port.pmf) {
 +              estats->mac_filter_discard =
 +                              le32_to_cpu(tport->mac_filter_discard);
 +              estats->mf_tag_discard =
 +                              le32_to_cpu(tport->mf_tag_discard);
 +              estats->brb_truncate_discard =
 +                              le32_to_cpu(tport->brb_truncate_discard);
 +              estats->mac_discard = le32_to_cpu(tport->mac_discard);
 +      }
 +
 +      fstats->host_func_stats_start = ++fstats->host_func_stats_end;
 +
 +      bp->stats_pending = 0;
 +
 +      return 0;
 +}
 +
 +static void bnx2x_net_stats_update(struct bnx2x *bp)
 +{
 +      struct bnx2x_eth_stats *estats = &bp->eth_stats;
 +      struct net_device_stats *nstats = &bp->dev->stats;
 +      unsigned long tmp;
 +      int i;
 +
 +      nstats->rx_packets =
 +              bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
 +              bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
 +              bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
 +
 +      nstats->tx_packets =
 +              bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
 +              bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
 +              bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
 +
 +      nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
 +
 +      nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
 +
 +      tmp = estats->mac_discard;
 +      for_each_rx_queue(bp, i)
 +              tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
 +      nstats->rx_dropped = tmp;
 +
 +      nstats->tx_dropped = 0;
 +
 +      nstats->multicast =
 +              bnx2x_hilo(&estats->total_multicast_packets_received_hi);
 +
 +      nstats->collisions =
 +              bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
 +
 +      nstats->rx_length_errors =
 +              bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
 +              bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
 +      nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
 +                               bnx2x_hilo(&estats->brb_truncate_hi);
 +      nstats->rx_crc_errors =
 +              bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
 +      nstats->rx_frame_errors =
 +              bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
 +      nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
 +      nstats->rx_missed_errors = 0;
 +
 +      nstats->rx_errors = nstats->rx_length_errors +
 +                          nstats->rx_over_errors +
 +                          nstats->rx_crc_errors +
 +                          nstats->rx_frame_errors +
 +                          nstats->rx_fifo_errors +
 +                          nstats->rx_missed_errors;
 +
 +      nstats->tx_aborted_errors =
 +              bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
 +              bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
 +      nstats->tx_carrier_errors =
 +              bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
 +      nstats->tx_fifo_errors = 0;
 +      nstats->tx_heartbeat_errors = 0;
 +      nstats->tx_window_errors = 0;
 +
 +      nstats->tx_errors = nstats->tx_aborted_errors +
 +                          nstats->tx_carrier_errors +
 +          bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
 +}
 +
 +static void bnx2x_drv_stats_update(struct bnx2x *bp)
 +{
 +      struct bnx2x_eth_stats *estats = &bp->eth_stats;
 +      int i;
 +
 +      estats->driver_xoff = 0;
 +      estats->rx_err_discard_pkt = 0;
 +      estats->rx_skb_alloc_failed = 0;
 +      estats->hw_csum_err = 0;
 +      for_each_queue(bp, i) {
 +              struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
 +
 +              estats->driver_xoff += qstats->driver_xoff;
 +              estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
 +              estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
 +              estats->hw_csum_err += qstats->hw_csum_err;
 +      }
 +}
 +
 +static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
 +{
 +      u32 val;
 +
 +      if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
 +              val = SHMEM2_RD(bp, edebug_driver_if[1]);
 +
 +              if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
 +                      return true;
 +      }
 +
 +      return false;
 +}
 +
 +static void bnx2x_stats_update(struct bnx2x *bp)
 +{
 +      u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 +
 +      if (bnx2x_edebug_stats_stopped(bp))
 +              return;
 +
 +      if (*stats_comp != DMAE_COMP_VAL)
 +              return;
 +
 +      if (bp->port.pmf)
 +              bnx2x_hw_stats_update(bp);
 +
 +      if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
 +              BNX2X_ERR("storm stats were not updated 3 times in a row\n");
 +              bnx2x_panic();
 +              return;
 +      }
 +
 +      bnx2x_net_stats_update(bp);
 +      bnx2x_drv_stats_update(bp);
 +
 +      if (netif_msg_timer(bp)) {
 +              struct bnx2x_eth_stats *estats = &bp->eth_stats;
 +              int i, cos;
 +
 +              netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
 +                     estats->brb_drop_lo, estats->brb_truncate_lo);
 +
 +              for_each_eth_queue(bp, i) {
 +                      struct bnx2x_fastpath *fp = &bp->fp[i];
 +                      struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
 +
 +                      pr_debug("%s: rx usage(%4u)  *rx_cons_sb(%u)  rx pkt(%lu)  rx calls(%lu %lu)\n",
 +                               fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
 +                                          fp->rx_comp_cons),
 +                               le16_to_cpu(*fp->rx_cons_sb),
 +                               bnx2x_hilo(&qstats->
 +                                          total_unicast_packets_received_hi),
 +                               fp->rx_calls, fp->rx_pkt);
 +              }
 +
 +              for_each_eth_queue(bp, i) {
 +                      struct bnx2x_fastpath *fp = &bp->fp[i];
 +                      struct bnx2x_fp_txdata *txdata;
 +                      struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
 +                      struct netdev_queue *txq;
 +
 +                      pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
 +                               fp->name,
 +                               bnx2x_hilo(
 +                                       &qstats->total_unicast_packets_transmitted_hi),
 +                               qstats->driver_xoff);
 +
 +                      for_each_cos_in_tx_queue(fp, cos) {
 +                              txdata = &fp->txdata[cos];
 +                              txq = netdev_get_tx_queue(bp->dev,
 +                                              FP_COS_TO_TXQ(fp, cos));
 +
 +                              pr_debug("%d: tx avail(%4u)  *tx_cons_sb(%u)  tx calls (%lu)  %s\n",
 +                                       cos,
 +                                       bnx2x_tx_avail(bp, txdata),
 +                                       le16_to_cpu(*txdata->tx_cons_sb),
 +                                       txdata->tx_pkt,
 +                                       (netif_tx_queue_stopped(txq) ?
 +                                        "Xoff" : "Xon")
 +                                      );
 +                      }
 +              }
 +      }
 +
 +      bnx2x_hw_stats_post(bp);
 +      bnx2x_storm_stats_post(bp);
 +}
 +
 +static void bnx2x_port_stats_stop(struct bnx2x *bp)
 +{
 +      struct dmae_command *dmae;
 +      u32 opcode;
 +      int loader_idx = PMF_DMAE_C(bp);
 +      u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 +
 +      bp->executer_idx = 0;
 +
 +      opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
 +
 +      if (bp->port.port_stx) {
 +
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              if (bp->func_stx)
 +                      dmae->opcode = bnx2x_dmae_opcode_add_comp(
 +                                              opcode, DMAE_COMP_GRC);
 +              else
 +                      dmae->opcode = bnx2x_dmae_opcode_add_comp(
 +                                              opcode, DMAE_COMP_PCI);
 +
 +              dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
 +              dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
 +              dmae->dst_addr_lo = bp->port.port_stx >> 2;
 +              dmae->dst_addr_hi = 0;
 +              dmae->len = sizeof(struct host_port_stats) >> 2;
 +              if (bp->func_stx) {
 +                      dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 +                      dmae->comp_addr_hi = 0;
 +                      dmae->comp_val = 1;
 +              } else {
 +                      dmae->comp_addr_lo =
 +                              U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 +                      dmae->comp_addr_hi =
 +                              U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 +                      dmae->comp_val = DMAE_COMP_VAL;
 +
 +                      *stats_comp = 0;
 +              }
 +      }
 +
 +      if (bp->func_stx) {
 +
 +              dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +              dmae->opcode =
 +                      bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
 +              dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
 +              dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
 +              dmae->dst_addr_lo = bp->func_stx >> 2;
 +              dmae->dst_addr_hi = 0;
 +              dmae->len = sizeof(struct host_func_stats) >> 2;
 +              dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 +              dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 +              dmae->comp_val = DMAE_COMP_VAL;
 +
 +              *stats_comp = 0;
 +      }
 +}
 +
 +static void bnx2x_stats_stop(struct bnx2x *bp)
 +{
 +      int update = 0;
 +
 +      bnx2x_stats_comp(bp);
 +
 +      if (bp->port.pmf)
 +              update = (bnx2x_hw_stats_update(bp) == 0);
 +
 +      update |= (bnx2x_storm_stats_update(bp) == 0);
 +
 +      if (update) {
 +              bnx2x_net_stats_update(bp);
 +
 +              if (bp->port.pmf)
 +                      bnx2x_port_stats_stop(bp);
 +
 +              bnx2x_hw_stats_post(bp);
 +              bnx2x_stats_comp(bp);
 +      }
 +}
 +
 +static void bnx2x_stats_do_nothing(struct bnx2x *bp)
 +{
 +}
 +
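 +/* Statistics state machine: bnx2x_stats_handle() below indexes this table by
 + * the current state and the incoming event to select the handler to run and
 + * the next state to move to.
 + */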
 +static const struct {
 +      void (*action)(struct bnx2x *bp);
 +      enum bnx2x_stats_state next_state;
 +} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
 +/* state      event   */
 +{
 +/* DISABLED   PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
 +/*            LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
 +/*            UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
 +/*            STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
 +},
 +{
 +/* ENABLED    PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
 +/*            LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
 +/*            UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
 +/*            STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
 +}
 +};
 +
 +void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 +{
 +      enum bnx2x_stats_state state;
 +      if (unlikely(bp->panic))
 +              return;
 +      bnx2x_stats_stm[bp->stats_state][event].action(bp);
 +      spin_lock_bh(&bp->stats_lock);
 +      state = bp->stats_state;
 +      bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 +      spin_unlock_bh(&bp->stats_lock);
 +
 +      if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
 +              DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
 +                 state, event, bp->stats_state);
 +}
 +
 +static void bnx2x_port_stats_base_init(struct bnx2x *bp)
 +{
 +      struct dmae_command *dmae;
 +      u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 +
 +      /* sanity */
 +      if (!bp->port.pmf || !bp->port.port_stx) {
 +              BNX2X_ERR("BUG!\n");
 +              return;
 +      }
 +
 +      bp->executer_idx = 0;
 +
 +      dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
 +      dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 +                                       true, DMAE_COMP_PCI);
 +      dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
 +      dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
 +      dmae->dst_addr_lo = bp->port.port_stx >> 2;
 +      dmae->dst_addr_hi = 0;
 +      dmae->len = sizeof(struct host_port_stats) >> 2;
 +      dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_val = DMAE_COMP_VAL;
 +
 +      *stats_comp = 0;
 +      bnx2x_hw_stats_post(bp);
 +      bnx2x_stats_comp(bp);
 +}
 +
 +static void bnx2x_func_stats_base_init(struct bnx2x *bp)
 +{
-               int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
++      int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
 +      u32 func_stx;
 +
 +      /* sanity */
 +      if (!bp->port.pmf || !bp->func_stx) {
 +              BNX2X_ERR("BUG!\n");
 +              return;
 +      }
 +
 +      /* save our func_stx */
 +      func_stx = bp->func_stx;
 +
 +      for (vn = VN_0; vn < vn_max; vn++) {
++              int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
 +
 +              bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
 +              bnx2x_func_stats_init(bp);
 +              bnx2x_hw_stats_post(bp);
 +              bnx2x_stats_comp(bp);
 +      }
 +
 +      /* restore our func_stx */
 +      bp->func_stx = func_stx;
 +}
 +
 +static void bnx2x_func_stats_base_update(struct bnx2x *bp)
 +{
 +      struct dmae_command *dmae = &bp->stats_dmae;
 +      u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 +
 +      /* sanity */
 +      if (!bp->func_stx) {
 +              BNX2X_ERR("BUG!\n");
 +              return;
 +      }
 +
 +      bp->executer_idx = 0;
 +      memset(dmae, 0, sizeof(struct dmae_command));
 +
 +      dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
 +                                       true, DMAE_COMP_PCI);
 +      dmae->src_addr_lo = bp->func_stx >> 2;
 +      dmae->src_addr_hi = 0;
 +      dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
 +      dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
 +      dmae->len = sizeof(struct host_func_stats) >> 2;
 +      dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 +      dmae->comp_val = DMAE_COMP_VAL;
 +
 +      *stats_comp = 0;
 +      bnx2x_hw_stats_post(bp);
 +      bnx2x_stats_comp(bp);
 +}
 +
 +/**
 + * This function prepares the statistics ramrod data so that we will
 + * only have to increment the statistics counter and send the ramrod
 + * each time we need to.
 + *
 + * @param bp
 + */
 +static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 +{
 +      int i;
 +      struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
 +
 +      dma_addr_t cur_data_offset;
 +      struct stats_query_entry *cur_query_entry;
 +
 +      stats_hdr->cmd_num = bp->fw_stats_num;
 +      stats_hdr->drv_stats_counter = 0;
 +
 +      /* storm_counters struct contains the counters of completed
 +       * statistics requests per storm which are incremented by FW
 +       * each time it completes handling a statistics ramrod. We
 +       * check these counters in the timer handler and discard any
 +       * stale (statistics) ramrod completion.
 +       */
 +      cur_data_offset = bp->fw_stats_data_mapping +
 +              offsetof(struct bnx2x_fw_stats_data, storm_counters);
 +
 +      stats_hdr->stats_counters_addrs.hi =
 +              cpu_to_le32(U64_HI(cur_data_offset));
 +      stats_hdr->stats_counters_addrs.lo =
 +              cpu_to_le32(U64_LO(cur_data_offset));
 +
 +      /* prepare for the first stats ramrod (it will be completed with
 +       * the counters equal to zero) - init the counters to something different.
 +       */
 +      memset(&bp->fw_stats_data->storm_counters, 0xff,
 +             sizeof(struct stats_counter));
 +
 +      /**** Port FW statistics data ****/
 +      cur_data_offset = bp->fw_stats_data_mapping +
 +              offsetof(struct bnx2x_fw_stats_data, port);
 +
 +      cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
 +
 +      cur_query_entry->kind = STATS_TYPE_PORT;
 +      /* For port query index is a DONT CARE */
 +      cur_query_entry->index = BP_PORT(bp);
 +      /* For port query funcID is a DONT CARE */
 +      cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
 +      cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
 +      cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
 +
 +      /**** PF FW statistics data ****/
 +      cur_data_offset = bp->fw_stats_data_mapping +
 +              offsetof(struct bnx2x_fw_stats_data, pf);
 +
 +      cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
 +
 +      cur_query_entry->kind = STATS_TYPE_PF;
 +      /* For PF query index is a DONT CARE */
 +      cur_query_entry->index = BP_PORT(bp);
 +      cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
 +      cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
 +      cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
 +
 +      /**** Clients' queries ****/
 +      cur_data_offset = bp->fw_stats_data_mapping +
 +              offsetof(struct bnx2x_fw_stats_data, queue_stats);
 +
 +      for_each_eth_queue(bp, i) {
 +              cur_query_entry =
 +                      &bp->fw_stats_req->
 +                                      query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
 +
 +              cur_query_entry->kind = STATS_TYPE_QUEUE;
 +              cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
 +              cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
 +              cur_query_entry->address.hi =
 +                      cpu_to_le32(U64_HI(cur_data_offset));
 +              cur_query_entry->address.lo =
 +                      cpu_to_le32(U64_LO(cur_data_offset));
 +
 +              cur_data_offset += sizeof(struct per_queue_stats);
 +      }
 +}
 +
 +void bnx2x_stats_init(struct bnx2x *bp)
 +{
 +      int /*abs*/port = BP_PORT(bp);
 +      int mb_idx = BP_FW_MB_IDX(bp);
 +      int i;
 +
 +      bp->stats_pending = 0;
 +      bp->executer_idx = 0;
 +      bp->stats_counter = 0;
 +
 +      /* port and func stats for management */
 +      if (!BP_NOMCP(bp)) {
 +              bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
 +              bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
 +
 +      } else {
 +              bp->port.port_stx = 0;
 +              bp->func_stx = 0;
 +      }
 +      DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
 +         bp->port.port_stx, bp->func_stx);
 +
 +      port = BP_PORT(bp);
 +      /* port stats */
 +      memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
 +      bp->port.old_nig_stats.brb_discard =
 +                      REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
 +      bp->port.old_nig_stats.brb_truncate =
 +                      REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
 +      if (!CHIP_IS_E3(bp)) {
 +              REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
 +                          &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
 +              REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
 +                          &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
 +      }
 +
 +      /* function stats */
 +      for_each_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +              memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
 +              memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
 +              memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
 +              memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
 +      }
 +
 +      /* Prepare statistics ramrod data */
 +      bnx2x_prep_fw_stats_req(bp);
 +
 +      memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
 +      memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
 +
 +      bp->stats_state = STATS_STATE_DISABLED;
 +
 +      if (bp->port.pmf) {
 +              if (bp->port.port_stx)
 +                      bnx2x_port_stats_base_init(bp);
 +
 +              if (bp->func_stx)
 +                      bnx2x_func_stats_base_init(bp);
 +
 +      } else if (bp->func_stx)
 +              bnx2x_func_stats_base_update(bp);
 +}
index 1485013,0000000..26c6bd4
mode 100644,000000..100644
--- /dev/null
@@@ -1,15955 -1,0 +1,15953 @@@
- #ifdef BCM_KERNEL_SUPPORTS_8021Q
 +/*
 + * tg3.c: Broadcom Tigon3 ethernet driver.
 + *
 + * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 + * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 + * Copyright (C) 2004 Sun Microsystems Inc.
 + * Copyright (C) 2005-2011 Broadcom Corporation.
 + *
 + * Firmware is:
 + *    Derived from proprietary unpublished source code,
 + *    Copyright (C) 2000-2003 Broadcom Corporation.
 + *
 + *    Permission is hereby granted for the distribution of this firmware
 + *    data in hexadecimal or equivalent format, provided this copyright
 + *    notice is accompanying it.
 + */
 +
 +
 +#include <linux/module.h>
 +#include <linux/moduleparam.h>
 +#include <linux/stringify.h>
 +#include <linux/kernel.h>
 +#include <linux/types.h>
 +#include <linux/compiler.h>
 +#include <linux/slab.h>
 +#include <linux/delay.h>
 +#include <linux/in.h>
 +#include <linux/init.h>
 +#include <linux/interrupt.h>
 +#include <linux/ioport.h>
 +#include <linux/pci.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/skbuff.h>
 +#include <linux/ethtool.h>
 +#include <linux/mdio.h>
 +#include <linux/mii.h>
 +#include <linux/phy.h>
 +#include <linux/brcmphy.h>
 +#include <linux/if_vlan.h>
 +#include <linux/ip.h>
 +#include <linux/tcp.h>
 +#include <linux/workqueue.h>
 +#include <linux/prefetch.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/firmware.h>
 +
 +#include <net/checksum.h>
 +#include <net/ip.h>
 +
 +#include <asm/system.h>
 +#include <linux/io.h>
 +#include <asm/byteorder.h>
 +#include <linux/uaccess.h>
 +
 +#ifdef CONFIG_SPARC
 +#include <asm/idprom.h>
 +#include <asm/prom.h>
 +#endif
 +
 +#define BAR_0 0
 +#define BAR_2 2
 +
 +#include "tg3.h"
 +
 +/* Functions & macros to verify TG3_FLAGS types */
 +
 +static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
 +{
 +      return test_bit(flag, bits);
 +}
 +
 +static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
 +{
 +      set_bit(flag, bits);
 +}
 +
 +static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 +{
 +      clear_bit(flag, bits);
 +}
 +
 +#define tg3_flag(tp, flag)                            \
 +      _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
 +#define tg3_flag_set(tp, flag)                                \
 +      _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
 +#define tg3_flag_clear(tp, flag)                      \
 +      _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
 +
 +#define DRV_MODULE_NAME               "tg3"
 +#define TG3_MAJ_NUM                   3
 +#define TG3_MIN_NUM                   120
 +#define DRV_MODULE_VERSION    \
 +      __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
 +#define DRV_MODULE_RELDATE    "August 18, 2011"
 +
 +#define RESET_KIND_SHUTDOWN   0
 +#define RESET_KIND_INIT               1
 +#define RESET_KIND_SUSPEND    2
 +
 +#define TG3_DEF_RX_MODE               0
 +#define TG3_DEF_TX_MODE               0
 +#define TG3_DEF_MSG_ENABLE      \
 +      (NETIF_MSG_DRV          | \
 +       NETIF_MSG_PROBE        | \
 +       NETIF_MSG_LINK         | \
 +       NETIF_MSG_TIMER        | \
 +       NETIF_MSG_IFDOWN       | \
 +       NETIF_MSG_IFUP         | \
 +       NETIF_MSG_RX_ERR       | \
 +       NETIF_MSG_TX_ERR)
 +
 +#define TG3_GRC_LCLCTL_PWRSW_DELAY    100
 +
 +/* length of time before we decide the hardware is borked,
 + * and dev->tx_timeout() should be called to fix the problem
 + */
 +
 +#define TG3_TX_TIMEOUT                        (5 * HZ)
 +
 +/* hardware minimum and maximum for a single frame's data payload */
 +#define TG3_MIN_MTU                   60
 +#define TG3_MAX_MTU(tp)       \
 +      (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
 +
 +/* These numbers seem to be hard coded in the NIC firmware somehow.
 + * You can't change the ring sizes, but you can change where you place
 + * them in the NIC onboard memory.
 + */
 +#define TG3_RX_STD_RING_SIZE(tp) \
 +      (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
 +       TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
 +#define TG3_DEF_RX_RING_PENDING               200
 +#define TG3_RX_JMB_RING_SIZE(tp) \
 +      (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
 +       TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
 +#define TG3_DEF_RX_JUMBO_RING_PENDING 100
 +#define TG3_RSS_INDIR_TBL_SIZE                128
 +
 +/* Do not place this n-ring entries value into the tp struct itself;
 + * we really want to expose these constants to GCC so that modulo et
 + * al.  operations are done with shifts and masks instead of with
 + * hw multiply/modulo instructions.  Another solution would be to
 + * replace things like '% foo' with '& (foo - 1)'.
 + */
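 +/* NEXT_TX() below is one such case: '(N + 1) % TG3_TX_RING_SIZE' is computed
 + * as '(N + 1) & (TG3_TX_RING_SIZE - 1)'.
 + */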
 +
 +#define TG3_TX_RING_SIZE              512
 +#define TG3_DEF_TX_RING_PENDING               (TG3_TX_RING_SIZE - 1)
 +
 +#define TG3_RX_STD_RING_BYTES(tp) \
 +      (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
 +#define TG3_RX_JMB_RING_BYTES(tp) \
 +      (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
 +#define TG3_RX_RCB_RING_BYTES(tp) \
 +      (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
 +#define TG3_TX_RING_BYTES     (sizeof(struct tg3_tx_buffer_desc) * \
 +                               TG3_TX_RING_SIZE)
 +#define NEXT_TX(N)            (((N) + 1) & (TG3_TX_RING_SIZE - 1))
 +
 +#define TG3_DMA_BYTE_ENAB             64
 +
 +#define TG3_RX_STD_DMA_SZ             1536
 +#define TG3_RX_JMB_DMA_SZ             9046
 +
 +#define TG3_RX_DMA_TO_MAP_SZ(x)               ((x) + TG3_DMA_BYTE_ENAB)
 +
 +#define TG3_RX_STD_MAP_SZ             TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 +#define TG3_RX_JMB_MAP_SZ             TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 +
 +#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
 +      (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
 +
 +#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
 +      (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
 +
 +/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 + * that are at least dword aligned when used in PCIX mode.  The driver
 + * works around this bug by double copying the packet.  This workaround
 + * is built into the normal double copy length check for efficiency.
 + *
 + * However, the double copy is only necessary on those architectures
 + * where unaligned memory accesses are inefficient.  For those architectures
 + * where unaligned memory accesses incur little penalty, we can reintegrate
 + * the 5701 in the normal rx path.  Doing so saves a device structure
 + * dereference by hardcoding the double copy threshold in place.
 + */
 +#define TG3_RX_COPY_THRESHOLD         256
 +#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 +      #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
 +#else
 +      #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
 +#endif
 +
 +#if (NET_IP_ALIGN != 0)
 +#define TG3_RX_OFFSET(tp)     ((tp)->rx_offset)
 +#else
 +#define TG3_RX_OFFSET(tp)     0
 +#endif
 +
 +/* minimum number of free TX descriptors required to wake up TX process */
 +#define TG3_TX_WAKEUP_THRESH(tnapi)           ((tnapi)->tx_pending / 4)
 +#define TG3_TX_BD_DMA_MAX             4096
 +
 +#define TG3_RAW_IP_ALIGN 2
 +
 +#define TG3_FW_UPDATE_TIMEOUT_SEC     5
 +
 +#define FIRMWARE_TG3          "tigon/tg3.bin"
 +#define FIRMWARE_TG3TSO               "tigon/tg3_tso.bin"
 +#define FIRMWARE_TG3TSO5      "tigon/tg3_tso5.bin"
 +
 +static char version[] __devinitdata =
 +      DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
 +
 +MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
 +MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(DRV_MODULE_VERSION);
 +MODULE_FIRMWARE(FIRMWARE_TG3);
 +MODULE_FIRMWARE(FIRMWARE_TG3TSO);
 +MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
 +
 +static int tg3_debug = -1;    /* -1 == use TG3_DEF_MSG_ENABLE as value */
 +module_param(tg3_debug, int, 0);
 +MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
 +
 +static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
 +      {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
 +      {}
 +};
 +
 +MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
 +
 +static const struct {
 +      const char string[ETH_GSTRING_LEN];
 +} ethtool_stats_keys[] = {
 +      { "rx_octets" },
 +      { "rx_fragments" },
 +      { "rx_ucast_packets" },
 +      { "rx_mcast_packets" },
 +      { "rx_bcast_packets" },
 +      { "rx_fcs_errors" },
 +      { "rx_align_errors" },
 +      { "rx_xon_pause_rcvd" },
 +      { "rx_xoff_pause_rcvd" },
 +      { "rx_mac_ctrl_rcvd" },
 +      { "rx_xoff_entered" },
 +      { "rx_frame_too_long_errors" },
 +      { "rx_jabbers" },
 +      { "rx_undersize_packets" },
 +      { "rx_in_length_errors" },
 +      { "rx_out_length_errors" },
 +      { "rx_64_or_less_octet_packets" },
 +      { "rx_65_to_127_octet_packets" },
 +      { "rx_128_to_255_octet_packets" },
 +      { "rx_256_to_511_octet_packets" },
 +      { "rx_512_to_1023_octet_packets" },
 +      { "rx_1024_to_1522_octet_packets" },
 +      { "rx_1523_to_2047_octet_packets" },
 +      { "rx_2048_to_4095_octet_packets" },
 +      { "rx_4096_to_8191_octet_packets" },
 +      { "rx_8192_to_9022_octet_packets" },
 +
 +      { "tx_octets" },
 +      { "tx_collisions" },
 +
 +      { "tx_xon_sent" },
 +      { "tx_xoff_sent" },
 +      { "tx_flow_control" },
 +      { "tx_mac_errors" },
 +      { "tx_single_collisions" },
 +      { "tx_mult_collisions" },
 +      { "tx_deferred" },
 +      { "tx_excessive_collisions" },
 +      { "tx_late_collisions" },
 +      { "tx_collide_2times" },
 +      { "tx_collide_3times" },
 +      { "tx_collide_4times" },
 +      { "tx_collide_5times" },
 +      { "tx_collide_6times" },
 +      { "tx_collide_7times" },
 +      { "tx_collide_8times" },
 +      { "tx_collide_9times" },
 +      { "tx_collide_10times" },
 +      { "tx_collide_11times" },
 +      { "tx_collide_12times" },
 +      { "tx_collide_13times" },
 +      { "tx_collide_14times" },
 +      { "tx_collide_15times" },
 +      { "tx_ucast_packets" },
 +      { "tx_mcast_packets" },
 +      { "tx_bcast_packets" },
 +      { "tx_carrier_sense_errors" },
 +      { "tx_discards" },
 +      { "tx_errors" },
 +
 +      { "dma_writeq_full" },
 +      { "dma_write_prioq_full" },
 +      { "rxbds_empty" },
 +      { "rx_discards" },
 +      { "rx_errors" },
 +      { "rx_threshold_hit" },
 +
 +      { "dma_readq_full" },
 +      { "dma_read_prioq_full" },
 +      { "tx_comp_queue_full" },
 +
 +      { "ring_set_send_prod_index" },
 +      { "ring_status_update" },
 +      { "nic_irqs" },
 +      { "nic_avoided_irqs" },
 +      { "nic_tx_threshold_hit" },
 +
 +      { "mbuf_lwm_thresh_hit" },
 +};
 +
 +#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
 +
 +
 +static const struct {
 +      const char string[ETH_GSTRING_LEN];
 +} ethtool_test_keys[] = {
 +      { "nvram test        (online) " },
 +      { "link test         (online) " },
 +      { "register test     (offline)" },
 +      { "memory test       (offline)" },
 +      { "mac loopback test (offline)" },
 +      { "phy loopback test (offline)" },
 +      { "ext loopback test (offline)" },
 +      { "interrupt test    (offline)" },
 +};
 +
 +#define TG3_NUM_TEST  ARRAY_SIZE(ethtool_test_keys)
 +
 +
 +static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
 +{
 +      writel(val, tp->regs + off);
 +}
 +
 +static u32 tg3_read32(struct tg3 *tp, u32 off)
 +{
 +      return readl(tp->regs + off);
 +}
 +
 +static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
 +{
 +      writel(val, tp->aperegs + off);
 +}
 +
 +static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
 +{
 +      return readl(tp->aperegs + off);
 +}
 +
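 +/* Indirect register access: the target offset is staged through the
 + * TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA config-space window, with
 + * indirect_lock serializing the two-step sequence.
 + */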
 +static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&tp->indirect_lock, flags);
 +      pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 +      pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
 +      spin_unlock_irqrestore(&tp->indirect_lock, flags);
 +}
 +
 +static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
 +{
 +      writel(val, tp->regs + off);
 +      readl(tp->regs + off);
 +}
 +
 +static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
 +{
 +      unsigned long flags;
 +      u32 val;
 +
 +      spin_lock_irqsave(&tp->indirect_lock, flags);
 +      pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 +      pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
 +      spin_unlock_irqrestore(&tp->indirect_lock, flags);
 +      return val;
 +}
 +
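 +/* Indirect mailbox writes.  The receive-return and standard-ring
 + * producer mailboxes have dedicated config-space shadows; all other
 + * mailboxes are reached through the register window at off + 0x5600.
 + */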
 +static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
 +{
 +      unsigned long flags;
 +
 +      if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
 +              pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
 +                                     TG3_64BIT_REG_LOW, val);
 +              return;
 +      }
 +      if (off == TG3_RX_STD_PROD_IDX_REG) {
 +              pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
 +                                     TG3_64BIT_REG_LOW, val);
 +              return;
 +      }
 +
 +      spin_lock_irqsave(&tp->indirect_lock, flags);
 +      pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
 +      pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
 +      spin_unlock_irqrestore(&tp->indirect_lock, flags);
 +
 +      /* In indirect mode when disabling interrupts, we also need
 +       * to clear the interrupt bit in the GRC local ctrl register.
 +       */
 +      if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
 +          (val == 0x1)) {
 +              pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
 +                                     tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
 +      }
 +}
 +
 +static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
 +{
 +      unsigned long flags;
 +      u32 val;
 +
 +      spin_lock_irqsave(&tp->indirect_lock, flags);
 +      pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
 +      pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
 +      spin_unlock_irqrestore(&tp->indirect_lock, flags);
 +      return val;
 +}
 +
 +/* usec_wait specifies the wait time in usec when writing to certain registers
 + * where it is unsafe to read back the register without some delay.
 + * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 + * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 + */
 +static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
 +{
 +      if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
 +              /* Non-posted methods */
 +              tp->write32(tp, off, val);
 +      else {
 +              /* Posted method */
 +              tg3_write32(tp, off, val);
 +              if (usec_wait)
 +                      udelay(usec_wait);
 +              tp->read32(tp, off);
 +      }
 +      /* Wait again after the read for the posted method to guarantee that
 +       * the wait time is met.
 +       */
 +      if (usec_wait)
 +              udelay(usec_wait);
 +}
 +
 +static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
 +{
 +      tp->write32_mbox(tp, off, val);
 +      if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
 +              tp->read32_mbox(tp, off);
 +}
 +
 +static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
 +{
 +      void __iomem *mbox = tp->regs + off;
 +      writel(val, mbox);
 +      if (tg3_flag(tp, TXD_MBOX_HWBUG))
 +              writel(val, mbox);
 +      if (tg3_flag(tp, MBOX_WRITE_REORDER))
 +              readl(mbox);
 +}
 +
 +static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
 +{
 +      return readl(tp->regs + off + GRCMBOX_BASE);
 +}
 +
 +static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
 +{
 +      writel(val, tp->regs + off + GRCMBOX_BASE);
 +}
 +
 +#define tw32_mailbox(reg, val)                tp->write32_mbox(tp, reg, val)
 +#define tw32_mailbox_f(reg, val)      tw32_mailbox_flush(tp, (reg), (val))
 +#define tw32_rx_mbox(reg, val)                tp->write32_rx_mbox(tp, reg, val)
 +#define tw32_tx_mbox(reg, val)                tp->write32_tx_mbox(tp, reg, val)
 +#define tr32_mailbox(reg)             tp->read32_mbox(tp, reg)
 +
 +#define tw32(reg, val)                        tp->write32(tp, reg, val)
 +#define tw32_f(reg, val)              _tw32_flush(tp, (reg), (val), 0)
 +#define tw32_wait_f(reg, val, us)     _tw32_flush(tp, (reg), (val), (us))
 +#define tr32(reg)                     tp->read32(tp, reg)
 +
 +static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
 +{
 +      unsigned long flags;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
 +          (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
 +              return;
 +
 +      spin_lock_irqsave(&tp->indirect_lock, flags);
 +      if (tg3_flag(tp, SRAM_USE_CONFIG)) {
 +              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 +              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 +
 +              /* Always leave this as zero. */
 +              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
 +      } else {
 +              tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
 +              tw32_f(TG3PCI_MEM_WIN_DATA, val);
 +
 +              /* Always leave this as zero. */
 +              tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
 +      }
 +      spin_unlock_irqrestore(&tp->indirect_lock, flags);
 +}
 +
 +static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
 +{
 +      unsigned long flags;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
 +          (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
 +              *val = 0;
 +              return;
 +      }
 +
 +      spin_lock_irqsave(&tp->indirect_lock, flags);
 +      if (tg3_flag(tp, SRAM_USE_CONFIG)) {
 +              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 +              pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 +
 +              /* Always leave this as zero. */
 +              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
 +      } else {
 +              tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
 +              *val = tr32(TG3PCI_MEM_WIN_DATA);
 +
 +              /* Always leave this as zero. */
 +              tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
 +      }
 +      spin_unlock_irqrestore(&tp->indirect_lock, flags);
 +}
 +
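 +/* Release every APE lock grant the driver might still own from a
 + * previous run, including this function's bit of the shared GPIO lock.
 + */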
 +static void tg3_ape_lock_init(struct tg3 *tp)
 +{
 +      int i;
 +      u32 regbase, bit;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
 +              regbase = TG3_APE_LOCK_GRANT;
 +      else
 +              regbase = TG3_APE_PER_LOCK_GRANT;
 +
 +      /* Make sure the driver doesn't hold any stale locks. */
 +      for (i = 0; i < 8; i++) {
 +              if (i == TG3_APE_LOCK_GPIO)
 +                      continue;
 +              tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
 +      }
 +
 +      /* Clear the correct bit of the GPIO lock too. */
 +      if (!tp->pci_fn)
 +              bit = APE_LOCK_GRANT_DRIVER;
 +      else
 +              bit = 1 << tp->pci_fn;
 +
 +      tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
 +}
 +
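 +/* Request the given APE lock and poll the grant register for up to
 + * 1 ms; on timeout the request is withdrawn and -EBUSY is returned.
 + */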
 +static int tg3_ape_lock(struct tg3 *tp, int locknum)
 +{
 +      int i, off;
 +      int ret = 0;
 +      u32 status, req, gnt, bit;
 +
 +      if (!tg3_flag(tp, ENABLE_APE))
 +              return 0;
 +
 +      switch (locknum) {
 +      case TG3_APE_LOCK_GPIO:
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
 +                      return 0;
 +      case TG3_APE_LOCK_GRC:
 +      case TG3_APE_LOCK_MEM:
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
 +              req = TG3_APE_LOCK_REQ;
 +              gnt = TG3_APE_LOCK_GRANT;
 +      } else {
 +              req = TG3_APE_PER_LOCK_REQ;
 +              gnt = TG3_APE_PER_LOCK_GRANT;
 +      }
 +
 +      off = 4 * locknum;
 +
 +      if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
 +              bit = APE_LOCK_REQ_DRIVER;
 +      else
 +              bit = 1 << tp->pci_fn;
 +
 +      tg3_ape_write32(tp, req + off, bit);
 +
 +      /* Wait for up to 1 millisecond to acquire lock. */
 +      for (i = 0; i < 100; i++) {
 +              status = tg3_ape_read32(tp, gnt + off);
 +              if (status == bit)
 +                      break;
 +              udelay(10);
 +      }
 +
 +      if (status != bit) {
 +              /* Revoke the lock request. */
 +              tg3_ape_write32(tp, gnt + off, bit);
 +              ret = -EBUSY;
 +      }
 +
 +      return ret;
 +}
 +
 +static void tg3_ape_unlock(struct tg3 *tp, int locknum)
 +{
 +      u32 gnt, bit;
 +
 +      if (!tg3_flag(tp, ENABLE_APE))
 +              return;
 +
 +      switch (locknum) {
 +      case TG3_APE_LOCK_GPIO:
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
 +                      return;
 +      case TG3_APE_LOCK_GRC:
 +      case TG3_APE_LOCK_MEM:
 +              break;
 +      default:
 +              return;
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
 +              gnt = TG3_APE_LOCK_GRANT;
 +      else
 +              gnt = TG3_APE_PER_LOCK_GRANT;
 +
 +      if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
 +              bit = APE_LOCK_GRANT_DRIVER;
 +      else
 +              bit = 1 << tp->pci_fn;
 +
 +      tg3_ape_write32(tp, gnt + 4 * locknum, bit);
 +}
 +
 +static void tg3_ape_send_event(struct tg3 *tp, u32 event)
 +{
 +      int i;
 +      u32 apedata;
 +
 +      /* NCSI does not support APE events */
 +      if (tg3_flag(tp, APE_HAS_NCSI))
 +              return;
 +
 +      apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
 +      if (apedata != APE_SEG_SIG_MAGIC)
 +              return;
 +
 +      apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
 +      if (!(apedata & APE_FW_STATUS_READY))
 +              return;
 +
 +      /* Wait for up to 1 millisecond for APE to service previous event. */
 +      for (i = 0; i < 10; i++) {
 +              if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 +                      return;
 +
 +              apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
 +
 +              if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
 +                      tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
 +                                      event | APE_EVENT_STATUS_EVENT_PENDING);
 +
 +              tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 +
 +              if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
 +                      break;
 +
 +              udelay(100);
 +      }
 +
 +      if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
 +              tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
 +}
 +
 +static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
 +{
 +      u32 event;
 +      u32 apedata;
 +
 +      if (!tg3_flag(tp, ENABLE_APE))
 +              return;
 +
 +      switch (kind) {
 +      case RESET_KIND_INIT:
 +              tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
 +                              APE_HOST_SEG_SIG_MAGIC);
 +              tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
 +                              APE_HOST_SEG_LEN_MAGIC);
 +              apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
 +              tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
 +              tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
 +                      APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
 +              tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
 +                              APE_HOST_BEHAV_NO_PHYLOCK);
 +              tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
 +                                  TG3_APE_HOST_DRVR_STATE_START);
 +
 +              event = APE_EVENT_STATUS_STATE_START;
 +              break;
 +      case RESET_KIND_SHUTDOWN:
 +              /* With the interface we are currently using,
 +               * APE does not track driver state.  Wiping
 +               * out the HOST SEGMENT SIGNATURE forces
 +               * the APE to assume OS absent status.
 +               */
 +              tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
 +
 +              if (device_may_wakeup(&tp->pdev->dev) &&
 +                  tg3_flag(tp, WOL_ENABLE)) {
 +                      tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
 +                                          TG3_APE_HOST_WOL_SPEED_AUTO);
 +                      apedata = TG3_APE_HOST_DRVR_STATE_WOL;
 +              } else
 +                      apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
 +
 +              tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
 +
 +              event = APE_EVENT_STATUS_STATE_UNLOAD;
 +              break;
 +      case RESET_KIND_SUSPEND:
 +              event = APE_EVENT_STATUS_STATE_SUSPEND;
 +              break;
 +      default:
 +              return;
 +      }
 +
 +      event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
 +
 +      tg3_ape_send_event(tp, event);
 +}
 +
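 +/* Mask chip interrupts at MISC_HOST_CTRL and write 1 to every vector's
 + * interrupt mailbox so no further interrupts are asserted.
 + */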
 +static void tg3_disable_ints(struct tg3 *tp)
 +{
 +      int i;
 +
 +      tw32(TG3PCI_MISC_HOST_CTRL,
 +           (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
 +      for (i = 0; i < tp->irq_max; i++)
 +              tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
 +}
 +
 +static void tg3_enable_ints(struct tg3 *tp)
 +{
 +      int i;
 +
 +      tp->irq_sync = 0;
 +      wmb();
 +
 +      tw32(TG3PCI_MISC_HOST_CTRL,
 +           (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
 +
 +      tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +
 +              tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
 +              if (tg3_flag(tp, 1SHOT_MSI))
 +                      tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
 +
 +              tp->coal_now |= tnapi->coal_now;
 +      }
 +
 +      /* Force an initial interrupt */
 +      if (!tg3_flag(tp, TAGGED_STATUS) &&
 +          (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
 +              tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
 +      else
 +              tw32(HOSTCC_MODE, tp->coal_now);
 +
 +      tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
 +}
 +
 +static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
 +{
 +      struct tg3 *tp = tnapi->tp;
 +      struct tg3_hw_status *sblk = tnapi->hw_status;
 +      unsigned int work_exists = 0;
 +
 +      /* check for phy events */
 +      if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
 +              if (sblk->status & SD_STATUS_LINK_CHG)
 +                      work_exists = 1;
 +      }
 +      /* check for RX/TX work to do */
 +      if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
 +          *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 +              work_exists = 1;
 +
 +      return work_exists;
 +}
 +
 +/* tg3_int_reenable
 + *  similar to tg3_enable_ints, but it accurately determines whether there
 + *  is new work pending and can return without flushing the PIO write
 + *  which reenables interrupts
 + */
 +static void tg3_int_reenable(struct tg3_napi *tnapi)
 +{
 +      struct tg3 *tp = tnapi->tp;
 +
 +      tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 +      mmiowb();
 +
 +      /* When doing tagged status, this work check is unnecessary.
 +       * The last_tag we write above tells the chip which piece of
 +       * work we've completed.
 +       */
 +      if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
 +              tw32(HOSTCC_MODE, tp->coalesce_mode |
 +                   HOSTCC_MODE_ENABLE | tnapi->coal_now);
 +}
 +
 +static void tg3_switch_clocks(struct tg3 *tp)
 +{
 +      u32 clock_ctrl;
 +      u32 orig_clock_ctrl;
 +
 +      if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
 +              return;
 +
 +      clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
 +
 +      orig_clock_ctrl = clock_ctrl;
 +      clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
 +                     CLOCK_CTRL_CLKRUN_OENABLE |
 +                     0x1f);
 +      tp->pci_clock_ctrl = clock_ctrl;
 +
 +      if (tg3_flag(tp, 5705_PLUS)) {
 +              if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
 +                      tw32_wait_f(TG3PCI_CLOCK_CTRL,
 +                                  clock_ctrl | CLOCK_CTRL_625_CORE, 40);
 +              }
 +      } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
 +              tw32_wait_f(TG3PCI_CLOCK_CTRL,
 +                          clock_ctrl |
 +                          (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
 +                          40);
 +              tw32_wait_f(TG3PCI_CLOCK_CTRL,
 +                          clock_ctrl | (CLOCK_CTRL_ALTCLK),
 +                          40);
 +      }
 +      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 +}
 +
 +#define PHY_BUSY_LOOPS        5000
 +
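 +/* Clause 22 PHY read through the MAC_MI_COM interface.  MI auto-polling
 + * is temporarily disabled around the transaction, and the busy bit is
 + * polled for up to PHY_BUSY_LOOPS iterations.
 + */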
 +static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
 +{
 +      u32 frame_val;
 +      unsigned int loops;
 +      int ret;
 +
 +      if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
 +              tw32_f(MAC_MI_MODE,
 +                   (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
 +              udelay(80);
 +      }
 +
 +      *val = 0x0;
 +
 +      frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
 +                    MI_COM_PHY_ADDR_MASK);
 +      frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
 +                    MI_COM_REG_ADDR_MASK);
 +      frame_val |= (MI_COM_CMD_READ | MI_COM_START);
 +
 +      tw32_f(MAC_MI_COM, frame_val);
 +
 +      loops = PHY_BUSY_LOOPS;
 +      while (loops != 0) {
 +              udelay(10);
 +              frame_val = tr32(MAC_MI_COM);
 +
 +              if ((frame_val & MI_COM_BUSY) == 0) {
 +                      udelay(5);
 +                      frame_val = tr32(MAC_MI_COM);
 +                      break;
 +              }
 +              loops -= 1;
 +      }
 +
 +      ret = -EBUSY;
 +      if (loops != 0) {
 +              *val = frame_val & MI_COM_DATA_MASK;
 +              ret = 0;
 +      }
 +
 +      if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
 +              tw32_f(MAC_MI_MODE, tp->mi_mode);
 +              udelay(80);
 +      }
 +
 +      return ret;
 +}
 +
 +static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
 +{
 +      u32 frame_val;
 +      unsigned int loops;
 +      int ret;
 +
 +      if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
 +          (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
 +              return 0;
 +
 +      if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
 +              tw32_f(MAC_MI_MODE,
 +                   (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
 +              udelay(80);
 +      }
 +
 +      frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
 +                    MI_COM_PHY_ADDR_MASK);
 +      frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
 +                    MI_COM_REG_ADDR_MASK);
 +      frame_val |= (val & MI_COM_DATA_MASK);
 +      frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
 +
 +      tw32_f(MAC_MI_COM, frame_val);
 +
 +      loops = PHY_BUSY_LOOPS;
 +      while (loops != 0) {
 +              udelay(10);
 +              frame_val = tr32(MAC_MI_COM);
 +              if ((frame_val & MI_COM_BUSY) == 0) {
 +                      udelay(5);
 +                      frame_val = tr32(MAC_MI_COM);
 +                      break;
 +              }
 +              loops -= 1;
 +      }
 +
 +      ret = -EBUSY;
 +      if (loops != 0)
 +              ret = 0;
 +
 +      if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
 +              tw32_f(MAC_MI_MODE, tp->mi_mode);
 +              udelay(80);
 +      }
 +
 +      return ret;
 +}
 +
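 +/* Clause 45 PHY access tunneled through the clause 22 MMD control and
 + * address/data registers.
 + */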
 +static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 +                         MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
 +
 +done:
 +      return err;
 +}
 +
 +static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 +                         MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
 +
 +done:
 +      return err;
 +}
 +
 +static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 +      if (!err)
 +              err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
 +
 +      return err;
 +}
 +
 +static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 +      if (!err)
 +              err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 +
 +      return err;
 +}
 +
 +static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
 +                         (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
 +                         MII_TG3_AUXCTL_SHDWSEL_MISC);
 +      if (!err)
 +              err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
 +
 +      return err;
 +}
 +
 +static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
 +{
 +      if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
 +              set |= MII_TG3_AUXCTL_MISC_WREN;
 +
 +      return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
 +}
 +
 +#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
 +      tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
 +                           MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
 +                           MII_TG3_AUXCTL_ACTL_TX_6DB)
 +
 +#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
 +      tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
 +                           MII_TG3_AUXCTL_ACTL_TX_6DB);
 +
 +static int tg3_bmcr_reset(struct tg3 *tp)
 +{
 +      u32 phy_control;
 +      int limit, err;
 +
 +      /* OK, reset it, and poll the BMCR_RESET bit until it
 +       * clears or we time out.
 +       */
 +      phy_control = BMCR_RESET;
 +      err = tg3_writephy(tp, MII_BMCR, phy_control);
 +      if (err != 0)
 +              return -EBUSY;
 +
 +      limit = 5000;
 +      while (limit--) {
 +              err = tg3_readphy(tp, MII_BMCR, &phy_control);
 +              if (err != 0)
 +                      return -EBUSY;
 +
 +              if ((phy_control & BMCR_RESET) == 0) {
 +                      udelay(40);
 +                      break;
 +              }
 +              udelay(10);
 +      }
 +      if (limit < 0)
 +              return -EBUSY;
 +
 +      return 0;
 +}
 +
 +static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
 +{
 +      struct tg3 *tp = bp->priv;
 +      u32 val;
 +
 +      spin_lock_bh(&tp->lock);
 +
 +      if (tg3_readphy(tp, reg, &val))
 +              val = -EIO;
 +
 +      spin_unlock_bh(&tp->lock);
 +
 +      return val;
 +}
 +
 +static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
 +{
 +      struct tg3 *tp = bp->priv;
 +      u32 ret = 0;
 +
 +      spin_lock_bh(&tp->lock);
 +
 +      if (tg3_writephy(tp, reg, val))
 +              ret = -EIO;
 +
 +      spin_unlock_bh(&tp->lock);
 +
 +      return ret;
 +}
 +
 +static int tg3_mdio_reset(struct mii_bus *bp)
 +{
 +      return 0;
 +}
 +
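 +/* Match the 5785 MAC's PHY configuration and RGMII mode registers to
 + * the LED modes and interface type of the attached PHY.
 + */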
 +static void tg3_mdio_config_5785(struct tg3 *tp)
 +{
 +      u32 val;
 +      struct phy_device *phydev;
 +
 +      phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +      switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 +      case PHY_ID_BCM50610:
 +      case PHY_ID_BCM50610M:
 +              val = MAC_PHYCFG2_50610_LED_MODES;
 +              break;
 +      case PHY_ID_BCMAC131:
 +              val = MAC_PHYCFG2_AC131_LED_MODES;
 +              break;
 +      case PHY_ID_RTL8211C:
 +              val = MAC_PHYCFG2_RTL8211C_LED_MODES;
 +              break;
 +      case PHY_ID_RTL8201E:
 +              val = MAC_PHYCFG2_RTL8201E_LED_MODES;
 +              break;
 +      default:
 +              return;
 +      }
 +
 +      if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
 +              tw32(MAC_PHYCFG2, val);
 +
 +              val = tr32(MAC_PHYCFG1);
 +              val &= ~(MAC_PHYCFG1_RGMII_INT |
 +                       MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
 +              val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
 +              tw32(MAC_PHYCFG1, val);
 +
 +              return;
 +      }
 +
 +      if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
 +              val |= MAC_PHYCFG2_EMODE_MASK_MASK |
 +                     MAC_PHYCFG2_FMODE_MASK_MASK |
 +                     MAC_PHYCFG2_GMODE_MASK_MASK |
 +                     MAC_PHYCFG2_ACT_MASK_MASK   |
 +                     MAC_PHYCFG2_QUAL_MASK_MASK |
 +                     MAC_PHYCFG2_INBAND_ENABLE;
 +
 +      tw32(MAC_PHYCFG2, val);
 +
 +      val = tr32(MAC_PHYCFG1);
 +      val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
 +               MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
 +      if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
 +              if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
 +                      val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
 +              if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
 +                      val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
 +      }
 +      val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
 +             MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
 +      tw32(MAC_PHYCFG1, val);
 +
 +      val = tr32(MAC_EXT_RGMII_MODE);
 +      val &= ~(MAC_RGMII_MODE_RX_INT_B |
 +               MAC_RGMII_MODE_RX_QUALITY |
 +               MAC_RGMII_MODE_RX_ACTIVITY |
 +               MAC_RGMII_MODE_RX_ENG_DET |
 +               MAC_RGMII_MODE_TX_ENABLE |
 +               MAC_RGMII_MODE_TX_LOWPWR |
 +               MAC_RGMII_MODE_TX_RESET);
 +      if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
 +              if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
 +                      val |= MAC_RGMII_MODE_RX_INT_B |
 +                             MAC_RGMII_MODE_RX_QUALITY |
 +                             MAC_RGMII_MODE_RX_ACTIVITY |
 +                             MAC_RGMII_MODE_RX_ENG_DET;
 +              if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
 +                      val |= MAC_RGMII_MODE_TX_ENABLE |
 +                             MAC_RGMII_MODE_TX_LOWPWR |
 +                             MAC_RGMII_MODE_TX_RESET;
 +      }
 +      tw32(MAC_EXT_RGMII_MODE, val);
 +}
 +
 +static void tg3_mdio_start(struct tg3 *tp)
 +{
 +      tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
 +      tw32_f(MAC_MI_MODE, tp->mi_mode);
 +      udelay(80);
 +
 +      if (tg3_flag(tp, MDIOBUS_INITED) &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 +              tg3_mdio_config_5785(tp);
 +}
 +
 +static int tg3_mdio_init(struct tg3 *tp)
 +{
 +      int i;
 +      u32 reg;
 +      struct phy_device *phydev;
 +
 +      if (tg3_flag(tp, 5717_PLUS)) {
 +              u32 is_serdes;
 +
 +              tp->phy_addr = tp->pci_fn + 1;
 +
 +              if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
 +                      is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
 +              else
 +                      is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
 +                                  TG3_CPMU_PHY_STRAP_IS_SERDES;
 +              if (is_serdes)
 +                      tp->phy_addr += 7;
 +      } else
 +              tp->phy_addr = TG3_PHY_MII_ADDR;
 +
 +      tg3_mdio_start(tp);
 +
 +      if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
 +              return 0;
 +
 +      tp->mdio_bus = mdiobus_alloc();
 +      if (tp->mdio_bus == NULL)
 +              return -ENOMEM;
 +
 +      tp->mdio_bus->name     = "tg3 mdio bus";
 +      snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
 +               (tp->pdev->bus->number << 8) | tp->pdev->devfn);
 +      tp->mdio_bus->priv     = tp;
 +      tp->mdio_bus->parent   = &tp->pdev->dev;
 +      tp->mdio_bus->read     = &tg3_mdio_read;
 +      tp->mdio_bus->write    = &tg3_mdio_write;
 +      tp->mdio_bus->reset    = &tg3_mdio_reset;
 +      tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
 +      tp->mdio_bus->irq      = &tp->mdio_irq[0];
 +
 +      for (i = 0; i < PHY_MAX_ADDR; i++)
 +              tp->mdio_bus->irq[i] = PHY_POLL;
 +
 +      /* The bus registration will look for all the PHYs on the mdio bus.
 +       * Unfortunately, it does not ensure the PHY is powered up before
 +       * accessing the PHY ID registers.  A chip reset is the
 +       * quickest way to bring the device back to an operational state.
 +       */
 +      if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
 +              tg3_bmcr_reset(tp);
 +
 +      i = mdiobus_register(tp->mdio_bus);
 +      if (i) {
 +              dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
 +              mdiobus_free(tp->mdio_bus);
 +              return i;
 +      }
 +
 +      phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +
 +      if (!phydev || !phydev->drv) {
 +              dev_warn(&tp->pdev->dev, "No PHY devices\n");
 +              mdiobus_unregister(tp->mdio_bus);
 +              mdiobus_free(tp->mdio_bus);
 +              return -ENODEV;
 +      }
 +
 +      switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 +      case PHY_ID_BCM57780:
 +              phydev->interface = PHY_INTERFACE_MODE_GMII;
 +              phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 +              break;
 +      case PHY_ID_BCM50610:
 +      case PHY_ID_BCM50610M:
 +              phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
 +                                   PHY_BRCM_RX_REFCLK_UNUSED |
 +                                   PHY_BRCM_DIS_TXCRXC_NOENRGY |
 +                                   PHY_BRCM_AUTO_PWRDWN_ENABLE;
 +              if (tg3_flag(tp, RGMII_INBAND_DISABLE))
 +                      phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
 +              if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
 +                      phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
 +              if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
 +                      phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
 +              /* fallthru */
 +      case PHY_ID_RTL8211C:
 +              phydev->interface = PHY_INTERFACE_MODE_RGMII;
 +              break;
 +      case PHY_ID_RTL8201E:
 +      case PHY_ID_BCMAC131:
 +              phydev->interface = PHY_INTERFACE_MODE_MII;
 +              phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 +              tp->phy_flags |= TG3_PHYFLG_IS_FET;
 +              break;
 +      }
 +
 +      tg3_flag_set(tp, MDIOBUS_INITED);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 +              tg3_mdio_config_5785(tp);
 +
 +      return 0;
 +}
 +
 +static void tg3_mdio_fini(struct tg3 *tp)
 +{
 +      if (tg3_flag(tp, MDIOBUS_INITED)) {
 +              tg3_flag_clear(tp, MDIOBUS_INITED);
 +              mdiobus_unregister(tp->mdio_bus);
 +              mdiobus_free(tp->mdio_bus);
 +      }
 +}
 +
 +/* tp->lock is held. */
 +static inline void tg3_generate_fw_event(struct tg3 *tp)
 +{
 +      u32 val;
 +
 +      val = tr32(GRC_RX_CPU_EVENT);
 +      val |= GRC_RX_CPU_DRIVER_EVENT;
 +      tw32_f(GRC_RX_CPU_EVENT, val);
 +
 +      tp->last_event_jiffies = jiffies;
 +}
 +
 +#define TG3_FW_EVENT_TIMEOUT_USEC 2500
 +
 +/* tp->lock is held. */
 +static void tg3_wait_for_event_ack(struct tg3 *tp)
 +{
 +      int i;
 +      unsigned int delay_cnt;
 +      long time_remain;
 +
 +      /* If enough time has passed, no wait is necessary. */
 +      time_remain = (long)(tp->last_event_jiffies + 1 +
 +                    usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
 +                    (long)jiffies;
 +      if (time_remain < 0)
 +              return;
 +
 +      /* Check if we can shorten the wait time. */
 +      delay_cnt = jiffies_to_usecs(time_remain);
 +      if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
 +              delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
 +      delay_cnt = (delay_cnt >> 3) + 1;
 +
 +      for (i = 0; i < delay_cnt; i++) {
 +              if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
 +                      break;
 +              udelay(8);
 +      }
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_ump_link_report(struct tg3 *tp)
 +{
 +      u32 reg;
 +      u32 val;
 +
 +      if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
 +              return;
 +
 +      tg3_wait_for_event_ack(tp);
 +
 +      tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 +
 +      tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
 +
 +      val = 0;
 +      if (!tg3_readphy(tp, MII_BMCR, &reg))
 +              val = reg << 16;
 +      if (!tg3_readphy(tp, MII_BMSR, &reg))
 +              val |= (reg & 0xffff);
 +      tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
 +
 +      val = 0;
 +      if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
 +              val = reg << 16;
 +      if (!tg3_readphy(tp, MII_LPA, &reg))
 +              val |= (reg & 0xffff);
 +      tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
 +
 +      val = 0;
 +      if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
 +              if (!tg3_readphy(tp, MII_CTRL1000, &reg))
 +                      val = reg << 16;
 +              if (!tg3_readphy(tp, MII_STAT1000, &reg))
 +                      val |= (reg & 0xffff);
 +      }
 +      tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
 +
 +      if (!tg3_readphy(tp, MII_PHYADDR, &reg))
 +              val = reg << 16;
 +      else
 +              val = 0;
 +      tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
 +
 +      tg3_generate_fw_event(tp);
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_stop_fw(struct tg3 *tp)
 +{
 +      if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
 +              /* Wait for RX cpu to ACK the previous event. */
 +              tg3_wait_for_event_ack(tp);
 +
 +              tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 +
 +              tg3_generate_fw_event(tp);
 +
 +              /* Wait for RX cpu to ACK this event. */
 +              tg3_wait_for_event_ack(tp);
 +      }
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
 +{
 +      tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
 +                    NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
 +
 +      if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
 +              switch (kind) {
 +              case RESET_KIND_INIT:
 +                      tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 +                                    DRV_STATE_START);
 +                      break;
 +
 +              case RESET_KIND_SHUTDOWN:
 +                      tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 +                                    DRV_STATE_UNLOAD);
 +                      break;
 +
 +              case RESET_KIND_SUSPEND:
 +                      tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 +                                    DRV_STATE_SUSPEND);
 +                      break;
 +
 +              default:
 +                      break;
 +              }
 +      }
 +
 +      if (kind == RESET_KIND_INIT ||
 +          kind == RESET_KIND_SUSPEND)
 +              tg3_ape_driver_state_change(tp, kind);
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
 +{
 +      if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
 +              switch (kind) {
 +              case RESET_KIND_INIT:
 +                      tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 +                                    DRV_STATE_START_DONE);
 +                      break;
 +
 +              case RESET_KIND_SHUTDOWN:
 +                      tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 +                                    DRV_STATE_UNLOAD_DONE);
 +                      break;
 +
 +              default:
 +                      break;
 +              }
 +      }
 +
 +      if (kind == RESET_KIND_SHUTDOWN)
 +              tg3_ape_driver_state_change(tp, kind);
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
 +{
 +      if (tg3_flag(tp, ENABLE_ASF)) {
 +              switch (kind) {
 +              case RESET_KIND_INIT:
 +                      tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 +                                    DRV_STATE_START);
 +                      break;
 +
 +              case RESET_KIND_SHUTDOWN:
 +                      tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 +                                    DRV_STATE_UNLOAD);
 +                      break;
 +
 +              case RESET_KIND_SUSPEND:
 +                      tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 +                                    DRV_STATE_SUSPEND);
 +                      break;
 +
 +              default:
 +                      break;
 +              }
 +      }
 +}
 +
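 +/* Wait for bootcode initialization to finish: 5906 parts signal this in
 + * VCPU_STATUS, all others by inverting the firmware mailbox magic value.
 + */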
 +static int tg3_poll_fw(struct tg3 *tp)
 +{
 +      int i;
 +      u32 val;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              /* Wait up to 20ms for init done. */
 +              for (i = 0; i < 200; i++) {
 +                      if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
 +                              return 0;
 +                      udelay(100);
 +              }
 +              return -ENODEV;
 +      }
 +
 +      /* Wait for firmware initialization to complete. */
 +      for (i = 0; i < 100000; i++) {
 +              tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 +              if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 +                      break;
 +              udelay(10);
 +      }
 +
 +      /* Chip might not be fitted with firmware.  Some Sun onboard
 +       * parts are configured like that.  So don't signal the timeout
 +       * of the above loop as an error, but do report the lack of
 +       * running firmware once.
 +       */
 +      if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
 +              tg3_flag_set(tp, NO_FWARE_REPORTED);
 +
 +              netdev_info(tp->dev, "No firmware running\n");
 +      }
 +
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
 +              /* The 57765 A0 needs a little more
 +               * time to do some important work.
 +               */
 +              mdelay(10);
 +      }
 +
 +      return 0;
 +}
 +
 +static void tg3_link_report(struct tg3 *tp)
 +{
 +      if (!netif_carrier_ok(tp->dev)) {
 +              netif_info(tp, link, tp->dev, "Link is down\n");
 +              tg3_ump_link_report(tp);
 +      } else if (netif_msg_link(tp)) {
 +              netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
 +                          (tp->link_config.active_speed == SPEED_1000 ?
 +                           1000 :
 +                           (tp->link_config.active_speed == SPEED_100 ?
 +                            100 : 10)),
 +                          (tp->link_config.active_duplex == DUPLEX_FULL ?
 +                           "full" : "half"));
 +
 +              netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
 +                          (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
 +                          "on" : "off",
 +                          (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
 +                          "on" : "off");
 +
 +              if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
 +                      netdev_info(tp->dev, "EEE is %s\n",
 +                                  tp->setlpicnt ? "enabled" : "disabled");
 +
 +              tg3_ump_link_report(tp);
 +      }
 +}
 +
 +static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
 +{
 +      u16 miireg;
 +
 +      if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
 +              miireg = ADVERTISE_PAUSE_CAP;
 +      else if (flow_ctrl & FLOW_CTRL_TX)
 +              miireg = ADVERTISE_PAUSE_ASYM;
 +      else if (flow_ctrl & FLOW_CTRL_RX)
 +              miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 +      else
 +              miireg = 0;
 +
 +      return miireg;
 +}
 +
 +static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
 +{
 +      u16 miireg;
 +
 +      if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
 +              miireg = ADVERTISE_1000XPAUSE;
 +      else if (flow_ctrl & FLOW_CTRL_TX)
 +              miireg = ADVERTISE_1000XPSE_ASYM;
 +      else if (flow_ctrl & FLOW_CTRL_RX)
 +              miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
 +      else
 +              miireg = 0;
 +
 +      return miireg;
 +}
 +
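 +/* Resolve local and remote 1000BASE-X pause advertisements into the
 + * FLOW_CTRL_TX/FLOW_CTRL_RX capabilities this end should honor.
 + */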
 +static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
 +{
 +      u8 cap = 0;
 +
 +      if (lcladv & ADVERTISE_1000XPAUSE) {
 +              if (lcladv & ADVERTISE_1000XPSE_ASYM) {
 +                      if (rmtadv & LPA_1000XPAUSE)
 +                              cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
 +                      else if (rmtadv & LPA_1000XPAUSE_ASYM)
 +                              cap = FLOW_CTRL_RX;
 +              } else {
 +                      if (rmtadv & LPA_1000XPAUSE)
 +                              cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
 +              }
 +      } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
 +              if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
 +                      cap = FLOW_CTRL_TX;
 +      }
 +
 +      return cap;
 +}
 +
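 +/* Apply the resolved (or forced) pause configuration to the MAC by
 + * toggling the RX_MODE/TX_MODE flow-control enable bits.
 + */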
 +static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
 +{
 +      u8 autoneg;
 +      u8 flowctrl = 0;
 +      u32 old_rx_mode = tp->rx_mode;
 +      u32 old_tx_mode = tp->tx_mode;
 +
 +      if (tg3_flag(tp, USE_PHYLIB))
 +              autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
 +      else
 +              autoneg = tp->link_config.autoneg;
 +
 +      if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
 +              if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 +                      flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
 +              else
 +                      flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
 +      } else
 +              flowctrl = tp->link_config.flowctrl;
 +
 +      tp->link_config.active_flowctrl = flowctrl;
 +
 +      if (flowctrl & FLOW_CTRL_RX)
 +              tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
 +      else
 +              tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
 +
 +      if (old_rx_mode != tp->rx_mode)
 +              tw32_f(MAC_RX_MODE, tp->rx_mode);
 +
 +      if (flowctrl & FLOW_CTRL_TX)
 +              tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
 +      else
 +              tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
 +
 +      if (old_tx_mode != tp->tx_mode)
 +              tw32_f(MAC_TX_MODE, tp->tx_mode);
 +}
 +
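 +/* phylib link-change callback: update MAC_MODE, MI status polling and
 + * TX slot-time settings for the new speed/duplex, then report the link
 + * state if anything changed.
 + */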
 +static void tg3_adjust_link(struct net_device *dev)
 +{
 +      u8 oldflowctrl, linkmesg = 0;
 +      u32 mac_mode, lcl_adv, rmt_adv;
 +      struct tg3 *tp = netdev_priv(dev);
 +      struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +
 +      spin_lock_bh(&tp->lock);
 +
 +      mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
 +                                  MAC_MODE_HALF_DUPLEX);
 +
 +      oldflowctrl = tp->link_config.active_flowctrl;
 +
 +      if (phydev->link) {
 +              lcl_adv = 0;
 +              rmt_adv = 0;
 +
 +              if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
 +                      mac_mode |= MAC_MODE_PORT_MODE_MII;
 +              else if (phydev->speed == SPEED_1000 ||
 +                       GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
 +                      mac_mode |= MAC_MODE_PORT_MODE_GMII;
 +              else
 +                      mac_mode |= MAC_MODE_PORT_MODE_MII;
 +
 +              if (phydev->duplex == DUPLEX_HALF)
 +                      mac_mode |= MAC_MODE_HALF_DUPLEX;
 +              else {
 +                      lcl_adv = tg3_advert_flowctrl_1000T(
 +                                tp->link_config.flowctrl);
 +
 +                      if (phydev->pause)
 +                              rmt_adv = LPA_PAUSE_CAP;
 +                      if (phydev->asym_pause)
 +                              rmt_adv |= LPA_PAUSE_ASYM;
 +              }
 +
 +              tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
 +      } else
 +              mac_mode |= MAC_MODE_PORT_MODE_GMII;
 +
 +      if (mac_mode != tp->mac_mode) {
 +              tp->mac_mode = mac_mode;
 +              tw32_f(MAC_MODE, tp->mac_mode);
 +              udelay(40);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
 +              if (phydev->speed == SPEED_10)
 +                      tw32(MAC_MI_STAT,
 +                           MAC_MI_STAT_10MBPS_MODE |
 +                           MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
 +              else
 +                      tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
 +      }
 +
 +      if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
 +              tw32(MAC_TX_LENGTHS,
 +                   ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 +                    (6 << TX_LENGTHS_IPG_SHIFT) |
 +                    (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
 +      else
 +              tw32(MAC_TX_LENGTHS,
 +                   ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 +                    (6 << TX_LENGTHS_IPG_SHIFT) |
 +                    (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
 +
 +      if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
 +          (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
 +          phydev->speed != tp->link_config.active_speed ||
 +          phydev->duplex != tp->link_config.active_duplex ||
 +          oldflowctrl != tp->link_config.active_flowctrl)
 +              linkmesg = 1;
 +
 +      tp->link_config.active_speed = phydev->speed;
 +      tp->link_config.active_duplex = phydev->duplex;
 +
 +      spin_unlock_bh(&tp->lock);
 +
 +      if (linkmesg)
 +              tg3_link_report(tp);
 +}
 +
 +static int tg3_phy_init(struct tg3 *tp)
 +{
 +      struct phy_device *phydev;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
 +              return 0;
 +
 +      /* Bring the PHY back to a known state. */
 +      tg3_bmcr_reset(tp);
 +
 +      phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +
 +      /* Attach the MAC to the PHY. */
 +      phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
 +                           phydev->dev_flags, phydev->interface);
 +      if (IS_ERR(phydev)) {
 +              dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
 +              return PTR_ERR(phydev);
 +      }
 +
 +      /* Mask with MAC supported features. */
 +      switch (phydev->interface) {
 +      case PHY_INTERFACE_MODE_GMII:
 +      case PHY_INTERFACE_MODE_RGMII:
 +              if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 +                      phydev->supported &= (PHY_GBIT_FEATURES |
 +                                            SUPPORTED_Pause |
 +                                            SUPPORTED_Asym_Pause);
 +                      break;
 +              }
 +              /* fallthru */
 +      case PHY_INTERFACE_MODE_MII:
 +              phydev->supported &= (PHY_BASIC_FEATURES |
 +                                    SUPPORTED_Pause |
 +                                    SUPPORTED_Asym_Pause);
 +              break;
 +      default:
 +              phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 +              return -EINVAL;
 +      }
 +
 +      tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
 +
 +      phydev->advertising = phydev->supported;
 +
 +      return 0;
 +}
 +
 +static void tg3_phy_start(struct tg3 *tp)
 +{
 +      struct phy_device *phydev;
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 +              return;
 +
 +      phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
 +              tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
 +              phydev->speed = tp->link_config.orig_speed;
 +              phydev->duplex = tp->link_config.orig_duplex;
 +              phydev->autoneg = tp->link_config.orig_autoneg;
 +              phydev->advertising = tp->link_config.orig_advertising;
 +      }
 +
 +      phy_start(phydev);
 +
 +      phy_start_aneg(phydev);
 +}
 +
 +static void tg3_phy_stop(struct tg3 *tp)
 +{
 +      if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 +              return;
 +
 +      phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 +}
 +
 +static void tg3_phy_fini(struct tg3 *tp)
 +{
 +      if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 +              phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 +              tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
 +      }
 +}
 +
 +static int tg3_phy_set_extloopbk(struct tg3 *tp)
 +{
 +      int err;
 +      u32 val;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 +              return 0;
 +
 +      if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 +              /* Cannot do read-modify-write on 5401 */
 +              err = tg3_phy_auxctl_write(tp,
 +                                         MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
 +                                         MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
 +                                         0x4c20);
 +              goto done;
 +      }
 +
 +      err = tg3_phy_auxctl_read(tp,
 +                                MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
 +      if (err)
 +              return err;
 +
 +      val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
 +      err = tg3_phy_auxctl_write(tp,
 +                                 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
 +
 +done:
 +      return err;
 +}
 +
 +static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
 +{
 +      u32 phytest;
 +
 +      if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
 +              u32 phy;
 +
 +              tg3_writephy(tp, MII_TG3_FET_TEST,
 +                           phytest | MII_TG3_FET_SHADOW_EN);
 +              if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
 +                      if (enable)
 +                              phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
 +                      else
 +                              phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
 +                      tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
 +              }
 +              tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
 +      }
 +}
 +
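 +/* Enable or disable the PHY auto power-down (APD) feature.  FET-style
 + * PHYs use the shadowed AUXSTAT2 register; other PHYs go through the
 + * MISC_SHDW SCR5/APD selects.
 + */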
 +static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
 +{
 +      u32 reg;
 +
 +      if (!tg3_flag(tp, 5705_PLUS) ||
 +          (tg3_flag(tp, 5717_PLUS) &&
 +           (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
 +              return;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 +              tg3_phy_fet_toggle_apd(tp, enable);
 +              return;
 +      }
 +
 +      reg = MII_TG3_MISC_SHDW_WREN |
 +            MII_TG3_MISC_SHDW_SCR5_SEL |
 +            MII_TG3_MISC_SHDW_SCR5_LPED |
 +            MII_TG3_MISC_SHDW_SCR5_DLPTLM |
 +            MII_TG3_MISC_SHDW_SCR5_SDTL |
 +            MII_TG3_MISC_SHDW_SCR5_C125OE;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
 +              reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
 +
 +      tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
 +
 +      reg = MII_TG3_MISC_SHDW_WREN |
 +            MII_TG3_MISC_SHDW_APD_SEL |
 +            MII_TG3_MISC_SHDW_APD_WKTM_84MS;
 +      if (enable)
 +              reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
 +
 +      tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
 +}
 +
 +static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
 +{
 +      u32 phy;
 +
 +      if (!tg3_flag(tp, 5705_PLUS) ||
 +          (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 +              return;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 +              u32 ephy;
 +
 +              if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
 +                      u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
 +
 +                      tg3_writephy(tp, MII_TG3_FET_TEST,
 +                                   ephy | MII_TG3_FET_SHADOW_EN);
 +                      if (!tg3_readphy(tp, reg, &phy)) {
 +                              if (enable)
 +                                      phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
 +                              else
 +                                      phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
 +                              tg3_writephy(tp, reg, phy);
 +                      }
 +                      tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
 +              }
 +      } else {
 +              int ret;
 +
 +              ret = tg3_phy_auxctl_read(tp,
 +                                        MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
 +              if (!ret) {
 +                      if (enable)
 +                              phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
 +                      else
 +                              phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
 +                      tg3_phy_auxctl_write(tp,
 +                                           MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
 +              }
 +      }
 +}
 +
 +static void tg3_phy_set_wirespeed(struct tg3 *tp)
 +{
 +      int ret;
 +      u32 val;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
 +              return;
 +
 +      ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
 +      if (!ret)
 +              tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
 +                                   val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
 +}
 +
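 +/* Load analog/DSP trim values from the chip's OTP (one-time
 + * programmable) word, if present, into the PHY DSP registers.
 + */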
 +static void tg3_phy_apply_otp(struct tg3 *tp)
 +{
 +      u32 otp, phy;
 +
 +      if (!tp->phy_otp)
 +              return;
 +
 +      otp = tp->phy_otp;
 +
 +      if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
 +              return;
 +
 +      phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
 +      phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
 +      tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
 +
 +      phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
 +            ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
 +      tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
 +
 +      phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
 +      phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
 +      tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
 +
 +      phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
 +      tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
 +
 +      phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
 +      tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
 +
 +      phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
 +            ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
 +      tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
 +
 +      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +}
 +
 +static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
 +{
 +      u32 val;
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 +              return;
 +
 +      tp->setlpicnt = 0;
 +
 +      if (tp->link_config.autoneg == AUTONEG_ENABLE &&
 +          current_link_up == 1 &&
 +          tp->link_config.active_duplex == DUPLEX_FULL &&
 +          (tp->link_config.active_speed == SPEED_100 ||
 +           tp->link_config.active_speed == SPEED_1000)) {
 +              u32 eeectl;
 +
 +              if (tp->link_config.active_speed == SPEED_1000)
 +                      eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
 +              else
 +                      eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
 +
 +              tw32(TG3_CPMU_EEE_CTRL, eeectl);
 +
 +              tg3_phy_cl45_read(tp, MDIO_MMD_AN,
 +                                TG3_CL45_D7_EEERES_STAT, &val);
 +
 +              if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
 +                  val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
 +                      tp->setlpicnt = 2;
 +      }
 +
 +      if (!tp->setlpicnt) {
 +              if (current_link_up == 1 &&
 +                 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 +                      tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
 +                      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +              }
 +
 +              val = tr32(TG3_CPMU_EEE_MODE);
 +              tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
 +      }
 +}
 +
 +static void tg3_phy_eee_enable(struct tg3 *tp)
 +{
 +      u32 val;
 +
 +      if (tp->link_config.active_speed == SPEED_1000 &&
 +          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
 +          !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 +              val = MII_TG3_DSP_TAP26_ALNOKO |
 +                    MII_TG3_DSP_TAP26_RMRXSTO;
 +              tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
 +              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +      }
 +
 +      val = tr32(TG3_CPMU_EEE_MODE);
 +      tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
 +}
 +
 +static int tg3_wait_macro_done(struct tg3 *tp)
 +{
 +      int limit = 100;
 +
 +      while (limit--) {
 +              u32 tmp32;
 +
 +              if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
 +                      if ((tmp32 & 0x1000) == 0)
 +                              break;
 +              }
 +      }
 +      if (limit < 0)
 +              return -EBUSY;
 +
 +      return 0;
 +}
 +
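 +/* Write a known test pattern to each of the four DSP channels and read
 + * it back.  A mismatch or macro timeout sets *resetp so the caller can
 + * reset the PHY and retry.
 + */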
 +static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
 +{
 +      static const u32 test_pat[4][6] = {
 +      { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
 +      { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
 +      { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
 +      { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
 +      };
 +      int chan;
 +
 +      for (chan = 0; chan < 4; chan++) {
 +              int i;
 +
 +              tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 +                           (chan * 0x2000) | 0x0200);
 +              tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 +
 +              for (i = 0; i < 6; i++)
 +                      tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
 +                                   test_pat[chan][i]);
 +
 +              tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 +              if (tg3_wait_macro_done(tp)) {
 +                      *resetp = 1;
 +                      return -EBUSY;
 +              }
 +
 +              tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 +                           (chan * 0x2000) | 0x0200);
 +              tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
 +              if (tg3_wait_macro_done(tp)) {
 +                      *resetp = 1;
 +                      return -EBUSY;
 +              }
 +
 +              tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
 +              if (tg3_wait_macro_done(tp)) {
 +                      *resetp = 1;
 +                      return -EBUSY;
 +              }
 +
 +              for (i = 0; i < 6; i += 2) {
 +                      u32 low, high;
 +
 +                      if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
 +                          tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
 +                          tg3_wait_macro_done(tp)) {
 +                              *resetp = 1;
 +                              return -EBUSY;
 +                      }
 +                      low &= 0x7fff;
 +                      high &= 0x000f;
 +                      if (low != test_pat[chan][i] ||
 +                          high != test_pat[chan][i+1]) {
 +                              tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
 +                              tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
 +                              tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
 +
 +                              return -EBUSY;
 +                      }
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +static int tg3_phy_reset_chanpat(struct tg3 *tp)
 +{
 +      int chan;
 +
 +      for (chan = 0; chan < 4; chan++) {
 +              int i;
 +
 +              tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 +                           (chan * 0x2000) | 0x0200);
 +              tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 +              for (i = 0; i < 6; i++)
 +                      tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
 +              tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 +              if (tg3_wait_macro_done(tp))
 +                      return -EBUSY;
 +      }
 +
 +      return 0;
 +}
 +
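 +/* PHY reset workaround for 5703/5704/5705: force 1000BASE-T
 + * full-duplex master mode, then write and verify the DSP test pattern,
 + * redoing the BMCR reset (up to 10 tries) until the pattern sticks.
 + */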
 +static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 +{
 +      u32 reg32, phy9_orig;
 +      int retries, do_phy_reset, err;
 +
 +      retries = 10;
 +      do_phy_reset = 1;
 +      do {
 +              if (do_phy_reset) {
 +                      err = tg3_bmcr_reset(tp);
 +                      if (err)
 +                              return err;
 +                      do_phy_reset = 0;
 +              }
 +
 +              /* Disable transmitter and interrupt.  */
 +              if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
 +                      continue;
 +
 +              reg32 |= 0x3000;
 +              tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
 +
 +              /* Set full-duplex, 1000 mbps.  */
 +              tg3_writephy(tp, MII_BMCR,
 +                           BMCR_FULLDPLX | BMCR_SPEED1000);
 +
 +              /* Set to master mode.  */
 +              if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
 +                      continue;
 +
 +              tg3_writephy(tp, MII_CTRL1000,
 +                           CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
 +
 +              err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
 +              if (err)
 +                      return err;
 +
 +              /* Block the PHY control access.  */
 +              tg3_phydsp_write(tp, 0x8005, 0x0800);
 +
 +              err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
 +              if (!err)
 +                      break;
 +      } while (--retries);
 +
 +      err = tg3_phy_reset_chanpat(tp);
 +      if (err)
 +              return err;
 +
 +      tg3_phydsp_write(tp, 0x8005, 0x0000);
 +
 +      tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
 +      tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
 +
 +      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +
 +      tg3_writephy(tp, MII_CTRL1000, phy9_orig);
 +
 +      if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
 +              reg32 &= ~0x3000;
 +              tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
 +      } else if (!err)
 +              err = -EBUSY;
 +
 +      return err;
 +}
 +
 +/* Reset the tigon3 PHY and re-apply the chip-specific PHY workarounds.
 + * 5703/5704/5705 need the extra reset sequence in
 + * tg3_phy_reset_5703_4_5().
 + */
 +static int tg3_phy_reset(struct tg3 *tp)
 +{
 +      u32 val, cpmuctrl;
 +      int err;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              val = tr32(GRC_MISC_CFG);
 +              tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
 +              udelay(40);
 +      }
 +      err  = tg3_readphy(tp, MII_BMSR, &val);
 +      err |= tg3_readphy(tp, MII_BMSR, &val);
 +      if (err != 0)
 +              return -EBUSY;
 +
 +      if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
 +              netif_carrier_off(tp->dev);
 +              tg3_link_report(tp);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 +              err = tg3_phy_reset_5703_4_5(tp);
 +              if (err)
 +                      return err;
 +              goto out;
 +      }
 +
 +      cpmuctrl = 0;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 +          GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
 +              cpmuctrl = tr32(TG3_CPMU_CTRL);
 +              if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
 +                      tw32(TG3_CPMU_CTRL,
 +                           cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
 +      }
 +
 +      err = tg3_bmcr_reset(tp);
 +      if (err)
 +              return err;
 +
 +      if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
 +              val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
 +              tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
 +
 +              tw32(TG3_CPMU_CTRL, cpmuctrl);
 +      }
 +
 +      if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
 +          GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
 +              val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
 +              if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
 +                  CPMU_LSPD_1000MB_MACCLK_12_5) {
 +                      val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
 +                      udelay(40);
 +                      tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
 +              }
 +      }
 +
 +      if (tg3_flag(tp, 5717_PLUS) &&
 +          (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
 +              return 0;
 +
 +      tg3_phy_apply_otp(tp);
 +
 +      if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
 +              tg3_phy_toggle_apd(tp, true);
 +      else
 +              tg3_phy_toggle_apd(tp, false);
 +
 +out:
 +      if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
 +          !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 +              tg3_phydsp_write(tp, 0x201f, 0x2aaa);
 +              tg3_phydsp_write(tp, 0x000a, 0x0323);
 +              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +      }
 +
 +      if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
 +              tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 +              tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 +      }
 +
 +      if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
 +              if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 +                      tg3_phydsp_write(tp, 0x000a, 0x310b);
 +                      tg3_phydsp_write(tp, 0x201f, 0x9506);
 +                      tg3_phydsp_write(tp, 0x401f, 0x14e2);
 +                      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +              }
 +      } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
 +              if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 +                      tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
 +                      if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
 +                              tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
 +                              tg3_writephy(tp, MII_TG3_TEST1,
 +                                           MII_TG3_TEST1_TRIM_EN | 0x4);
 +                      } else
 +                              tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
 +
 +                      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +              }
 +      }
 +
 +      /* Set Extended packet length bit (bit 14) on all chips that
 +       * support jumbo frames.
 +       */
 +      if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 +              /* Cannot do read-modify-write on 5401 */
 +              tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
 +      } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
 +              /* Set bit 14 with read-modify-write to preserve other bits */
 +              err = tg3_phy_auxctl_read(tp,
 +                                        MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
 +              if (!err)
 +                      tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
 +                                         val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
 +      }
 +
 +      /* Set phy register 0x10 bit 0 to high fifo elasticity to support
 +       * jumbo frames transmission.
 +       */
 +      if (tg3_flag(tp, JUMBO_CAPABLE)) {
 +              if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
 +                      tg3_writephy(tp, MII_TG3_EXT_CTRL,
 +                                   val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              /* adjust output voltage */
 +              tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
 +      }
 +
 +      tg3_phy_toggle_automdix(tp, 1);
 +      tg3_phy_set_wirespeed(tp);
 +      return 0;
 +}
 +
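 +/* Power-source handshake bits shared between PCI functions.  Each
 + * function owns a 4-bit slot in the status word (kept in the APE GPIO
 + * message register or TG3_CPMU_DRV_STATUS, depending on the chip); see
 + * tg3_set_function_status().
 + */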
 +#define TG3_GPIO_MSG_DRVR_PRES                 0x00000001
 +#define TG3_GPIO_MSG_NEED_VAUX                 0x00000002
 +#define TG3_GPIO_MSG_MASK              (TG3_GPIO_MSG_DRVR_PRES | \
 +                                        TG3_GPIO_MSG_NEED_VAUX)
 +#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
 +      ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
 +       (TG3_GPIO_MSG_DRVR_PRES << 4) | \
 +       (TG3_GPIO_MSG_DRVR_PRES << 8) | \
 +       (TG3_GPIO_MSG_DRVR_PRES << 12))
 +
 +#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
 +      ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
 +       (TG3_GPIO_MSG_NEED_VAUX << 4) | \
 +       (TG3_GPIO_MSG_NEED_VAUX << 8) | \
 +       (TG3_GPIO_MSG_NEED_VAUX << 12))
 +
 +static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
 +{
 +      u32 status, shift;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +              status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
 +      else
 +              status = tr32(TG3_CPMU_DRV_STATUS);
 +
 +      shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
 +      status &= ~(TG3_GPIO_MSG_MASK << shift);
 +      status |= (newstat << shift);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +              tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
 +      else
 +              tw32(TG3_CPMU_DRV_STATUS, status);
 +
 +      return status >> TG3_APE_GPIO_MSG_SHIFT;
 +}
 +
 +static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
 +{
 +      if (!tg3_flag(tp, IS_NIC))
 +              return 0;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +              if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
 +                      return -EIO;
 +
 +              tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
 +
 +              tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 +                          TG3_GRC_LCLCTL_PWRSW_DELAY);
 +
 +              tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
 +      } else {
 +              tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 +                          TG3_GRC_LCLCTL_PWRSW_DELAY);
 +      }
 +
 +      return 0;
 +}
 +
 +static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
 +{
 +      u32 grc_local_ctrl;
 +
 +      if (!tg3_flag(tp, IS_NIC) ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
 +              return;
 +
 +      grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
 +
 +      tw32_wait_f(GRC_LOCAL_CTRL,
 +                  grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
 +                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 +
 +      tw32_wait_f(GRC_LOCAL_CTRL,
 +                  grc_local_ctrl,
 +                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 +
 +      tw32_wait_f(GRC_LOCAL_CTRL,
 +                  grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
 +                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 +}
 +
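 +/* Drive the GRC local-control GPIOs so the NIC draws power from the
 + * auxiliary (Vaux) source.  The exact GPIO sequence is chip dependent:
 + * the 5761 swaps GPIO 0 and 2, and 5753-class parts cannot use GPIO2.
 + */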
 +static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
 +{
 +      if (!tg3_flag(tp, IS_NIC))
 +              return;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 +              tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
 +                          (GRC_LCLCTRL_GPIO_OE0 |
 +                           GRC_LCLCTRL_GPIO_OE1 |
 +                           GRC_LCLCTRL_GPIO_OE2 |
 +                           GRC_LCLCTRL_GPIO_OUTPUT0 |
 +                           GRC_LCLCTRL_GPIO_OUTPUT1),
 +                          TG3_GRC_LCLCTL_PWRSW_DELAY);
 +      } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
 +                 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
 +              /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
 +              u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
 +                                   GRC_LCLCTRL_GPIO_OE1 |
 +                                   GRC_LCLCTRL_GPIO_OE2 |
 +                                   GRC_LCLCTRL_GPIO_OUTPUT0 |
 +                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
 +                                   tp->grc_local_ctrl;
 +              tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
 +                          TG3_GRC_LCLCTL_PWRSW_DELAY);
 +
 +              grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
 +              tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
 +                          TG3_GRC_LCLCTL_PWRSW_DELAY);
 +
 +              grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
 +              tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
 +                          TG3_GRC_LCLCTL_PWRSW_DELAY);
 +      } else {
 +              u32 no_gpio2;
 +              u32 grc_local_ctrl = 0;
 +
 +              /* Workaround to prevent overdrawing Amps. */
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 +                      grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
 +                      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
 +                                  grc_local_ctrl,
 +                                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 +              }
 +
 +              /* On 5753 and variants, GPIO2 cannot be used. */
 +              no_gpio2 = tp->nic_sram_data_cfg &
 +                         NIC_SRAM_DATA_CFG_NO_GPIO2;
 +
 +              grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
 +                                GRC_LCLCTRL_GPIO_OE1 |
 +                                GRC_LCLCTRL_GPIO_OE2 |
 +                                GRC_LCLCTRL_GPIO_OUTPUT1 |
 +                                GRC_LCLCTRL_GPIO_OUTPUT2;
 +              if (no_gpio2) {
 +                      grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
 +                                          GRC_LCLCTRL_GPIO_OUTPUT2);
 +              }
 +              tw32_wait_f(GRC_LOCAL_CTRL,
 +                          tp->grc_local_ctrl | grc_local_ctrl,
 +                          TG3_GRC_LCLCTL_PWRSW_DELAY);
 +
 +              grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
 +
 +              tw32_wait_f(GRC_LOCAL_CTRL,
 +                          tp->grc_local_ctrl | grc_local_ctrl,
 +                          TG3_GRC_LCLCTL_PWRSW_DELAY);
 +
 +              if (!no_gpio2) {
 +                      grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
 +                      tw32_wait_f(GRC_LOCAL_CTRL,
 +                                  tp->grc_local_ctrl | grc_local_ctrl,
 +                                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 +              }
 +      }
 +}
 +
 +static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
 +{
 +      u32 msg = 0;
 +
 +      /* Serialize power state transitions */
 +      if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
 +              return;
 +
 +      if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
 +              msg = TG3_GPIO_MSG_NEED_VAUX;
 +
 +      msg = tg3_set_function_status(tp, msg);
 +
 +      if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
 +              goto done;
 +
 +      if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
 +              tg3_pwrsrc_switch_to_vaux(tp);
 +      else
 +              tg3_pwrsrc_die_with_vmain(tp);
 +
 +done:
 +      tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
 +}
 +
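 +/* Decide whether this port (or its peer on dual-port boards) still
 + * needs Vaux for WOL/ASF and switch the power source accordingly.
 + */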
 +static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
 +{
 +      bool need_vaux = false;
 +
 +      /* The GPIOs do something completely different on 57765. */
 +      if (!tg3_flag(tp, IS_NIC) ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +              return;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +              tg3_frob_aux_power_5717(tp, include_wol ?
 +                                      tg3_flag(tp, WOL_ENABLE) != 0 : 0);
 +              return;
 +      }
 +
 +      if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
 +              struct net_device *dev_peer;
 +
 +              dev_peer = pci_get_drvdata(tp->pdev_peer);
 +
 +              /* remove_one() may have been run on the peer. */
 +              if (dev_peer) {
 +                      struct tg3 *tp_peer = netdev_priv(dev_peer);
 +
 +                      if (tg3_flag(tp_peer, INIT_COMPLETE))
 +                              return;
 +
 +                      if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
 +                          tg3_flag(tp_peer, ENABLE_ASF))
 +                              need_vaux = true;
 +              }
 +      }
 +
 +      if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
 +          tg3_flag(tp, ENABLE_ASF))
 +              need_vaux = true;
 +
 +      if (need_vaux)
 +              tg3_pwrsrc_switch_to_vaux(tp);
 +      else
 +              tg3_pwrsrc_die_with_vmain(tp);
 +}
 +
 +static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
 +{
 +      if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
 +              return 1;
 +      else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
 +              if (speed != SPEED_10)
 +                      return 1;
 +      } else if (speed == SPEED_10)
 +              return 1;
 +
 +      return 0;
 +}
 +
 +static int tg3_setup_phy(struct tg3 *, int);
 +static int tg3_halt_cpu(struct tg3 *, u32);
 +
 +static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 +{
 +      u32 val;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 +                      u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
 +                      u32 serdes_cfg = tr32(MAC_SERDES_CFG);
 +
 +                      sg_dig_ctrl |=
 +                              SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
 +                      tw32(SG_DIG_CTRL, sg_dig_ctrl);
 +                      tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
 +              }
 +              return;
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              tg3_bmcr_reset(tp);
 +              val = tr32(GRC_MISC_CFG);
 +              tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
 +              udelay(40);
 +              return;
 +      } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 +              u32 phytest;
 +              if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
 +                      u32 phy;
 +
 +                      tg3_writephy(tp, MII_ADVERTISE, 0);
 +                      tg3_writephy(tp, MII_BMCR,
 +                                   BMCR_ANENABLE | BMCR_ANRESTART);
 +
 +                      tg3_writephy(tp, MII_TG3_FET_TEST,
 +                                   phytest | MII_TG3_FET_SHADOW_EN);
 +                      if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
 +                              phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
 +                              tg3_writephy(tp,
 +                                           MII_TG3_FET_SHDW_AUXMODE4,
 +                                           phy);
 +                      }
 +                      tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
 +              }
 +              return;
 +      } else if (do_low_power) {
 +              tg3_writephy(tp, MII_TG3_EXT_CTRL,
 +                           MII_TG3_EXT_CTRL_FORCE_LED_OFF);
 +
 +              val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 +                    MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
 +                    MII_TG3_AUXCTL_PCTL_VREG_11V;
 +              tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
 +      }
 +
 +      /* The PHY should not be powered down on some chips because
 +       * of bugs.
 +       */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 +          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
 +           (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
 +              return;
 +
 +      if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
 +          GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
 +              val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
 +              val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
 +              val |= CPMU_LSPD_1000MB_MACCLK_12_5;
 +              tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
 +      }
 +
 +      tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
 +}
 +
 +/* tp->lock is held. */
 +static int tg3_nvram_lock(struct tg3 *tp)
 +{
 +      if (tg3_flag(tp, NVRAM)) {
 +              int i;
 +
 +              if (tp->nvram_lock_cnt == 0) {
 +                      tw32(NVRAM_SWARB, SWARB_REQ_SET1);
 +                      for (i = 0; i < 8000; i++) {
 +                              if (tr32(NVRAM_SWARB) & SWARB_GNT1)
 +                                      break;
 +                              udelay(20);
 +                      }
 +                      if (i == 8000) {
 +                              tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
 +                              return -ENODEV;
 +                      }
 +              }
 +              tp->nvram_lock_cnt++;
 +      }
 +      return 0;
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_nvram_unlock(struct tg3 *tp)
 +{
 +      if (tg3_flag(tp, NVRAM)) {
 +              if (tp->nvram_lock_cnt > 0)
 +                      tp->nvram_lock_cnt--;
 +              if (tp->nvram_lock_cnt == 0)
 +                      tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
 +      }
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_enable_nvram_access(struct tg3 *tp)
 +{
 +      if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
 +              u32 nvaccess = tr32(NVRAM_ACCESS);
 +
 +              tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
 +      }
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_disable_nvram_access(struct tg3 *tp)
 +{
 +      if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
 +              u32 nvaccess = tr32(NVRAM_ACCESS);
 +
 +              tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
 +      }
 +}
 +
 +static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
 +                                      u32 offset, u32 *val)
 +{
 +      u32 tmp;
 +      int i;
 +
 +      if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
 +              return -EINVAL;
 +
 +      tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
 +                                      EEPROM_ADDR_DEVID_MASK |
 +                                      EEPROM_ADDR_READ);
 +      tw32(GRC_EEPROM_ADDR,
 +           tmp |
 +           (0 << EEPROM_ADDR_DEVID_SHIFT) |
 +           ((offset << EEPROM_ADDR_ADDR_SHIFT) &
 +            EEPROM_ADDR_ADDR_MASK) |
 +           EEPROM_ADDR_READ | EEPROM_ADDR_START);
 +
 +      for (i = 0; i < 1000; i++) {
 +              tmp = tr32(GRC_EEPROM_ADDR);
 +
 +              if (tmp & EEPROM_ADDR_COMPLETE)
 +                      break;
 +              msleep(1);
 +      }
 +      if (!(tmp & EEPROM_ADDR_COMPLETE))
 +              return -EBUSY;
 +
 +      tmp = tr32(GRC_EEPROM_DATA);
 +
 +      /*
 +       * The data will always be opposite the native endian
 +       * format.  Perform a blind byteswap to compensate.
 +       */
 +      *val = swab32(tmp);
 +
 +      return 0;
 +}
 +
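 +/* Issue a command to the NVRAM controller and poll for NVRAM_CMD_DONE,
 + * giving up after NVRAM_CMD_TIMEOUT * 10 usec (roughly 100 ms).
 + */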
 +#define NVRAM_CMD_TIMEOUT 10000
 +
 +static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 +{
 +      int i;
 +
 +      tw32(NVRAM_CMD, nvram_cmd);
 +      for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
 +              udelay(10);
 +              if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
 +                      udelay(10);
 +                      break;
 +              }
 +      }
 +
 +      if (i == NVRAM_CMD_TIMEOUT)
 +              return -EBUSY;
 +
 +      return 0;
 +}
 +
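 +/* Atmel AT45DB0X1B parts are addressed as page:offset rather than as a
 + * flat byte offset.  These two helpers convert between the linear view
 + * used by the driver and the page-based view used by the part.
 + */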
 +static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
 +{
 +      if (tg3_flag(tp, NVRAM) &&
 +          tg3_flag(tp, NVRAM_BUFFERED) &&
 +          tg3_flag(tp, FLASH) &&
 +          !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
 +          (tp->nvram_jedecnum == JEDEC_ATMEL))
 +
 +              addr = ((addr / tp->nvram_pagesize) <<
 +                      ATMEL_AT45DB0X1B_PAGE_POS) +
 +                     (addr % tp->nvram_pagesize);
 +
 +      return addr;
 +}
 +
 +static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
 +{
 +      if (tg3_flag(tp, NVRAM) &&
 +          tg3_flag(tp, NVRAM_BUFFERED) &&
 +          tg3_flag(tp, FLASH) &&
 +          !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
 +          (tp->nvram_jedecnum == JEDEC_ATMEL))
 +
 +              addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
 +                      tp->nvram_pagesize) +
 +                     (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
 +
 +      return addr;
 +}
 +
 +/* NOTE: Data read in from NVRAM is byteswapped according to
 + * the byteswapping settings for all other register accesses.
 + * tg3 devices are BE devices, so on a BE machine, the data
 + * returned will be exactly as it is seen in NVRAM.  On a LE
 + * machine, the 32-bit value will be byteswapped.
 + */
 +static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
 +{
 +      int ret;
 +
 +      if (!tg3_flag(tp, NVRAM))
 +              return tg3_nvram_read_using_eeprom(tp, offset, val);
 +
 +      offset = tg3_nvram_phys_addr(tp, offset);
 +
 +      if (offset > NVRAM_ADDR_MSK)
 +              return -EINVAL;
 +
 +      ret = tg3_nvram_lock(tp);
 +      if (ret)
 +              return ret;
 +
 +      tg3_enable_nvram_access(tp);
 +
 +      tw32(NVRAM_ADDR, offset);
 +      ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 +              NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
 +
 +      if (ret == 0)
 +              *val = tr32(NVRAM_RDDATA);
 +
 +      tg3_disable_nvram_access(tp);
 +
 +      tg3_nvram_unlock(tp);
 +
 +      return ret;
 +}
 +
 +/* Ensures NVRAM data is in bytestream format. */
 +static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
 +{
 +      u32 v;
 +      int res = tg3_nvram_read(tp, offset, &v);
 +      if (!res)
 +              *val = cpu_to_be32(v);
 +      return res;
 +}
 +
 +#define RX_CPU_SCRATCH_BASE   0x30000
 +#define RX_CPU_SCRATCH_SIZE   0x04000
 +#define TX_CPU_SCRATCH_BASE   0x34000
 +#define TX_CPU_SCRATCH_SIZE   0x04000
 +
 +/* tp->lock is held. */
 +static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
 +{
 +      int i;
 +
 +      BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              u32 val = tr32(GRC_VCPU_EXT_CTRL);
 +
 +              tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
 +              return 0;
 +      }
 +      if (offset == RX_CPU_BASE) {
 +              for (i = 0; i < 10000; i++) {
 +                      tw32(offset + CPU_STATE, 0xffffffff);
 +                      tw32(offset + CPU_MODE,  CPU_MODE_HALT);
 +                      if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
 +                              break;
 +              }
 +
 +              tw32(offset + CPU_STATE, 0xffffffff);
 +              tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
 +              udelay(10);
 +      } else {
 +              for (i = 0; i < 10000; i++) {
 +                      tw32(offset + CPU_STATE, 0xffffffff);
 +                      tw32(offset + CPU_MODE,  CPU_MODE_HALT);
 +                      if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
 +                              break;
 +              }
 +      }
 +
 +      if (i >= 10000) {
 +              netdev_err(tp->dev, "%s timed out, %s CPU\n",
 +                         __func__, offset == RX_CPU_BASE ? "RX" : "TX");
 +              return -ENODEV;
 +      }
 +
 +      /* Clear firmware's nvram arbitration. */
 +      if (tg3_flag(tp, NVRAM))
 +              tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
 +      return 0;
 +}
 +
 +struct fw_info {
 +      unsigned int fw_base;
 +      unsigned int fw_len;
 +      const __be32 *fw_data;
 +};
 +
 +/* tp->lock is held. */
 +static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
 +                               u32 cpu_scratch_base, int cpu_scratch_size,
 +                               struct fw_info *info)
 +{
 +      int err, lock_err, i;
 +      void (*write_op)(struct tg3 *, u32, u32);
 +
 +      if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
 +              netdev_err(tp->dev,
 +                         "%s: Trying to load TX cpu firmware which is 5705\n",
 +                         __func__);
 +              return -EINVAL;
 +      }
 +
 +      if (tg3_flag(tp, 5705_PLUS))
 +              write_op = tg3_write_mem;
 +      else
 +              write_op = tg3_write_indirect_reg32;
 +
 +      /* It is possible that bootcode is still loading at this point.
 +       * Get the nvram lock first before halting the cpu.
 +       */
 +      lock_err = tg3_nvram_lock(tp);
 +      err = tg3_halt_cpu(tp, cpu_base);
 +      if (!lock_err)
 +              tg3_nvram_unlock(tp);
 +      if (err)
 +              goto out;
 +
 +      for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
 +              write_op(tp, cpu_scratch_base + i, 0);
 +      tw32(cpu_base + CPU_STATE, 0xffffffff);
 +      tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
 +      for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
 +              write_op(tp, (cpu_scratch_base +
 +                            (info->fw_base & 0xffff) +
 +                            (i * sizeof(u32))),
 +                            be32_to_cpu(info->fw_data[i]));
 +
 +      err = 0;
 +
 +out:
 +      return err;
 +}
 +
 +/* tp->lock is held. */
 +static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
 +{
 +      struct fw_info info;
 +      const __be32 *fw_data;
 +      int err, i;
 +
 +      fw_data = (void *)tp->fw->data;
 +
 +      /* Firmware blob starts with version numbers, followed by
 +       * start address and length.  We are setting complete length.
 +       * length = end_address_of_bss - start_address_of_text.
 +       * Remainder is the blob to be loaded contiguously
 +       * from start address.
 +       */
 +
 +      info.fw_base = be32_to_cpu(fw_data[1]);
 +      info.fw_len = tp->fw->size - 12;
 +      info.fw_data = &fw_data[3];
 +
 +      err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
 +                                  RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
 +                                  &info);
 +      if (err)
 +              return err;
 +
 +      err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
 +                                  TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
 +                                  &info);
 +      if (err)
 +              return err;
 +
 +      /* Now startup only the RX cpu. */
 +      tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
 +      tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
 +
 +      for (i = 0; i < 5; i++) {
 +              if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
 +                      break;
 +              tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
 +              tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
 +              tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
 +              udelay(1000);
 +      }
 +      if (i >= 5) {
 +              netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
 +                         "should be %08x\n", __func__,
 +                         tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
 +              return -ENODEV;
 +      }
 +      tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
 +      tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
 +
 +      return 0;
 +}
 +
 +/* tp->lock is held. */
 +static int tg3_load_tso_firmware(struct tg3 *tp)
 +{
 +      struct fw_info info;
 +      const __be32 *fw_data;
 +      unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
 +      int err, i;
 +
 +      if (tg3_flag(tp, HW_TSO_1) ||
 +          tg3_flag(tp, HW_TSO_2) ||
 +          tg3_flag(tp, HW_TSO_3))
 +              return 0;
 +
 +      fw_data = (void *)tp->fw->data;
 +
 +      /* Firmware blob starts with version numbers, followed by
 +       * start address and length.  We are setting complete length.
 +       * length = end_address_of_bss - start_address_of_text.
 +       * Remainder is the blob to be loaded contiguously
 +       * from start address.
 +       */
 +
 +      info.fw_base = be32_to_cpu(fw_data[1]);
 +      cpu_scratch_size = tp->fw_len;
 +      info.fw_len = tp->fw->size - 12;
 +      info.fw_data = &fw_data[3];
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 +              cpu_base = RX_CPU_BASE;
 +              cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
 +      } else {
 +              cpu_base = TX_CPU_BASE;
 +              cpu_scratch_base = TX_CPU_SCRATCH_BASE;
 +              cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
 +      }
 +
 +      err = tg3_load_firmware_cpu(tp, cpu_base,
 +                                  cpu_scratch_base, cpu_scratch_size,
 +                                  &info);
 +      if (err)
 +              return err;
 +
 +      /* Now startup the cpu. */
 +      tw32(cpu_base + CPU_STATE, 0xffffffff);
 +      tw32_f(cpu_base + CPU_PC, info.fw_base);
 +
 +      for (i = 0; i < 5; i++) {
 +              if (tr32(cpu_base + CPU_PC) == info.fw_base)
 +                      break;
 +              tw32(cpu_base + CPU_STATE, 0xffffffff);
 +              tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
 +              tw32_f(cpu_base + CPU_PC, info.fw_base);
 +              udelay(1000);
 +      }
 +      if (i >= 5) {
 +              netdev_err(tp->dev,
 +                         "%s fails to set CPU PC, is %08x should be %08x\n",
 +                         __func__, tr32(cpu_base + CPU_PC), info.fw_base);
 +              return -ENODEV;
 +      }
 +      tw32(cpu_base + CPU_STATE, 0xffffffff);
 +      tw32_f(cpu_base + CPU_MODE,  0x00000000);
 +      return 0;
 +}
 +
 +/* tp->lock is held. */
 +static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
 +{
 +      u32 addr_high, addr_low;
 +      int i;
 +
 +      addr_high = ((tp->dev->dev_addr[0] << 8) |
 +                   tp->dev->dev_addr[1]);
 +      addr_low = ((tp->dev->dev_addr[2] << 24) |
 +                  (tp->dev->dev_addr[3] << 16) |
 +                  (tp->dev->dev_addr[4] <<  8) |
 +                  (tp->dev->dev_addr[5] <<  0));
 +      for (i = 0; i < 4; i++) {
 +              if (i == 1 && skip_mac_1)
 +                      continue;
 +              tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
 +              tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 +              for (i = 0; i < 12; i++) {
 +                      tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
 +                      tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
 +              }
 +      }
 +
 +      addr_high = (tp->dev->dev_addr[0] +
 +                   tp->dev->dev_addr[1] +
 +                   tp->dev->dev_addr[2] +
 +                   tp->dev->dev_addr[3] +
 +                   tp->dev->dev_addr[4] +
 +                   tp->dev->dev_addr[5]) &
 +              TX_BACKOFF_SEED_MASK;
 +      tw32(MAC_TX_BACKOFF_SEED, addr_high);
 +}
 +
 +static void tg3_enable_register_access(struct tg3 *tp)
 +{
 +      /*
 +       * Make sure register accesses (indirect or otherwise) will function
 +       * correctly.
 +       */
 +      pci_write_config_dword(tp->pdev,
 +                             TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
 +}
 +
 +static int tg3_power_up(struct tg3 *tp)
 +{
 +      int err;
 +
 +      tg3_enable_register_access(tp);
 +
 +      err = pci_set_power_state(tp->pdev, PCI_D0);
 +      if (!err) {
 +              /* Switch out of Vaux if it is a NIC */
 +              tg3_pwrsrc_switch_to_vmain(tp);
 +      } else {
 +              netdev_err(tp->dev, "Transition to D0 failed\n");
 +      }
 +
 +      return err;
 +}
 +
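 +/* Get the chip ready for a low-power state: restore CLKREQ, mask PCI
 + * interrupts, drop the PHY to a low-speed advertisement where allowed,
 + * and arm the MAC and firmware WOL mailbox for magic-packet wake-up if
 + * the device is supposed to wake the system.
 + */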
 +static int tg3_power_down_prepare(struct tg3 *tp)
 +{
 +      u32 misc_host_ctrl;
 +      bool device_should_wake, do_low_power;
 +
 +      tg3_enable_register_access(tp);
 +
 +      /* Restore the CLKREQ setting. */
 +      if (tg3_flag(tp, CLKREQ_BUG)) {
 +              u16 lnkctl;
 +
 +              pci_read_config_word(tp->pdev,
 +                                   pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
 +                                   &lnkctl);
 +              lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
 +              pci_write_config_word(tp->pdev,
 +                                    pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
 +                                    lnkctl);
 +      }
 +
 +      misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
 +      tw32(TG3PCI_MISC_HOST_CTRL,
 +           misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
 +
 +      device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
 +                           tg3_flag(tp, WOL_ENABLE);
 +
 +      if (tg3_flag(tp, USE_PHYLIB)) {
 +              do_low_power = false;
 +              if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
 +                  !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 +                      struct phy_device *phydev;
 +                      u32 phyid, advertising;
 +
 +                      phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +
 +                      tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 +
 +                      tp->link_config.orig_speed = phydev->speed;
 +                      tp->link_config.orig_duplex = phydev->duplex;
 +                      tp->link_config.orig_autoneg = phydev->autoneg;
 +                      tp->link_config.orig_advertising = phydev->advertising;
 +
 +                      advertising = ADVERTISED_TP |
 +                                    ADVERTISED_Pause |
 +                                    ADVERTISED_Autoneg |
 +                                    ADVERTISED_10baseT_Half;
 +
 +                      if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
 +                              if (tg3_flag(tp, WOL_SPEED_100MB))
 +                                      advertising |=
 +                                              ADVERTISED_100baseT_Half |
 +                                              ADVERTISED_100baseT_Full |
 +                                              ADVERTISED_10baseT_Full;
 +                              else
 +                                      advertising |= ADVERTISED_10baseT_Full;
 +                      }
 +
 +                      phydev->advertising = advertising;
 +
 +                      phy_start_aneg(phydev);
 +
 +                      phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
 +                      if (phyid != PHY_ID_BCMAC131) {
 +                              phyid &= PHY_BCM_OUI_MASK;
 +                              if (phyid == PHY_BCM_OUI_1 ||
 +                                  phyid == PHY_BCM_OUI_2 ||
 +                                  phyid == PHY_BCM_OUI_3)
 +                                      do_low_power = true;
 +                      }
 +              }
 +      } else {
 +              do_low_power = true;
 +
 +              if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 +                      tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 +                      tp->link_config.orig_speed = tp->link_config.speed;
 +                      tp->link_config.orig_duplex = tp->link_config.duplex;
 +                      tp->link_config.orig_autoneg = tp->link_config.autoneg;
 +              }
 +
 +              if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
 +                      tp->link_config.speed = SPEED_10;
 +                      tp->link_config.duplex = DUPLEX_HALF;
 +                      tp->link_config.autoneg = AUTONEG_ENABLE;
 +                      tg3_setup_phy(tp, 0);
 +              }
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              u32 val;
 +
 +              val = tr32(GRC_VCPU_EXT_CTRL);
 +              tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
 +      } else if (!tg3_flag(tp, ENABLE_ASF)) {
 +              int i;
 +              u32 val;
 +
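 +              /* Poll the firmware status mailbox for its expected value. */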
 +              for (i = 0; i < 200; i++) {
 +                      tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 +                      if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 +                              break;
 +                      msleep(1);
 +              }
 +      }
 +      if (tg3_flag(tp, WOL_CAP))
 +              tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
 +                                                   WOL_DRV_STATE_SHUTDOWN |
 +                                                   WOL_DRV_WOL |
 +                                                   WOL_SET_MAGIC_PKT);
 +
 +      if (device_should_wake) {
 +              u32 mac_mode;
 +
 +              if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 +                      if (do_low_power &&
 +                          !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 +                              tg3_phy_auxctl_write(tp,
 +                                             MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
 +                                             MII_TG3_AUXCTL_PCTL_WOL_EN |
 +                                             MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 +                                             MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
 +                              udelay(40);
 +                      }
 +
 +                      if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 +                              mac_mode = MAC_MODE_PORT_MODE_GMII;
 +                      else
 +                              mac_mode = MAC_MODE_PORT_MODE_MII;
 +
 +                      mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
 +                      if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
 +                          ASIC_REV_5700) {
 +                              u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
 +                                           SPEED_100 : SPEED_10;
 +                              if (tg3_5700_link_polarity(tp, speed))
 +                                      mac_mode |= MAC_MODE_LINK_POLARITY;
 +                              else
 +                                      mac_mode &= ~MAC_MODE_LINK_POLARITY;
 +                      }
 +              } else {
 +                      mac_mode = MAC_MODE_PORT_MODE_TBI;
 +              }
 +
 +              if (!tg3_flag(tp, 5750_PLUS))
 +                      tw32(MAC_LED_CTRL, tp->led_ctrl);
 +
 +              mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
 +              if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
 +                  (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
 +                      mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
 +
 +              if (tg3_flag(tp, ENABLE_APE))
 +                      mac_mode |= MAC_MODE_APE_TX_EN |
 +                                  MAC_MODE_APE_RX_EN |
 +                                  MAC_MODE_TDE_ENABLE;
 +
 +              tw32_f(MAC_MODE, mac_mode);
 +              udelay(100);
 +
 +              tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
 +              udelay(10);
 +      }
 +
 +      if (!tg3_flag(tp, WOL_SPEED_100MB) &&
 +          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
 +              u32 base_val;
 +
 +              base_val = tp->pci_clock_ctrl;
 +              base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
 +                           CLOCK_CTRL_TXCLK_DISABLE);
 +
 +              tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
 +                          CLOCK_CTRL_PWRDOWN_PLL133, 40);
 +      } else if (tg3_flag(tp, 5780_CLASS) ||
 +                 tg3_flag(tp, CPMU_PRESENT) ||
 +                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              /* do nothing */
 +      } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
 +              u32 newbits1, newbits2;
 +
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 +                      newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
 +                                  CLOCK_CTRL_TXCLK_DISABLE |
 +                                  CLOCK_CTRL_ALTCLK);
 +                      newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
 +              } else if (tg3_flag(tp, 5705_PLUS)) {
 +                      newbits1 = CLOCK_CTRL_625_CORE;
 +                      newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
 +              } else {
 +                      newbits1 = CLOCK_CTRL_ALTCLK;
 +                      newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
 +              }
 +
 +              tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
 +                          40);
 +
 +              tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
 +                          40);
 +
 +              if (!tg3_flag(tp, 5705_PLUS)) {
 +                      u32 newbits3;
 +
 +                      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 +                              newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
 +                                          CLOCK_CTRL_TXCLK_DISABLE |
 +                                          CLOCK_CTRL_44MHZ_CORE);
 +                      } else {
 +                              newbits3 = CLOCK_CTRL_44MHZ_CORE;
 +                      }
 +
 +                      tw32_wait_f(TG3PCI_CLOCK_CTRL,
 +                                  tp->pci_clock_ctrl | newbits3, 40);
 +              }
 +      }
 +
 +      if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
 +              tg3_power_down_phy(tp, do_low_power);
 +
 +      tg3_frob_aux_power(tp, true);
 +
 +      /* Workaround for unstable PLL clock */
 +      if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
 +          (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
 +              u32 val = tr32(0x7d00);
 +
 +              val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
 +              tw32(0x7d00, val);
 +              if (!tg3_flag(tp, ENABLE_ASF)) {
 +                      int err;
 +
 +                      err = tg3_nvram_lock(tp);
 +                      tg3_halt_cpu(tp, RX_CPU_BASE);
 +                      if (!err)
 +                              tg3_nvram_unlock(tp);
 +              }
 +      }
 +
 +      tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
 +
 +      return 0;
 +}
 +
 +static void tg3_power_down(struct tg3 *tp)
 +{
 +      tg3_power_down_prepare(tp);
 +
 +      pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
 +      pci_set_power_state(tp->pdev, PCI_D3hot);
 +}
 +
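 +/* Decode the PHY auxiliary status register into speed and duplex. */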
 +static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
 +{
 +      switch (val & MII_TG3_AUX_STAT_SPDMASK) {
 +      case MII_TG3_AUX_STAT_10HALF:
 +              *speed = SPEED_10;
 +              *duplex = DUPLEX_HALF;
 +              break;
 +
 +      case MII_TG3_AUX_STAT_10FULL:
 +              *speed = SPEED_10;
 +              *duplex = DUPLEX_FULL;
 +              break;
 +
 +      case MII_TG3_AUX_STAT_100HALF:
 +              *speed = SPEED_100;
 +              *duplex = DUPLEX_HALF;
 +              break;
 +
 +      case MII_TG3_AUX_STAT_100FULL:
 +              *speed = SPEED_100;
 +              *duplex = DUPLEX_FULL;
 +              break;
 +
 +      case MII_TG3_AUX_STAT_1000HALF:
 +              *speed = SPEED_1000;
 +              *duplex = DUPLEX_HALF;
 +              break;
 +
 +      case MII_TG3_AUX_STAT_1000FULL:
 +              *speed = SPEED_1000;
 +              *duplex = DUPLEX_FULL;
 +              break;
 +
 +      default:
 +              if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 +                      *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
 +                               SPEED_10;
 +                      *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
 +                                DUPLEX_HALF;
 +                      break;
 +              }
 +              *speed = SPEED_INVALID;
 +              *duplex = DUPLEX_INVALID;
 +              break;
 +      }
 +}
 +
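 +/* Program the PHY advertisement registers (10/100, 1000 and EEE) for autoneg. */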
 +static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
 +{
 +      int err = 0;
 +      u32 val, new_adv;
 +
 +      new_adv = ADVERTISE_CSMA;
 +      if (advertise & ADVERTISED_10baseT_Half)
 +              new_adv |= ADVERTISE_10HALF;
 +      if (advertise & ADVERTISED_10baseT_Full)
 +              new_adv |= ADVERTISE_10FULL;
 +      if (advertise & ADVERTISED_100baseT_Half)
 +              new_adv |= ADVERTISE_100HALF;
 +      if (advertise & ADVERTISED_100baseT_Full)
 +              new_adv |= ADVERTISE_100FULL;
 +
 +      new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
 +
 +      err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
 +      if (err)
 +              goto done;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 +              goto done;
 +
 +      new_adv = 0;
 +      if (advertise & ADVERTISED_1000baseT_Half)
 +              new_adv |= ADVERTISE_1000HALF;
 +      if (advertise & ADVERTISED_1000baseT_Full)
 +              new_adv |= ADVERTISE_1000FULL;
 +
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
 +              new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 +
 +      err = tg3_writephy(tp, MII_CTRL1000, new_adv);
 +      if (err)
 +              goto done;
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 +              goto done;
 +
 +      tw32(TG3_CPMU_EEE_MODE,
 +           tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
 +
 +      err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
 +      if (!err) {
 +              u32 err2;
 +
 +              val = 0;
 +              /* Advertise 100-BaseTX EEE ability */
 +              if (advertise & ADVERTISED_100baseT_Full)
 +                      val |= MDIO_AN_EEE_ADV_100TX;
 +              /* Advertise 1000-BaseT EEE ability */
 +              if (advertise & ADVERTISED_1000baseT_Full)
 +                      val |= MDIO_AN_EEE_ADV_1000T;
 +              err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
 +              if (err)
 +                      val = 0;
 +
 +              switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
 +              case ASIC_REV_5717:
 +              case ASIC_REV_57765:
 +              case ASIC_REV_5719:
 +                      /* If we advertised any EEE modes above... */
 +                      if (val)
 +                              val = MII_TG3_DSP_TAP26_ALNOKO |
 +                                    MII_TG3_DSP_TAP26_RMRXSTO |
 +                                    MII_TG3_DSP_TAP26_OPCSINPT;
 +                      tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
 +                      /* Fall through */
 +              case ASIC_REV_5720:
 +                      if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
 +                              tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
 +                                               MII_TG3_DSP_CH34TP2_HIBW01);
 +              }
 +
 +              err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +              if (!err)
 +                      err = err2;
 +      }
 +
 +done:
 +      return err;
 +}
 +
 +static void tg3_phy_copper_begin(struct tg3 *tp)
 +{
 +      u32 new_adv;
 +      int i;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
 +              new_adv = ADVERTISED_10baseT_Half |
 +                        ADVERTISED_10baseT_Full;
 +              if (tg3_flag(tp, WOL_SPEED_100MB))
 +                      new_adv |= ADVERTISED_100baseT_Half |
 +                                 ADVERTISED_100baseT_Full;
 +
 +              tg3_phy_autoneg_cfg(tp, new_adv,
 +                                  FLOW_CTRL_TX | FLOW_CTRL_RX);
 +      } else if (tp->link_config.speed == SPEED_INVALID) {
 +              if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 +                      tp->link_config.advertising &=
 +                              ~(ADVERTISED_1000baseT_Half |
 +                                ADVERTISED_1000baseT_Full);
 +
 +              tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
 +                                  tp->link_config.flowctrl);
 +      } else {
 +              /* Asking for a specific link mode. */
 +              if (tp->link_config.speed == SPEED_1000) {
 +                      if (tp->link_config.duplex == DUPLEX_FULL)
 +                              new_adv = ADVERTISED_1000baseT_Full;
 +                      else
 +                              new_adv = ADVERTISED_1000baseT_Half;
 +              } else if (tp->link_config.speed == SPEED_100) {
 +                      if (tp->link_config.duplex == DUPLEX_FULL)
 +                              new_adv = ADVERTISED_100baseT_Full;
 +                      else
 +                              new_adv = ADVERTISED_100baseT_Half;
 +              } else {
 +                      if (tp->link_config.duplex == DUPLEX_FULL)
 +                              new_adv = ADVERTISED_10baseT_Full;
 +                      else
 +                              new_adv = ADVERTISED_10baseT_Half;
 +              }
 +
 +              tg3_phy_autoneg_cfg(tp, new_adv,
 +                                  tp->link_config.flowctrl);
 +      }
 +
 +      if (tp->link_config.autoneg == AUTONEG_DISABLE &&
 +          tp->link_config.speed != SPEED_INVALID) {
 +              u32 bmcr, orig_bmcr;
 +
 +              tp->link_config.active_speed = tp->link_config.speed;
 +              tp->link_config.active_duplex = tp->link_config.duplex;
 +
 +              bmcr = 0;
 +              switch (tp->link_config.speed) {
 +              default:
 +              case SPEED_10:
 +                      break;
 +
 +              case SPEED_100:
 +                      bmcr |= BMCR_SPEED100;
 +                      break;
 +
 +              case SPEED_1000:
 +                      bmcr |= BMCR_SPEED1000;
 +                      break;
 +              }
 +
 +              if (tp->link_config.duplex == DUPLEX_FULL)
 +                      bmcr |= BMCR_FULLDPLX;
 +
 +              if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
 +                  (bmcr != orig_bmcr)) {
 +                      tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
 +                      for (i = 0; i < 1500; i++) {
 +                              u32 tmp;
 +
 +                              udelay(10);
 +                              if (tg3_readphy(tp, MII_BMSR, &tmp) ||
 +                                  tg3_readphy(tp, MII_BMSR, &tmp))
 +                                      continue;
 +                              if (!(tmp & BMSR_LSTATUS)) {
 +                                      udelay(40);
 +                                      break;
 +                              }
 +                      }
 +                      tg3_writephy(tp, MII_BMCR, bmcr);
 +                      udelay(40);
 +              }
 +      } else {
 +              tg3_writephy(tp, MII_BMCR,
 +                           BMCR_ANENABLE | BMCR_ANRESTART);
 +      }
 +}
 +
 +static int tg3_init_5401phy_dsp(struct tg3 *tp)
 +{
 +      int err;
 +
 +      /* Turn off tap power management. */
 +      /* Set Extended packet length bit */
 +      err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
 +
 +      err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
 +      err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
 +      err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
 +      err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
 +      err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
 +
 +      udelay(40);
 +
 +      return err;
 +}
 +
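 +/* Return nonzero if the PHY advertisement registers match every mode in @mask. */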
 +static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
 +{
 +      u32 adv_reg, all_mask = 0;
 +
 +      if (mask & ADVERTISED_10baseT_Half)
 +              all_mask |= ADVERTISE_10HALF;
 +      if (mask & ADVERTISED_10baseT_Full)
 +              all_mask |= ADVERTISE_10FULL;
 +      if (mask & ADVERTISED_100baseT_Half)
 +              all_mask |= ADVERTISE_100HALF;
 +      if (mask & ADVERTISED_100baseT_Full)
 +              all_mask |= ADVERTISE_100FULL;
 +
 +      if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
 +              return 0;
 +
 +      if ((adv_reg & ADVERTISE_ALL) != all_mask)
 +              return 0;
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 +              u32 tg3_ctrl;
 +
 +              all_mask = 0;
 +              if (mask & ADVERTISED_1000baseT_Half)
 +                      all_mask |= ADVERTISE_1000HALF;
 +              if (mask & ADVERTISED_1000baseT_Full)
 +                      all_mask |= ADVERTISE_1000FULL;
 +
 +              if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
 +                      return 0;
 +
 +              tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
 +              if (tg3_ctrl != all_mask)
 +                      return 0;
 +      }
 +
 +      return 1;
 +}
 +
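 +/* Check (and if needed reprogram) the advertised pause bits for the requested flow control. */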
 +static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
 +{
 +      u32 curadv, reqadv;
 +
 +      if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
 +              return 1;
 +
 +      curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
 +      reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
 +
 +      if (tp->link_config.active_duplex == DUPLEX_FULL) {
 +              if (curadv != reqadv)
 +                      return 0;
 +
 +              if (tg3_flag(tp, PAUSE_AUTONEG))
 +                      tg3_readphy(tp, MII_LPA, rmtadv);
 +      } else {
 +              /* Reprogram the advertisement register, even if it
 +               * does not affect the current link.  If the link
 +               * gets renegotiated in the future, we can save an
 +               * additional renegotiation cycle by advertising
 +               * it correctly in the first place.
 +               */
 +              if (curadv != reqadv) {
 +                      *lcladv &= ~(ADVERTISE_PAUSE_CAP |
 +                                   ADVERTISE_PAUSE_ASYM);
 +                      tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
 +              }
 +      }
 +
 +      return 1;
 +}
 +
 +static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 +{
 +      int current_link_up;
 +      u32 bmsr, val;
 +      u32 lcl_adv, rmt_adv;
 +      u16 current_speed;
 +      u8 current_duplex;
 +      int i, err;
 +
 +      tw32(MAC_EVENT, 0);
 +
 +      tw32_f(MAC_STATUS,
 +           (MAC_STATUS_SYNC_CHANGED |
 +            MAC_STATUS_CFG_CHANGED |
 +            MAC_STATUS_MI_COMPLETION |
 +            MAC_STATUS_LNKSTATE_CHANGED));
 +      udelay(40);
 +
 +      if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
 +              tw32_f(MAC_MI_MODE,
 +                   (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
 +              udelay(80);
 +      }
 +
 +      tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
 +
 +      /* Some third-party PHYs need to be reset on link going
 +       * down.
 +       */
 +      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
 +          netif_carrier_ok(tp->dev)) {
 +              tg3_readphy(tp, MII_BMSR, &bmsr);
 +              if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 +                  !(bmsr & BMSR_LSTATUS))
 +                      force_reset = 1;
 +      }
 +      if (force_reset)
 +              tg3_phy_reset(tp);
 +
 +      if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 +              tg3_readphy(tp, MII_BMSR, &bmsr);
 +              if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
 +                  !tg3_flag(tp, INIT_COMPLETE))
 +                      bmsr = 0;
 +
 +              if (!(bmsr & BMSR_LSTATUS)) {
 +                      err = tg3_init_5401phy_dsp(tp);
 +                      if (err)
 +                              return err;
 +
 +                      tg3_readphy(tp, MII_BMSR, &bmsr);
 +                      for (i = 0; i < 1000; i++) {
 +                              udelay(10);
 +                              if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 +                                  (bmsr & BMSR_LSTATUS)) {
 +                                      udelay(40);
 +                                      break;
 +                              }
 +                      }
 +
 +                      if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
 +                          TG3_PHY_REV_BCM5401_B0 &&
 +                          !(bmsr & BMSR_LSTATUS) &&
 +                          tp->link_config.active_speed == SPEED_1000) {
 +                              err = tg3_phy_reset(tp);
 +                              if (!err)
 +                                      err = tg3_init_5401phy_dsp(tp);
 +                              if (err)
 +                                      return err;
 +                      }
 +              }
 +      } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
 +                 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
 +              /* 5701 {A0,B0} CRC bug workaround */
 +              tg3_writephy(tp, 0x15, 0x0a75);
 +              tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
 +              tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 +              tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
 +      }
 +
 +      /* Clear pending interrupts... */
 +      tg3_readphy(tp, MII_TG3_ISTAT, &val);
 +      tg3_readphy(tp, MII_TG3_ISTAT, &val);
 +
 +      if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
 +              tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
 +      else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
 +              tg3_writephy(tp, MII_TG3_IMASK, ~0);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 +              if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
 +                      tg3_writephy(tp, MII_TG3_EXT_CTRL,
 +                                   MII_TG3_EXT_CTRL_LNK3_LED_MODE);
 +              else
 +                      tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
 +      }
 +
 +      current_link_up = 0;
 +      current_speed = SPEED_INVALID;
 +      current_duplex = DUPLEX_INVALID;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
 +              err = tg3_phy_auxctl_read(tp,
 +                                        MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
 +                                        &val);
 +              if (!err && !(val & (1 << 10))) {
 +                      tg3_phy_auxctl_write(tp,
 +                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
 +                                           val | (1 << 10));
 +                      goto relink;
 +              }
 +      }
 +
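 +      /* BMSR latches link-down events; read it twice per pass and poll for link up. */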
 +      bmsr = 0;
 +      for (i = 0; i < 100; i++) {
 +              tg3_readphy(tp, MII_BMSR, &bmsr);
 +              if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 +                  (bmsr & BMSR_LSTATUS))
 +                      break;
 +              udelay(40);
 +      }
 +
 +      if (bmsr & BMSR_LSTATUS) {
 +              u32 aux_stat, bmcr;
 +
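 +              /* Wait for the auxiliary status register to report a non-zero value. */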
 +              tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
 +              for (i = 0; i < 2000; i++) {
 +                      udelay(10);
 +                      if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
 +                          aux_stat)
 +                              break;
 +              }
 +
 +              tg3_aux_stat_to_speed_duplex(tp, aux_stat,
 +                                           &current_speed,
 +                                           &current_duplex);
 +
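 +              /* Read BMCR until it returns a sane value (neither 0 nor 0x7fff). */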
 +              bmcr = 0;
 +              for (i = 0; i < 200; i++) {
 +                      tg3_readphy(tp, MII_BMCR, &bmcr);
 +                      if (tg3_readphy(tp, MII_BMCR, &bmcr))
 +                              continue;
 +                      if (bmcr && bmcr != 0x7fff)
 +                              break;
 +                      udelay(10);
 +              }
 +
 +              lcl_adv = 0;
 +              rmt_adv = 0;
 +
 +              tp->link_config.active_speed = current_speed;
 +              tp->link_config.active_duplex = current_duplex;
 +
 +              if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 +                      if ((bmcr & BMCR_ANENABLE) &&
 +                          tg3_copper_is_advertising_all(tp,
 +                                              tp->link_config.advertising)) {
 +                              if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
 +                                                                &rmt_adv))
 +                                      current_link_up = 1;
 +                      }
 +              } else {
 +                      if (!(bmcr & BMCR_ANENABLE) &&
 +                          tp->link_config.speed == current_speed &&
 +                          tp->link_config.duplex == current_duplex &&
 +                          tp->link_config.flowctrl ==
 +                          tp->link_config.active_flowctrl) {
 +                              current_link_up = 1;
 +                      }
 +              }
 +
 +              if (current_link_up == 1 &&
 +                  tp->link_config.active_duplex == DUPLEX_FULL)
 +                      tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
 +      }
 +
 +relink:
 +      if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 +              tg3_phy_copper_begin(tp);
 +
 +              tg3_readphy(tp, MII_BMSR, &bmsr);
 +              if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
 +                  (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
 +                      current_link_up = 1;
 +      }
 +
 +      tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
 +      if (current_link_up == 1) {
 +              if (tp->link_config.active_speed == SPEED_100 ||
 +                  tp->link_config.active_speed == SPEED_10)
 +                      tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 +              else
 +                      tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 +      } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 +              tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 +      else
 +              tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 +
 +      tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
 +      if (tp->link_config.active_duplex == DUPLEX_HALF)
 +              tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
 +              if (current_link_up == 1 &&
 +                  tg3_5700_link_polarity(tp, tp->link_config.active_speed))
 +                      tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 +              else
 +                      tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
 +      }
 +
 +      /* ??? Without this setting Netgear GA302T PHY does not
 +       * ??? send/receive packets...
 +       */
 +      if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
 +          tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
 +              tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
 +              tw32_f(MAC_MI_MODE, tp->mi_mode);
 +              udelay(80);
 +      }
 +
 +      tw32_f(MAC_MODE, tp->mac_mode);
 +      udelay(40);
 +
 +      tg3_phy_eee_adjust(tp, current_link_up);
 +
 +      if (tg3_flag(tp, USE_LINKCHG_REG)) {
 +              /* Polled via timer. */
 +              tw32_f(MAC_EVENT, 0);
 +      } else {
 +              tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 +      }
 +      udelay(40);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
 +          current_link_up == 1 &&
 +          tp->link_config.active_speed == SPEED_1000 &&
 +          (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
 +              udelay(120);
 +              tw32_f(MAC_STATUS,
 +                   (MAC_STATUS_SYNC_CHANGED |
 +                    MAC_STATUS_CFG_CHANGED));
 +              udelay(40);
 +              tg3_write_mem(tp,
 +                            NIC_SRAM_FIRMWARE_MBOX,
 +                            NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
 +      }
 +
 +      /* Prevent send BD corruption. */
 +      if (tg3_flag(tp, CLKREQ_BUG)) {
 +              u16 oldlnkctl, newlnkctl;
 +
 +              pci_read_config_word(tp->pdev,
 +                                   pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
 +                                   &oldlnkctl);
 +              if (tp->link_config.active_speed == SPEED_100 ||
 +                  tp->link_config.active_speed == SPEED_10)
 +                      newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
 +              else
 +                      newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
 +              if (newlnkctl != oldlnkctl)
 +                      pci_write_config_word(tp->pdev,
 +                                            pci_pcie_cap(tp->pdev) +
 +                                            PCI_EXP_LNKCTL, newlnkctl);
 +      }
 +
 +      if (current_link_up != netif_carrier_ok(tp->dev)) {
 +              if (current_link_up)
 +                      netif_carrier_on(tp->dev);
 +              else
 +                      netif_carrier_off(tp->dev);
 +              tg3_link_report(tp);
 +      }
 +
 +      return 0;
 +}
 +
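 +/* Software state for the 1000BASE-X autonegotiation state machine below. */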
 +struct tg3_fiber_aneginfo {
 +      int state;
 +#define ANEG_STATE_UNKNOWN            0
 +#define ANEG_STATE_AN_ENABLE          1
 +#define ANEG_STATE_RESTART_INIT               2
 +#define ANEG_STATE_RESTART            3
 +#define ANEG_STATE_DISABLE_LINK_OK    4
 +#define ANEG_STATE_ABILITY_DETECT_INIT        5
 +#define ANEG_STATE_ABILITY_DETECT     6
 +#define ANEG_STATE_ACK_DETECT_INIT    7
 +#define ANEG_STATE_ACK_DETECT         8
 +#define ANEG_STATE_COMPLETE_ACK_INIT  9
 +#define ANEG_STATE_COMPLETE_ACK               10
 +#define ANEG_STATE_IDLE_DETECT_INIT   11
 +#define ANEG_STATE_IDLE_DETECT                12
 +#define ANEG_STATE_LINK_OK            13
 +#define ANEG_STATE_NEXT_PAGE_WAIT_INIT        14
 +#define ANEG_STATE_NEXT_PAGE_WAIT     15
 +
 +      u32 flags;
 +#define MR_AN_ENABLE          0x00000001
 +#define MR_RESTART_AN         0x00000002
 +#define MR_AN_COMPLETE                0x00000004
 +#define MR_PAGE_RX            0x00000008
 +#define MR_NP_LOADED          0x00000010
 +#define MR_TOGGLE_TX          0x00000020
 +#define MR_LP_ADV_FULL_DUPLEX 0x00000040
 +#define MR_LP_ADV_HALF_DUPLEX 0x00000080
 +#define MR_LP_ADV_SYM_PAUSE   0x00000100
 +#define MR_LP_ADV_ASYM_PAUSE  0x00000200
 +#define MR_LP_ADV_REMOTE_FAULT1       0x00000400
 +#define MR_LP_ADV_REMOTE_FAULT2       0x00000800
 +#define MR_LP_ADV_NEXT_PAGE   0x00001000
 +#define MR_TOGGLE_RX          0x00002000
 +#define MR_NP_RX              0x00004000
 +
 +#define MR_LINK_OK            0x80000000
 +
 +      unsigned long link_time, cur_time;
 +
 +      u32 ability_match_cfg;
 +      int ability_match_count;
 +
 +      char ability_match, idle_match, ack_match;
 +
 +      u32 txconfig, rxconfig;
 +#define ANEG_CFG_NP           0x00000080
 +#define ANEG_CFG_ACK          0x00000040
 +#define ANEG_CFG_RF2          0x00000020
 +#define ANEG_CFG_RF1          0x00000010
 +#define ANEG_CFG_PS2          0x00000001
 +#define ANEG_CFG_PS1          0x00008000
 +#define ANEG_CFG_HD           0x00004000
 +#define ANEG_CFG_FD           0x00002000
 +#define ANEG_CFG_INVAL                0x00001f06
 +
 +};
 +#define ANEG_OK               0
 +#define ANEG_DONE     1
 +#define ANEG_TIMER_ENAB       2
 +#define ANEG_FAILED   -1
 +
 +#define ANEG_STATE_SETTLE_TIME        10000
 +
 +static int tg3_fiber_aneg_smachine(struct tg3 *tp,
 +                                 struct tg3_fiber_aneginfo *ap)
 +{
 +      u16 flowctrl;
 +      unsigned long delta;
 +      u32 rx_cfg_reg;
 +      int ret;
 +
 +      if (ap->state == ANEG_STATE_UNKNOWN) {
 +              ap->rxconfig = 0;
 +              ap->link_time = 0;
 +              ap->cur_time = 0;
 +              ap->ability_match_cfg = 0;
 +              ap->ability_match_count = 0;
 +              ap->ability_match = 0;
 +              ap->idle_match = 0;
 +              ap->ack_match = 0;
 +      }
 +      ap->cur_time++;
 +
 +      if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
 +              rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
 +
 +              if (rx_cfg_reg != ap->ability_match_cfg) {
 +                      ap->ability_match_cfg = rx_cfg_reg;
 +                      ap->ability_match = 0;
 +                      ap->ability_match_count = 0;
 +              } else {
 +                      if (++ap->ability_match_count > 1) {
 +                              ap->ability_match = 1;
 +                              ap->ability_match_cfg = rx_cfg_reg;
 +                      }
 +              }
 +              if (rx_cfg_reg & ANEG_CFG_ACK)
 +                      ap->ack_match = 1;
 +              else
 +                      ap->ack_match = 0;
 +
 +              ap->idle_match = 0;
 +      } else {
 +              ap->idle_match = 1;
 +              ap->ability_match_cfg = 0;
 +              ap->ability_match_count = 0;
 +              ap->ability_match = 0;
 +              ap->ack_match = 0;
 +
 +              rx_cfg_reg = 0;
 +      }
 +
 +      ap->rxconfig = rx_cfg_reg;
 +      ret = ANEG_OK;
 +
 +      switch (ap->state) {
 +      case ANEG_STATE_UNKNOWN:
 +              if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
 +                      ap->state = ANEG_STATE_AN_ENABLE;
 +
 +              /* fallthru */
 +      case ANEG_STATE_AN_ENABLE:
 +              ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
 +              if (ap->flags & MR_AN_ENABLE) {
 +                      ap->link_time = 0;
 +                      ap->cur_time = 0;
 +                      ap->ability_match_cfg = 0;
 +                      ap->ability_match_count = 0;
 +                      ap->ability_match = 0;
 +                      ap->idle_match = 0;
 +                      ap->ack_match = 0;
 +
 +                      ap->state = ANEG_STATE_RESTART_INIT;
 +              } else {
 +                      ap->state = ANEG_STATE_DISABLE_LINK_OK;
 +              }
 +              break;
 +
 +      case ANEG_STATE_RESTART_INIT:
 +              ap->link_time = ap->cur_time;
 +              ap->flags &= ~(MR_NP_LOADED);
 +              ap->txconfig = 0;
 +              tw32(MAC_TX_AUTO_NEG, 0);
 +              tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
 +              tw32_f(MAC_MODE, tp->mac_mode);
 +              udelay(40);
 +
 +              ret = ANEG_TIMER_ENAB;
 +              ap->state = ANEG_STATE_RESTART;
 +
 +              /* fallthru */
 +      case ANEG_STATE_RESTART:
 +              delta = ap->cur_time - ap->link_time;
 +              if (delta > ANEG_STATE_SETTLE_TIME)
 +                      ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
 +              else
 +                      ret = ANEG_TIMER_ENAB;
 +              break;
 +
 +      case ANEG_STATE_DISABLE_LINK_OK:
 +              ret = ANEG_DONE;
 +              break;
 +
 +      case ANEG_STATE_ABILITY_DETECT_INIT:
 +              ap->flags &= ~(MR_TOGGLE_TX);
 +              ap->txconfig = ANEG_CFG_FD;
 +              flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 +              if (flowctrl & ADVERTISE_1000XPAUSE)
 +                      ap->txconfig |= ANEG_CFG_PS1;
 +              if (flowctrl & ADVERTISE_1000XPSE_ASYM)
 +                      ap->txconfig |= ANEG_CFG_PS2;
 +              tw32(MAC_TX_AUTO_NEG, ap->txconfig);
 +              tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
 +              tw32_f(MAC_MODE, tp->mac_mode);
 +              udelay(40);
 +
 +              ap->state = ANEG_STATE_ABILITY_DETECT;
 +              break;
 +
 +      case ANEG_STATE_ABILITY_DETECT:
 +              if (ap->ability_match != 0 && ap->rxconfig != 0)
 +                      ap->state = ANEG_STATE_ACK_DETECT_INIT;
 +              break;
 +
 +      case ANEG_STATE_ACK_DETECT_INIT:
 +              ap->txconfig |= ANEG_CFG_ACK;
 +              tw32(MAC_TX_AUTO_NEG, ap->txconfig);
 +              tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
 +              tw32_f(MAC_MODE, tp->mac_mode);
 +              udelay(40);
 +
 +              ap->state = ANEG_STATE_ACK_DETECT;
 +
 +              /* fallthru */
 +      case ANEG_STATE_ACK_DETECT:
 +              if (ap->ack_match != 0) {
 +                      if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
 +                          (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
 +                              ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
 +                      } else {
 +                              ap->state = ANEG_STATE_AN_ENABLE;
 +                      }
 +              } else if (ap->ability_match != 0 &&
 +                         ap->rxconfig == 0) {
 +                      ap->state = ANEG_STATE_AN_ENABLE;
 +              }
 +              break;
 +
 +      case ANEG_STATE_COMPLETE_ACK_INIT:
 +              if (ap->rxconfig & ANEG_CFG_INVAL) {
 +                      ret = ANEG_FAILED;
 +                      break;
 +              }
 +              ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
 +                             MR_LP_ADV_HALF_DUPLEX |
 +                             MR_LP_ADV_SYM_PAUSE |
 +                             MR_LP_ADV_ASYM_PAUSE |
 +                             MR_LP_ADV_REMOTE_FAULT1 |
 +                             MR_LP_ADV_REMOTE_FAULT2 |
 +                             MR_LP_ADV_NEXT_PAGE |
 +                             MR_TOGGLE_RX |
 +                             MR_NP_RX);
 +              if (ap->rxconfig & ANEG_CFG_FD)
 +                      ap->flags |= MR_LP_ADV_FULL_DUPLEX;
 +              if (ap->rxconfig & ANEG_CFG_HD)
 +                      ap->flags |= MR_LP_ADV_HALF_DUPLEX;
 +              if (ap->rxconfig & ANEG_CFG_PS1)
 +                      ap->flags |= MR_LP_ADV_SYM_PAUSE;
 +              if (ap->rxconfig & ANEG_CFG_PS2)
 +                      ap->flags |= MR_LP_ADV_ASYM_PAUSE;
 +              if (ap->rxconfig & ANEG_CFG_RF1)
 +                      ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
 +              if (ap->rxconfig & ANEG_CFG_RF2)
 +                      ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
 +              if (ap->rxconfig & ANEG_CFG_NP)
 +                      ap->flags |= MR_LP_ADV_NEXT_PAGE;
 +
 +              ap->link_time = ap->cur_time;
 +
 +              ap->flags ^= (MR_TOGGLE_TX);
 +              if (ap->rxconfig & 0x0008)
 +                      ap->flags |= MR_TOGGLE_RX;
 +              if (ap->rxconfig & ANEG_CFG_NP)
 +                      ap->flags |= MR_NP_RX;
 +              ap->flags |= MR_PAGE_RX;
 +
 +              ap->state = ANEG_STATE_COMPLETE_ACK;
 +              ret = ANEG_TIMER_ENAB;
 +              break;
 +
 +      case ANEG_STATE_COMPLETE_ACK:
 +              if (ap->ability_match != 0 &&
 +                  ap->rxconfig == 0) {
 +                      ap->state = ANEG_STATE_AN_ENABLE;
 +                      break;
 +              }
 +              delta = ap->cur_time - ap->link_time;
 +              if (delta > ANEG_STATE_SETTLE_TIME) {
 +                      if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
 +                              ap->state = ANEG_STATE_IDLE_DETECT_INIT;
 +                      } else {
 +                              if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
 +                                  !(ap->flags & MR_NP_RX)) {
 +                                      ap->state = ANEG_STATE_IDLE_DETECT_INIT;
 +                              } else {
 +                                      ret = ANEG_FAILED;
 +                              }
 +                      }
 +              }
 +              break;
 +
 +      case ANEG_STATE_IDLE_DETECT_INIT:
 +              ap->link_time = ap->cur_time;
 +              tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
 +              tw32_f(MAC_MODE, tp->mac_mode);
 +              udelay(40);
 +
 +              ap->state = ANEG_STATE_IDLE_DETECT;
 +              ret = ANEG_TIMER_ENAB;
 +              break;
 +
 +      case ANEG_STATE_IDLE_DETECT:
 +              if (ap->ability_match != 0 &&
 +                  ap->rxconfig == 0) {
 +                      ap->state = ANEG_STATE_AN_ENABLE;
 +                      break;
 +              }
 +              delta = ap->cur_time - ap->link_time;
 +              if (delta > ANEG_STATE_SETTLE_TIME) {
 +                      /* XXX another gem from the Broadcom driver :( */
 +                      ap->state = ANEG_STATE_LINK_OK;
 +              }
 +              break;
 +
 +      case ANEG_STATE_LINK_OK:
 +              ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
 +              ret = ANEG_DONE;
 +              break;
 +
 +      case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
 +              /* ??? unimplemented */
 +              break;
 +
 +      case ANEG_STATE_NEXT_PAGE_WAIT:
 +              /* ??? unimplemented */
 +              break;
 +
 +      default:
 +              ret = ANEG_FAILED;
 +              break;
 +      }
 +
 +      return ret;
 +}
 +
 +static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
 +{
 +      int res = 0;
 +      struct tg3_fiber_aneginfo aninfo;
 +      int status = ANEG_FAILED;
 +      unsigned int tick;
 +      u32 tmp;
 +
 +      tw32_f(MAC_TX_AUTO_NEG, 0);
 +
 +      tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
 +      tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
 +      udelay(40);
 +
 +      tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
 +      udelay(40);
 +
 +      memset(&aninfo, 0, sizeof(aninfo));
 +      aninfo.flags |= MR_AN_ENABLE;
 +      aninfo.state = ANEG_STATE_UNKNOWN;
 +      aninfo.cur_time = 0;
 +      tick = 0;
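 +      /* Step the state machine once per microsecond, for up to ~195 ms. */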
 +      while (++tick < 195000) {
 +              status = tg3_fiber_aneg_smachine(tp, &aninfo);
 +              if (status == ANEG_DONE || status == ANEG_FAILED)
 +                      break;
 +
 +              udelay(1);
 +      }
 +
 +      tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
 +      tw32_f(MAC_MODE, tp->mac_mode);
 +      udelay(40);
 +
 +      *txflags = aninfo.txconfig;
 +      *rxflags = aninfo.flags;
 +
 +      if (status == ANEG_DONE &&
 +          (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
 +                           MR_LP_ADV_FULL_DUPLEX)))
 +              res = 1;
 +
 +      return res;
 +}
 +
 +static void tg3_init_bcm8002(struct tg3 *tp)
 +{
 +      u32 mac_status = tr32(MAC_STATUS);
 +      int i;
 +
 +      /* Reset when initting first time or we have a link. */
 +      if (tg3_flag(tp, INIT_COMPLETE) &&
 +          !(mac_status & MAC_STATUS_PCS_SYNCED))
 +              return;
 +
 +      /* Set PLL lock range. */
 +      tg3_writephy(tp, 0x16, 0x8007);
 +
 +      /* SW reset */
 +      tg3_writephy(tp, MII_BMCR, BMCR_RESET);
 +
 +      /* Wait for reset to complete. */
 +      /* XXX schedule_timeout() ... */
 +      for (i = 0; i < 500; i++)
 +              udelay(10);
 +
 +      /* Config mode; select PMA/Ch 1 regs. */
 +      tg3_writephy(tp, 0x10, 0x8411);
 +
 +      /* Enable auto-lock and comdet, select txclk for tx. */
 +      tg3_writephy(tp, 0x11, 0x0a10);
 +
 +      tg3_writephy(tp, 0x18, 0x00a0);
 +      tg3_writephy(tp, 0x16, 0x41ff);
 +
 +      /* Assert and deassert POR. */
 +      tg3_writephy(tp, 0x13, 0x0400);
 +      udelay(40);
 +      tg3_writephy(tp, 0x13, 0x0000);
 +
 +      tg3_writephy(tp, 0x11, 0x0a50);
 +      udelay(40);
 +      tg3_writephy(tp, 0x11, 0x0a10);
 +
 +      /* Wait for signal to stabilize */
 +      /* XXX schedule_timeout() ... */
 +      for (i = 0; i < 15000; i++)
 +              udelay(10);
 +
 +      /* Deselect the channel register so we can read the PHYID
 +       * later.
 +       */
 +      tg3_writephy(tp, 0x10, 0x8011);
 +}
 +
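 +/* Fiber link setup using the hardware SG-DIG autonegotiation block. */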
 +static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
 +{
 +      u16 flowctrl;
 +      u32 sg_dig_ctrl, sg_dig_status;
 +      u32 serdes_cfg, expected_sg_dig_ctrl;
 +      int workaround, port_a;
 +      int current_link_up;
 +
 +      serdes_cfg = 0;
 +      expected_sg_dig_ctrl = 0;
 +      workaround = 0;
 +      port_a = 1;
 +      current_link_up = 0;
 +
 +      if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
 +          tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
 +              workaround = 1;
 +              if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
 +                      port_a = 0;
 +
 +              /* preserve bits 0-11,13,14 for signal pre-emphasis */
 +              /* preserve bits 20-23 for voltage regulator */
 +              serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
 +      }
 +
 +      sg_dig_ctrl = tr32(SG_DIG_CTRL);
 +
 +      if (tp->link_config.autoneg != AUTONEG_ENABLE) {
 +              if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
 +                      if (workaround) {
 +                              u32 val = serdes_cfg;
 +
 +                              if (port_a)
 +                                      val |= 0xc010000;
 +                              else
 +                                      val |= 0x4010000;
 +                              tw32_f(MAC_SERDES_CFG, val);
 +                      }
 +
 +                      tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
 +              }
 +              if (mac_status & MAC_STATUS_PCS_SYNCED) {
 +                      tg3_setup_flow_control(tp, 0, 0);
 +                      current_link_up = 1;
 +              }
 +              goto out;
 +      }
 +
 +      /* Want auto-negotiation.  */
 +      expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
 +
 +      flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 +      if (flowctrl & ADVERTISE_1000XPAUSE)
 +              expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
 +      if (flowctrl & ADVERTISE_1000XPSE_ASYM)
 +              expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
 +
 +      if (sg_dig_ctrl != expected_sg_dig_ctrl) {
 +              if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
 +                  tp->serdes_counter &&
 +                  ((mac_status & (MAC_STATUS_PCS_SYNCED |
 +                                  MAC_STATUS_RCVD_CFG)) ==
 +                   MAC_STATUS_PCS_SYNCED)) {
 +                      tp->serdes_counter--;
 +                      current_link_up = 1;
 +                      goto out;
 +              }
 +restart_autoneg:
 +              if (workaround)
 +                      tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
 +              tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
 +              udelay(5);
 +              tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
 +
 +              tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
 +              tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 +      } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
 +                               MAC_STATUS_SIGNAL_DET)) {
 +              sg_dig_status = tr32(SG_DIG_STATUS);
 +              mac_status = tr32(MAC_STATUS);
 +
 +              if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
 +                  (mac_status & MAC_STATUS_PCS_SYNCED)) {
 +                      u32 local_adv = 0, remote_adv = 0;
 +
 +                      if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
 +                              local_adv |= ADVERTISE_1000XPAUSE;
 +                      if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
 +                              local_adv |= ADVERTISE_1000XPSE_ASYM;
 +
 +                      if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
 +                              remote_adv |= LPA_1000XPAUSE;
 +                      if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
 +                              remote_adv |= LPA_1000XPAUSE_ASYM;
 +
 +                      tg3_setup_flow_control(tp, local_adv, remote_adv);
 +                      current_link_up = 1;
 +                      tp->serdes_counter = 0;
 +                      tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 +              } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
 +                      if (tp->serdes_counter)
 +                              tp->serdes_counter--;
 +                      else {
 +                              if (workaround) {
 +                                      u32 val = serdes_cfg;
 +
 +                                      if (port_a)
 +                                              val |= 0xc010000;
 +                                      else
 +                                              val |= 0x4010000;
 +
 +                                      tw32_f(MAC_SERDES_CFG, val);
 +                              }
 +
 +                              tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
 +                              udelay(40);
 +
 +                              /* Link parallel detection - link is up
 +                               * only if we have PCS_SYNC and not
 +                               * receiving config code words.
 +                               */
 +                              mac_status = tr32(MAC_STATUS);
 +                              if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
 +                                  !(mac_status & MAC_STATUS_RCVD_CFG)) {
 +                                      tg3_setup_flow_control(tp, 0, 0);
 +                                      current_link_up = 1;
 +                                      tp->phy_flags |=
 +                                              TG3_PHYFLG_PARALLEL_DETECT;
 +                                      tp->serdes_counter =
 +                                              SERDES_PARALLEL_DET_TIMEOUT;
 +                              } else
 +                                      goto restart_autoneg;
 +                      }
 +              }
 +      } else {
 +              tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
 +              tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 +      }
 +
 +out:
 +      return current_link_up;
 +}
 +
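 +/* Fiber link setup via the software autoneg state machine, or forced 1000-full. */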
 +static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
 +{
 +      int current_link_up = 0;
 +
 +      if (!(mac_status & MAC_STATUS_PCS_SYNCED))
 +              goto out;
 +
 +      if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 +              u32 txflags, rxflags;
 +              int i;
 +
 +              if (fiber_autoneg(tp, &txflags, &rxflags)) {
 +                      u32 local_adv = 0, remote_adv = 0;
 +
 +                      if (txflags & ANEG_CFG_PS1)
 +                              local_adv |= ADVERTISE_1000XPAUSE;
 +                      if (txflags & ANEG_CFG_PS2)
 +                              local_adv |= ADVERTISE_1000XPSE_ASYM;
 +
 +                      if (rxflags & MR_LP_ADV_SYM_PAUSE)
 +                              remote_adv |= LPA_1000XPAUSE;
 +                      if (rxflags & MR_LP_ADV_ASYM_PAUSE)
 +                              remote_adv |= LPA_1000XPAUSE_ASYM;
 +
 +                      tg3_setup_flow_control(tp, local_adv, remote_adv);
 +
 +                      current_link_up = 1;
 +              }
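 +              /* Clear the latched SYNC/CFG change bits and wait for them to stay clear. */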
 +              for (i = 0; i < 30; i++) {
 +                      udelay(20);
 +                      tw32_f(MAC_STATUS,
 +                             (MAC_STATUS_SYNC_CHANGED |
 +                              MAC_STATUS_CFG_CHANGED));
 +                      udelay(40);
 +                      if ((tr32(MAC_STATUS) &
 +                           (MAC_STATUS_SYNC_CHANGED |
 +                            MAC_STATUS_CFG_CHANGED)) == 0)
 +                              break;
 +              }
 +
 +              mac_status = tr32(MAC_STATUS);
 +              if (current_link_up == 0 &&
 +                  (mac_status & MAC_STATUS_PCS_SYNCED) &&
 +                  !(mac_status & MAC_STATUS_RCVD_CFG))
 +                      current_link_up = 1;
 +      } else {
 +              tg3_setup_flow_control(tp, 0, 0);
 +
 +              /* Forcing 1000FD link up. */
 +              current_link_up = 1;
 +
 +              tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
 +              udelay(40);
 +
 +              tw32_f(MAC_MODE, tp->mac_mode);
 +              udelay(40);
 +      }
 +
 +out:
 +      return current_link_up;
 +}
 +
 +static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
 +{
 +      u32 orig_pause_cfg;
 +      u16 orig_active_speed;
 +      u8 orig_active_duplex;
 +      u32 mac_status;
 +      int current_link_up;
 +      int i;
 +
 +      orig_pause_cfg = tp->link_config.active_flowctrl;
 +      orig_active_speed = tp->link_config.active_speed;
 +      orig_active_duplex = tp->link_config.active_duplex;
 +
 +      if (!tg3_flag(tp, HW_AUTONEG) &&
 +          netif_carrier_ok(tp->dev) &&
 +          tg3_flag(tp, INIT_COMPLETE)) {
 +              mac_status = tr32(MAC_STATUS);
 +              mac_status &= (MAC_STATUS_PCS_SYNCED |
 +                             MAC_STATUS_SIGNAL_DET |
 +                             MAC_STATUS_CFG_CHANGED |
 +                             MAC_STATUS_RCVD_CFG);
 +              if (mac_status == (MAC_STATUS_PCS_SYNCED |
 +                                 MAC_STATUS_SIGNAL_DET)) {
 +                      tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
 +                                          MAC_STATUS_CFG_CHANGED));
 +                      return 0;
 +              }
 +      }
 +
 +      tw32_f(MAC_TX_AUTO_NEG, 0);
 +
 +      tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
 +      tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
 +      tw32_f(MAC_MODE, tp->mac_mode);
 +      udelay(40);
 +
 +      if (tp->phy_id == TG3_PHY_ID_BCM8002)
 +              tg3_init_bcm8002(tp);
 +
 +      /* Enable link change event even when serdes polling.  */
 +      tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 +      udelay(40);
 +
 +      current_link_up = 0;
 +      mac_status = tr32(MAC_STATUS);
 +
 +      if (tg3_flag(tp, HW_AUTONEG))
 +              current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
 +      else
 +              current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
 +
 +      tp->napi[0].hw_status->status =
 +              (SD_STATUS_UPDATED |
 +               (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
 +
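 +      /* Clear any latched status-change bits before sampling the final link state. */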
 +      for (i = 0; i < 100; i++) {
 +              tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
 +                                  MAC_STATUS_CFG_CHANGED));
 +              udelay(5);
 +              if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
 +                                       MAC_STATUS_CFG_CHANGED |
 +                                       MAC_STATUS_LNKSTATE_CHANGED)) == 0)
 +                      break;
 +      }
 +
 +      mac_status = tr32(MAC_STATUS);
 +      if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
 +              current_link_up = 0;
 +              if (tp->link_config.autoneg == AUTONEG_ENABLE &&
 +                  tp->serdes_counter == 0) {
 +                      tw32_f(MAC_MODE, (tp->mac_mode |
 +                                        MAC_MODE_SEND_CONFIGS));
 +                      udelay(1);
 +                      tw32_f(MAC_MODE, tp->mac_mode);
 +              }
 +      }
 +
 +      if (current_link_up == 1) {
 +              tp->link_config.active_speed = SPEED_1000;
 +              tp->link_config.active_duplex = DUPLEX_FULL;
 +              tw32(MAC_LED_CTRL, (tp->led_ctrl |
 +                                  LED_CTRL_LNKLED_OVERRIDE |
 +                                  LED_CTRL_1000MBPS_ON));
 +      } else {
 +              tp->link_config.active_speed = SPEED_INVALID;
 +              tp->link_config.active_duplex = DUPLEX_INVALID;
 +              tw32(MAC_LED_CTRL, (tp->led_ctrl |
 +                                  LED_CTRL_LNKLED_OVERRIDE |
 +                                  LED_CTRL_TRAFFIC_OVERRIDE));
 +      }
 +
 +      if (current_link_up != netif_carrier_ok(tp->dev)) {
 +              if (current_link_up)
 +                      netif_carrier_on(tp->dev);
 +              else
 +                      netif_carrier_off(tp->dev);
 +              tg3_link_report(tp);
 +      } else {
 +              u32 now_pause_cfg = tp->link_config.active_flowctrl;
 +              if (orig_pause_cfg != now_pause_cfg ||
 +                  orig_active_speed != tp->link_config.active_speed ||
 +                  orig_active_duplex != tp->link_config.active_duplex)
 +                      tg3_link_report(tp);
 +      }
 +
 +      return 0;
 +}
 +
 +static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
 +{
 +      int current_link_up, err = 0;
 +      u32 bmsr, bmcr;
 +      u16 current_speed;
 +      u8 current_duplex;
 +      u32 local_adv, remote_adv;
 +
 +      tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 +      tw32_f(MAC_MODE, tp->mac_mode);
 +      udelay(40);
 +
 +      tw32(MAC_EVENT, 0);
 +
 +      tw32_f(MAC_STATUS,
 +           (MAC_STATUS_SYNC_CHANGED |
 +            MAC_STATUS_CFG_CHANGED |
 +            MAC_STATUS_MI_COMPLETION |
 +            MAC_STATUS_LNKSTATE_CHANGED));
 +      udelay(40);
 +
 +      if (force_reset)
 +              tg3_phy_reset(tp);
 +
 +      current_link_up = 0;
 +      current_speed = SPEED_INVALID;
 +      current_duplex = DUPLEX_INVALID;
 +
 +      err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 +      err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 +              if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 +                      bmsr |= BMSR_LSTATUS;
 +              else
 +                      bmsr &= ~BMSR_LSTATUS;
 +      }
 +
 +      err |= tg3_readphy(tp, MII_BMCR, &bmcr);
 +
 +      if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
 +          (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 +              /* do nothing, just check for link up at the end */
 +      } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 +              u32 adv, new_adv;
 +
 +              err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
 +              new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
 +                                ADVERTISE_1000XPAUSE |
 +                                ADVERTISE_1000XPSE_ASYM |
 +                                ADVERTISE_SLCT);
 +
 +              new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 +
 +              if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
 +                      new_adv |= ADVERTISE_1000XHALF;
 +              if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
 +                      new_adv |= ADVERTISE_1000XFULL;
 +
 +              if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
 +                      tg3_writephy(tp, MII_ADVERTISE, new_adv);
 +                      bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
 +                      tg3_writephy(tp, MII_BMCR, bmcr);
 +
 +                      tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 +                      tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
 +                      tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 +
 +                      return err;
 +              }
 +      } else {
 +              u32 new_bmcr;
 +
 +              bmcr &= ~BMCR_SPEED1000;
 +              new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
 +
 +              if (tp->link_config.duplex == DUPLEX_FULL)
 +                      new_bmcr |= BMCR_FULLDPLX;
 +
 +              if (new_bmcr != bmcr) {
 +                      /* BMCR_SPEED1000 is a reserved bit that needs
 +                       * to be set on write.
 +                       */
 +                      new_bmcr |= BMCR_SPEED1000;
 +
 +                      /* Force a linkdown */
 +                      if (netif_carrier_ok(tp->dev)) {
 +                              u32 adv;
 +
 +                              err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
 +                              adv &= ~(ADVERTISE_1000XFULL |
 +                                       ADVERTISE_1000XHALF |
 +                                       ADVERTISE_SLCT);
 +                              tg3_writephy(tp, MII_ADVERTISE, adv);
 +                              tg3_writephy(tp, MII_BMCR, bmcr |
 +                                                         BMCR_ANRESTART |
 +                                                         BMCR_ANENABLE);
 +                              udelay(10);
 +                              netif_carrier_off(tp->dev);
 +                      }
 +                      tg3_writephy(tp, MII_BMCR, new_bmcr);
 +                      bmcr = new_bmcr;
 +                      err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 +                      err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 +                      if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
 +                          ASIC_REV_5714) {
 +                              if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 +                                      bmsr |= BMSR_LSTATUS;
 +                              else
 +                                      bmsr &= ~BMSR_LSTATUS;
 +                      }
 +                      tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 +              }
 +      }
 +
 +      if (bmsr & BMSR_LSTATUS) {
 +              current_speed = SPEED_1000;
 +              current_link_up = 1;
 +              if (bmcr & BMCR_FULLDPLX)
 +                      current_duplex = DUPLEX_FULL;
 +              else
 +                      current_duplex = DUPLEX_HALF;
 +
 +              local_adv = 0;
 +              remote_adv = 0;
 +
 +              if (bmcr & BMCR_ANENABLE) {
 +                      u32 common;
 +
 +                      err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
 +                      err |= tg3_readphy(tp, MII_LPA, &remote_adv);
 +                      common = local_adv & remote_adv;
 +                      if (common & (ADVERTISE_1000XHALF |
 +                                    ADVERTISE_1000XFULL)) {
 +                              if (common & ADVERTISE_1000XFULL)
 +                                      current_duplex = DUPLEX_FULL;
 +                              else
 +                                      current_duplex = DUPLEX_HALF;
 +                      } else if (!tg3_flag(tp, 5780_CLASS)) {
 +                              /* Link is up via parallel detect */
 +                      } else {
 +                              current_link_up = 0;
 +                      }
 +              }
 +      }
 +
 +      if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
 +              tg3_setup_flow_control(tp, local_adv, remote_adv);
 +
 +      tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
 +      if (tp->link_config.active_duplex == DUPLEX_HALF)
 +              tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
 +
 +      tw32_f(MAC_MODE, tp->mac_mode);
 +      udelay(40);
 +
 +      tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 +
 +      tp->link_config.active_speed = current_speed;
 +      tp->link_config.active_duplex = current_duplex;
 +
 +      if (current_link_up != netif_carrier_ok(tp->dev)) {
 +              if (current_link_up)
 +                      netif_carrier_on(tp->dev);
 +              else {
 +                      netif_carrier_off(tp->dev);
 +                      tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 +              }
 +              tg3_link_report(tp);
 +      }
 +      return err;
 +}
 +
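 +/* SerDes parallel detection: once the autoneg wait has expired, force
 + * the link up if signal detect is asserted but no config code words
 + * are being received; re-enable autoneg when config code words
 + * reappear.
 + */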
 +static void tg3_serdes_parallel_detect(struct tg3 *tp)
 +{
 +      if (tp->serdes_counter) {
 +              /* Give autoneg time to complete. */
 +              tp->serdes_counter--;
 +              return;
 +      }
 +
 +      if (!netif_carrier_ok(tp->dev) &&
 +          (tp->link_config.autoneg == AUTONEG_ENABLE)) {
 +              u32 bmcr;
 +
 +              tg3_readphy(tp, MII_BMCR, &bmcr);
 +              if (bmcr & BMCR_ANENABLE) {
 +                      u32 phy1, phy2;
 +
 +                      /* Select shadow register 0x1f */
 +                      tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
 +                      tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
 +
 +                      /* Select expansion interrupt status register */
 +                      tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 +                                       MII_TG3_DSP_EXP1_INT_STAT);
 +                      tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 +                      tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 +
 +                      if ((phy1 & 0x10) && !(phy2 & 0x20)) {
 +                              /* We have signal detect and are not
 +                               * receiving config code words; the link
 +                               * is up via parallel detection.
 +                               */
 +
 +                              bmcr &= ~BMCR_ANENABLE;
 +                              bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
 +                              tg3_writephy(tp, MII_BMCR, bmcr);
 +                              tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
 +                      }
 +              }
 +      } else if (netif_carrier_ok(tp->dev) &&
 +                 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
 +                 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 +              u32 phy2;
 +
 +              /* Select expansion interrupt status register */
 +              tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 +                               MII_TG3_DSP_EXP1_INT_STAT);
 +              tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 +              if (phy2 & 0x20) {
 +                      u32 bmcr;
 +
 +                      /* Config code words received, turn on autoneg. */
 +                      tg3_readphy(tp, MII_BMCR, &bmcr);
 +                      tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
 +
 +                      tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 +
 +              }
 +      }
 +}
 +
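 +/* Top-level link setup: dispatch to the fiber, fiber-MII or copper
 + * handler, then update the MAC timing, statistics coalescing and
 + * power management settings that depend on the resulting link state.
 + */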
 +static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 +{
 +      u32 val;
 +      int err;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 +              err = tg3_setup_fiber_phy(tp, force_reset);
 +      else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 +              err = tg3_setup_fiber_mii_phy(tp, force_reset);
 +      else
 +              err = tg3_setup_copper_phy(tp, force_reset);
 +
 +      if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
 +              u32 scale;
 +
 +              val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
 +              if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
 +                      scale = 65;
 +              else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
 +                      scale = 6;
 +              else
 +                      scale = 12;
 +
 +              val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
 +              val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
 +              tw32(GRC_MISC_CFG, val);
 +      }
 +
 +      val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 +            (6 << TX_LENGTHS_IPG_SHIFT);
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              val |= tr32(MAC_TX_LENGTHS) &
 +                     (TX_LENGTHS_JMB_FRM_LEN_MSK |
 +                      TX_LENGTHS_CNT_DWN_VAL_MSK);
 +
 +      if (tp->link_config.active_speed == SPEED_1000 &&
 +          tp->link_config.active_duplex == DUPLEX_HALF)
 +              tw32(MAC_TX_LENGTHS, val |
 +                   (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
 +      else
 +              tw32(MAC_TX_LENGTHS, val |
 +                   (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
 +
 +      if (!tg3_flag(tp, 5705_PLUS)) {
 +              if (netif_carrier_ok(tp->dev)) {
 +                      tw32(HOSTCC_STAT_COAL_TICKS,
 +                           tp->coal.stats_block_coalesce_usecs);
 +              } else {
 +                      tw32(HOSTCC_STAT_COAL_TICKS, 0);
 +              }
 +      }
 +
 +      if (tg3_flag(tp, ASPM_WORKAROUND)) {
 +              val = tr32(PCIE_PWR_MGMT_THRESH);
 +              if (!netif_carrier_ok(tp->dev))
 +                      val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
 +                            tp->pwrmgmt_thresh;
 +              else
 +                      val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
 +              tw32(PCIE_PWR_MGMT_THRESH, val);
 +      }
 +
 +      return err;
 +}
 +
 +static inline int tg3_irq_sync(struct tg3 *tp)
 +{
 +      return tp->irq_sync;
 +}
 +
 +static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
 +{
 +      int i;
 +
 +      dst = (u32 *)((u8 *)dst + off);
 +      for (i = 0; i < len; i += sizeof(u32))
 +              *dst++ = tr32(off + i);
 +}
 +
 +static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
 +{
 +      tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
 +      tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
 +      tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
 +      tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
 +      tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
 +      tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
 +      tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
 +      tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
 +      tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
 +      tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
 +      tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
 +      tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
 +      tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
 +      tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
 +      tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
 +      tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
 +
 +      if (tg3_flag(tp, SUPPORT_MSIX))
 +              tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
 +
 +      tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
 +      tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
 +      tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
 +      tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
 +      tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
 +      tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
 +      tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
 +
 +      if (!tg3_flag(tp, 5705_PLUS)) {
 +              tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
 +              tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
 +              tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
 +      }
 +
 +      tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
 +      tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
 +      tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
 +      tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
 +
 +      if (tg3_flag(tp, NVRAM))
 +              tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
 +}
 +
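 +/* Dump chip registers and per-vector status block / NAPI state to the
 + * kernel log for debugging.
 + */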
 +static void tg3_dump_state(struct tg3 *tp)
 +{
 +      int i;
 +      u32 *regs;
 +
 +      regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
 +      if (!regs) {
 +              netdev_err(tp->dev, "Failed allocating register dump buffer\n");
 +              return;
 +      }
 +
 +      if (tg3_flag(tp, PCI_EXPRESS)) {
 +              /* Read up to but not including private PCI registers */
 +              for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
 +                      regs[i / sizeof(u32)] = tr32(i);
 +      } else
 +              tg3_dump_legacy_regs(tp, regs);
 +
 +      for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
 +              if (!regs[i + 0] && !regs[i + 1] &&
 +                  !regs[i + 2] && !regs[i + 3])
 +                      continue;
 +
 +              netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
 +                         i * 4,
 +                         regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
 +      }
 +
 +      kfree(regs);
 +
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +
 +              /* SW status block */
 +              netdev_err(tp->dev,
 +                       "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
 +                         i,
 +                         tnapi->hw_status->status,
 +                         tnapi->hw_status->status_tag,
 +                         tnapi->hw_status->rx_jumbo_consumer,
 +                         tnapi->hw_status->rx_consumer,
 +                         tnapi->hw_status->rx_mini_consumer,
 +                         tnapi->hw_status->idx[0].rx_producer,
 +                         tnapi->hw_status->idx[0].tx_consumer);
 +
 +              netdev_err(tp->dev,
 +              "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
 +                         i,
 +                         tnapi->last_tag, tnapi->last_irq_tag,
 +                         tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
 +                         tnapi->rx_rcb_ptr,
 +                         tnapi->prodring.rx_std_prod_idx,
 +                         tnapi->prodring.rx_std_cons_idx,
 +                         tnapi->prodring.rx_jmb_prod_idx,
 +                         tnapi->prodring.rx_jmb_cons_idx);
 +      }
 +}
 +
 +/* This is called whenever we suspect that the system chipset is re-
 + * ordering the sequence of MMIO to the tx send mailbox. The symptom
 + * is bogus tx completions. We try to recover by setting the
 + * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 + * in the workqueue.
 + */
 +static void tg3_tx_recover(struct tg3 *tp)
 +{
 +      BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
 +             tp->write32_tx_mbox == tg3_write_indirect_mbox);
 +
 +      netdev_warn(tp->dev,
 +                  "The system may be re-ordering memory-mapped I/O "
 +                  "cycles to the network device, attempting to recover. "
 +                  "Please report the problem to the driver maintainer "
 +                  "and include system chipset information.\n");
 +
 +      spin_lock(&tp->lock);
 +      tg3_flag_set(tp, TX_RECOVERY_PENDING);
 +      spin_unlock(&tp->lock);
 +}
 +
 +static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
 +{
 +      /* Tell compiler to fetch tx indices from memory. */
 +      barrier();
 +      return tnapi->tx_pending -
 +             ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
 +}
 +
 +/* Tigon3 never reports partial packet sends.  So we do not
 + * need special logic to handle SKBs that have not had all
 + * of their frags sent yet, like SunGEM does.
 + */
 +static void tg3_tx(struct tg3_napi *tnapi)
 +{
 +      struct tg3 *tp = tnapi->tp;
 +      u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
 +      u32 sw_idx = tnapi->tx_cons;
 +      struct netdev_queue *txq;
 +      int index = tnapi - tp->napi;
 +
 +      if (tg3_flag(tp, ENABLE_TSS))
 +              index--;
 +
 +      txq = netdev_get_tx_queue(tp->dev, index);
 +
 +      while (sw_idx != hw_idx) {
 +              struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
 +              struct sk_buff *skb = ri->skb;
 +              int i, tx_bug = 0;
 +
 +              if (unlikely(skb == NULL)) {
 +                      tg3_tx_recover(tp);
 +                      return;
 +              }
 +
 +              pci_unmap_single(tp->pdev,
 +                               dma_unmap_addr(ri, mapping),
 +                               skb_headlen(skb),
 +                               PCI_DMA_TODEVICE);
 +
 +              ri->skb = NULL;
 +
 +              while (ri->fragmented) {
 +                      ri->fragmented = false;
 +                      sw_idx = NEXT_TX(sw_idx);
 +                      ri = &tnapi->tx_buffers[sw_idx];
 +              }
 +
 +              sw_idx = NEXT_TX(sw_idx);
 +
 +              for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 +                      ri = &tnapi->tx_buffers[sw_idx];
 +                      if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 +                              tx_bug = 1;
 +
 +                      pci_unmap_page(tp->pdev,
 +                                     dma_unmap_addr(ri, mapping),
 +                                     skb_shinfo(skb)->frags[i].size,
 +                                     PCI_DMA_TODEVICE);
 +
 +                      while (ri->fragmented) {
 +                              ri->fragmented = false;
 +                              sw_idx = NEXT_TX(sw_idx);
 +                              ri = &tnapi->tx_buffers[sw_idx];
 +                      }
 +
 +                      sw_idx = NEXT_TX(sw_idx);
 +              }
 +
 +              dev_kfree_skb(skb);
 +
 +              if (unlikely(tx_bug)) {
 +                      tg3_tx_recover(tp);
 +                      return;
 +              }
 +      }
 +
 +      tnapi->tx_cons = sw_idx;
 +
 +      /* Need to make the tx_cons update visible to tg3_start_xmit()
 +       * before checking for netif_queue_stopped().  Without the
 +       * memory barrier, there is a small possibility that tg3_start_xmit()
 +       * will miss it and cause the queue to be stopped forever.
 +       */
 +      smp_mb();
 +
 +      if (unlikely(netif_tx_queue_stopped(txq) &&
 +                   (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
 +              __netif_tx_lock(txq, smp_processor_id());
 +              if (netif_tx_queue_stopped(txq) &&
 +                  (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
 +                      netif_tx_wake_queue(txq);
 +              __netif_tx_unlock(txq);
 +      }
 +}
 +
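 +/* Unmap and free the skb attached to an RX ring slot, if any. */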
 +static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 +{
 +      if (!ri->skb)
 +              return;
 +
 +      pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
 +                       map_sz, PCI_DMA_FROMDEVICE);
 +      dev_kfree_skb_any(ri->skb);
 +      ri->skb = NULL;
 +}
 +
 +/* Returns size of skb allocated or < 0 on error.
 + *
 + * We only need to fill in the address because the other members
 + * of the RX descriptor are invariant, see tg3_init_rings.
 + *
 + * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 + * posting buffers we only dirty the first cache line of the RX
 + * descriptor (containing the address).  Whereas for the RX status
 + * buffers the cpu only reads the last cacheline of the RX descriptor
 + * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 + */
 +static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 +                          u32 opaque_key, u32 dest_idx_unmasked)
 +{
 +      struct tg3_rx_buffer_desc *desc;
 +      struct ring_info *map;
 +      struct sk_buff *skb;
 +      dma_addr_t mapping;
 +      int skb_size, dest_idx;
 +
 +      switch (opaque_key) {
 +      case RXD_OPAQUE_RING_STD:
 +              dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 +              desc = &tpr->rx_std[dest_idx];
 +              map = &tpr->rx_std_buffers[dest_idx];
 +              skb_size = tp->rx_pkt_map_sz;
 +              break;
 +
 +      case RXD_OPAQUE_RING_JUMBO:
 +              dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 +              desc = &tpr->rx_jmb[dest_idx].std;
 +              map = &tpr->rx_jmb_buffers[dest_idx];
 +              skb_size = TG3_RX_JMB_MAP_SZ;
 +              break;
 +
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      /* Do not overwrite any of the map or rp information
 +       * until we are sure we can commit to a new buffer.
 +       *
 +       * Callers depend upon this behavior and assume that
 +       * we leave everything unchanged if we fail.
 +       */
 +      skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
 +      if (skb == NULL)
 +              return -ENOMEM;
 +
 +      skb_reserve(skb, TG3_RX_OFFSET(tp));
 +
 +      mapping = pci_map_single(tp->pdev, skb->data, skb_size,
 +                               PCI_DMA_FROMDEVICE);
 +      if (pci_dma_mapping_error(tp->pdev, mapping)) {
 +              dev_kfree_skb(skb);
 +              return -EIO;
 +      }
 +
 +      map->skb = skb;
 +      dma_unmap_addr_set(map, mapping, mapping);
 +
 +      desc->addr_hi = ((u64)mapping >> 32);
 +      desc->addr_lo = ((u64)mapping & 0xffffffff);
 +
 +      return skb_size;
 +}
 +
 +/* We only need to move over in the address because the other
 + * members of the RX descriptor are invariant.  See notes above
 + * tg3_alloc_rx_skb for full details.
 + */
 +static void tg3_recycle_rx(struct tg3_napi *tnapi,
 +                         struct tg3_rx_prodring_set *dpr,
 +                         u32 opaque_key, int src_idx,
 +                         u32 dest_idx_unmasked)
 +{
 +      struct tg3 *tp = tnapi->tp;
 +      struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 +      struct ring_info *src_map, *dest_map;
 +      struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
 +      int dest_idx;
 +
 +      switch (opaque_key) {
 +      case RXD_OPAQUE_RING_STD:
 +              dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 +              dest_desc = &dpr->rx_std[dest_idx];
 +              dest_map = &dpr->rx_std_buffers[dest_idx];
 +              src_desc = &spr->rx_std[src_idx];
 +              src_map = &spr->rx_std_buffers[src_idx];
 +              break;
 +
 +      case RXD_OPAQUE_RING_JUMBO:
 +              dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 +              dest_desc = &dpr->rx_jmb[dest_idx].std;
 +              dest_map = &dpr->rx_jmb_buffers[dest_idx];
 +              src_desc = &spr->rx_jmb[src_idx].std;
 +              src_map = &spr->rx_jmb_buffers[src_idx];
 +              break;
 +
 +      default:
 +              return;
 +      }
 +
 +      dest_map->skb = src_map->skb;
 +      dma_unmap_addr_set(dest_map, mapping,
 +                         dma_unmap_addr(src_map, mapping));
 +      dest_desc->addr_hi = src_desc->addr_hi;
 +      dest_desc->addr_lo = src_desc->addr_lo;
 +
 +      /* Ensure that the update to the skb happens after the physical
 +       * addresses have been transferred to the new BD location.
 +       */
 +      smp_wmb();
 +
 +      src_map->skb = NULL;
 +}
 +
 +/* The RX ring scheme is composed of multiple rings which post fresh
 + * buffers to the chip, and one special ring the chip uses to report
 + * status back to the host.
 + *
 + * The special ring reports the status of received packets to the
 + * host.  The chip does not write into the original descriptor the
 + * RX buffer was obtained from.  The chip simply takes the original
 + * descriptor as provided by the host, updates the status and length
 + * field, then writes this into the next status ring entry.
 + *
 + * Each ring the host uses to post buffers to the chip is described
 + * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 + * it is first placed into the on-chip RAM.  When the packet's length
 + * is known, it walks down the TG3_BDINFO entries to select the ring.
 + * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 + * which is within the range of the new packet's length is chosen.
 + *
 + * The "separate ring for rx status" scheme may sound queer, but it makes
 + * sense from a cache coherency perspective.  If only the host writes
 + * to the buffer post rings, and only the chip writes to the rx status
 + * rings, then cache lines never move beyond shared-modified state.
 + * If both the host and chip were to write into the same ring, cache line
 + * eviction could occur since both entities want it in an exclusive state.
 + */
 +static int tg3_rx(struct tg3_napi *tnapi, int budget)
 +{
 +      struct tg3 *tp = tnapi->tp;
 +      u32 work_mask, rx_std_posted = 0;
 +      u32 std_prod_idx, jmb_prod_idx;
 +      u32 sw_idx = tnapi->rx_rcb_ptr;
 +      u16 hw_idx;
 +      int received;
 +      struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
 +
 +      hw_idx = *(tnapi->rx_rcb_prod_idx);
 +      /*
 +       * We need to order the read of hw_idx and the read of
 +       * the opaque cookie.
 +       */
 +      rmb();
 +      work_mask = 0;
 +      received = 0;
 +      std_prod_idx = tpr->rx_std_prod_idx;
 +      jmb_prod_idx = tpr->rx_jmb_prod_idx;
 +      while (sw_idx != hw_idx && budget > 0) {
 +              struct ring_info *ri;
 +              struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
 +              unsigned int len;
 +              struct sk_buff *skb;
 +              dma_addr_t dma_addr;
 +              u32 opaque_key, desc_idx, *post_ptr;
 +
 +              desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 +              opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 +              if (opaque_key == RXD_OPAQUE_RING_STD) {
 +                      ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
 +                      dma_addr = dma_unmap_addr(ri, mapping);
 +                      skb = ri->skb;
 +                      post_ptr = &std_prod_idx;
 +                      rx_std_posted++;
 +              } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 +                      ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
 +                      dma_addr = dma_unmap_addr(ri, mapping);
 +                      skb = ri->skb;
 +                      post_ptr = &jmb_prod_idx;
 +              } else
 +                      goto next_pkt_nopost;
 +
 +              work_mask |= opaque_key;
 +
 +              if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 +                  (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
 +              drop_it:
 +                      tg3_recycle_rx(tnapi, tpr, opaque_key,
 +                                     desc_idx, *post_ptr);
 +              drop_it_no_recycle:
 +                      /* Other statistics are tracked by the card itself. */
 +                      tp->rx_dropped++;
 +                      goto next_pkt;
 +              }
 +
 +              len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
 +                    ETH_FCS_LEN;
 +
 +              if (len > TG3_RX_COPY_THRESH(tp)) {
 +                      int skb_size;
 +
 +                      skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
 +                                                  *post_ptr);
 +                      if (skb_size < 0)
 +                              goto drop_it;
 +
 +                      pci_unmap_single(tp->pdev, dma_addr, skb_size,
 +                                       PCI_DMA_FROMDEVICE);
 +
 +                      /* Ensure that the update to the skb happens
 +                       * after the usage of the old DMA mapping.
 +                       */
 +                      smp_wmb();
 +
 +                      ri->skb = NULL;
 +
 +                      skb_put(skb, len);
 +              } else {
 +                      struct sk_buff *copy_skb;
 +
 +                      tg3_recycle_rx(tnapi, tpr, opaque_key,
 +                                     desc_idx, *post_ptr);
 +
 +                      copy_skb = netdev_alloc_skb(tp->dev, len +
 +                                                  TG3_RAW_IP_ALIGN);
 +                      if (copy_skb == NULL)
 +                              goto drop_it_no_recycle;
 +
 +                      skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
 +                      skb_put(copy_skb, len);
 +                      pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 +                      skb_copy_from_linear_data(skb, copy_skb->data, len);
 +                      pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 +
 +                      /* We'll reuse the original ring buffer. */
 +                      skb = copy_skb;
 +              }
 +
 +              if ((tp->dev->features & NETIF_F_RXCSUM) &&
 +                  (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
 +                  (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
 +                    >> RXD_TCPCSUM_SHIFT) == 0xffff))
 +                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +              else
 +                      skb_checksum_none_assert(skb);
 +
 +              skb->protocol = eth_type_trans(skb, tp->dev);
 +
 +              if (len > (tp->dev->mtu + ETH_HLEN) &&
 +                  skb->protocol != htons(ETH_P_8021Q)) {
 +                      dev_kfree_skb(skb);
 +                      goto drop_it_no_recycle;
 +              }
 +
 +              if (desc->type_flags & RXD_FLAG_VLAN &&
 +                  !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
 +                      __vlan_hwaccel_put_tag(skb,
 +                                             desc->err_vlan & RXD_VLAN_MASK);
 +
 +              napi_gro_receive(&tnapi->napi, skb);
 +
 +              received++;
 +              budget--;
 +
 +next_pkt:
 +              (*post_ptr)++;
 +
 +              if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
 +                      tpr->rx_std_prod_idx = std_prod_idx &
 +                                             tp->rx_std_ring_mask;
 +                      tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 +                                   tpr->rx_std_prod_idx);
 +                      work_mask &= ~RXD_OPAQUE_RING_STD;
 +                      rx_std_posted = 0;
 +              }
 +next_pkt_nopost:
 +              sw_idx++;
 +              sw_idx &= tp->rx_ret_ring_mask;
 +
 +              /* Refresh hw_idx to see if there is new work */
 +              if (sw_idx == hw_idx) {
 +                      hw_idx = *(tnapi->rx_rcb_prod_idx);
 +                      rmb();
 +              }
 +      }
 +
 +      /* ACK the status ring. */
 +      tnapi->rx_rcb_ptr = sw_idx;
 +      tw32_rx_mbox(tnapi->consmbox, sw_idx);
 +
 +      /* Refill RX ring(s). */
 +      if (!tg3_flag(tp, ENABLE_RSS)) {
 +              if (work_mask & RXD_OPAQUE_RING_STD) {
 +                      tpr->rx_std_prod_idx = std_prod_idx &
 +                                             tp->rx_std_ring_mask;
 +                      tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 +                                   tpr->rx_std_prod_idx);
 +              }
 +              if (work_mask & RXD_OPAQUE_RING_JUMBO) {
 +                      tpr->rx_jmb_prod_idx = jmb_prod_idx &
 +                                             tp->rx_jmb_ring_mask;
 +                      tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 +                                   tpr->rx_jmb_prod_idx);
 +              }
 +              mmiowb();
 +      } else if (work_mask) {
 +              /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
 +               * updated before the producer indices can be updated.
 +               */
 +              smp_wmb();
 +
 +              tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
 +              tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
 +
 +              if (tnapi != &tp->napi[1])
 +                      napi_schedule(&tp->napi[1].napi);
 +      }
 +
 +      return received;
 +}
 +
 +static void tg3_poll_link(struct tg3 *tp)
 +{
 +      /* handle link change and other phy events */
 +      if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
 +              struct tg3_hw_status *sblk = tp->napi[0].hw_status;
 +
 +              if (sblk->status & SD_STATUS_LINK_CHG) {
 +                      sblk->status = SD_STATUS_UPDATED |
 +                                     (sblk->status & ~SD_STATUS_LINK_CHG);
 +                      spin_lock(&tp->lock);
 +                      if (tg3_flag(tp, USE_PHYLIB)) {
 +                              tw32_f(MAC_STATUS,
 +                                   (MAC_STATUS_SYNC_CHANGED |
 +                                    MAC_STATUS_CFG_CHANGED |
 +                                    MAC_STATUS_MI_COMPLETION |
 +                                    MAC_STATUS_LNKSTATE_CHANGED));
 +                              udelay(40);
 +                      } else
 +                              tg3_setup_phy(tp, 0);
 +                      spin_unlock(&tp->lock);
 +              }
 +      }
 +}
 +
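 +/* With RSS, each vector recycles RX buffers into its own producer ring.
 + * Transfer newly posted standard and jumbo buffers from such a source
 + * ring (spr) to the destination ring (dpr) that the chip is actually
 + * refilled from.  Returns -ENOSPC if the destination ring runs out of
 + * free slots.
 + */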
 +static int tg3_rx_prodring_xfer(struct tg3 *tp,
 +                              struct tg3_rx_prodring_set *dpr,
 +                              struct tg3_rx_prodring_set *spr)
 +{
 +      u32 si, di, cpycnt, src_prod_idx;
 +      int i, err = 0;
 +
 +      while (1) {
 +              src_prod_idx = spr->rx_std_prod_idx;
 +
 +              /* Make sure updates to the rx_std_buffers[] entries and the
 +               * standard producer index are seen in the correct order.
 +               */
 +              smp_rmb();
 +
 +              if (spr->rx_std_cons_idx == src_prod_idx)
 +                      break;
 +
 +              if (spr->rx_std_cons_idx < src_prod_idx)
 +                      cpycnt = src_prod_idx - spr->rx_std_cons_idx;
 +              else
 +                      cpycnt = tp->rx_std_ring_mask + 1 -
 +                               spr->rx_std_cons_idx;
 +
 +              cpycnt = min(cpycnt,
 +                           tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
 +
 +              si = spr->rx_std_cons_idx;
 +              di = dpr->rx_std_prod_idx;
 +
 +              for (i = di; i < di + cpycnt; i++) {
 +                      if (dpr->rx_std_buffers[i].skb) {
 +                              cpycnt = i - di;
 +                              err = -ENOSPC;
 +                              break;
 +                      }
 +              }
 +
 +              if (!cpycnt)
 +                      break;
 +
 +              /* Ensure that updates to the rx_std_buffers ring and the
 +               * shadowed hardware producer ring from tg3_recycle_skb() are
 +               * ordered correctly WRT the skb check above.
 +               */
 +              smp_rmb();
 +
 +              memcpy(&dpr->rx_std_buffers[di],
 +                     &spr->rx_std_buffers[si],
 +                     cpycnt * sizeof(struct ring_info));
 +
 +              for (i = 0; i < cpycnt; i++, di++, si++) {
 +                      struct tg3_rx_buffer_desc *sbd, *dbd;
 +                      sbd = &spr->rx_std[si];
 +                      dbd = &dpr->rx_std[di];
 +                      dbd->addr_hi = sbd->addr_hi;
 +                      dbd->addr_lo = sbd->addr_lo;
 +              }
 +
 +              spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
 +                                     tp->rx_std_ring_mask;
 +              dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
 +                                     tp->rx_std_ring_mask;
 +      }
 +
 +      while (1) {
 +              src_prod_idx = spr->rx_jmb_prod_idx;
 +
 +              /* Make sure updates to the rx_jmb_buffers[] entries and
 +               * the jumbo producer index are seen in the correct order.
 +               */
 +              smp_rmb();
 +
 +              if (spr->rx_jmb_cons_idx == src_prod_idx)
 +                      break;
 +
 +              if (spr->rx_jmb_cons_idx < src_prod_idx)
 +                      cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
 +              else
 +                      cpycnt = tp->rx_jmb_ring_mask + 1 -
 +                               spr->rx_jmb_cons_idx;
 +
 +              cpycnt = min(cpycnt,
 +                           tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
 +
 +              si = spr->rx_jmb_cons_idx;
 +              di = dpr->rx_jmb_prod_idx;
 +
 +              for (i = di; i < di + cpycnt; i++) {
 +                      if (dpr->rx_jmb_buffers[i].skb) {
 +                              cpycnt = i - di;
 +                              err = -ENOSPC;
 +                              break;
 +                      }
 +              }
 +
 +              if (!cpycnt)
 +                      break;
 +
 +              /* Ensure that updates to the rx_jmb_buffers ring and the
 +               * shadowed hardware producer ring from tg3_recycle_skb() are
 +               * ordered correctly WRT the skb check above.
 +               */
 +              smp_rmb();
 +
 +              memcpy(&dpr->rx_jmb_buffers[di],
 +                     &spr->rx_jmb_buffers[si],
 +                     cpycnt * sizeof(struct ring_info));
 +
 +              for (i = 0; i < cpycnt; i++, di++, si++) {
 +                      struct tg3_rx_buffer_desc *sbd, *dbd;
 +                      sbd = &spr->rx_jmb[si].std;
 +                      dbd = &dpr->rx_jmb[di].std;
 +                      dbd->addr_hi = sbd->addr_hi;
 +                      dbd->addr_lo = sbd->addr_lo;
 +              }
 +
 +              spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
 +                                     tp->rx_jmb_ring_mask;
 +              dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
 +                                     tp->rx_jmb_ring_mask;
 +      }
 +
 +      return err;
 +}
 +
 +static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 +{
 +      struct tg3 *tp = tnapi->tp;
 +
 +      /* run TX completion thread */
 +      if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
 +              tg3_tx(tnapi);
 +              if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
 +                      return work_done;
 +      }
 +
 +      /* run RX thread, within the bounds set by NAPI.
 +       * All RX "locking" is done by ensuring outside
 +       * code synchronizes with tg3->napi.poll()
 +       */
 +      if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 +              work_done += tg3_rx(tnapi, budget - work_done);
 +
 +      if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
 +              struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
 +              int i, err = 0;
 +              u32 std_prod_idx = dpr->rx_std_prod_idx;
 +              u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
 +
 +              for (i = 1; i < tp->irq_cnt; i++)
 +                      err |= tg3_rx_prodring_xfer(tp, dpr,
 +                                                  &tp->napi[i].prodring);
 +
 +              wmb();
 +
 +              if (std_prod_idx != dpr->rx_std_prod_idx)
 +                      tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 +                                   dpr->rx_std_prod_idx);
 +
 +              if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
 +                      tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 +                                   dpr->rx_jmb_prod_idx);
 +
 +              mmiowb();
 +
 +              if (err)
 +                      tw32_f(HOSTCC_MODE, tp->coal_now);
 +      }
 +
 +      return work_done;
 +}
 +
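 +/* NAPI poll handler for the MSI-X RX/TX vectors; link changes and chip
 + * errors are handled only by tg3_poll() on vector 0.
 + */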
 +static int tg3_poll_msix(struct napi_struct *napi, int budget)
 +{
 +      struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
 +      struct tg3 *tp = tnapi->tp;
 +      int work_done = 0;
 +      struct tg3_hw_status *sblk = tnapi->hw_status;
 +
 +      while (1) {
 +              work_done = tg3_poll_work(tnapi, work_done, budget);
 +
 +              if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
 +                      goto tx_recovery;
 +
 +              if (unlikely(work_done >= budget))
 +                      break;
 +
 +              /* tnapi->last_tag is used when re-enabling interrupts below
 +               * to tell the hw how much work has been processed,
 +               * so we must read it before checking for more work.
 +               */
 +              tnapi->last_tag = sblk->status_tag;
 +              tnapi->last_irq_tag = tnapi->last_tag;
 +              rmb();
 +
 +              /* check for RX/TX work to do */
 +              if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
 +                         *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
 +                      napi_complete(napi);
 +                      /* Reenable interrupts. */
 +                      tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 +                      mmiowb();
 +                      break;
 +              }
 +      }
 +
 +      return work_done;
 +
 +tx_recovery:
 +      /* work_done is guaranteed to be less than budget. */
 +      napi_complete(napi);
 +      schedule_work(&tp->reset_task);
 +      return work_done;
 +}
 +
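 +/* Check the flow attention, MSI and DMA status registers; if a real
 + * error is found, dump state and schedule a chip reset (only once).
 + */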
 +static void tg3_process_error(struct tg3 *tp)
 +{
 +      u32 val;
 +      bool real_error = false;
 +
 +      if (tg3_flag(tp, ERROR_PROCESSED))
 +              return;
 +
 +      /* Check Flow Attention register */
 +      val = tr32(HOSTCC_FLOW_ATTN);
 +      if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
 +              netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
 +              real_error = true;
 +      }
 +
 +      if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
 +              netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
 +              real_error = true;
 +      }
 +
 +      if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
 +              netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
 +              real_error = true;
 +      }
 +
 +      if (!real_error)
 +              return;
 +
 +      tg3_dump_state(tp);
 +
 +      tg3_flag_set(tp, ERROR_PROCESSED);
 +      schedule_work(&tp->reset_task);
 +}
 +
 +static int tg3_poll(struct napi_struct *napi, int budget)
 +{
 +      struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
 +      struct tg3 *tp = tnapi->tp;
 +      int work_done = 0;
 +      struct tg3_hw_status *sblk = tnapi->hw_status;
 +
 +      while (1) {
 +              if (sblk->status & SD_STATUS_ERROR)
 +                      tg3_process_error(tp);
 +
 +              tg3_poll_link(tp);
 +
 +              work_done = tg3_poll_work(tnapi, work_done, budget);
 +
 +              if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
 +                      goto tx_recovery;
 +
 +              if (unlikely(work_done >= budget))
 +                      break;
 +
 +              if (tg3_flag(tp, TAGGED_STATUS)) {
 +                      /* tnapi->last_tag is used in tg3_int_reenable() below
 +                       * to tell the hw how much work has been processed,
 +                       * so we must read it before checking for more work.
 +                       */
 +                      tnapi->last_tag = sblk->status_tag;
 +                      tnapi->last_irq_tag = tnapi->last_tag;
 +                      rmb();
 +              } else
 +                      sblk->status &= ~SD_STATUS_UPDATED;
 +
 +              if (likely(!tg3_has_work(tnapi))) {
 +                      napi_complete(napi);
 +                      tg3_int_reenable(tnapi);
 +                      break;
 +              }
 +      }
 +
 +      return work_done;
 +
 +tx_recovery:
 +      /* work_done is guaranteed to be less than budget. */
 +      napi_complete(napi);
 +      schedule_work(&tp->reset_task);
 +      return work_done;
 +}
 +
 +static void tg3_napi_disable(struct tg3 *tp)
 +{
 +      int i;
 +
 +      for (i = tp->irq_cnt - 1; i >= 0; i--)
 +              napi_disable(&tp->napi[i].napi);
 +}
 +
 +static void tg3_napi_enable(struct tg3 *tp)
 +{
 +      int i;
 +
 +      for (i = 0; i < tp->irq_cnt; i++)
 +              napi_enable(&tp->napi[i].napi);
 +}
 +
 +static void tg3_napi_init(struct tg3 *tp)
 +{
 +      int i;
 +
 +      netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
 +      for (i = 1; i < tp->irq_cnt; i++)
 +              netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
 +}
 +
 +static void tg3_napi_fini(struct tg3 *tp)
 +{
 +      int i;
 +
 +      for (i = 0; i < tp->irq_cnt; i++)
 +              netif_napi_del(&tp->napi[i].napi);
 +}
 +
 +static inline void tg3_netif_stop(struct tg3 *tp)
 +{
 +      tp->dev->trans_start = jiffies; /* prevent tx timeout */
 +      tg3_napi_disable(tp);
 +      netif_tx_disable(tp->dev);
 +}
 +
 +static inline void tg3_netif_start(struct tg3 *tp)
 +{
 +      /* NOTE: unconditional netif_tx_wake_all_queues is only
 +       * appropriate so long as all callers are assured to
 +       * have free tx slots (such as after tg3_init_hw)
 +       */
 +      netif_tx_wake_all_queues(tp->dev);
 +
 +      tg3_napi_enable(tp);
 +      tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
 +      tg3_enable_ints(tp);
 +}
 +
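 +/* Mark interrupt handling as quiesced and wait for any handlers still
 + * running on other CPUs to finish.
 + */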
 +static void tg3_irq_quiesce(struct tg3 *tp)
 +{
 +      int i;
 +
 +      BUG_ON(tp->irq_sync);
 +
 +      tp->irq_sync = 1;
 +      smp_mb();
 +
 +      for (i = 0; i < tp->irq_cnt; i++)
 +              synchronize_irq(tp->napi[i].irq_vec);
 +}
 +
 +/* Fully shut down all tg3 driver activity elsewhere in the system.
 + * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
 + * Most of the time this is not necessary, except when shutting down
 + * the device.
 + */
 +static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
 +{
 +      spin_lock_bh(&tp->lock);
 +      if (irq_sync)
 +              tg3_irq_quiesce(tp);
 +}
 +
 +static inline void tg3_full_unlock(struct tg3 *tp)
 +{
 +      spin_unlock_bh(&tp->lock);
 +}
 +
 +/* One-shot MSI handler - Chip automatically disables interrupt
 + * after sending MSI so driver doesn't have to do it.
 + */
 +static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
 +{
 +      struct tg3_napi *tnapi = dev_id;
 +      struct tg3 *tp = tnapi->tp;
 +
 +      prefetch(tnapi->hw_status);
 +      if (tnapi->rx_rcb)
 +              prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 +
 +      if (likely(!tg3_irq_sync(tp)))
 +              napi_schedule(&tnapi->napi);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +/* MSI ISR - No need to check for interrupt sharing and no need to
 + * flush status block and interrupt mailbox. PCI ordering rules
 + * guarantee that MSI will arrive after the status block.
 + */
 +static irqreturn_t tg3_msi(int irq, void *dev_id)
 +{
 +      struct tg3_napi *tnapi = dev_id;
 +      struct tg3 *tp = tnapi->tp;
 +
 +      prefetch(tnapi->hw_status);
 +      if (tnapi->rx_rcb)
 +              prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 +      /*
 +       * Writing any value to intr-mbox-0 clears PCI INTA# and
 +       * chip-internal interrupt pending events.
 +       * Writing non-zero to intr-mbox-0 additionally tells the
 +       * NIC to stop sending us irqs, engaging "in-intr-handler"
 +       * event coalescing.
 +       */
 +      tw32_mailbox(tnapi->int_mbox, 0x00000001);
 +      if (likely(!tg3_irq_sync(tp)))
 +              napi_schedule(&tnapi->napi);
 +
 +      return IRQ_RETVAL(1);
 +}
 +
 +static irqreturn_t tg3_interrupt(int irq, void *dev_id)
 +{
 +      struct tg3_napi *tnapi = dev_id;
 +      struct tg3 *tp = tnapi->tp;
 +      struct tg3_hw_status *sblk = tnapi->hw_status;
 +      unsigned int handled = 1;
 +
 +      /* In INTx mode, it is possible for the interrupt to arrive at the
 +       * CPU before the status block write posted prior to the interrupt
 +       * has reached host memory.
 +       * Reading the PCI State register will confirm whether the
 +       * interrupt is ours and will flush the status block.
 +       */
 +      if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
 +              if (tg3_flag(tp, CHIP_RESETTING) ||
 +                  (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 +                      handled = 0;
 +                      goto out;
 +              }
 +      }
 +
 +      /*
 +       * Writing any value to intr-mbox-0 clears PCI INTA# and
 +       * chip-internal interrupt pending events.
 +       * Writing non-zero to intr-mbox-0 additionally tells the
 +       * NIC to stop sending us irqs, engaging "in-intr-handler"
 +       * event coalescing.
 +       *
 +       * Flush the mailbox to de-assert the IRQ immediately to prevent
 +       * spurious interrupts.  The flush impacts performance but
 +       * excessive spurious interrupts can be worse in some cases.
 +       */
 +      tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 +      if (tg3_irq_sync(tp))
 +              goto out;
 +      sblk->status &= ~SD_STATUS_UPDATED;
 +      if (likely(tg3_has_work(tnapi))) {
 +              prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 +              napi_schedule(&tnapi->napi);
 +      } else {
 +              /* No work, shared interrupt perhaps?  re-enable
 +               * interrupts, and flush that PCI write
 +               */
 +              tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 +                             0x00000000);
 +      }
 +out:
 +      return IRQ_RETVAL(handled);
 +}
 +
 +static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
 +{
 +      struct tg3_napi *tnapi = dev_id;
 +      struct tg3 *tp = tnapi->tp;
 +      struct tg3_hw_status *sblk = tnapi->hw_status;
 +      unsigned int handled = 1;
 +
 +      /* In INTx mode, it is possible for the interrupt to arrive at the
 +       * CPU before the status block write posted prior to the interrupt
 +       * has reached host memory.
 +       * Reading the PCI State register will confirm whether the
 +       * interrupt is ours and will flush the status block.
 +       */
 +      if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
 +              if (tg3_flag(tp, CHIP_RESETTING) ||
 +                  (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 +                      handled = 0;
 +                      goto out;
 +              }
 +      }
 +
 +      /*
 +       * Writing any value to intr-mbox-0 clears PCI INTA# and
 +       * chip-internal interrupt pending events.
 +       * Writing non-zero to intr-mbox-0 additionally tells the
 +       * NIC to stop sending us irqs, engaging "in-intr-handler"
 +       * event coalescing.
 +       *
 +       * Flush the mailbox to de-assert the IRQ immediately to prevent
 +       * spurious interrupts.  The flush impacts performance but
 +       * excessive spurious interrupts can be worse in some cases.
 +       */
 +      tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 +
 +      /*
 +       * In a shared interrupt configuration, sometimes other devices'
 +       * interrupts will scream.  We record the current status tag here
 +       * so that the above check can report that the screaming interrupts
 +       * are unhandled.  Eventually they will be silenced.
 +       */
 +      tnapi->last_irq_tag = sblk->status_tag;
 +
 +      if (tg3_irq_sync(tp))
 +              goto out;
 +
 +      prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 +
 +      napi_schedule(&tnapi->napi);
 +
 +out:
 +      return IRQ_RETVAL(handled);
 +}
 +
 +/* ISR for interrupt test */
 +static irqreturn_t tg3_test_isr(int irq, void *dev_id)
 +{
 +      struct tg3_napi *tnapi = dev_id;
 +      struct tg3 *tp = tnapi->tp;
 +      struct tg3_hw_status *sblk = tnapi->hw_status;
 +
 +      if ((sblk->status & SD_STATUS_UPDATED) ||
 +          !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 +              tg3_disable_ints(tp);
 +              return IRQ_RETVAL(1);
 +      }
 +      return IRQ_RETVAL(0);
 +}
 +
 +static int tg3_init_hw(struct tg3 *, int);
 +static int tg3_halt(struct tg3 *, int, int);
 +
 +/* Restart hardware after configuration changes, self-test, etc.
 + * Invoked with tp->lock held.
 + */
 +static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
 +      __releases(tp->lock)
 +      __acquires(tp->lock)
 +{
 +      int err;
 +
 +      err = tg3_init_hw(tp, reset_phy);
 +      if (err) {
 +              netdev_err(tp->dev,
 +                         "Failed to re-initialize device, aborting\n");
 +              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +              tg3_full_unlock(tp);
 +              del_timer_sync(&tp->timer);
 +              tp->irq_sync = 0;
 +              tg3_napi_enable(tp);
 +              dev_close(tp->dev);
 +              tg3_full_lock(tp, 0);
 +      }
 +      return err;
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void tg3_poll_controller(struct net_device *dev)
 +{
 +      int i;
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      for (i = 0; i < tp->irq_cnt; i++)
 +              tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
 +}
 +#endif
 +
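 +/* Workqueue handler: quiesce the device, apply the mailbox write
 + * reordering workaround if TX recovery was requested, then halt,
 + * re-initialize and restart the chip.
 + */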
 +static void tg3_reset_task(struct work_struct *work)
 +{
 +      struct tg3 *tp = container_of(work, struct tg3, reset_task);
 +      int err;
 +      unsigned int restart_timer;
 +
 +      tg3_full_lock(tp, 0);
 +
 +      if (!netif_running(tp->dev)) {
 +              tg3_full_unlock(tp);
 +              return;
 +      }
 +
 +      tg3_full_unlock(tp);
 +
 +      tg3_phy_stop(tp);
 +
 +      tg3_netif_stop(tp);
 +
 +      tg3_full_lock(tp, 1);
 +
 +      restart_timer = tg3_flag(tp, RESTART_TIMER);
 +      tg3_flag_clear(tp, RESTART_TIMER);
 +
 +      if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
 +              tp->write32_tx_mbox = tg3_write32_tx_mbox;
 +              tp->write32_rx_mbox = tg3_write_flush_reg32;
 +              tg3_flag_set(tp, MBOX_WRITE_REORDER);
 +              tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 +      }
 +
 +      tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 +      err = tg3_init_hw(tp, 1);
 +      if (err)
 +              goto out;
 +
 +      tg3_netif_start(tp);
 +
 +      if (restart_timer)
 +              mod_timer(&tp->timer, jiffies + 1);
 +
 +out:
 +      tg3_full_unlock(tp);
 +
 +      if (!err)
 +              tg3_phy_start(tp);
 +}
 +
 +static void tg3_tx_timeout(struct net_device *dev)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      if (netif_msg_tx_err(tp)) {
 +              netdev_err(dev, "transmit timed out, resetting\n");
 +              tg3_dump_state(tp);
 +      }
 +
 +      schedule_work(&tp->reset_task);
 +}
 +
 +/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
 +static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 +{
 +      u32 base = (u32) mapping & 0xffffffff;
 +
 +      return (base > 0xffffdcc0) && (base + len + 8 < base);
 +}
 +
 +/* Test for DMA addresses > 40-bit */
 +static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 +                                        int len)
 +{
 +#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
 +      if (tg3_flag(tp, 40BIT_DMA_BUG))
 +              return ((u64) mapping + len) > DMA_BIT_MASK(40);
 +      return 0;
 +#else
 +      return 0;
 +#endif
 +}
 +
 +static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
 +                               dma_addr_t mapping, u32 len, u32 flags,
 +                               u32 mss, u32 vlan)
 +{
 +      txbd->addr_hi = ((u64) mapping >> 32);
 +      txbd->addr_lo = ((u64) mapping & 0xffffffff);
 +      txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
 +      txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
 +}
 +
 +static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
 +                          dma_addr_t map, u32 len, u32 flags,
 +                          u32 mss, u32 vlan)
 +{
 +      struct tg3 *tp = tnapi->tp;
 +      bool hwbug = false;
 +
 +      if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
 +              hwbug = 1;
 +
 +      if (tg3_4g_overflow_test(map, len))
 +              hwbug = 1;
 +
 +      if (tg3_40bit_overflow_test(tp, map, len))
 +              hwbug = 1;
 +
 +      if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
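 +              /* Split a mapping longer than TG3_TX_BD_DMA_MAX across
 +               * several descriptors.  Intermediate pieces drop the END
 +               * flag, and a would-be trailing piece of 8 bytes or less is
 +               * rebalanced so no descriptor hits the short-DMA limit.
 +               */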
 +              u32 tmp_flag = flags & ~TXD_FLAG_END;
 +              while (len > TG3_TX_BD_DMA_MAX) {
 +                      u32 frag_len = TG3_TX_BD_DMA_MAX;
 +                      len -= TG3_TX_BD_DMA_MAX;
 +
 +                      if (len) {
 +                              tnapi->tx_buffers[*entry].fragmented = true;
 +                              /* Avoid the 8byte DMA problem */
 +                              if (len <= 8) {
 +                                      len += TG3_TX_BD_DMA_MAX / 2;
 +                                      frag_len = TG3_TX_BD_DMA_MAX / 2;
 +                              }
 +                      } else
 +                              tmp_flag = flags;
 +
 +                      if (*budget) {
 +                              tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 +                                            frag_len, tmp_flag, mss, vlan);
 +                              (*budget)--;
 +                              *entry = NEXT_TX(*entry);
 +                      } else {
 +                              hwbug = 1;
 +                              break;
 +                      }
 +
 +                      map += frag_len;
 +              }
 +
 +              if (len) {
 +                      if (*budget) {
 +                              tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 +                                            len, flags, mss, vlan);
 +                              (*budget)--;
 +                              *entry = NEXT_TX(*entry);
 +                      } else {
 +                              hwbug = 1;
 +                      }
 +              }
 +      } else {
 +              tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 +                            len, flags, mss, vlan);
 +              *entry = NEXT_TX(*entry);
 +      }
 +
 +      return hwbug;
 +}
 +
 +static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
 +{
 +      int i;
 +      struct sk_buff *skb;
 +      struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
 +
 +      skb = txb->skb;
 +      txb->skb = NULL;
 +
 +      pci_unmap_single(tnapi->tp->pdev,
 +                       dma_unmap_addr(txb, mapping),
 +                       skb_headlen(skb),
 +                       PCI_DMA_TODEVICE);
 +
 +      while (txb->fragmented) {
 +              txb->fragmented = false;
 +              entry = NEXT_TX(entry);
 +              txb = &tnapi->tx_buffers[entry];
 +      }
 +
 +      for (i = 0; i < last; i++) {
 +              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +
 +              entry = NEXT_TX(entry);
 +              txb = &tnapi->tx_buffers[entry];
 +
 +              pci_unmap_page(tnapi->tp->pdev,
 +                             dma_unmap_addr(txb, mapping),
 +                             frag->size, PCI_DMA_TODEVICE);
 +
 +              while (txb->fragmented) {
 +                      txb->fragmented = false;
 +                      entry = NEXT_TX(entry);
 +                      txb = &tnapi->tx_buffers[entry];
 +              }
 +      }
 +}
 +
 +/* Workaround 4GB and 40-bit hardware DMA bugs. */
 +static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 +                                     struct sk_buff *skb,
 +                                     u32 *entry, u32 *budget,
 +                                     u32 base_flags, u32 mss, u32 vlan)
 +{
 +      struct tg3 *tp = tnapi->tp;
 +      struct sk_buff *new_skb;
 +      dma_addr_t new_addr = 0;
 +      int ret = 0;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
 +              new_skb = skb_copy(skb, GFP_ATOMIC);
 +      else {
 +              int more_headroom = 4 - ((unsigned long)skb->data & 3);
 +
 +              new_skb = skb_copy_expand(skb,
 +                                        skb_headroom(skb) + more_headroom,
 +                                        skb_tailroom(skb), GFP_ATOMIC);
 +      }
 +
 +      if (!new_skb) {
 +              ret = -1;
 +      } else {
 +              /* New SKB is guaranteed to be linear. */
 +              new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
 +                                        PCI_DMA_TODEVICE);
 +              /* Make sure the mapping succeeded */
 +              if (pci_dma_mapping_error(tp->pdev, new_addr)) {
 +                      dev_kfree_skb(new_skb);
 +                      ret = -1;
 +              } else {
 +                      base_flags |= TXD_FLAG_END;
 +
 +                      tnapi->tx_buffers[*entry].skb = new_skb;
 +                      dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
 +                                         mapping, new_addr);
 +
 +                      if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
 +                                          new_skb->len, base_flags,
 +                                          mss, vlan)) {
 +                              tg3_tx_skb_unmap(tnapi, *entry, 0);
 +                              dev_kfree_skb(new_skb);
 +                              ret = -1;
 +                      }
 +              }
 +      }
 +
 +      dev_kfree_skb(skb);
 +
 +      return ret;
 +}
 +
 +static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 +
 +/* Use GSO to work around a rare TSO bug that may be triggered when the
 + * TSO header is greater than 80 bytes.
 + */
 +static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 +{
 +      struct sk_buff *segs, *nskb;
 +      u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 +
 +      /* Estimate the number of fragments in the worst case */
 +      if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
 +              netif_stop_queue(tp->dev);
 +
 +              /* netif_tx_stop_queue() must be done before checking
 +               * the tx index in tg3_tx_avail() below, because in
 +               * tg3_tx(), we update tx index before checking for
 +               * netif_tx_queue_stopped().
 +               */
 +              smp_mb();
 +              if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
 +                      return NETDEV_TX_BUSY;
 +
 +              netif_wake_queue(tp->dev);
 +      }
 +
 +      segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
 +      if (IS_ERR(segs))
 +              goto tg3_tso_bug_end;
 +
 +      do {
 +              nskb = segs;
 +              segs = segs->next;
 +              nskb->next = NULL;
 +              tg3_start_xmit(nskb, tp->dev);
 +      } while (segs);
 +
 +tg3_tso_bug_end:
 +      dev_kfree_skb(skb);
 +
 +      return NETDEV_TX_OK;
 +}
 +
 +/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 + * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 + */
 +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      u32 len, entry, base_flags, mss, vlan = 0;
 +      u32 budget;
 +      int i = -1, would_hit_hwbug;
 +      dma_addr_t mapping;
 +      struct tg3_napi *tnapi;
 +      struct netdev_queue *txq;
 +      unsigned int last;
 +
 +      txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 +      tnapi = &tp->napi[skb_get_queue_mapping(skb)];
 +      if (tg3_flag(tp, ENABLE_TSS))
 +              tnapi++;
 +
 +      budget = tg3_tx_avail(tnapi);
 +
 +      /* We are running in BH disabled context with netif_tx_lock
 +       * and TX reclaim runs via tp->napi.poll inside of a software
 +       * interrupt.  Furthermore, IRQ processing runs lockless so we have
 +       * no IRQ context deadlocks to worry about either.  Rejoice!
 +       */
 +      if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
 +              if (!netif_tx_queue_stopped(txq)) {
 +                      netif_tx_stop_queue(txq);
 +
 +                      /* This is a hard error, log it. */
 +                      netdev_err(dev,
 +                                 "BUG! Tx Ring full when queue awake!\n");
 +              }
 +              return NETDEV_TX_BUSY;
 +      }
 +
 +      entry = tnapi->tx_prod;
 +      base_flags = 0;
 +      if (skb->ip_summed == CHECKSUM_PARTIAL)
 +              base_flags |= TXD_FLAG_TCPUDP_CSUM;
 +
 +      mss = skb_shinfo(skb)->gso_size;
 +      if (mss) {
 +              struct iphdr *iph;
 +              u32 tcp_opt_len, hdr_len;
 +
 +              if (skb_header_cloned(skb) &&
 +                  pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
 +                      dev_kfree_skb(skb);
 +                      goto out_unlock;
 +              }
 +
 +              iph = ip_hdr(skb);
 +              tcp_opt_len = tcp_optlen(skb);
 +
 +              if (skb_is_gso_v6(skb)) {
 +                      hdr_len = skb_headlen(skb) - ETH_HLEN;
 +              } else {
 +                      u32 ip_tcp_len;
 +
 +                      ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
 +                      hdr_len = ip_tcp_len + tcp_opt_len;
 +
 +                      iph->check = 0;
 +                      iph->tot_len = htons(mss + hdr_len);
 +              }
 +
 +              if (unlikely((ETH_HLEN + hdr_len) > 80) &&
 +                  tg3_flag(tp, TSO_BUG))
 +                      return tg3_tso_bug(tp, skb);
 +
 +              base_flags |= (TXD_FLAG_CPU_PRE_DMA |
 +                             TXD_FLAG_CPU_POST_DMA);
 +
 +              if (tg3_flag(tp, HW_TSO_1) ||
 +                  tg3_flag(tp, HW_TSO_2) ||
 +                  tg3_flag(tp, HW_TSO_3)) {
 +                      tcp_hdr(skb)->check = 0;
 +                      base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
 +              } else
 +                      tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
 +                                                               iph->daddr, 0,
 +                                                               IPPROTO_TCP,
 +                                                               0);
 +
 +              if (tg3_flag(tp, HW_TSO_3)) {
 +                      mss |= (hdr_len & 0xc) << 12;
 +                      if (hdr_len & 0x10)
 +                              base_flags |= 0x00000010;
 +                      base_flags |= (hdr_len & 0x3e0) << 5;
 +              } else if (tg3_flag(tp, HW_TSO_2))
 +                      mss |= hdr_len << 9;
 +              else if (tg3_flag(tp, HW_TSO_1) ||
 +                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 +                      if (tcp_opt_len || iph->ihl > 5) {
 +                              int tsflags;
 +
 +                              tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
 +                              mss |= (tsflags << 11);
 +                      }
 +              } else {
 +                      if (tcp_opt_len || iph->ihl > 5) {
 +                              int tsflags;
 +
 +                              tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
 +                              base_flags |= tsflags << 12;
 +                      }
 +              }
 +      }
 +
 +      if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
 +          !mss && skb->len > VLAN_ETH_FRAME_LEN)
 +              base_flags |= TXD_FLAG_JMB_PKT;
 +
- #endif
 +      if (vlan_tx_tag_present(skb)) {
 +              base_flags |= TXD_FLAG_VLAN;
 +              vlan = vlan_tx_tag_get(skb);
 +      }
 +
 +      len = skb_headlen(skb);
 +
 +      mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 +      if (pci_dma_mapping_error(tp->pdev, mapping)) {
 +              dev_kfree_skb(skb);
 +              goto out_unlock;
 +      }
 +
 +      tnapi->tx_buffers[entry].skb = skb;
 +      dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 +
 +      would_hit_hwbug = 0;
 +
 +      if (tg3_flag(tp, 5701_DMA_BUG))
 +              would_hit_hwbug = 1;
 +
 +      if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
 +                        ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
 +                          mss, vlan))
 +              would_hit_hwbug = 1;
 +
 +      /* Now loop through additional data fragments, and queue them. */
 +      if (skb_shinfo(skb)->nr_frags > 0) {
 +              u32 tmp_mss = mss;
 +
 +              if (!tg3_flag(tp, HW_TSO_1) &&
 +                  !tg3_flag(tp, HW_TSO_2) &&
 +                  !tg3_flag(tp, HW_TSO_3))
 +                      tmp_mss = 0;
 +
 +              last = skb_shinfo(skb)->nr_frags - 1;
 +              for (i = 0; i <= last; i++) {
 +                      skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +
 +                      len = frag->size;
 +                      mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
 +                                                 len, PCI_DMA_TODEVICE);
 +
 +                      tnapi->tx_buffers[entry].skb = NULL;
 +                      dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
 +                                         mapping);
 +                      if (pci_dma_mapping_error(tp->pdev, mapping))
 +                              goto dma_error;
 +
 +                      if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
 +                                          len, base_flags |
 +                                          ((i == last) ? TXD_FLAG_END : 0),
 +                                          tmp_mss, vlan))
 +                              would_hit_hwbug = 1;
 +              }
 +      }
 +
 +      if (would_hit_hwbug) {
 +              tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 +
 +              /* If the workaround fails due to memory/mapping
 +               * failure, silently drop this packet.
 +               */
 +              entry = tnapi->tx_prod;
 +              budget = tg3_tx_avail(tnapi);
 +              if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
 +                                              base_flags, mss, vlan))
 +                      goto out_unlock;
 +      }
 +
 +      skb_tx_timestamp(skb);
 +
 +      /* Packets are ready, update Tx producer idx local and on card. */
 +      tw32_tx_mbox(tnapi->prodmbox, entry);
 +
 +      tnapi->tx_prod = entry;
 +      if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
 +              netif_tx_stop_queue(txq);
 +
 +              /* netif_tx_stop_queue() must be done before checking
 +               * the tx index in tg3_tx_avail() below, because in
 +               * tg3_tx(), we update tx index before checking for
 +               * netif_tx_queue_stopped().
 +               */
 +              smp_mb();
 +              if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 +                      netif_tx_wake_queue(txq);
 +      }
 +
 +out_unlock:
 +      mmiowb();
 +
 +      return NETDEV_TX_OK;
 +
 +dma_error:
 +      tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 +      dev_kfree_skb(skb);
 +      tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
 +      return NETDEV_TX_OK;
 +}
 +
 +static void tg3_mac_loopback(struct tg3 *tp, bool enable)
 +{
 +      if (enable) {
 +              tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
 +                                MAC_MODE_PORT_MODE_MASK);
 +
 +              tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
 +
 +              if (!tg3_flag(tp, 5705_PLUS))
 +                      tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 +
 +              if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 +                      tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 +              else
 +                      tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 +      } else {
 +              tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
 +
 +              if (tg3_flag(tp, 5705_PLUS) ||
 +                  (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
 +                      tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
 +      }
 +
 +      tw32(MAC_MODE, tp->mac_mode);
 +      udelay(40);
 +}
 +
 +static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
 +{
 +      u32 val, bmcr, mac_mode, ptest = 0;
 +
 +      tg3_phy_toggle_apd(tp, false);
 +      tg3_phy_toggle_automdix(tp, 0);
 +
 +      if (extlpbk && tg3_phy_set_extloopbk(tp))
 +              return -EIO;
 +
 +      bmcr = BMCR_FULLDPLX;
 +      switch (speed) {
 +      case SPEED_10:
 +              break;
 +      case SPEED_100:
 +              bmcr |= BMCR_SPEED100;
 +              break;
 +      case SPEED_1000:
 +      default:
 +              if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 +                      speed = SPEED_100;
 +                      bmcr |= BMCR_SPEED100;
 +              } else {
 +                      speed = SPEED_1000;
 +                      bmcr |= BMCR_SPEED1000;
 +              }
 +      }
 +
 +      if (extlpbk) {
 +              if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 +                      tg3_readphy(tp, MII_CTRL1000, &val);
 +                      val |= CTL1000_AS_MASTER |
 +                             CTL1000_ENABLE_MASTER;
 +                      tg3_writephy(tp, MII_CTRL1000, val);
 +              } else {
 +                      ptest = MII_TG3_FET_PTEST_TRIM_SEL |
 +                              MII_TG3_FET_PTEST_TRIM_2;
 +                      tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
 +              }
 +      } else
 +              bmcr |= BMCR_LOOPBACK;
 +
 +      tg3_writephy(tp, MII_BMCR, bmcr);
 +
 +      /* The write needs to be flushed for the FETs */
 +      if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 +              tg3_readphy(tp, MII_BMCR, &bmcr);
 +
 +      udelay(40);
 +
 +      if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
 +              tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
 +                           MII_TG3_FET_PTEST_FRC_TX_LINK |
 +                           MII_TG3_FET_PTEST_FRC_TX_LOCK);
 +
 +              /* The write needs to be flushed for the AC131 */
 +              tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
 +      }
 +
 +      /* Reset to prevent losing 1st rx packet intermittently */
 +      if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 +          tg3_flag(tp, 5780_CLASS)) {
 +              tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 +              udelay(10);
 +              tw32_f(MAC_RX_MODE, tp->rx_mode);
 +      }
 +
 +      mac_mode = tp->mac_mode &
 +                 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
 +      if (speed == SPEED_1000)
 +              mac_mode |= MAC_MODE_PORT_MODE_GMII;
 +      else
 +              mac_mode |= MAC_MODE_PORT_MODE_MII;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
 +              u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
 +
 +              if (masked_phy_id == TG3_PHY_ID_BCM5401)
 +                      mac_mode &= ~MAC_MODE_LINK_POLARITY;
 +              else if (masked_phy_id == TG3_PHY_ID_BCM5411)
 +                      mac_mode |= MAC_MODE_LINK_POLARITY;
 +
 +              tg3_writephy(tp, MII_TG3_EXT_CTRL,
 +                           MII_TG3_EXT_CTRL_LNK3_LED_MODE);
 +      }
 +
 +      tw32(MAC_MODE, mac_mode);
 +      udelay(40);
 +
 +      return 0;
 +}
 +
 +static void tg3_set_loopback(struct net_device *dev, u32 features)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      if (features & NETIF_F_LOOPBACK) {
 +              if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
 +                      return;
 +
 +              spin_lock_bh(&tp->lock);
 +              tg3_mac_loopback(tp, true);
 +              netif_carrier_on(tp->dev);
 +              spin_unlock_bh(&tp->lock);
 +              netdev_info(dev, "Internal MAC loopback mode enabled.\n");
 +      } else {
 +              if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
 +                      return;
 +
 +              spin_lock_bh(&tp->lock);
 +              tg3_mac_loopback(tp, false);
 +              /* Force link status check */
 +              tg3_setup_phy(tp, 1);
 +              spin_unlock_bh(&tp->lock);
 +              netdev_info(dev, "Internal MAC loopback mode disabled.\n");
 +      }
 +}
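 +/* Note: NETIF_F_LOOPBACK is normally toggled from user space via
 + * ethtool's feature interface (e.g. "ethtool -K <dev> loopback on",
 + * assuming the standard feature string); tg3_set_features() below forwards
 + * the change here when the interface is running.
 + */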
 +
 +static u32 tg3_fix_features(struct net_device *dev, u32 features)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
 +              features &= ~NETIF_F_ALL_TSO;
 +
 +      return features;
 +}
 +
 +static int tg3_set_features(struct net_device *dev, u32 features)
 +{
 +      u32 changed = dev->features ^ features;
 +
 +      if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
 +              tg3_set_loopback(dev, features);
 +
 +      return 0;
 +}
 +
 +static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
 +                             int new_mtu)
 +{
 +      dev->mtu = new_mtu;
 +
 +      if (new_mtu > ETH_DATA_LEN) {
 +              if (tg3_flag(tp, 5780_CLASS)) {
 +                      netdev_update_features(dev);
 +                      tg3_flag_clear(tp, TSO_CAPABLE);
 +              } else {
 +                      tg3_flag_set(tp, JUMBO_RING_ENABLE);
 +              }
 +      } else {
 +              if (tg3_flag(tp, 5780_CLASS)) {
 +                      tg3_flag_set(tp, TSO_CAPABLE);
 +                      netdev_update_features(dev);
 +              }
 +              tg3_flag_clear(tp, JUMBO_RING_ENABLE);
 +      }
 +}
 +
 +static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      int err;
 +
 +      if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
 +              return -EINVAL;
 +
 +      if (!netif_running(dev)) {
 +              /* We'll just catch it later when the
 +               * device is brought up.
 +               */
 +              tg3_set_mtu(dev, tp, new_mtu);
 +              return 0;
 +      }
 +
 +      tg3_phy_stop(tp);
 +
 +      tg3_netif_stop(tp);
 +
 +      tg3_full_lock(tp, 1);
 +
 +      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +
 +      tg3_set_mtu(dev, tp, new_mtu);
 +
 +      err = tg3_restart_hw(tp, 0);
 +
 +      if (!err)
 +              tg3_netif_start(tp);
 +
 +      tg3_full_unlock(tp);
 +
 +      if (!err)
 +              tg3_phy_start(tp);
 +
 +      return err;
 +}
 +
 +static void tg3_rx_prodring_free(struct tg3 *tp,
 +                               struct tg3_rx_prodring_set *tpr)
 +{
 +      int i;
 +
 +      if (tpr != &tp->napi[0].prodring) {
 +              for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
 +                   i = (i + 1) & tp->rx_std_ring_mask)
 +                      tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
 +                                      tp->rx_pkt_map_sz);
 +
 +              if (tg3_flag(tp, JUMBO_CAPABLE)) {
 +                      for (i = tpr->rx_jmb_cons_idx;
 +                           i != tpr->rx_jmb_prod_idx;
 +                           i = (i + 1) & tp->rx_jmb_ring_mask) {
 +                              tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
 +                                              TG3_RX_JMB_MAP_SZ);
 +                      }
 +              }
 +
 +              return;
 +      }
 +
 +      for (i = 0; i <= tp->rx_std_ring_mask; i++)
 +              tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
 +                              tp->rx_pkt_map_sz);
 +
 +      if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
 +              for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
 +                      tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
 +                                      TG3_RX_JMB_MAP_SZ);
 +      }
 +}
 +
 +/* Initialize rx rings for packet processing.
 + *
 + * The chip has been shut down and the driver detached from
 + * the networking stack, so no interrupts or new tx packets will
 + * end up in the driver.  tp->{tx,}lock are held and thus
 + * we may not sleep.
 + */
 +static int tg3_rx_prodring_alloc(struct tg3 *tp,
 +                               struct tg3_rx_prodring_set *tpr)
 +{
 +      u32 i, rx_pkt_dma_sz;
 +
 +      tpr->rx_std_cons_idx = 0;
 +      tpr->rx_std_prod_idx = 0;
 +      tpr->rx_jmb_cons_idx = 0;
 +      tpr->rx_jmb_prod_idx = 0;
 +
 +      if (tpr != &tp->napi[0].prodring) {
 +              memset(&tpr->rx_std_buffers[0], 0,
 +                     TG3_RX_STD_BUFF_RING_SIZE(tp));
 +              if (tpr->rx_jmb_buffers)
 +                      memset(&tpr->rx_jmb_buffers[0], 0,
 +                             TG3_RX_JMB_BUFF_RING_SIZE(tp));
 +              goto done;
 +      }
 +
 +      /* Zero out all descriptors. */
 +      memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
 +
 +      rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
 +      if (tg3_flag(tp, 5780_CLASS) &&
 +          tp->dev->mtu > ETH_DATA_LEN)
 +              rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
 +      tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
 +
 +      /* Initialize invariants of the rings; we only set this
 +       * stuff once.  This works because the card does not
 +       * write into the rx buffer posting rings.
 +       */
 +      for (i = 0; i <= tp->rx_std_ring_mask; i++) {
 +              struct tg3_rx_buffer_desc *rxd;
 +
 +              rxd = &tpr->rx_std[i];
 +              rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
 +              rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
 +              rxd->opaque = (RXD_OPAQUE_RING_STD |
 +                             (i << RXD_OPAQUE_INDEX_SHIFT));
 +      }
 +
 +      /* Now allocate fresh SKBs for each rx ring. */
 +      for (i = 0; i < tp->rx_pending; i++) {
 +              if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
 +                      netdev_warn(tp->dev,
 +                                  "Using a smaller RX standard ring. Only "
 +                                  "%d out of %d buffers were allocated "
 +                                  "successfully\n", i, tp->rx_pending);
 +                      if (i == 0)
 +                              goto initfail;
 +                      tp->rx_pending = i;
 +                      break;
 +              }
 +      }
 +
 +      if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 +              goto done;
 +
 +      memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
 +
 +      if (!tg3_flag(tp, JUMBO_RING_ENABLE))
 +              goto done;
 +
 +      for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
 +              struct tg3_rx_buffer_desc *rxd;
 +
 +              rxd = &tpr->rx_jmb[i].std;
 +              rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
 +              rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
 +                                RXD_FLAG_JUMBO;
 +              rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
 +                     (i << RXD_OPAQUE_INDEX_SHIFT));
 +      }
 +
 +      for (i = 0; i < tp->rx_jumbo_pending; i++) {
 +              if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
 +                      netdev_warn(tp->dev,
 +                                  "Using a smaller RX jumbo ring. Only %d "
 +                                  "out of %d buffers were allocated "
 +                                  "successfully\n", i, tp->rx_jumbo_pending);
 +                      if (i == 0)
 +                              goto initfail;
 +                      tp->rx_jumbo_pending = i;
 +                      break;
 +              }
 +      }
 +
 +done:
 +      return 0;
 +
 +initfail:
 +      tg3_rx_prodring_free(tp, tpr);
 +      return -ENOMEM;
 +}
 +
 +static void tg3_rx_prodring_fini(struct tg3 *tp,
 +                               struct tg3_rx_prodring_set *tpr)
 +{
 +      kfree(tpr->rx_std_buffers);
 +      tpr->rx_std_buffers = NULL;
 +      kfree(tpr->rx_jmb_buffers);
 +      tpr->rx_jmb_buffers = NULL;
 +      if (tpr->rx_std) {
 +              dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
 +                                tpr->rx_std, tpr->rx_std_mapping);
 +              tpr->rx_std = NULL;
 +      }
 +      if (tpr->rx_jmb) {
 +              dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
 +                                tpr->rx_jmb, tpr->rx_jmb_mapping);
 +              tpr->rx_jmb = NULL;
 +      }
 +}
 +
 +static int tg3_rx_prodring_init(struct tg3 *tp,
 +                              struct tg3_rx_prodring_set *tpr)
 +{
 +      tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
 +                                    GFP_KERNEL);
 +      if (!tpr->rx_std_buffers)
 +              return -ENOMEM;
 +
 +      tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
 +                                       TG3_RX_STD_RING_BYTES(tp),
 +                                       &tpr->rx_std_mapping,
 +                                       GFP_KERNEL);
 +      if (!tpr->rx_std)
 +              goto err_out;
 +
 +      if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
 +              tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
 +                                            GFP_KERNEL);
 +              if (!tpr->rx_jmb_buffers)
 +                      goto err_out;
 +
 +              tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
 +                                               TG3_RX_JMB_RING_BYTES(tp),
 +                                               &tpr->rx_jmb_mapping,
 +                                               GFP_KERNEL);
 +              if (!tpr->rx_jmb)
 +                      goto err_out;
 +      }
 +
 +      return 0;
 +
 +err_out:
 +      tg3_rx_prodring_fini(tp, tpr);
 +      return -ENOMEM;
 +}
 +
 +/* Free up pending packets in all rx/tx rings.
 + *
 + * The chip has been shut down and the driver detached from
 + * the networking stack, so no interrupts or new tx packets will
 + * end up in the driver.  tp->{tx,}lock is not held and we are not
 + * in an interrupt context and thus may sleep.
 + */
 +static void tg3_free_rings(struct tg3 *tp)
 +{
 +      int i, j;
 +
 +      for (j = 0; j < tp->irq_cnt; j++) {
 +              struct tg3_napi *tnapi = &tp->napi[j];
 +
 +              tg3_rx_prodring_free(tp, &tnapi->prodring);
 +
 +              if (!tnapi->tx_buffers)
 +                      continue;
 +
 +              for (i = 0; i < TG3_TX_RING_SIZE; i++) {
 +                      struct sk_buff *skb = tnapi->tx_buffers[i].skb;
 +
 +                      if (!skb)
 +                              continue;
 +
 +                      tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
 +
 +                      dev_kfree_skb_any(skb);
 +              }
 +      }
 +}
 +
 +/* Initialize tx/rx rings for packet processing.
 + *
 + * The chip has been shut down and the driver detached from
 + * the networking stack, so no interrupts or new tx packets will
 + * end up in the driver.  tp->{tx,}lock are held and thus
 + * we may not sleep.
 + */
 +static int tg3_init_rings(struct tg3 *tp)
 +{
 +      int i;
 +
 +      /* Free up all the SKBs. */
 +      tg3_free_rings(tp);
 +
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +
 +              tnapi->last_tag = 0;
 +              tnapi->last_irq_tag = 0;
 +              tnapi->hw_status->status = 0;
 +              tnapi->hw_status->status_tag = 0;
 +              memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 +
 +              tnapi->tx_prod = 0;
 +              tnapi->tx_cons = 0;
 +              if (tnapi->tx_ring)
 +                      memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
 +
 +              tnapi->rx_rcb_ptr = 0;
 +              if (tnapi->rx_rcb)
 +                      memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 +
 +              if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
 +                      tg3_free_rings(tp);
 +                      return -ENOMEM;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +/*
 + * Must not be invoked with interrupt sources disabled and
 + * the hardware shut down.
 + */
 +static void tg3_free_consistent(struct tg3 *tp)
 +{
 +      int i;
 +
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +
 +              if (tnapi->tx_ring) {
 +                      dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
 +                              tnapi->tx_ring, tnapi->tx_desc_mapping);
 +                      tnapi->tx_ring = NULL;
 +              }
 +
 +              kfree(tnapi->tx_buffers);
 +              tnapi->tx_buffers = NULL;
 +
 +              if (tnapi->rx_rcb) {
 +                      dma_free_coherent(&tp->pdev->dev,
 +                                        TG3_RX_RCB_RING_BYTES(tp),
 +                                        tnapi->rx_rcb,
 +                                        tnapi->rx_rcb_mapping);
 +                      tnapi->rx_rcb = NULL;
 +              }
 +
 +              tg3_rx_prodring_fini(tp, &tnapi->prodring);
 +
 +              if (tnapi->hw_status) {
 +                      dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
 +                                        tnapi->hw_status,
 +                                        tnapi->status_mapping);
 +                      tnapi->hw_status = NULL;
 +              }
 +      }
 +
 +      if (tp->hw_stats) {
 +              dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 +                                tp->hw_stats, tp->stats_mapping);
 +              tp->hw_stats = NULL;
 +      }
 +}
 +
 +/*
 + * Must not be invoked with interrupt sources disabled and
 + * the hardware shut down.  Can sleep.
 + */
 +static int tg3_alloc_consistent(struct tg3 *tp)
 +{
 +      int i;
 +
 +      tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
 +                                        sizeof(struct tg3_hw_stats),
 +                                        &tp->stats_mapping,
 +                                        GFP_KERNEL);
 +      if (!tp->hw_stats)
 +              goto err_out;
 +
 +      memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 +
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +              struct tg3_hw_status *sblk;
 +
 +              tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
 +                                                    TG3_HW_STATUS_SIZE,
 +                                                    &tnapi->status_mapping,
 +                                                    GFP_KERNEL);
 +              if (!tnapi->hw_status)
 +                      goto err_out;
 +
 +              memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 +              sblk = tnapi->hw_status;
 +
 +              if (tg3_rx_prodring_init(tp, &tnapi->prodring))
 +                      goto err_out;
 +
 +              /* If multivector TSS is enabled, vector 0 does not handle
 +               * tx interrupts.  Don't allocate any resources for it.
 +               */
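 +              /* In other words: vector 0 gets tx resources only when TSS
 +               * is off, and vectors 1..n only when TSS is on.
 +               */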
 +              if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
 +                  (i && tg3_flag(tp, ENABLE_TSS))) {
 +                      tnapi->tx_buffers = kzalloc(
 +                                             sizeof(struct tg3_tx_ring_info) *
 +                                             TG3_TX_RING_SIZE, GFP_KERNEL);
 +                      if (!tnapi->tx_buffers)
 +                              goto err_out;
 +
 +                      tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
 +                                                          TG3_TX_RING_BYTES,
 +                                                      &tnapi->tx_desc_mapping,
 +                                                          GFP_KERNEL);
 +                      if (!tnapi->tx_ring)
 +                              goto err_out;
 +              }
 +
 +              /*
 +               * When RSS is enabled, the status block format changes
 +               * slightly.  The "rx_jumbo_consumer", "reserved",
 +               * and "rx_mini_consumer" members get mapped to the
 +               * other three rx return ring producer indexes.
 +               */
 +              switch (i) {
 +              default:
 +                      tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
 +                      break;
 +              case 2:
 +                      tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
 +                      break;
 +              case 3:
 +                      tnapi->rx_rcb_prod_idx = &sblk->reserved;
 +                      break;
 +              case 4:
 +                      tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
 +                      break;
 +              }
 +
 +              /*
 +               * If multivector RSS is enabled, vector 0 does not handle
 +               * rx or tx interrupts.  Don't allocate any resources for it.
 +               */
 +              if (!i && tg3_flag(tp, ENABLE_RSS))
 +                      continue;
 +
 +              tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
 +                                                 TG3_RX_RCB_RING_BYTES(tp),
 +                                                 &tnapi->rx_rcb_mapping,
 +                                                 GFP_KERNEL);
 +              if (!tnapi->rx_rcb)
 +                      goto err_out;
 +
 +              memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 +      }
 +
 +      return 0;
 +
 +err_out:
 +      tg3_free_consistent(tp);
 +      return -ENOMEM;
 +}
 +
 +#define MAX_WAIT_CNT 1000
 +
 +/* To stop a block, clear the enable bit and poll until it
 + * clears.  tp->lock is held.
 + */
 +static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
 +{
 +      unsigned int i;
 +      u32 val;
 +
 +      if (tg3_flag(tp, 5705_PLUS)) {
 +              switch (ofs) {
 +              case RCVLSC_MODE:
 +              case DMAC_MODE:
 +              case MBFREE_MODE:
 +              case BUFMGR_MODE:
 +              case MEMARB_MODE:
 +                      /* We can't enable/disable these bits of the
 +                       * 5705/5750; just say success.
 +                       */
 +                      return 0;
 +
 +              default:
 +                      break;
 +              }
 +      }
 +
 +      val = tr32(ofs);
 +      val &= ~enable_bit;
 +      tw32_f(ofs, val);
 +
 +      for (i = 0; i < MAX_WAIT_CNT; i++) {
 +              udelay(100);
 +              val = tr32(ofs);
 +              if ((val & enable_bit) == 0)
 +                      break;
 +      }
 +
 +      if (i == MAX_WAIT_CNT && !silent) {
 +              dev_err(&tp->pdev->dev,
 +                      "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
 +                      ofs, enable_bit);
 +              return -ENODEV;
 +      }
 +
 +      return 0;
 +}
 +
 +/* tp->lock is held. */
 +static int tg3_abort_hw(struct tg3 *tp, int silent)
 +{
 +      int i, err;
 +
 +      tg3_disable_ints(tp);
 +
 +      tp->rx_mode &= ~RX_MODE_ENABLE;
 +      tw32_f(MAC_RX_MODE, tp->rx_mode);
 +      udelay(10);
 +
 +      err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
 +
 +      err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
 +
 +      tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
 +      tw32_f(MAC_MODE, tp->mac_mode);
 +      udelay(40);
 +
 +      tp->tx_mode &= ~TX_MODE_ENABLE;
 +      tw32_f(MAC_TX_MODE, tp->tx_mode);
 +
 +      for (i = 0; i < MAX_WAIT_CNT; i++) {
 +              udelay(100);
 +              if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
 +                      break;
 +      }
 +      if (i >= MAX_WAIT_CNT) {
 +              dev_err(&tp->pdev->dev,
 +                      "%s timed out, TX_MODE_ENABLE will not clear "
 +                      "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
 +              err |= -ENODEV;
 +      }
 +
 +      err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
 +
 +      tw32(FTQ_RESET, 0xffffffff);
 +      tw32(FTQ_RESET, 0x00000000);
 +
 +      err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
 +      err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 +
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +              if (tnapi->hw_status)
 +                      memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 +      }
 +      if (tp->hw_stats)
 +              memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 +
 +      return err;
 +}
 +
 +/* Save PCI command register before chip reset */
 +static void tg3_save_pci_state(struct tg3 *tp)
 +{
 +      pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
 +}
 +
 +/* Restore PCI state after chip reset */
 +static void tg3_restore_pci_state(struct tg3 *tp)
 +{
 +      u32 val;
 +
 +      /* Re-enable indirect register accesses. */
 +      pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
 +                             tp->misc_host_ctrl);
 +
 +      /* Set MAX PCI retry to zero. */
 +      val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
 +          tg3_flag(tp, PCIX_MODE))
 +              val |= PCISTATE_RETRY_SAME_DMA;
 +      /* Allow reads and writes to the APE register and memory space. */
 +      if (tg3_flag(tp, ENABLE_APE))
 +              val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
 +                     PCISTATE_ALLOW_APE_SHMEM_WR |
 +                     PCISTATE_ALLOW_APE_PSPACE_WR;
 +      pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
 +
 +      pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
 +              if (tg3_flag(tp, PCI_EXPRESS))
 +                      pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 +              else {
 +                      pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
 +                                            tp->pci_cacheline_sz);
 +                      pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
 +                                            tp->pci_lat_timer);
 +              }
 +      }
 +
 +      /* Make sure PCI-X relaxed ordering bit is clear. */
 +      if (tg3_flag(tp, PCIX_MODE)) {
 +              u16 pcix_cmd;
 +
 +              pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 +                                   &pcix_cmd);
 +              pcix_cmd &= ~PCI_X_CMD_ERO;
 +              pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 +                                    pcix_cmd);
 +      }
 +
 +      if (tg3_flag(tp, 5780_CLASS)) {
 +
 +              /* Chip reset on 5780 will reset the MSI enable bit,
 +               * so we need to restore it.
 +               */
 +              if (tg3_flag(tp, USING_MSI)) {
 +                      u16 ctrl;
 +
 +                      pci_read_config_word(tp->pdev,
 +                                           tp->msi_cap + PCI_MSI_FLAGS,
 +                                           &ctrl);
 +                      pci_write_config_word(tp->pdev,
 +                                            tp->msi_cap + PCI_MSI_FLAGS,
 +                                            ctrl | PCI_MSI_FLAGS_ENABLE);
 +                      val = tr32(MSGINT_MODE);
 +                      tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
 +              }
 +      }
 +}
 +
 +/* tp->lock is held. */
 +static int tg3_chip_reset(struct tg3 *tp)
 +{
 +      u32 val;
 +      void (*write_op)(struct tg3 *, u32, u32);
 +      int i, err;
 +
 +      tg3_nvram_lock(tp);
 +
 +      tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
 +
 +      /* No matching tg3_nvram_unlock() after this because
 +       * chip reset below will undo the nvram lock.
 +       */
 +      tp->nvram_lock_cnt = 0;
 +
 +      /* GRC_MISC_CFG core clock reset will clear the memory
 +       * enable bit in PCI register 4 and the MSI enable bit
 +       * on some chips, so we save relevant registers here.
 +       */
 +      tg3_save_pci_state(tp);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 +          tg3_flag(tp, 5755_PLUS))
 +              tw32(GRC_FASTBOOT_PC, 0);
 +
 +      /*
 +       * We must avoid the readl() that normally takes place.
 +       * It locks machines, causes machine checks, and other
 +       * fun things.  So, temporarily disable the 5701
 +       * hardware workaround, while we do the reset.
 +       */
 +      write_op = tp->write32;
 +      if (write_op == tg3_write_flush_reg32)
 +              tp->write32 = tg3_write32;
 +
 +      /* Prevent the irq handler from reading or writing PCI registers
 +       * during chip reset when the memory enable bit in the PCI command
 +       * register may be cleared.  The chip does not generate interrupts
 +       * at this time, but the irq handler may still be called due to irq
 +       * sharing or irqpoll.
 +       */
 +      tg3_flag_set(tp, CHIP_RESETTING);
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +              if (tnapi->hw_status) {
 +                      tnapi->hw_status->status = 0;
 +                      tnapi->hw_status->status_tag = 0;
 +              }
 +              tnapi->last_tag = 0;
 +              tnapi->last_irq_tag = 0;
 +      }
 +      smp_mb();
 +
 +      for (i = 0; i < tp->irq_cnt; i++)
 +              synchronize_irq(tp->napi[i].irq_vec);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
 +              val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
 +              tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
 +      }
 +
 +      /* do the reset */
 +      val = GRC_MISC_CFG_CORECLK_RESET;
 +
 +      if (tg3_flag(tp, PCI_EXPRESS)) {
 +              /* Force PCIe 1.0a mode */
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 +                  !tg3_flag(tp, 57765_PLUS) &&
 +                  tr32(TG3_PCIE_PHY_TSTCTL) ==
 +                  (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
 +                      tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
 +
 +              if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
 +                      tw32(GRC_MISC_CFG, (1 << 29));
 +                      val |= (1 << 29);
 +              }
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
 +              tw32(GRC_VCPU_EXT_CTRL,
 +                   tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
 +      }
 +
 +      /* Manage gphy power for all CPMU absent PCIe devices. */
 +      if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
 +              val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
 +
 +      tw32(GRC_MISC_CFG, val);
 +
 +      /* restore 5701 hardware bug workaround write method */
 +      tp->write32 = write_op;
 +
 +      /* Unfortunately, we have to delay before the PCI read back.
 +       * Some 575X chips even will not respond to a PCI cfg access
 +       * when the reset command is given to the chip.
 +       *
 +       * How do these hardware designers expect things to work
 +       * properly if the PCI write is posted for a long period
 +       * of time?  It is always necessary to have some method by
 +       * which a register read back can occur to push the write
 +       * out which does the reset.
 +       *
 +       * For most tg3 variants the trick below was working.
 +       * Ho hum...
 +       */
 +      udelay(120);
 +
 +      /* Flush PCI posted writes.  The normal MMIO registers
 +       * are inaccessible at this time, so this is the only
 +       * way to do this reliably (actually, this is no longer
 +       * the case, see above).  I tried to use indirect
 +       * register read/write but this upset some 5701 variants.
 +       */
 +      pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
 +
 +      udelay(120);
 +
 +      if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
 +              u16 val16;
 +
 +              if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
 +                      int i;
 +                      u32 cfg_val;
 +
 +                      /* Wait for link training to complete.  */
 +                      for (i = 0; i < 5000; i++)
 +                              udelay(100);
 +
 +                      pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
 +                      pci_write_config_dword(tp->pdev, 0xc4,
 +                                             cfg_val | (1 << 15));
 +              }
 +
 +              /* Clear the "no snoop" and "relaxed ordering" bits. */
 +              pci_read_config_word(tp->pdev,
 +                                   pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
 +                                   &val16);
 +              val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
 +                         PCI_EXP_DEVCTL_NOSNOOP_EN);
 +              /*
 +               * Older PCIe devices only support the 128 byte
 +               * MPS setting.  Enforce the restriction.
 +               */
 +              if (!tg3_flag(tp, CPMU_PRESENT))
 +                      val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
 +              pci_write_config_word(tp->pdev,
 +                                    pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
 +                                    val16);
 +
 +              pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 +
 +              /* Clear error status */
 +              pci_write_config_word(tp->pdev,
 +                                    pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
 +                                    PCI_EXP_DEVSTA_CED |
 +                                    PCI_EXP_DEVSTA_NFED |
 +                                    PCI_EXP_DEVSTA_FED |
 +                                    PCI_EXP_DEVSTA_URD);
 +      }
 +
 +      tg3_restore_pci_state(tp);
 +
 +      tg3_flag_clear(tp, CHIP_RESETTING);
 +      tg3_flag_clear(tp, ERROR_PROCESSED);
 +
 +      val = 0;
 +      if (tg3_flag(tp, 5780_CLASS))
 +              val = tr32(MEMARB_MODE);
 +      tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
 +
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
 +              tg3_stop_fw(tp);
 +              tw32(0x5000, 0x400);
 +      }
 +
 +      tw32(GRC_MODE, tp->grc_mode);
 +
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
 +              val = tr32(0xc4);
 +
 +              tw32(0xc4, val | (1 << 15));
 +      }
 +
 +      if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 +              tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
 +              if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
 +                      tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
 +              tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
 +      }
 +
 +      if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 +              tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
 +              val = tp->mac_mode;
 +      } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
 +              tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
 +              val = tp->mac_mode;
 +      } else
 +              val = 0;
 +
 +      tw32_f(MAC_MODE, val);
 +      udelay(40);
 +
 +      tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
 +
 +      err = tg3_poll_fw(tp);
 +      if (err)
 +              return err;
 +
 +      tg3_mdio_start(tp);
 +
 +      if (tg3_flag(tp, PCI_EXPRESS) &&
 +          tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 +          !tg3_flag(tp, 57765_PLUS)) {
 +              val = tr32(0x7c00);
 +
 +              tw32(0x7c00, val | (1 << 25));
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +              val = tr32(TG3_CPMU_CLCK_ORIDE);
 +              tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
 +      }
 +
 +      /* Reprobe ASF enable state.  */
 +      tg3_flag_clear(tp, ENABLE_ASF);
 +      tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
 +      tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 +      if (val == NIC_SRAM_DATA_SIG_MAGIC) {
 +              u32 nic_cfg;
 +
 +              tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
 +              if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 +                      tg3_flag_set(tp, ENABLE_ASF);
 +                      tp->last_event_jiffies = jiffies;
 +                      if (tg3_flag(tp, 5750_PLUS))
 +                              tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +/* tp->lock is held. */
 +static int tg3_halt(struct tg3 *tp, int kind, int silent)
 +{
 +      int err;
 +
 +      tg3_stop_fw(tp);
 +
 +      tg3_write_sig_pre_reset(tp, kind);
 +
 +      tg3_abort_hw(tp, silent);
 +      err = tg3_chip_reset(tp);
 +
 +      __tg3_set_mac_addr(tp, 0);
 +
 +      tg3_write_sig_legacy(tp, kind);
 +      tg3_write_sig_post_reset(tp, kind);
 +
 +      if (err)
 +              return err;
 +
 +      return 0;
 +}
 +
 +static int tg3_set_mac_addr(struct net_device *dev, void *p)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      struct sockaddr *addr = p;
 +      int err = 0, skip_mac_1 = 0;
 +
 +      if (!is_valid_ether_addr(addr->sa_data))
 +              return -EINVAL;
 +
 +      memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 +
 +      if (!netif_running(dev))
 +              return 0;
 +
 +      if (tg3_flag(tp, ENABLE_ASF)) {
 +              u32 addr0_high, addr0_low, addr1_high, addr1_low;
 +
 +              addr0_high = tr32(MAC_ADDR_0_HIGH);
 +              addr0_low = tr32(MAC_ADDR_0_LOW);
 +              addr1_high = tr32(MAC_ADDR_1_HIGH);
 +              addr1_low = tr32(MAC_ADDR_1_LOW);
 +
 +              /* Skip MAC addr 1 if ASF is using it. */
 +              if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
 +                  !(addr1_high == 0 && addr1_low == 0))
 +                      skip_mac_1 = 1;
 +      }
 +      spin_lock_bh(&tp->lock);
 +      __tg3_set_mac_addr(tp, skip_mac_1);
 +      spin_unlock_bh(&tp->lock);
 +
 +      return err;
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 +                         dma_addr_t mapping, u32 maxlen_flags,
 +                         u32 nic_addr)
 +{
 +      tg3_write_mem(tp,
 +                    (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
 +                    ((u64) mapping >> 32));
 +      tg3_write_mem(tp,
 +                    (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
 +                    ((u64) mapping & 0xffffffff));
 +      tg3_write_mem(tp,
 +                    (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
 +                     maxlen_flags);
 +
 +      if (!tg3_flag(tp, 5705_PLUS))
 +              tg3_write_mem(tp,
 +                            (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
 +                            nic_addr);
 +}
 +
 +static void __tg3_set_rx_mode(struct net_device *);
 +static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
 +{
 +      int i;
 +
 +      if (!tg3_flag(tp, ENABLE_TSS)) {
 +              tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
 +              tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
 +              tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
 +      } else {
 +              tw32(HOSTCC_TXCOL_TICKS, 0);
 +              tw32(HOSTCC_TXMAX_FRAMES, 0);
 +              tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
 +      }
 +
 +      if (!tg3_flag(tp, ENABLE_RSS)) {
 +              tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
 +              tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
 +              tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
 +      } else {
 +              tw32(HOSTCC_RXCOL_TICKS, 0);
 +              tw32(HOSTCC_RXMAX_FRAMES, 0);
 +              tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
 +      }
 +
 +      if (!tg3_flag(tp, 5705_PLUS)) {
 +              u32 val = ec->stats_block_coalesce_usecs;
 +
 +              tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
 +              tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
 +
 +              if (!netif_carrier_ok(tp->dev))
 +                      val = 0;
 +
 +              tw32(HOSTCC_STAT_COAL_TICKS, val);
 +      }
 +
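 +      /* Program coalescing parameters for the extra active interrupt vectors. */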
 +      for (i = 0; i < tp->irq_cnt - 1; i++) {
 +              u32 reg;
 +
 +              reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
 +              tw32(reg, ec->rx_coalesce_usecs);
 +              reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
 +              tw32(reg, ec->rx_max_coalesced_frames);
 +              reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
 +              tw32(reg, ec->rx_max_coalesced_frames_irq);
 +
 +              if (tg3_flag(tp, ENABLE_TSS)) {
 +                      reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
 +                      tw32(reg, ec->tx_coalesce_usecs);
 +                      reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
 +                      tw32(reg, ec->tx_max_coalesced_frames);
 +                      reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
 +                      tw32(reg, ec->tx_max_coalesced_frames_irq);
 +              }
 +      }
 +
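 +      /* Zero the coalescing registers for any unused vectors. */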
 +      for (; i < tp->irq_max - 1; i++) {
 +              tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
 +              tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
 +              tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
 +
 +              if (tg3_flag(tp, ENABLE_TSS)) {
 +                      tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
 +                      tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
 +                      tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
 +              }
 +      }
 +}
 +
 +/* tp->lock is held. */
 +static void tg3_rings_reset(struct tg3 *tp)
 +{
 +      int i;
 +      u32 stblk, txrcb, rxrcb, limit;
 +      struct tg3_napi *tnapi = &tp->napi[0];
 +
 +      /* Disable all transmit rings but the first. */
 +      if (!tg3_flag(tp, 5705_PLUS))
 +              limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
 +      else if (tg3_flag(tp, 5717_PLUS))
 +              limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
 +      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +              limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
 +      else
 +              limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
 +
 +      for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
 +           txrcb < limit; txrcb += TG3_BDINFO_SIZE)
 +              tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
 +                            BDINFO_FLAGS_DISABLED);
 +
 +      /* Disable all receive return rings but the first. */
 +      if (tg3_flag(tp, 5717_PLUS))
 +              limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
 +      else if (!tg3_flag(tp, 5705_PLUS))
 +              limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
 +      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 +               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +              limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
 +      else
 +              limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
 +
 +      for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
 +           rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
 +              tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
 +                            BDINFO_FLAGS_DISABLED);
 +
 +      /* Disable interrupts */
 +      tw32_mailbox_f(tp->napi[0].int_mbox, 1);
 +      tp->napi[0].chk_msi_cnt = 0;
 +      tp->napi[0].last_rx_cons = 0;
 +      tp->napi[0].last_tx_cons = 0;
 +
 +      /* Zero mailbox registers. */
 +      if (tg3_flag(tp, SUPPORT_MSIX)) {
 +              for (i = 1; i < tp->irq_max; i++) {
 +                      tp->napi[i].tx_prod = 0;
 +                      tp->napi[i].tx_cons = 0;
 +                      if (tg3_flag(tp, ENABLE_TSS))
 +                              tw32_mailbox(tp->napi[i].prodmbox, 0);
 +                      tw32_rx_mbox(tp->napi[i].consmbox, 0);
 +                      tw32_mailbox_f(tp->napi[i].int_mbox, 1);
 +                      tp->napi[i].chk_msi_cnt = 0;
 +                      tp->napi[i].last_rx_cons = 0;
 +                      tp->napi[i].last_tx_cons = 0;
 +              }
 +              if (!tg3_flag(tp, ENABLE_TSS))
 +                      tw32_mailbox(tp->napi[0].prodmbox, 0);
 +      } else {
 +              tp->napi[0].tx_prod = 0;
 +              tp->napi[0].tx_cons = 0;
 +              tw32_mailbox(tp->napi[0].prodmbox, 0);
 +              tw32_rx_mbox(tp->napi[0].consmbox, 0);
 +      }
 +
 +      /* Make sure the NIC-based send BD rings are disabled. */
 +      if (!tg3_flag(tp, 5705_PLUS)) {
 +              u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
 +              for (i = 0; i < 16; i++)
 +                      tw32_tx_mbox(mbox + i * 8, 0);
 +      }
 +
 +      txrcb = NIC_SRAM_SEND_RCB;
 +      rxrcb = NIC_SRAM_RCV_RET_RCB;
 +
 +      /* Clear status block in ram. */
 +      memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 +
 +      /* Set status block DMA address */
 +      tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 +           ((u64) tnapi->status_mapping >> 32));
 +      tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
 +           ((u64) tnapi->status_mapping & 0xffffffff));
 +
 +      if (tnapi->tx_ring) {
 +              tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 +                             (TG3_TX_RING_SIZE <<
 +                              BDINFO_FLAGS_MAXLEN_SHIFT),
 +                             NIC_SRAM_TX_BUFFER_DESC);
 +              txrcb += TG3_BDINFO_SIZE;
 +      }
 +
 +      if (tnapi->rx_rcb) {
 +              tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
 +                             (tp->rx_ret_ring_mask + 1) <<
 +                              BDINFO_FLAGS_MAXLEN_SHIFT, 0);
 +              rxrcb += TG3_BDINFO_SIZE;
 +      }
 +
 +      stblk = HOSTCC_STATBLCK_RING1;
 +
 +      for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
 +              u64 mapping = (u64)tnapi->status_mapping;
 +              tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
 +              tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
 +
 +              /* Clear status block in ram. */
 +              memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 +
 +              if (tnapi->tx_ring) {
 +                      tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 +                                     (TG3_TX_RING_SIZE <<
 +                                      BDINFO_FLAGS_MAXLEN_SHIFT),
 +                                     NIC_SRAM_TX_BUFFER_DESC);
 +                      txrcb += TG3_BDINFO_SIZE;
 +              }
 +
 +              tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
 +                             ((tp->rx_ret_ring_mask + 1) <<
 +                              BDINFO_FLAGS_MAXLEN_SHIFT), 0);
 +
 +              stblk += 8;
 +              rxrcb += TG3_BDINFO_SIZE;
 +      }
 +}
 +
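 +/* Compute and program the standard and jumbo RX BD replenish thresholds. */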
 +static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 +{
 +      u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
 +
 +      if (!tg3_flag(tp, 5750_PLUS) ||
 +          tg3_flag(tp, 5780_CLASS) ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
 +              bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
 +      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 +               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
 +              bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
 +      else
 +              bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
 +
 +      nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
 +      host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
 +
 +      val = min(nic_rep_thresh, host_rep_thresh);
 +      tw32(RCVBDI_STD_THRESH, val);
 +
 +      if (tg3_flag(tp, 57765_PLUS))
 +              tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
 +
 +      if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 +              return;
 +
 +      if (!tg3_flag(tp, 5705_PLUS))
 +              bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
 +      else
 +              bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
 +
 +      host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
 +
 +      val = min(bdcache_maxcnt / 2, host_rep_thresh);
 +      tw32(RCVBDI_JUMBO_THRESH, val);
 +
 +      if (tg3_flag(tp, 57765_PLUS))
 +              tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
 +}
 +
 +/* tp->lock is held. */
 +static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 +{
 +      u32 val, rdmac_mode;
 +      int i, err, limit;
 +      struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
 +
 +      tg3_disable_ints(tp);
 +
 +      tg3_stop_fw(tp);
 +
 +      tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
 +
 +      if (tg3_flag(tp, INIT_COMPLETE))
 +              tg3_abort_hw(tp, 1);
 +
 +      /* Enable MAC control of LPI */
 +      if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
 +              tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
 +                     TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
 +                     TG3_CPMU_EEE_LNKIDL_UART_IDL);
 +
 +              tw32_f(TG3_CPMU_EEE_CTRL,
 +                     TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
 +
 +              val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
 +                    TG3_CPMU_EEEMD_LPI_IN_TX |
 +                    TG3_CPMU_EEEMD_LPI_IN_RX |
 +                    TG3_CPMU_EEEMD_EEE_ENABLE;
 +
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
 +                      val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
 +
 +              if (tg3_flag(tp, ENABLE_APE))
 +                      val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
 +
 +              tw32_f(TG3_CPMU_EEE_MODE, val);
 +
 +              tw32_f(TG3_CPMU_EEE_DBTMR1,
 +                     TG3_CPMU_DBTMR1_PCIEXIT_2047US |
 +                     TG3_CPMU_DBTMR1_LNKIDLE_2047US);
 +
 +              tw32_f(TG3_CPMU_EEE_DBTMR2,
 +                     TG3_CPMU_DBTMR2_APE_TX_2047US |
 +                     TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
 +      }
 +
 +      if (reset_phy)
 +              tg3_phy_reset(tp);
 +
 +      err = tg3_chip_reset(tp);
 +      if (err)
 +              return err;
 +
 +      tg3_write_sig_legacy(tp, RESET_KIND_INIT);
 +
 +      if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
 +              val = tr32(TG3_CPMU_CTRL);
 +              val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
 +              tw32(TG3_CPMU_CTRL, val);
 +
 +              val = tr32(TG3_CPMU_LSPD_10MB_CLK);
 +              val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
 +              val |= CPMU_LSPD_10MB_MACCLK_6_25;
 +              tw32(TG3_CPMU_LSPD_10MB_CLK, val);
 +
 +              val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
 +              val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
 +              val |= CPMU_LNK_AWARE_MACCLK_6_25;
 +              tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
 +
 +              val = tr32(TG3_CPMU_HST_ACC);
 +              val &= ~CPMU_HST_ACC_MACCLK_MASK;
 +              val |= CPMU_HST_ACC_MACCLK_6_25;
 +              tw32(TG3_CPMU_HST_ACC, val);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
 +              val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
 +              val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
 +                     PCIE_PWR_MGMT_L1_THRESH_4MS;
 +              tw32(PCIE_PWR_MGMT_THRESH, val);
 +
 +              val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
 +              tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
 +
 +              tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
 +
 +              val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
 +              tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
 +      }
 +
 +      if (tg3_flag(tp, L1PLLPD_EN)) {
 +              u32 grc_mode = tr32(GRC_MODE);
 +
 +              /* Access the lower 1K of PL PCIE block registers. */
 +              val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
 +              tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
 +
 +              val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
 +              tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
 +                   val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
 +
 +              tw32(GRC_MODE, grc_mode);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
 +              if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
 +                      u32 grc_mode = tr32(GRC_MODE);
 +
 +                      /* Access the lower 1K of PL PCIE block registers. */
 +                      val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
 +                      tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
 +
 +                      val = tr32(TG3_PCIE_TLDLPL_PORT +
 +                                 TG3_PCIE_PL_LO_PHYCTL5);
 +                      tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
 +                           val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
 +
 +                      tw32(GRC_MODE, grc_mode);
 +              }
 +
 +              if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
 +                      u32 grc_mode = tr32(GRC_MODE);
 +
 +                      /* Access the lower 1K of DL PCIE block registers. */
 +                      val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
 +                      tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
 +
 +                      val = tr32(TG3_PCIE_TLDLPL_PORT +
 +                                 TG3_PCIE_DL_LO_FTSMAX);
 +                      val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
 +                      tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
 +                           val | TG3_PCIE_DL_LO_FTSMAX_VAL);
 +
 +                      tw32(GRC_MODE, grc_mode);
 +              }
 +
 +              val = tr32(TG3_CPMU_LSPD_10MB_CLK);
 +              val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
 +              val |= CPMU_LSPD_10MB_MACCLK_6_25;
 +              tw32(TG3_CPMU_LSPD_10MB_CLK, val);
 +      }
 +
 +      /* This works around an issue with Athlon chipsets on
 +       * B3 tigon3 silicon.  This bit has no effect on any
 +       * other revision.  But do not set this on PCI Express
 +       * chips and don't even touch the clocks if the CPMU is present.
 +       */
 +      if (!tg3_flag(tp, CPMU_PRESENT)) {
 +              if (!tg3_flag(tp, PCI_EXPRESS))
 +                      tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
 +              tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
 +      }
 +
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
 +          tg3_flag(tp, PCIX_MODE)) {
 +              val = tr32(TG3PCI_PCISTATE);
 +              val |= PCISTATE_RETRY_SAME_DMA;
 +              tw32(TG3PCI_PCISTATE, val);
 +      }
 +
 +      if (tg3_flag(tp, ENABLE_APE)) {
 +              /* Allow reads and writes to the
 +               * APE register and memory space.
 +               */
 +              val = tr32(TG3PCI_PCISTATE);
 +              val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
 +                     PCISTATE_ALLOW_APE_SHMEM_WR |
 +                     PCISTATE_ALLOW_APE_PSPACE_WR;
 +              tw32(TG3PCI_PCISTATE, val);
 +      }
 +
 +      if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
 +              /* Enable some hw fixes.  */
 +              val = tr32(TG3PCI_MSI_DATA);
 +              val |= (1 << 26) | (1 << 28) | (1 << 29);
 +              tw32(TG3PCI_MSI_DATA, val);
 +      }
 +
 +      /* Descriptor ring init may make accesses to the
 +       * NIC SRAM area to set up the TX descriptors, so we
 +       * can only do this after the hardware has been
 +       * successfully reset.
 +       */
 +      err = tg3_init_rings(tp);
 +      if (err)
 +              return err;
 +
 +      if (tg3_flag(tp, 57765_PLUS)) {
 +              val = tr32(TG3PCI_DMA_RW_CTRL) &
 +                    ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
 +              if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
 +                      val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
 +                      val |= DMA_RWCTRL_TAGGED_STAT_WA;
 +              tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
 +      } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
 +                 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
 +              /* This value is determined during the probe time DMA
 +               * engine test, tg3_test_dma.
 +               */
 +              tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 +      }
 +
 +      tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
 +                        GRC_MODE_4X_NIC_SEND_RINGS |
 +                        GRC_MODE_NO_TX_PHDR_CSUM |
 +                        GRC_MODE_NO_RX_PHDR_CSUM);
 +      tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
 +
 +      /* Pseudo-header checksum is done by hardware logic and not
 +       * the offload processors, so make the chip do the pseudo-
 +       * header checksums on receive.  For transmit it is more
 +       * convenient to do the pseudo-header checksum in software
 +       * as Linux does that on transmit for us in all cases.
 +       */
 +      tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
 +
 +      tw32(GRC_MODE,
 +           tp->grc_mode |
 +           (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
 +
 +      /* Set up the timer prescaler register.  The clock is always 66MHz. */
 +      val = tr32(GRC_MISC_CFG);
 +      val &= ~0xff;
 +      val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
 +      tw32(GRC_MISC_CFG, val);
 +
 +      /* Initialize MBUF/DESC pool. */
 +      if (tg3_flag(tp, 5750_PLUS)) {
 +              /* Do nothing.  */
 +      } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
 +              tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
 +                      tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
 +              else
 +                      tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
 +              tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
 +              tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
 +      } else if (tg3_flag(tp, TSO_CAPABLE)) {
 +              int fw_len;
 +
 +              fw_len = tp->fw_len;
 +              fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
 +              tw32(BUFMGR_MB_POOL_ADDR,
 +                   NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
 +              tw32(BUFMGR_MB_POOL_SIZE,
 +                   NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
 +      }
 +
 +      if (tp->dev->mtu <= ETH_DATA_LEN) {
 +              tw32(BUFMGR_MB_RDMA_LOW_WATER,
 +                   tp->bufmgr_config.mbuf_read_dma_low_water);
 +              tw32(BUFMGR_MB_MACRX_LOW_WATER,
 +                   tp->bufmgr_config.mbuf_mac_rx_low_water);
 +              tw32(BUFMGR_MB_HIGH_WATER,
 +                   tp->bufmgr_config.mbuf_high_water);
 +      } else {
 +              tw32(BUFMGR_MB_RDMA_LOW_WATER,
 +                   tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
 +              tw32(BUFMGR_MB_MACRX_LOW_WATER,
 +                   tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
 +              tw32(BUFMGR_MB_HIGH_WATER,
 +                   tp->bufmgr_config.mbuf_high_water_jumbo);
 +      }
 +      tw32(BUFMGR_DMA_LOW_WATER,
 +           tp->bufmgr_config.dma_low_water);
 +      tw32(BUFMGR_DMA_HIGH_WATER,
 +           tp->bufmgr_config.dma_high_water);
 +
 +      val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +              val |= BUFMGR_MODE_NO_TX_UNDERRUN;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
 +              val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
 +      tw32(BUFMGR_MODE, val);
 +      for (i = 0; i < 2000; i++) {
 +              if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
 +                      break;
 +              udelay(10);
 +      }
 +      if (i >= 2000) {
 +              netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
 +              return -ENODEV;
 +      }
 +
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
 +              tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
 +
 +      tg3_setup_rxbd_thresholds(tp);
 +
 +      /* Initialize TG3_BDINFO's at:
 +       *  RCVDBDI_STD_BD:     standard eth size rx ring
 +       *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
 +       *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
 +       *
 +       * like so:
 +       *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
 +       *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
 +       *                              ring attribute flags
 +       *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
 +       *
 +       * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
 +       * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
 +       *
 +       * The size of each ring is fixed in the firmware, but the location is
 +       * configurable.
 +       */
 +      tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
 +           ((u64) tpr->rx_std_mapping >> 32));
 +      tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
 +           ((u64) tpr->rx_std_mapping & 0xffffffff));
 +      if (!tg3_flag(tp, 5717_PLUS))
 +              tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
 +                   NIC_SRAM_RX_BUFFER_DESC);
 +
 +      /* Disable the mini ring */
 +      if (!tg3_flag(tp, 5705_PLUS))
 +              tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
 +                   BDINFO_FLAGS_DISABLED);
 +
 +      /* Program the jumbo buffer descriptor ring control
 +       * blocks on those devices that have them.
 +       */
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
 +          (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
 +
 +              if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
 +                      tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
 +                           ((u64) tpr->rx_jmb_mapping >> 32));
 +                      tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
 +                           ((u64) tpr->rx_jmb_mapping & 0xffffffff));
 +                      val = TG3_RX_JMB_RING_SIZE(tp) <<
 +                            BDINFO_FLAGS_MAXLEN_SHIFT;
 +                      tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 +                           val | BDINFO_FLAGS_USE_EXT_RECV);
 +                      if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
 +                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +                              tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
 +                                   NIC_SRAM_RX_JUMBO_BUFFER_DESC);
 +              } else {
 +                      tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 +                           BDINFO_FLAGS_DISABLED);
 +              }
 +
 +              if (tg3_flag(tp, 57765_PLUS)) {
 +                      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +                              val = TG3_RX_STD_MAX_SIZE_5700;
 +                      else
 +                              val = TG3_RX_STD_MAX_SIZE_5717;
 +                      val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
 +                      val |= (TG3_RX_STD_DMA_SZ << 2);
 +              } else
 +                      val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
 +      } else
 +              val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
 +
 +      tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
 +
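 +      /* Post the initial producer indices for the standard and jumbo RX rings. */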
 +      tpr->rx_std_prod_idx = tp->rx_pending;
 +      tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 +
 +      tpr->rx_jmb_prod_idx =
 +              tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
 +      tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 +
 +      tg3_rings_reset(tp);
 +
 +      /* Initialize MAC address and backoff seed. */
 +      __tg3_set_mac_addr(tp, 0);
 +
 +      /* MTU + ethernet header + FCS + optional VLAN tag */
 +      tw32(MAC_RX_MTU_SIZE,
 +           tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
 +
 +      /* The slot time is changed by tg3_setup_phy if we
 +       * run at gigabit with half duplex.
 +       */
 +      val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 +            (6 << TX_LENGTHS_IPG_SHIFT) |
 +            (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              val |= tr32(MAC_TX_LENGTHS) &
 +                     (TX_LENGTHS_JMB_FRM_LEN_MSK |
 +                      TX_LENGTHS_CNT_DWN_VAL_MSK);
 +
 +      tw32(MAC_TX_LENGTHS, val);
 +
 +      /* Receive rules. */
 +      tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
 +      tw32(RCVLPC_CONFIG, 0x0181);
 +
 +      /* Calculate RDMAC_MODE setting early, we need it to determine
 +       * the RCVLPC_STATE_ENABLE mask.
 +       */
 +      rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
 +                    RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
 +                    RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
 +                    RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
 +                    RDMAC_MODE_LNGREAD_ENAB);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
 +              rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 +              rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
 +                            RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
 +                            RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
 +          tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
 +              if (tg3_flag(tp, TSO_CAPABLE) &&
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 +                      rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
 +              } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
 +                         !tg3_flag(tp, IS_5788)) {
 +                      rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 +              }
 +      }
 +
 +      if (tg3_flag(tp, PCI_EXPRESS))
 +              rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 +
 +      if (tg3_flag(tp, HW_TSO_1) ||
 +          tg3_flag(tp, HW_TSO_2) ||
 +          tg3_flag(tp, HW_TSO_3))
 +              rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
 +
 +      if (tg3_flag(tp, 57765_PLUS) ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 +              rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 +          tg3_flag(tp, 57765_PLUS)) {
 +              val = tr32(TG3_RDMA_RSRVCTRL_REG);
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +                      val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
 +                               TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
 +                               TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
 +                      val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
 +                             TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
 +                             TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
 +              }
 +              tw32(TG3_RDMA_RSRVCTRL_REG,
 +                   val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +              val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
 +              tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
 +                   TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
 +                   TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
 +      }
 +
 +      /* Receive/send statistics. */
 +      if (tg3_flag(tp, 5750_PLUS)) {
 +              val = tr32(RCVLPC_STATS_ENABLE);
 +              val &= ~RCVLPC_STATSENAB_DACK_FIX;
 +              tw32(RCVLPC_STATS_ENABLE, val);
 +      } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
 +                 tg3_flag(tp, TSO_CAPABLE)) {
 +              val = tr32(RCVLPC_STATS_ENABLE);
 +              val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
 +              tw32(RCVLPC_STATS_ENABLE, val);
 +      } else {
 +              tw32(RCVLPC_STATS_ENABLE, 0xffffff);
 +      }
 +      tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
 +      tw32(SNDDATAI_STATSENAB, 0xffffff);
 +      tw32(SNDDATAI_STATSCTRL,
 +           (SNDDATAI_SCTRL_ENABLE |
 +            SNDDATAI_SCTRL_FASTUPD));
 +
 +      /* Setup host coalescing engine. */
 +      tw32(HOSTCC_MODE, 0);
 +      for (i = 0; i < 2000; i++) {
 +              if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
 +                      break;
 +              udelay(10);
 +      }
 +
 +      __tg3_set_coalesce(tp, &tp->coal);
 +
 +      if (!tg3_flag(tp, 5705_PLUS)) {
 +              /* Status/statistics block address.  See tg3_timer,
 +               * the tg3_periodic_fetch_stats call there, and
 +               * tg3_get_stats to see how this works for 5705/5750 chips.
 +               */
 +              tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 +                   ((u64) tp->stats_mapping >> 32));
 +              tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
 +                   ((u64) tp->stats_mapping & 0xffffffff));
 +              tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
 +
 +              tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
 +
 +              /* Clear statistics and status block memory areas */
 +              for (i = NIC_SRAM_STATS_BLK;
 +                   i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
 +                   i += sizeof(u32)) {
 +                      tg3_write_mem(tp, i, 0);
 +                      udelay(40);
 +              }
 +      }
 +
 +      tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
 +
 +      tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
 +      tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
 +      if (!tg3_flag(tp, 5705_PLUS))
 +              tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
 +
 +      if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
 +              tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 +              /* reset to prevent losing 1st rx packet intermittently */
 +              tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 +              udelay(10);
 +      }
 +
 +      tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
 +                      MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
 +                      MAC_MODE_FHDE_ENABLE;
 +      if (tg3_flag(tp, ENABLE_APE))
 +              tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
 +      if (!tg3_flag(tp, 5705_PLUS) &&
 +          !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
 +              tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 +      tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
 +      udelay(40);
 +
 +      /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
 +       * If TG3_FLAG_IS_NIC is zero, we should read the
 +       * register to preserve the GPIO settings for LOMs. The GPIOs,
 +       * whether used as inputs or outputs, are set by boot code after
 +       * reset.
 +       */
 +      if (!tg3_flag(tp, IS_NIC)) {
 +              u32 gpio_mask;
 +
 +              gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
 +                          GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
 +                          GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
 +
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
 +                      gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
 +                                   GRC_LCLCTRL_GPIO_OUTPUT3;
 +
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
 +                      gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
 +
 +              tp->grc_local_ctrl &= ~gpio_mask;
 +              tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
 +
 +              /* GPIO1 must be driven high for eeprom write protect */
 +              if (tg3_flag(tp, EEPROM_WRITE_PROT))
 +                      tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
 +                                             GRC_LCLCTRL_GPIO_OUTPUT1);
 +      }
 +      tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 +      udelay(100);
 +
 +      if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
 +              val = tr32(MSGINT_MODE);
 +              val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
 +              if (!tg3_flag(tp, 1SHOT_MSI))
 +                      val |= MSGINT_MODE_ONE_SHOT_DISABLE;
 +              tw32(MSGINT_MODE, val);
 +      }
 +
 +      if (!tg3_flag(tp, 5705_PLUS)) {
 +              tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
 +              udelay(40);
 +      }
 +
 +      val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
 +             WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
 +             WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
 +             WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
 +             WDMAC_MODE_LNGREAD_ENAB);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
 +          tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
 +              if (tg3_flag(tp, TSO_CAPABLE) &&
 +                  (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
 +                   tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
 +                      /* nothing */
 +              } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
 +                         !tg3_flag(tp, IS_5788)) {
 +                      val |= WDMAC_MODE_RX_ACCEL;
 +              }
 +      }
 +
 +      /* Enable host coalescing bug fix */
 +      if (tg3_flag(tp, 5755_PLUS))
 +              val |= WDMAC_MODE_STATUS_TAG_FIX;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 +              val |= WDMAC_MODE_BURST_ALL_DATA;
 +
 +      tw32_f(WDMAC_MODE, val);
 +      udelay(40);
 +
 +      if (tg3_flag(tp, PCIX_MODE)) {
 +              u16 pcix_cmd;
 +
 +              pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 +                                   &pcix_cmd);
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
 +                      pcix_cmd &= ~PCI_X_CMD_MAX_READ;
 +                      pcix_cmd |= PCI_X_CMD_READ_2K;
 +              } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 +                      pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
 +                      pcix_cmd |= PCI_X_CMD_READ_2K;
 +              }
 +              pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 +                                    pcix_cmd);
 +      }
 +
 +      tw32_f(RDMAC_MODE, rdmac_mode);
 +      udelay(40);
 +
 +      tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
 +      if (!tg3_flag(tp, 5705_PLUS))
 +              tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
 +              tw32(SNDDATAC_MODE,
 +                   SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
 +      else
 +              tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
 +
 +      tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
 +      tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
 +      val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
 +      if (tg3_flag(tp, LRG_PROD_RING_CAP))
 +              val |= RCVDBDI_MODE_LRG_RING_SZ;
 +      tw32(RCVDBDI_MODE, val);
 +      tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
 +      if (tg3_flag(tp, HW_TSO_1) ||
 +          tg3_flag(tp, HW_TSO_2) ||
 +          tg3_flag(tp, HW_TSO_3))
 +              tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
 +      val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
 +      if (tg3_flag(tp, ENABLE_TSS))
 +              val |= SNDBDI_MODE_MULTI_TXQ_EN;
 +      tw32(SNDBDI_MODE, val);
 +      tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
 +
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
 +              err = tg3_load_5701_a0_firmware_fix(tp);
 +              if (err)
 +                      return err;
 +      }
 +
 +      if (tg3_flag(tp, TSO_CAPABLE)) {
 +              err = tg3_load_tso_firmware(tp);
 +              if (err)
 +                      return err;
 +      }
 +
 +      tp->tx_mode = TX_MODE_ENABLE;
 +
 +      if (tg3_flag(tp, 5755_PLUS) ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 +              tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +              val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
 +              tp->tx_mode &= ~val;
 +              tp->tx_mode |= tr32(MAC_TX_MODE) & val;
 +      }
 +
 +      tw32_f(MAC_TX_MODE, tp->tx_mode);
 +      udelay(100);
 +
 +      if (tg3_flag(tp, ENABLE_RSS)) {
 +              int i = 0;
 +              u32 reg = MAC_RSS_INDIR_TBL_0;
 +
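 +              /* Program the RSS indirection table across the available RX vectors. */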
 +              if (tp->irq_cnt == 2) {
 +                      for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
 +                              tw32(reg, 0x0);
 +                              reg += 4;
 +                      }
 +              } else {
 +                      u32 val;
 +
 +                      while (i < TG3_RSS_INDIR_TBL_SIZE) {
 +                              val = i % (tp->irq_cnt - 1);
 +                              i++;
 +                              for (; i % 8; i++) {
 +                                      val <<= 4;
 +                                      val |= (i % (tp->irq_cnt - 1));
 +                              }
 +                              tw32(reg, val);
 +                              reg += 4;
 +                      }
 +              }
 +
 +              /* Setup the "secret" hash key. */
 +              tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
 +              tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
 +              tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
 +              tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
 +              tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
 +              tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
 +              tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
 +              tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
 +              tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
 +              tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
 +      }
 +
 +      tp->rx_mode = RX_MODE_ENABLE;
 +      if (tg3_flag(tp, 5755_PLUS))
 +              tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
 +
 +      if (tg3_flag(tp, ENABLE_RSS))
 +              tp->rx_mode |= RX_MODE_RSS_ENABLE |
 +                             RX_MODE_RSS_ITBL_HASH_BITS_7 |
 +                             RX_MODE_RSS_IPV6_HASH_EN |
 +                             RX_MODE_RSS_TCP_IPV6_HASH_EN |
 +                             RX_MODE_RSS_IPV4_HASH_EN |
 +                             RX_MODE_RSS_TCP_IPV4_HASH_EN;
 +
 +      tw32_f(MAC_RX_MODE, tp->rx_mode);
 +      udelay(10);
 +
 +      tw32(MAC_LED_CTRL, tp->led_ctrl);
 +
 +      tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
 +      if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 +              tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 +              udelay(10);
 +      }
 +      tw32_f(MAC_RX_MODE, tp->rx_mode);
 +      udelay(10);
 +
 +      if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 +              if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
 +                      !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
 +                      /* Set drive transmission level to 1.2V, but only
 +                       * if the signal pre-emphasis bit is not set.
 +                       */
 +                      val = tr32(MAC_SERDES_CFG);
 +                      val &= 0xfffff000;
 +                      val |= 0x880;
 +                      tw32(MAC_SERDES_CFG, val);
 +              }
 +              if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
 +                      tw32(MAC_SERDES_CFG, 0x616000);
 +      }
 +
 +      /* Prevent chip from dropping frames when flow control
 +       * is enabled.
 +       */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +              val = 1;
 +      else
 +              val = 2;
 +      tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
 +          (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 +              /* Use hardware link auto-negotiation */
 +              tg3_flag_set(tp, HW_AUTONEG);
 +      }
 +
 +      if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 +              u32 tmp;
 +
 +              tmp = tr32(SERDES_RX_CTRL);
 +              tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
 +              tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
 +              tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
 +              tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 +      }
 +
 +      if (!tg3_flag(tp, USE_PHYLIB)) {
 +              if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
 +                      tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
 +                      tp->link_config.speed = tp->link_config.orig_speed;
 +                      tp->link_config.duplex = tp->link_config.orig_duplex;
 +                      tp->link_config.autoneg = tp->link_config.orig_autoneg;
 +              }
 +
 +              err = tg3_setup_phy(tp, 0);
 +              if (err)
 +                      return err;
 +
 +              if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 +                  !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 +                      u32 tmp;
 +
 +                      /* Clear CRC stats. */
 +                      if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
 +                              tg3_writephy(tp, MII_TG3_TEST1,
 +                                           tmp | MII_TG3_TEST1_CRC_EN);
 +                              tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
 +                      }
 +              }
 +      }
 +
 +      __tg3_set_rx_mode(tp->dev);
 +
 +      /* Initialize receive rules. */
 +      tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
 +      tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
 +      tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
 +      tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
 +
 +      if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
 +              limit = 8;
 +      else
 +              limit = 16;
 +      if (tg3_flag(tp, ENABLE_ASF))
 +              limit -= 4;
 +      switch (limit) {
 +      case 16:
 +              tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
 +      case 15:
 +              tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
 +      case 14:
 +              tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
 +      case 13:
 +              tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
 +      case 12:
 +              tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
 +      case 11:
 +              tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
 +      case 10:
 +              tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
 +      case 9:
 +              tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
 +      case 8:
 +              tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
 +      case 7:
 +              tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
 +      case 6:
 +              tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
 +      case 5:
 +              tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
 +      case 4:
 +              /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
 +      case 3:
 +              /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
 +      case 2:
 +      case 1:
 +      default:
 +              break;
 +      }
 +
 +      if (tg3_flag(tp, ENABLE_APE))
 +              /* Write our heartbeat update interval to APE. */
 +              tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
 +                              APE_HOST_HEARTBEAT_INT_DISABLE);
 +
 +      tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 +
 +      return 0;
 +}
 +
 +/* Called at device open time to get the chip ready for
 + * packet processing.  Invoked with tp->lock held.
 + */
 +static int tg3_init_hw(struct tg3 *tp, int reset_phy)
 +{
 +      tg3_switch_clocks(tp);
 +
 +      tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
 +
 +      return tg3_reset_hw(tp, reset_phy);
 +}
 +
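 +/* Accumulate a 32-bit hardware counter into a 64-bit {high,low} stat,
 + * carrying into the high word when the low word wraps.
 + */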
 +#define TG3_STAT_ADD32(PSTAT, REG) \
 +do {  u32 __val = tr32(REG); \
 +      (PSTAT)->low += __val; \
 +      if ((PSTAT)->low < __val) \
 +              (PSTAT)->high += 1; \
 +} while (0)
 +
 +static void tg3_periodic_fetch_stats(struct tg3 *tp)
 +{
 +      struct tg3_hw_stats *sp = tp->hw_stats;
 +
 +      if (!netif_carrier_ok(tp->dev))
 +              return;
 +
 +      TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
 +      TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
 +      TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
 +      TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
 +      TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
 +      TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
 +      TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
 +      TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
 +      TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
 +      TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
 +      TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
 +      TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
 +      TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
 +
 +      TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
 +      TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
 +      TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
 +      TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
 +      TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
 +      TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
 +      TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
 +      TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
 +      TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
 +      TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
 +      TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
 +      TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
 +      TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
 +      TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
 +
 +      TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
 +          tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
 +          tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
 +              TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
 +      } else {
 +              u32 val = tr32(HOSTCC_FLOW_ATTN);
 +              val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
 +              if (val) {
 +                      tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
 +                      sp->rx_discards.low += val;
 +                      if (sp->rx_discards.low < val)
 +                              sp->rx_discards.high += 1;
 +              }
 +              sp->mbuf_lwm_thresh_hit = sp->rx_discards;
 +      }
 +      TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
 +}
 +
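 +/* If a vector still has work pending but its consumer indices have not
 + * moved since the last check, assume an MSI was missed and invoke the
 + * handler directly.
 + */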
 +static void tg3_chk_missed_msi(struct tg3 *tp)
 +{
 +      u32 i;
 +
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +
 +              if (tg3_has_work(tnapi)) {
 +                      if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
 +                          tnapi->last_tx_cons == tnapi->tx_cons) {
 +                              if (tnapi->chk_msi_cnt < 1) {
 +                                      tnapi->chk_msi_cnt++;
 +                                      return;
 +                              }
 +                              tg3_msi(0, tnapi);
 +                      }
 +              }
 +              tnapi->chk_msi_cnt = 0;
 +              tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
 +              tnapi->last_tx_cons = tnapi->tx_cons;
 +      }
 +}
 +
 +static void tg3_timer(unsigned long __opaque)
 +{
 +      struct tg3 *tp = (struct tg3 *) __opaque;
 +
 +      if (tp->irq_sync)
 +              goto restart_timer;
 +
 +      spin_lock(&tp->lock);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +              tg3_chk_missed_msi(tp);
 +
 +      if (!tg3_flag(tp, TAGGED_STATUS)) {
 +              /* All of this garbage is needed because, when using
 +               * non-tagged IRQ status, the mailbox/status_block protocol
 +               * the chip uses with the CPU is race prone.
 +               */
 +              if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
 +                      tw32(GRC_LOCAL_CTRL,
 +                           tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
 +              } else {
 +                      tw32(HOSTCC_MODE, tp->coalesce_mode |
 +                           HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
 +              }
 +
 +              if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 +                      tg3_flag_set(tp, RESTART_TIMER);
 +                      spin_unlock(&tp->lock);
 +                      schedule_work(&tp->reset_task);
 +                      return;
 +              }
 +      }
 +
 +      /* This part only runs once per second. */
 +      if (!--tp->timer_counter) {
 +              if (tg3_flag(tp, 5705_PLUS))
 +                      tg3_periodic_fetch_stats(tp);
 +
 +              if (tp->setlpicnt && !--tp->setlpicnt)
 +                      tg3_phy_eee_enable(tp);
 +
 +              if (tg3_flag(tp, USE_LINKCHG_REG)) {
 +                      u32 mac_stat;
 +                      int phy_event;
 +
 +                      mac_stat = tr32(MAC_STATUS);
 +
 +                      phy_event = 0;
 +                      if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
 +                              if (mac_stat & MAC_STATUS_MI_INTERRUPT)
 +                                      phy_event = 1;
 +                      } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
 +                              phy_event = 1;
 +
 +                      if (phy_event)
 +                              tg3_setup_phy(tp, 0);
 +              } else if (tg3_flag(tp, POLL_SERDES)) {
 +                      u32 mac_stat = tr32(MAC_STATUS);
 +                      int need_setup = 0;
 +
 +                      if (netif_carrier_ok(tp->dev) &&
 +                          (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
 +                              need_setup = 1;
 +                      }
 +                      if (!netif_carrier_ok(tp->dev) &&
 +                          (mac_stat & (MAC_STATUS_PCS_SYNCED |
 +                                       MAC_STATUS_SIGNAL_DET))) {
 +                              need_setup = 1;
 +                      }
 +                      if (need_setup) {
 +                              if (!tp->serdes_counter) {
 +                                      tw32_f(MAC_MODE,
 +                                           (tp->mac_mode &
 +                                            ~MAC_MODE_PORT_MODE_MASK));
 +                                      udelay(40);
 +                                      tw32_f(MAC_MODE, tp->mac_mode);
 +                                      udelay(40);
 +                              }
 +                              tg3_setup_phy(tp, 0);
 +                      }
 +              } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 +                         tg3_flag(tp, 5780_CLASS)) {
 +                      tg3_serdes_parallel_detect(tp);
 +              }
 +
 +              tp->timer_counter = tp->timer_multiplier;
 +      }
 +
 +      /* Heartbeat is only sent once every 2 seconds.
 +       *
 +       * The heartbeat is to tell the ASF firmware that the host
 +       * driver is still alive.  In the event that the OS crashes,
 +       * ASF needs to reset the hardware to free up the FIFO space
 +       * that may be filled with rx packets destined for the host.
 +       * If the FIFO is full, ASF will no longer function properly.
 +       *
 +       * Unintended resets have been reported on real-time kernels
 +       * where the timer doesn't run on time.  Netpoll will also have
 +       * the same problem.
 +       *
 +       * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
 +       * to check the ring condition when the heartbeat is expiring
 +       * before doing the reset.  This will prevent most unintended
 +       * resets.
 +       */
 +      if (!--tp->asf_counter) {
 +              if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
 +                      tg3_wait_for_event_ack(tp);
 +
 +                      tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
 +                                    FWCMD_NICDRV_ALIVE3);
 +                      tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
 +                      tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
 +                                    TG3_FW_UPDATE_TIMEOUT_SEC);
 +
 +                      tg3_generate_fw_event(tp);
 +              }
 +              tp->asf_counter = tp->asf_multiplier;
 +      }
 +
 +      spin_unlock(&tp->lock);
 +
 +restart_timer:
 +      tp->timer.expires = jiffies + tp->timer_offset;
 +      add_timer(&tp->timer);
 +}
 +
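 +/* Pick the right interrupt handler and flags for this vector, then request it. */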
 +static int tg3_request_irq(struct tg3 *tp, int irq_num)
 +{
 +      irq_handler_t fn;
 +      unsigned long flags;
 +      char *name;
 +      struct tg3_napi *tnapi = &tp->napi[irq_num];
 +
 +      if (tp->irq_cnt == 1)
 +              name = tp->dev->name;
 +      else {
 +              name = &tnapi->irq_lbl[0];
 +              snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
 +              name[IFNAMSIZ-1] = 0;
 +      }
 +
 +      if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
 +              fn = tg3_msi;
 +              if (tg3_flag(tp, 1SHOT_MSI))
 +                      fn = tg3_msi_1shot;
 +              flags = 0;
 +      } else {
 +              fn = tg3_interrupt;
 +              if (tg3_flag(tp, TAGGED_STATUS))
 +                      fn = tg3_interrupt_tagged;
 +              flags = IRQF_SHARED;
 +      }
 +
 +      return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
 +}
 +
 +static int tg3_test_interrupt(struct tg3 *tp)
 +{
 +      struct tg3_napi *tnapi = &tp->napi[0];
 +      struct net_device *dev = tp->dev;
 +      int err, i, intr_ok = 0;
 +      u32 val;
 +
 +      if (!netif_running(dev))
 +              return -ENODEV;
 +
 +      tg3_disable_ints(tp);
 +
 +      free_irq(tnapi->irq_vec, tnapi);
 +
 +      /*
 +       * Turn off MSI one shot mode.  Otherwise this test has no
 +       * observable way to know whether the interrupt was delivered.
 +       */
 +      if (tg3_flag(tp, 57765_PLUS)) {
 +              val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
 +              tw32(MSGINT_MODE, val);
 +      }
 +
 +      err = request_irq(tnapi->irq_vec, tg3_test_isr,
 +                        IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
 +      if (err)
 +              return err;
 +
 +      tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
 +      tg3_enable_ints(tp);
 +
 +      tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 +             tnapi->coal_now);
 +
 +      for (i = 0; i < 5; i++) {
 +              u32 int_mbox, misc_host_ctrl;
 +
 +              int_mbox = tr32_mailbox(tnapi->int_mbox);
 +              misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
 +
 +              if ((int_mbox != 0) ||
 +                  (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
 +                      intr_ok = 1;
 +                      break;
 +              }
 +
 +              if (tg3_flag(tp, 57765_PLUS) &&
 +                  tnapi->hw_status->status_tag != tnapi->last_tag)
 +                      tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
 +
 +              msleep(10);
 +      }
 +
 +      tg3_disable_ints(tp);
 +
 +      free_irq(tnapi->irq_vec, tnapi);
 +
 +      err = tg3_request_irq(tp, 0);
 +
 +      if (err)
 +              return err;
 +
 +      if (intr_ok) {
 +              /* Reenable MSI one shot mode. */
 +              if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
 +                      val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
 +                      tw32(MSGINT_MODE, val);
 +              }
 +              return 0;
 +      }
 +
 +      return -EIO;
 +}
 +
 +/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
 + * mode is successfully restored.
 + */
 +static int tg3_test_msi(struct tg3 *tp)
 +{
 +      int err;
 +      u16 pci_cmd;
 +
 +      if (!tg3_flag(tp, USING_MSI))
 +              return 0;
 +
 +      /* Turn off SERR reporting in case MSI terminates with Master
 +       * Abort.
 +       */
 +      pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
 +      pci_write_config_word(tp->pdev, PCI_COMMAND,
 +                            pci_cmd & ~PCI_COMMAND_SERR);
 +
 +      err = tg3_test_interrupt(tp);
 +
 +      pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
 +
 +      if (!err)
 +              return 0;
 +
 +      /* other failures */
 +      if (err != -EIO)
 +              return err;
 +
 +      /* MSI test failed, go back to INTx mode */
 +      netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
 +                  "to INTx mode. Please report this failure to the PCI "
 +                  "maintainer and include system chipset information\n");
 +
 +      free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
 +
 +      pci_disable_msi(tp->pdev);
 +
 +      tg3_flag_clear(tp, USING_MSI);
 +      tp->napi[0].irq_vec = tp->pdev->irq;
 +
 +      err = tg3_request_irq(tp, 0);
 +      if (err)
 +              return err;
 +
 +      /* Need to reset the chip because the MSI cycle may have terminated
 +       * with Master Abort.
 +       */
 +      tg3_full_lock(tp, 1);
 +
 +      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +      err = tg3_init_hw(tp, 1);
 +
 +      tg3_full_unlock(tp);
 +
 +      if (err)
 +              free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
 +
 +      return err;
 +}
 +
 +static int tg3_request_firmware(struct tg3 *tp)
 +{
 +      const __be32 *fw_data;
 +
 +      if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
 +              netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
 +                         tp->fw_needed);
 +              return -ENOENT;
 +      }
 +
 +      fw_data = (void *)tp->fw->data;
 +
 +      /* Firmware blob starts with version numbers, followed by
 +       * start address and _full_ length including BSS sections
 +       * (which must be longer than the actual data, of course).
 +       */
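 +      /* A sketch of the assumed blob layout (three __be32 header words,
 +       * hence the 12-byte header size used in the check below):
 +       *   fw_data[0]  firmware version
 +       *   fw_data[1]  start (load) address
 +       *   fw_data[2]  full image length, including BSS
 +       */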
 +
 +      tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
 +      if (tp->fw_len < (tp->fw->size - 12)) {
 +              netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
 +                         tp->fw_len, tp->fw_needed);
 +              release_firmware(tp->fw);
 +              tp->fw = NULL;
 +              return -EINVAL;
 +      }
 +
 +      /* We no longer need firmware; we have it. */
 +      tp->fw_needed = NULL;
 +      return 0;
 +}
 +
 +static bool tg3_enable_msix(struct tg3 *tp)
 +{
 +      int i, rc, cpus = num_online_cpus();
 +      struct msix_entry msix_ent[tp->irq_max];
 +
 +      if (cpus == 1)
 +              /* Just fallback to the simpler MSI mode. */
 +              return false;
 +
 +      /*
 +       * We want as many rx rings enabled as there are cpus.
 +       * The first MSIX vector only deals with link interrupts, etc,
 +       * so we add one to the number of vectors we are requesting.
 +       */
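 +      /* For example, assuming irq_max is large enough, a 4-CPU system
 +       * requests 5 vectors here: vector 0 for link/misc interrupts and
 +       * vectors 1-4 for rx rings.
 +       */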
 +      tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
 +
 +      for (i = 0; i < tp->irq_max; i++) {
 +              msix_ent[i].entry  = i;
 +              msix_ent[i].vector = 0;
 +      }
 +
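 +      /* With this kernel's pci_enable_msix(), a positive return value is
 +       * the number of vectors actually available; retry with that smaller
 +       * count and record the reduced irq_cnt if the retry succeeds.
 +       */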
 +      rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
 +      if (rc < 0) {
 +              return false;
 +      } else if (rc != 0) {
 +              if (pci_enable_msix(tp->pdev, msix_ent, rc))
 +                      return false;
 +              netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
 +                            tp->irq_cnt, rc);
 +              tp->irq_cnt = rc;
 +      }
 +
 +      for (i = 0; i < tp->irq_max; i++)
 +              tp->napi[i].irq_vec = msix_ent[i].vector;
 +
 +      netif_set_real_num_tx_queues(tp->dev, 1);
 +      rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
 +      if (netif_set_real_num_rx_queues(tp->dev, rc)) {
 +              pci_disable_msix(tp->pdev);
 +              return false;
 +      }
 +
 +      if (tp->irq_cnt > 1) {
 +              tg3_flag_set(tp, ENABLE_RSS);
 +
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +                      tg3_flag_set(tp, ENABLE_TSS);
 +                      netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
 +              }
 +      }
 +
 +      return true;
 +}
 +
 +static void tg3_ints_init(struct tg3 *tp)
 +{
 +      if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
 +          !tg3_flag(tp, TAGGED_STATUS)) {
 +              /* All MSI supporting chips should support tagged
 +               * status.  If that is not the case, warn and fall back
 +               * to INTx.
 +               */
 +              netdev_warn(tp->dev,
 +                          "MSI without TAGGED_STATUS? Not using MSI\n");
 +              goto defcfg;
 +      }
 +
 +      if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
 +              tg3_flag_set(tp, USING_MSIX);
 +      else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
 +              tg3_flag_set(tp, USING_MSI);
 +
 +      if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
 +              u32 msi_mode = tr32(MSGINT_MODE);
 +              if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
 +                      msi_mode |= MSGINT_MODE_MULTIVEC_EN;
 +              if (!tg3_flag(tp, 1SHOT_MSI))
 +                      msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
 +              tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
 +      }
 +defcfg:
 +      if (!tg3_flag(tp, USING_MSIX)) {
 +              tp->irq_cnt = 1;
 +              tp->napi[0].irq_vec = tp->pdev->irq;
 +              netif_set_real_num_tx_queues(tp->dev, 1);
 +              netif_set_real_num_rx_queues(tp->dev, 1);
 +      }
 +}
 +
 +static void tg3_ints_fini(struct tg3 *tp)
 +{
 +      if (tg3_flag(tp, USING_MSIX))
 +              pci_disable_msix(tp->pdev);
 +      else if (tg3_flag(tp, USING_MSI))
 +              pci_disable_msi(tp->pdev);
 +      tg3_flag_clear(tp, USING_MSI);
 +      tg3_flag_clear(tp, USING_MSIX);
 +      tg3_flag_clear(tp, ENABLE_RSS);
 +      tg3_flag_clear(tp, ENABLE_TSS);
 +}
 +
 +static int tg3_open(struct net_device *dev)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      int i, err;
 +
 +      if (tp->fw_needed) {
 +              err = tg3_request_firmware(tp);
 +              if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
 +                      if (err)
 +                              return err;
 +              } else if (err) {
 +                      netdev_warn(tp->dev, "TSO capability disabled\n");
 +                      tg3_flag_clear(tp, TSO_CAPABLE);
 +              } else if (!tg3_flag(tp, TSO_CAPABLE)) {
 +                      netdev_notice(tp->dev, "TSO capability restored\n");
 +                      tg3_flag_set(tp, TSO_CAPABLE);
 +              }
 +      }
 +
 +      netif_carrier_off(tp->dev);
 +
 +      err = tg3_power_up(tp);
 +      if (err)
 +              return err;
 +
 +      tg3_full_lock(tp, 0);
 +
 +      tg3_disable_ints(tp);
 +      tg3_flag_clear(tp, INIT_COMPLETE);
 +
 +      tg3_full_unlock(tp);
 +
 +      /*
 +       * Setup interrupts first so we know how
 +       * many NAPI resources to allocate
 +       */
 +      tg3_ints_init(tp);
 +
 +      /* The placement of this call is tied
 +       * to the setup and use of Host TX descriptors.
 +       */
 +      err = tg3_alloc_consistent(tp);
 +      if (err)
 +              goto err_out1;
 +
 +      tg3_napi_init(tp);
 +
 +      tg3_napi_enable(tp);
 +
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +              err = tg3_request_irq(tp, i);
 +              if (err) {
 +                      for (i--; i >= 0; i--) {
 +                              tnapi = &tp->napi[i];
 +                              free_irq(tnapi->irq_vec, tnapi);
 +                      }
 +                      break;
 +              }
 +      }
 +
 +      if (err)
 +              goto err_out2;
 +
 +      tg3_full_lock(tp, 0);
 +
 +      err = tg3_init_hw(tp, 1);
 +      if (err) {
 +              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +              tg3_free_rings(tp);
 +      } else {
 +              if (tg3_flag(tp, TAGGED_STATUS) &&
 +                      GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
 +                      GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
 +                      tp->timer_offset = HZ;
 +              else
 +                      tp->timer_offset = HZ / 10;
 +
 +              BUG_ON(tp->timer_offset > HZ);
 +              tp->timer_counter = tp->timer_multiplier =
 +                      (HZ / tp->timer_offset);
 +              tp->asf_counter = tp->asf_multiplier =
 +                      ((HZ / tp->timer_offset) * 2);
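 +              /* i.e. link maintenance in tg3_timer() runs about once per
 +               * second and the ASF heartbeat about every 2 seconds, no
 +               * matter which timer_offset was chosen above.
 +               */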
 +
 +              init_timer(&tp->timer);
 +              tp->timer.expires = jiffies + tp->timer_offset;
 +              tp->timer.data = (unsigned long) tp;
 +              tp->timer.function = tg3_timer;
 +      }
 +
 +      tg3_full_unlock(tp);
 +
 +      if (err)
 +              goto err_out3;
 +
 +      if (tg3_flag(tp, USING_MSI)) {
 +              err = tg3_test_msi(tp);
 +
 +              if (err) {
 +                      tg3_full_lock(tp, 0);
 +                      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +                      tg3_free_rings(tp);
 +                      tg3_full_unlock(tp);
 +
 +                      goto err_out2;
 +              }
 +
 +              if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
 +                      u32 val = tr32(PCIE_TRANSACTION_CFG);
 +
 +                      tw32(PCIE_TRANSACTION_CFG,
 +                           val | PCIE_TRANS_CFG_1SHOT_MSI);
 +              }
 +      }
 +
 +      tg3_phy_start(tp);
 +
 +      tg3_full_lock(tp, 0);
 +
 +      add_timer(&tp->timer);
 +      tg3_flag_set(tp, INIT_COMPLETE);
 +      tg3_enable_ints(tp);
 +
 +      tg3_full_unlock(tp);
 +
 +      netif_tx_start_all_queues(dev);
 +
 +      /*
 +       * Reset the loopback feature if it was turned on while the device was
 +       * down; make sure that it is installed properly now.
 +       */
 +      if (dev->features & NETIF_F_LOOPBACK)
 +              tg3_set_loopback(dev, dev->features);
 +
 +      return 0;
 +
 +err_out3:
 +      for (i = tp->irq_cnt - 1; i >= 0; i--) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +              free_irq(tnapi->irq_vec, tnapi);
 +      }
 +
 +err_out2:
 +      tg3_napi_disable(tp);
 +      tg3_napi_fini(tp);
 +      tg3_free_consistent(tp);
 +
 +err_out1:
 +      tg3_ints_fini(tp);
 +      tg3_frob_aux_power(tp, false);
 +      pci_set_power_state(tp->pdev, PCI_D3hot);
 +      return err;
 +}
 +
 +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
 +                                               struct rtnl_link_stats64 *);
 +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
 +
 +static int tg3_close(struct net_device *dev)
 +{
 +      int i;
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      tg3_napi_disable(tp);
 +      cancel_work_sync(&tp->reset_task);
 +
 +      netif_tx_stop_all_queues(dev);
 +
 +      del_timer_sync(&tp->timer);
 +
 +      tg3_phy_stop(tp);
 +
 +      tg3_full_lock(tp, 1);
 +
 +      tg3_disable_ints(tp);
 +
 +      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +      tg3_free_rings(tp);
 +      tg3_flag_clear(tp, INIT_COMPLETE);
 +
 +      tg3_full_unlock(tp);
 +
 +      for (i = tp->irq_cnt - 1; i >= 0; i--) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +              free_irq(tnapi->irq_vec, tnapi);
 +      }
 +
 +      tg3_ints_fini(tp);
 +
 +      tg3_get_stats64(tp->dev, &tp->net_stats_prev);
 +
 +      memcpy(&tp->estats_prev, tg3_get_estats(tp),
 +             sizeof(tp->estats_prev));
 +
 +      tg3_napi_fini(tp);
 +
 +      tg3_free_consistent(tp);
 +
 +      tg3_power_down(tp);
 +
 +      netif_carrier_off(tp->dev);
 +
 +      return 0;
 +}
 +
 +static inline u64 get_stat64(tg3_stat64_t *val)
 +{
 +       return ((u64)val->high << 32) | ((u64)val->low);
 +}
 +
 +static u64 calc_crc_errors(struct tg3 *tp)
 +{
 +      struct tg3_hw_stats *hw_stats = tp->hw_stats;
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 +          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
 +              u32 val;
 +
 +              spin_lock_bh(&tp->lock);
 +              if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
 +                      tg3_writephy(tp, MII_TG3_TEST1,
 +                                   val | MII_TG3_TEST1_CRC_EN);
 +                      tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
 +              } else
 +                      val = 0;
 +              spin_unlock_bh(&tp->lock);
 +
 +              tp->phy_crc_errors += val;
 +
 +              return tp->phy_crc_errors;
 +      }
 +
 +      return get_stat64(&hw_stats->rx_fcs_errors);
 +}
 +
 +#define ESTAT_ADD(member) \
 +      estats->member =        old_estats->member + \
 +                              get_stat64(&hw_stats->member)
 +
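 +/* Each ethtool statistic is reported as the snapshot saved at the last
 + * tg3_close() (estats_prev) plus the live hardware counter, so the totals
 + * stay monotonic across close/open cycles.
 + */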
 +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
 +{
 +      struct tg3_ethtool_stats *estats = &tp->estats;
 +      struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
 +      struct tg3_hw_stats *hw_stats = tp->hw_stats;
 +
 +      if (!hw_stats)
 +              return old_estats;
 +
 +      ESTAT_ADD(rx_octets);
 +      ESTAT_ADD(rx_fragments);
 +      ESTAT_ADD(rx_ucast_packets);
 +      ESTAT_ADD(rx_mcast_packets);
 +      ESTAT_ADD(rx_bcast_packets);
 +      ESTAT_ADD(rx_fcs_errors);
 +      ESTAT_ADD(rx_align_errors);
 +      ESTAT_ADD(rx_xon_pause_rcvd);
 +      ESTAT_ADD(rx_xoff_pause_rcvd);
 +      ESTAT_ADD(rx_mac_ctrl_rcvd);
 +      ESTAT_ADD(rx_xoff_entered);
 +      ESTAT_ADD(rx_frame_too_long_errors);
 +      ESTAT_ADD(rx_jabbers);
 +      ESTAT_ADD(rx_undersize_packets);
 +      ESTAT_ADD(rx_in_length_errors);
 +      ESTAT_ADD(rx_out_length_errors);
 +      ESTAT_ADD(rx_64_or_less_octet_packets);
 +      ESTAT_ADD(rx_65_to_127_octet_packets);
 +      ESTAT_ADD(rx_128_to_255_octet_packets);
 +      ESTAT_ADD(rx_256_to_511_octet_packets);
 +      ESTAT_ADD(rx_512_to_1023_octet_packets);
 +      ESTAT_ADD(rx_1024_to_1522_octet_packets);
 +      ESTAT_ADD(rx_1523_to_2047_octet_packets);
 +      ESTAT_ADD(rx_2048_to_4095_octet_packets);
 +      ESTAT_ADD(rx_4096_to_8191_octet_packets);
 +      ESTAT_ADD(rx_8192_to_9022_octet_packets);
 +
 +      ESTAT_ADD(tx_octets);
 +      ESTAT_ADD(tx_collisions);
 +      ESTAT_ADD(tx_xon_sent);
 +      ESTAT_ADD(tx_xoff_sent);
 +      ESTAT_ADD(tx_flow_control);
 +      ESTAT_ADD(tx_mac_errors);
 +      ESTAT_ADD(tx_single_collisions);
 +      ESTAT_ADD(tx_mult_collisions);
 +      ESTAT_ADD(tx_deferred);
 +      ESTAT_ADD(tx_excessive_collisions);
 +      ESTAT_ADD(tx_late_collisions);
 +      ESTAT_ADD(tx_collide_2times);
 +      ESTAT_ADD(tx_collide_3times);
 +      ESTAT_ADD(tx_collide_4times);
 +      ESTAT_ADD(tx_collide_5times);
 +      ESTAT_ADD(tx_collide_6times);
 +      ESTAT_ADD(tx_collide_7times);
 +      ESTAT_ADD(tx_collide_8times);
 +      ESTAT_ADD(tx_collide_9times);
 +      ESTAT_ADD(tx_collide_10times);
 +      ESTAT_ADD(tx_collide_11times);
 +      ESTAT_ADD(tx_collide_12times);
 +      ESTAT_ADD(tx_collide_13times);
 +      ESTAT_ADD(tx_collide_14times);
 +      ESTAT_ADD(tx_collide_15times);
 +      ESTAT_ADD(tx_ucast_packets);
 +      ESTAT_ADD(tx_mcast_packets);
 +      ESTAT_ADD(tx_bcast_packets);
 +      ESTAT_ADD(tx_carrier_sense_errors);
 +      ESTAT_ADD(tx_discards);
 +      ESTAT_ADD(tx_errors);
 +
 +      ESTAT_ADD(dma_writeq_full);
 +      ESTAT_ADD(dma_write_prioq_full);
 +      ESTAT_ADD(rxbds_empty);
 +      ESTAT_ADD(rx_discards);
 +      ESTAT_ADD(rx_errors);
 +      ESTAT_ADD(rx_threshold_hit);
 +
 +      ESTAT_ADD(dma_readq_full);
 +      ESTAT_ADD(dma_read_prioq_full);
 +      ESTAT_ADD(tx_comp_queue_full);
 +
 +      ESTAT_ADD(ring_set_send_prod_index);
 +      ESTAT_ADD(ring_status_update);
 +      ESTAT_ADD(nic_irqs);
 +      ESTAT_ADD(nic_avoided_irqs);
 +      ESTAT_ADD(nic_tx_threshold_hit);
 +
 +      ESTAT_ADD(mbuf_lwm_thresh_hit);
 +
 +      return estats;
 +}
 +
 +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
 +                                               struct rtnl_link_stats64 *stats)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
 +      struct tg3_hw_stats *hw_stats = tp->hw_stats;
 +
 +      if (!hw_stats)
 +              return old_stats;
 +
 +      stats->rx_packets = old_stats->rx_packets +
 +              get_stat64(&hw_stats->rx_ucast_packets) +
 +              get_stat64(&hw_stats->rx_mcast_packets) +
 +              get_stat64(&hw_stats->rx_bcast_packets);
 +
 +      stats->tx_packets = old_stats->tx_packets +
 +              get_stat64(&hw_stats->tx_ucast_packets) +
 +              get_stat64(&hw_stats->tx_mcast_packets) +
 +              get_stat64(&hw_stats->tx_bcast_packets);
 +
 +      stats->rx_bytes = old_stats->rx_bytes +
 +              get_stat64(&hw_stats->rx_octets);
 +      stats->tx_bytes = old_stats->tx_bytes +
 +              get_stat64(&hw_stats->tx_octets);
 +
 +      stats->rx_errors = old_stats->rx_errors +
 +              get_stat64(&hw_stats->rx_errors);
 +      stats->tx_errors = old_stats->tx_errors +
 +              get_stat64(&hw_stats->tx_errors) +
 +              get_stat64(&hw_stats->tx_mac_errors) +
 +              get_stat64(&hw_stats->tx_carrier_sense_errors) +
 +              get_stat64(&hw_stats->tx_discards);
 +
 +      stats->multicast = old_stats->multicast +
 +              get_stat64(&hw_stats->rx_mcast_packets);
 +      stats->collisions = old_stats->collisions +
 +              get_stat64(&hw_stats->tx_collisions);
 +
 +      stats->rx_length_errors = old_stats->rx_length_errors +
 +              get_stat64(&hw_stats->rx_frame_too_long_errors) +
 +              get_stat64(&hw_stats->rx_undersize_packets);
 +
 +      stats->rx_over_errors = old_stats->rx_over_errors +
 +              get_stat64(&hw_stats->rxbds_empty);
 +      stats->rx_frame_errors = old_stats->rx_frame_errors +
 +              get_stat64(&hw_stats->rx_align_errors);
 +      stats->tx_aborted_errors = old_stats->tx_aborted_errors +
 +              get_stat64(&hw_stats->tx_discards);
 +      stats->tx_carrier_errors = old_stats->tx_carrier_errors +
 +              get_stat64(&hw_stats->tx_carrier_sense_errors);
 +
 +      stats->rx_crc_errors = old_stats->rx_crc_errors +
 +              calc_crc_errors(tp);
 +
 +      stats->rx_missed_errors = old_stats->rx_missed_errors +
 +              get_stat64(&hw_stats->rx_discards);
 +
 +      stats->rx_dropped = tp->rx_dropped;
 +
 +      return stats;
 +}
 +
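 +/* An observation about the helper below, not a documented interface:
 + * calc_crc() is a bit-serial CRC-32 over the reflected Ethernet polynomial
 + * 0xedb88320.  __tg3_set_rx_mode() keeps only the low 7 bits of the
 + * complemented result to select one of the 128 multicast hash-filter bits
 + * spread across MAC_HASH_REG_0..3.
 + */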
 +static inline u32 calc_crc(unsigned char *buf, int len)
 +{
 +      u32 reg;
 +      u32 tmp;
 +      int j, k;
 +
 +      reg = 0xffffffff;
 +
 +      for (j = 0; j < len; j++) {
 +              reg ^= buf[j];
 +
 +              for (k = 0; k < 8; k++) {
 +                      tmp = reg & 0x01;
 +
 +                      reg >>= 1;
 +
 +                      if (tmp)
 +                              reg ^= 0xedb88320;
 +              }
 +      }
 +
 +      return ~reg;
 +}
 +
 +static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
 +{
 +      /* accept or reject all multicast frames */
 +      tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
 +      tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
 +      tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
 +      tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
 +}
 +
 +static void __tg3_set_rx_mode(struct net_device *dev)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      u32 rx_mode;
 +
 +      rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
 +                                RX_MODE_KEEP_VLAN_TAG);
 +
 +#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
 +      /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
 +       * flag clear.
 +       */
 +      if (!tg3_flag(tp, ENABLE_ASF))
 +              rx_mode |= RX_MODE_KEEP_VLAN_TAG;
 +#endif
 +
 +      if (dev->flags & IFF_PROMISC) {
 +              /* Promiscuous mode. */
 +              rx_mode |= RX_MODE_PROMISC;
 +      } else if (dev->flags & IFF_ALLMULTI) {
 +              /* Accept all multicast. */
 +              tg3_set_multi(tp, 1);
 +      } else if (netdev_mc_empty(dev)) {
 +              /* Reject all multicast. */
 +              tg3_set_multi(tp, 0);
 +      } else {
 +              /* Accept one or more multicast(s). */
 +              struct netdev_hw_addr *ha;
 +              u32 mc_filter[4] = { 0, };
 +              u32 regidx;
 +              u32 bit;
 +              u32 crc;
 +
 +              netdev_for_each_mc_addr(ha, dev) {
 +                      crc = calc_crc(ha->addr, ETH_ALEN);
 +                      bit = ~crc & 0x7f;
 +                      regidx = (bit & 0x60) >> 5;
 +                      bit &= 0x1f;
 +                      mc_filter[regidx] |= (1 << bit);
 +              }
 +
 +              tw32(MAC_HASH_REG_0, mc_filter[0]);
 +              tw32(MAC_HASH_REG_1, mc_filter[1]);
 +              tw32(MAC_HASH_REG_2, mc_filter[2]);
 +              tw32(MAC_HASH_REG_3, mc_filter[3]);
 +      }
 +
 +      if (rx_mode != tp->rx_mode) {
 +              tp->rx_mode = rx_mode;
 +              tw32_f(MAC_RX_MODE, rx_mode);
 +              udelay(10);
 +      }
 +}
 +
 +static void tg3_set_rx_mode(struct net_device *dev)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      if (!netif_running(dev))
 +              return;
 +
 +      tg3_full_lock(tp, 0);
 +      __tg3_set_rx_mode(dev);
 +      tg3_full_unlock(tp);
 +}
 +
 +static int tg3_get_regs_len(struct net_device *dev)
 +{
 +      return TG3_REG_BLK_SIZE;
 +}
 +
 +static void tg3_get_regs(struct net_device *dev,
 +              struct ethtool_regs *regs, void *_p)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      regs->version = 0;
 +
 +      memset(_p, 0, TG3_REG_BLK_SIZE);
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 +              return;
 +
 +      tg3_full_lock(tp, 0);
 +
 +      tg3_dump_legacy_regs(tp, (u32 *)_p);
 +
 +      tg3_full_unlock(tp);
 +}
 +
 +static int tg3_get_eeprom_len(struct net_device *dev)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      return tp->nvram_size;
 +}
 +
 +static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      int ret;
 +      u8  *pd;
 +      u32 i, offset, len, b_offset, b_count;
 +      __be32 val;
 +
 +      if (tg3_flag(tp, NO_NVRAM))
 +              return -EINVAL;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 +              return -EAGAIN;
 +
 +      offset = eeprom->offset;
 +      len = eeprom->len;
 +      eeprom->len = 0;
 +
 +      eeprom->magic = TG3_EEPROM_MAGIC;
 +
 +      if (offset & 3) {
 +              /* adjustments to start on required 4 byte boundary */
 +              b_offset = offset & 3;
 +              b_count = 4 - b_offset;
 +              if (b_count > len) {
 +                      /* i.e. offset=1 len=2 */
 +                      b_count = len;
 +              }
 +              ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
 +              if (ret)
 +                      return ret;
 +              memcpy(data, ((char *)&val) + b_offset, b_count);
 +              len -= b_count;
 +              offset += b_count;
 +              eeprom->len += b_count;
 +      }
 +
 +      /* read bytes up to the last 4 byte boundary */
 +      pd = &data[eeprom->len];
 +      for (i = 0; i < (len - (len & 3)); i += 4) {
 +              ret = tg3_nvram_read_be32(tp, offset + i, &val);
 +              if (ret) {
 +                      eeprom->len += i;
 +                      return ret;
 +              }
 +              memcpy(pd + i, &val, 4);
 +      }
 +      eeprom->len += i;
 +
 +      if (len & 3) {
 +              /* read last bytes not ending on 4 byte boundary */
 +              pd = &data[eeprom->len];
 +              b_count = len & 3;
 +              b_offset = offset + len - b_count;
 +              ret = tg3_nvram_read_be32(tp, b_offset, &val);
 +              if (ret)
 +                      return ret;
 +              memcpy(pd, &val, b_count);
 +              eeprom->len += b_count;
 +      }
 +      return 0;
 +}
 +
 +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
 +
 +static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      int ret;
 +      u32 offset, len, b_offset, odd_len;
 +      u8 *buf;
 +      __be32 start, end;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 +              return -EAGAIN;
 +
 +      if (tg3_flag(tp, NO_NVRAM) ||
 +          eeprom->magic != TG3_EEPROM_MAGIC)
 +              return -EINVAL;
 +
 +      offset = eeprom->offset;
 +      len = eeprom->len;
 +
 +      if ((b_offset = (offset & 3))) {
 +              /* adjustments to start on required 4 byte boundary */
 +              ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
 +              if (ret)
 +                      return ret;
 +              len += b_offset;
 +              offset &= ~3;
 +              if (len < 4)
 +                      len = 4;
 +      }
 +
 +      odd_len = 0;
 +      if (len & 3) {
 +              /* adjustments to end on required 4 byte boundary */
 +              odd_len = 1;
 +              len = (len + 3) & ~3;
 +              ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      buf = data;
 +      if (b_offset || odd_len) {
 +              buf = kmalloc(len, GFP_KERNEL);
 +              if (!buf)
 +                      return -ENOMEM;
 +              if (b_offset)
 +                      memcpy(buf, &start, 4);
 +              if (odd_len)
 +                      memcpy(buf+len-4, &end, 4);
 +              memcpy(buf + b_offset, data, eeprom->len);
 +      }
 +
 +      ret = tg3_nvram_write_block(tp, offset, len, buf);
 +
 +      if (buf != data)
 +              kfree(buf);
 +
 +      return ret;
 +}
 +
 +static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      if (tg3_flag(tp, USE_PHYLIB)) {
 +              struct phy_device *phydev;
 +              if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 +                      return -EAGAIN;
 +              phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +              return phy_ethtool_gset(phydev, cmd);
 +      }
 +
 +      cmd->supported = (SUPPORTED_Autoneg);
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 +              cmd->supported |= (SUPPORTED_1000baseT_Half |
 +                                 SUPPORTED_1000baseT_Full);
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
 +              cmd->supported |= (SUPPORTED_100baseT_Half |
 +                                SUPPORTED_100baseT_Full |
 +                                SUPPORTED_10baseT_Half |
 +                                SUPPORTED_10baseT_Full |
 +                                SUPPORTED_TP);
 +              cmd->port = PORT_TP;
 +      } else {
 +              cmd->supported |= SUPPORTED_FIBRE;
 +              cmd->port = PORT_FIBRE;
 +      }
 +
 +      cmd->advertising = tp->link_config.advertising;
 +      if (tg3_flag(tp, PAUSE_AUTONEG)) {
 +              if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
 +                      if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
 +                              cmd->advertising |= ADVERTISED_Pause;
 +                      } else {
 +                              cmd->advertising |= ADVERTISED_Pause |
 +                                                  ADVERTISED_Asym_Pause;
 +                      }
 +              } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
 +                      cmd->advertising |= ADVERTISED_Asym_Pause;
 +              }
 +      }
 +      if (netif_running(dev)) {
 +              ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
 +              cmd->duplex = tp->link_config.active_duplex;
 +      } else {
 +              ethtool_cmd_speed_set(cmd, SPEED_INVALID);
 +              cmd->duplex = DUPLEX_INVALID;
 +      }
 +      cmd->phy_address = tp->phy_addr;
 +      cmd->transceiver = XCVR_INTERNAL;
 +      cmd->autoneg = tp->link_config.autoneg;
 +      cmd->maxtxpkt = 0;
 +      cmd->maxrxpkt = 0;
 +      return 0;
 +}
 +
 +static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      u32 speed = ethtool_cmd_speed(cmd);
 +
 +      if (tg3_flag(tp, USE_PHYLIB)) {
 +              struct phy_device *phydev;
 +              if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 +                      return -EAGAIN;
 +              phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +              return phy_ethtool_sset(phydev, cmd);
 +      }
 +
 +      if (cmd->autoneg != AUTONEG_ENABLE &&
 +          cmd->autoneg != AUTONEG_DISABLE)
 +              return -EINVAL;
 +
 +      if (cmd->autoneg == AUTONEG_DISABLE &&
 +          cmd->duplex != DUPLEX_FULL &&
 +          cmd->duplex != DUPLEX_HALF)
 +              return -EINVAL;
 +
 +      if (cmd->autoneg == AUTONEG_ENABLE) {
 +              u32 mask = ADVERTISED_Autoneg |
 +                         ADVERTISED_Pause |
 +                         ADVERTISED_Asym_Pause;
 +
 +              if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 +                      mask |= ADVERTISED_1000baseT_Half |
 +                              ADVERTISED_1000baseT_Full;
 +
 +              if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 +                      mask |= ADVERTISED_100baseT_Half |
 +                              ADVERTISED_100baseT_Full |
 +                              ADVERTISED_10baseT_Half |
 +                              ADVERTISED_10baseT_Full |
 +                              ADVERTISED_TP;
 +              else
 +                      mask |= ADVERTISED_FIBRE;
 +
 +              if (cmd->advertising & ~mask)
 +                      return -EINVAL;
 +
 +              mask &= (ADVERTISED_1000baseT_Half |
 +                       ADVERTISED_1000baseT_Full |
 +                       ADVERTISED_100baseT_Half |
 +                       ADVERTISED_100baseT_Full |
 +                       ADVERTISED_10baseT_Half |
 +                       ADVERTISED_10baseT_Full);
 +
 +              cmd->advertising &= mask;
 +      } else {
 +              if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
 +                      if (speed != SPEED_1000)
 +                              return -EINVAL;
 +
 +                      if (cmd->duplex != DUPLEX_FULL)
 +                              return -EINVAL;
 +              } else {
 +                      if (speed != SPEED_100 &&
 +                          speed != SPEED_10)
 +                              return -EINVAL;
 +              }
 +      }
 +
 +      tg3_full_lock(tp, 0);
 +
 +      tp->link_config.autoneg = cmd->autoneg;
 +      if (cmd->autoneg == AUTONEG_ENABLE) {
 +              tp->link_config.advertising = (cmd->advertising |
 +                                            ADVERTISED_Autoneg);
 +              tp->link_config.speed = SPEED_INVALID;
 +              tp->link_config.duplex = DUPLEX_INVALID;
 +      } else {
 +              tp->link_config.advertising = 0;
 +              tp->link_config.speed = speed;
 +              tp->link_config.duplex = cmd->duplex;
 +      }
 +
 +      tp->link_config.orig_speed = tp->link_config.speed;
 +      tp->link_config.orig_duplex = tp->link_config.duplex;
 +      tp->link_config.orig_autoneg = tp->link_config.autoneg;
 +
 +      if (netif_running(dev))
 +              tg3_setup_phy(tp, 1);
 +
 +      tg3_full_unlock(tp);
 +
 +      return 0;
 +}
 +
 +static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      strcpy(info->driver, DRV_MODULE_NAME);
 +      strcpy(info->version, DRV_MODULE_VERSION);
 +      strcpy(info->fw_version, tp->fw_ver);
 +      strcpy(info->bus_info, pci_name(tp->pdev));
 +}
 +
 +static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
 +              wol->supported = WAKE_MAGIC;
 +      else
 +              wol->supported = 0;
 +      wol->wolopts = 0;
 +      if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
 +              wol->wolopts = WAKE_MAGIC;
 +      memset(&wol->sopass, 0, sizeof(wol->sopass));
 +}
 +
 +static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      struct device *dp = &tp->pdev->dev;
 +
 +      if (wol->wolopts & ~WAKE_MAGIC)
 +              return -EINVAL;
 +      if ((wol->wolopts & WAKE_MAGIC) &&
 +          !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
 +              return -EINVAL;
 +
 +      device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
 +
 +      spin_lock_bh(&tp->lock);
 +      if (device_may_wakeup(dp))
 +              tg3_flag_set(tp, WOL_ENABLE);
 +      else
 +              tg3_flag_clear(tp, WOL_ENABLE);
 +      spin_unlock_bh(&tp->lock);
 +
 +      return 0;
 +}
 +
 +static u32 tg3_get_msglevel(struct net_device *dev)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      return tp->msg_enable;
 +}
 +
 +static void tg3_set_msglevel(struct net_device *dev, u32 value)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      tp->msg_enable = value;
 +}
 +
 +static int tg3_nway_reset(struct net_device *dev)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      int r;
 +
 +      if (!netif_running(dev))
 +              return -EAGAIN;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 +              return -EINVAL;
 +
 +      if (tg3_flag(tp, USE_PHYLIB)) {
 +              if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 +                      return -EAGAIN;
 +              r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 +      } else {
 +              u32 bmcr;
 +
 +              spin_lock_bh(&tp->lock);
 +              r = -EINVAL;
 +              tg3_readphy(tp, MII_BMCR, &bmcr);
 +              if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
 +                  ((bmcr & BMCR_ANENABLE) ||
 +                   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
 +                      tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
 +                                                 BMCR_ANENABLE);
 +                      r = 0;
 +              }
 +              spin_unlock_bh(&tp->lock);
 +      }
 +
 +      return r;
 +}
 +
 +static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      ering->rx_max_pending = tp->rx_std_ring_mask;
 +      ering->rx_mini_max_pending = 0;
 +      if (tg3_flag(tp, JUMBO_RING_ENABLE))
 +              ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
 +      else
 +              ering->rx_jumbo_max_pending = 0;
 +
 +      ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
 +
 +      ering->rx_pending = tp->rx_pending;
 +      ering->rx_mini_pending = 0;
 +      if (tg3_flag(tp, JUMBO_RING_ENABLE))
 +              ering->rx_jumbo_pending = tp->rx_jumbo_pending;
 +      else
 +              ering->rx_jumbo_pending = 0;
 +
 +      ering->tx_pending = tp->napi[0].tx_pending;
 +}
 +
 +static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      int i, irq_sync = 0, err = 0;
 +
 +      if ((ering->rx_pending > tp->rx_std_ring_mask) ||
 +          (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
 +          (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
 +          (ering->tx_pending <= MAX_SKB_FRAGS) ||
 +          (tg3_flag(tp, TSO_BUG) &&
 +           (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
 +              return -EINVAL;
 +
 +      if (netif_running(dev)) {
 +              tg3_phy_stop(tp);
 +              tg3_netif_stop(tp);
 +              irq_sync = 1;
 +      }
 +
 +      tg3_full_lock(tp, irq_sync);
 +
 +      tp->rx_pending = ering->rx_pending;
 +
 +      if (tg3_flag(tp, MAX_RXPEND_64) &&
 +          tp->rx_pending > 63)
 +              tp->rx_pending = 63;
 +      tp->rx_jumbo_pending = ering->rx_jumbo_pending;
 +
 +      for (i = 0; i < tp->irq_max; i++)
 +              tp->napi[i].tx_pending = ering->tx_pending;
 +
 +      if (netif_running(dev)) {
 +              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +              err = tg3_restart_hw(tp, 1);
 +              if (!err)
 +                      tg3_netif_start(tp);
 +      }
 +
 +      tg3_full_unlock(tp);
 +
 +      if (irq_sync && !err)
 +              tg3_phy_start(tp);
 +
 +      return err;
 +}
 +
 +static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
 +
 +      if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
 +              epause->rx_pause = 1;
 +      else
 +              epause->rx_pause = 0;
 +
 +      if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
 +              epause->tx_pause = 1;
 +      else
 +              epause->tx_pause = 0;
 +}
 +
 +static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      int err = 0;
 +
 +      if (tg3_flag(tp, USE_PHYLIB)) {
 +              u32 newadv;
 +              struct phy_device *phydev;
 +
 +              phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +
 +              if (!(phydev->supported & SUPPORTED_Pause) ||
 +                  (!(phydev->supported & SUPPORTED_Asym_Pause) &&
 +                   (epause->rx_pause != epause->tx_pause)))
 +                      return -EINVAL;
 +
 +              tp->link_config.flowctrl = 0;
 +              if (epause->rx_pause) {
 +                      tp->link_config.flowctrl |= FLOW_CTRL_RX;
 +
 +                      if (epause->tx_pause) {
 +                              tp->link_config.flowctrl |= FLOW_CTRL_TX;
 +                              newadv = ADVERTISED_Pause;
 +                      } else
 +                              newadv = ADVERTISED_Pause |
 +                                       ADVERTISED_Asym_Pause;
 +              } else if (epause->tx_pause) {
 +                      tp->link_config.flowctrl |= FLOW_CTRL_TX;
 +                      newadv = ADVERTISED_Asym_Pause;
 +              } else
 +                      newadv = 0;
 +
 +              if (epause->autoneg)
 +                      tg3_flag_set(tp, PAUSE_AUTONEG);
 +              else
 +                      tg3_flag_clear(tp, PAUSE_AUTONEG);
 +
 +              if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 +                      u32 oldadv = phydev->advertising &
 +                                   (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
 +                      if (oldadv != newadv) {
 +                              phydev->advertising &=
 +                                      ~(ADVERTISED_Pause |
 +                                        ADVERTISED_Asym_Pause);
 +                              phydev->advertising |= newadv;
 +                              if (phydev->autoneg) {
 +                                      /*
 +                                       * Always renegotiate the link to
 +                                       * inform our link partner of our
 +                                       * flow control settings, even if the
 +                                       * flow control is forced.  Let
 +                                       * tg3_adjust_link() do the final
 +                                       * flow control setup.
 +                                       */
 +                                      return phy_start_aneg(phydev);
 +                              }
 +                      }
 +
 +                      if (!epause->autoneg)
 +                              tg3_setup_flow_control(tp, 0, 0);
 +              } else {
 +                      tp->link_config.orig_advertising &=
 +                                      ~(ADVERTISED_Pause |
 +                                        ADVERTISED_Asym_Pause);
 +                      tp->link_config.orig_advertising |= newadv;
 +              }
 +      } else {
 +              int irq_sync = 0;
 +
 +              if (netif_running(dev)) {
 +                      tg3_netif_stop(tp);
 +                      irq_sync = 1;
 +              }
 +
 +              tg3_full_lock(tp, irq_sync);
 +
 +              if (epause->autoneg)
 +                      tg3_flag_set(tp, PAUSE_AUTONEG);
 +              else
 +                      tg3_flag_clear(tp, PAUSE_AUTONEG);
 +              if (epause->rx_pause)
 +                      tp->link_config.flowctrl |= FLOW_CTRL_RX;
 +              else
 +                      tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
 +              if (epause->tx_pause)
 +                      tp->link_config.flowctrl |= FLOW_CTRL_TX;
 +              else
 +                      tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
 +
 +              if (netif_running(dev)) {
 +                      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +                      err = tg3_restart_hw(tp, 1);
 +                      if (!err)
 +                              tg3_netif_start(tp);
 +              }
 +
 +              tg3_full_unlock(tp);
 +      }
 +
 +      return err;
 +}
 +
 +static int tg3_get_sset_count(struct net_device *dev, int sset)
 +{
 +      switch (sset) {
 +      case ETH_SS_TEST:
 +              return TG3_NUM_TEST;
 +      case ETH_SS_STATS:
 +              return TG3_NUM_STATS;
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
 +static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 +{
 +      switch (stringset) {
 +      case ETH_SS_STATS:
 +              memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
 +              break;
 +      case ETH_SS_TEST:
 +              memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
 +              break;
 +      default:
 +              WARN_ON(1);     /* we need a WARN() */
 +              break;
 +      }
 +}
 +
 +static int tg3_set_phys_id(struct net_device *dev,
 +                          enum ethtool_phys_id_state state)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      if (!netif_running(tp->dev))
 +              return -EAGAIN;
 +
 +      switch (state) {
 +      case ETHTOOL_ID_ACTIVE:
 +              return 1;       /* cycle on/off once per second */
 +
 +      case ETHTOOL_ID_ON:
 +              tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
 +                   LED_CTRL_1000MBPS_ON |
 +                   LED_CTRL_100MBPS_ON |
 +                   LED_CTRL_10MBPS_ON |
 +                   LED_CTRL_TRAFFIC_OVERRIDE |
 +                   LED_CTRL_TRAFFIC_BLINK |
 +                   LED_CTRL_TRAFFIC_LED);
 +              break;
 +
 +      case ETHTOOL_ID_OFF:
 +              tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
 +                   LED_CTRL_TRAFFIC_OVERRIDE);
 +              break;
 +
 +      case ETHTOOL_ID_INACTIVE:
 +              tw32(MAC_LED_CTRL, tp->led_ctrl);
 +              break;
 +      }
 +
 +      return 0;
 +}
 +
 +static void tg3_get_ethtool_stats(struct net_device *dev,
 +                                 struct ethtool_stats *estats, u64 *tmp_stats)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
 +}
 +
 +static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
 +{
 +      int i;
 +      __be32 *buf;
 +      u32 offset = 0, len = 0;
 +      u32 magic, val;
 +
 +      if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
 +              return NULL;
 +
 +      if (magic == TG3_EEPROM_MAGIC) {
 +              for (offset = TG3_NVM_DIR_START;
 +                   offset < TG3_NVM_DIR_END;
 +                   offset += TG3_NVM_DIRENT_SIZE) {
 +                      if (tg3_nvram_read(tp, offset, &val))
 +                              return NULL;
 +
 +                      if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
 +                          TG3_NVM_DIRTYPE_EXTVPD)
 +                              break;
 +              }
 +
 +              if (offset != TG3_NVM_DIR_END) {
 +                      len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
 +                      if (tg3_nvram_read(tp, offset + 4, &offset))
 +                              return NULL;
 +
 +                      offset = tg3_nvram_logical_addr(tp, offset);
 +              }
 +      }
 +
 +      if (!offset || !len) {
 +              offset = TG3_NVM_VPD_OFF;
 +              len = TG3_NVM_VPD_LEN;
 +      }
 +
 +      buf = kmalloc(len, GFP_KERNEL);
 +      if (buf == NULL)
 +              return NULL;
 +
 +      if (magic == TG3_EEPROM_MAGIC) {
 +              for (i = 0; i < len; i += 4) {
 +                      /* The data is in little-endian format in NVRAM.
 +                       * Use the big-endian read routines to preserve
 +                       * the byte order as it exists in NVRAM.
 +                       */
 +                      if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
 +                              goto error;
 +              }
 +      } else {
 +              u8 *ptr;
 +              ssize_t cnt;
 +              unsigned int pos = 0;
 +
 +              ptr = (u8 *)&buf[0];
 +              for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
 +                      cnt = pci_read_vpd(tp->pdev, pos,
 +                                         len - pos, ptr);
 +                      if (cnt == -ETIMEDOUT || cnt == -EINTR)
 +                              cnt = 0;
 +                      else if (cnt < 0)
 +                              goto error;
 +              }
 +              if (pos != len)
 +                      goto error;
 +      }
 +
 +      *vpdlen = len;
 +
 +      return buf;
 +
 +error:
 +      kfree(buf);
 +      return NULL;
 +}
 +
 +#define NVRAM_TEST_SIZE 0x100
 +#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
 +#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
 +#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
 +#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
 +#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
 +#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
 +#define NVRAM_SELFBOOT_HW_SIZE 0x20
 +#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
 +
 +static int tg3_test_nvram(struct tg3 *tp)
 +{
 +      u32 csum, magic, len;
 +      __be32 *buf;
 +      int i, j, k, err = 0, size;
 +
 +      if (tg3_flag(tp, NO_NVRAM))
 +              return 0;
 +
 +      if (tg3_nvram_read(tp, 0, &magic) != 0)
 +              return -EIO;
 +
 +      if (magic == TG3_EEPROM_MAGIC)
 +              size = NVRAM_TEST_SIZE;
 +      else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
 +              if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
 +                  TG3_EEPROM_SB_FORMAT_1) {
 +                      switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
 +                      case TG3_EEPROM_SB_REVISION_0:
 +                              size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
 +                              break;
 +                      case TG3_EEPROM_SB_REVISION_2:
 +                              size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
 +                              break;
 +                      case TG3_EEPROM_SB_REVISION_3:
 +                              size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
 +                              break;
 +                      case TG3_EEPROM_SB_REVISION_4:
 +                              size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
 +                              break;
 +                      case TG3_EEPROM_SB_REVISION_5:
 +                              size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
 +                              break;
 +                      case TG3_EEPROM_SB_REVISION_6:
 +                              size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
 +                              break;
 +                      default:
 +                              return -EIO;
 +                      }
 +              } else
 +                      return 0;
 +      } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
 +              size = NVRAM_SELFBOOT_HW_SIZE;
 +      else
 +              return -EIO;
 +
 +      buf = kmalloc(size, GFP_KERNEL);
 +      if (buf == NULL)
 +              return -ENOMEM;
 +
 +      err = -EIO;
 +      for (i = 0, j = 0; i < size; i += 4, j++) {
 +              err = tg3_nvram_read_be32(tp, i, &buf[j]);
 +              if (err)
 +                      break;
 +      }
 +      if (i < size)
 +              goto out;
 +
 +      /* Selfboot format */
 +      magic = be32_to_cpu(buf[0]);
 +      if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
 +          TG3_EEPROM_MAGIC_FW) {
 +              u8 *buf8 = (u8 *) buf, csum8 = 0;
 +
 +              if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
 +                  TG3_EEPROM_SB_REVISION_2) {
 +                      /* For rev 2, the csum doesn't include the MBA. */
 +                      for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
 +                              csum8 += buf8[i];
 +                      for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
 +                              csum8 += buf8[i];
 +              } else {
 +                      for (i = 0; i < size; i++)
 +                              csum8 += buf8[i];
 +              }
 +
 +              if (csum8 == 0) {
 +                      err = 0;
 +                      goto out;
 +              }
 +
 +              err = -EIO;
 +              goto out;
 +      }
 +
 +      if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
 +          TG3_EEPROM_MAGIC_HW) {
 +              u8 data[NVRAM_SELFBOOT_DATA_SIZE];
 +              u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
 +              u8 *buf8 = (u8 *) buf;
 +
 +              /* Separate the parity bits and the data bytes.  */
 +              for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
 +                      if ((i == 0) || (i == 8)) {
 +                              int l;
 +                              u8 msk;
 +
 +                              for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
 +                                      parity[k++] = buf8[i] & msk;
 +                              i++;
 +                      } else if (i == 16) {
 +                              int l;
 +                              u8 msk;
 +
 +                              for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
 +                                      parity[k++] = buf8[i] & msk;
 +                              i++;
 +
 +                              for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
 +                                      parity[k++] = buf8[i] & msk;
 +                              i++;
 +                      }
 +                      data[j++] = buf8[i];
 +              }
 +
 +              err = -EIO;
 +              for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
 +                      u8 hw8 = hweight8(data[i]);
 +
 +                      if ((hw8 & 0x1) && parity[i])
 +                              goto out;
 +                      else if (!(hw8 & 0x1) && !parity[i])
 +                              goto out;
 +              }
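 +              /* In other words, the loop above checks that each data byte
 +               * together with its stored parity bit has odd overall parity;
 +               * any mismatch bails out with err still -EIO.
 +               */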
 +              err = 0;
 +              goto out;
 +      }
 +
 +      err = -EIO;
 +
 +      /* Bootstrap checksum at offset 0x10 */
 +      csum = calc_crc((unsigned char *) buf, 0x10);
 +      if (csum != le32_to_cpu(buf[0x10/4]))
 +              goto out;
 +
 +      /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
 +      csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
 +      if (csum != le32_to_cpu(buf[0xfc/4]))
 +              goto out;
 +
 +      kfree(buf);
 +
 +      buf = tg3_vpd_readblock(tp, &len);
 +      if (!buf)
 +              return -ENOMEM;
 +
 +      i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
 +      if (i > 0) {
 +              j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
 +              if (j < 0)
 +                      goto out;
 +
 +              if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
 +                      goto out;
 +
 +              i += PCI_VPD_LRDT_TAG_SIZE;
 +              j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
 +                                            PCI_VPD_RO_KEYWORD_CHKSUM);
 +              if (j > 0) {
 +                      u8 csum8 = 0;
 +
 +                      j += PCI_VPD_INFO_FLD_HDR_SIZE;
 +
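 +                      /* Per the PCI VPD format, the RV keyword's data byte
 +                       * is a checksum chosen so that all bytes from the
 +                       * start of the VPD up to and including it (offset j
 +                       * here) sum to zero.
 +                       */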
 +                      for (i = 0; i <= j; i++)
 +                              csum8 += ((u8 *)buf)[i];
 +
 +                      if (csum8)
 +                              goto out;
 +              }
 +      }
 +
 +      err = 0;
 +
 +out:
 +      kfree(buf);
 +      return err;
 +}
 +
 +#define TG3_SERDES_TIMEOUT_SEC        2
 +#define TG3_COPPER_TIMEOUT_SEC        6
 +
 +static int tg3_test_link(struct tg3 *tp)
 +{
 +      int i, max;
 +
 +      if (!netif_running(tp->dev))
 +              return -ENODEV;
 +
 +      if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 +              max = TG3_SERDES_TIMEOUT_SEC;
 +      else
 +              max = TG3_COPPER_TIMEOUT_SEC;
 +
 +      for (i = 0; i < max; i++) {
 +              if (netif_carrier_ok(tp->dev))
 +                      return 0;
 +
 +              if (msleep_interruptible(1000))
 +                      break;
 +      }
 +
 +      return -EIO;
 +}
 +
 +/* Only test the commonly used registers */
 +static int tg3_test_registers(struct tg3 *tp)
 +{
 +      int i, is_5705, is_5750;
 +      u32 offset, read_mask, write_mask, val, save_val, read_val;
 +      static struct {
 +              u16 offset;
 +              u16 flags;
 +#define TG3_FL_5705   0x1
 +#define TG3_FL_NOT_5705       0x2
 +#define TG3_FL_NOT_5788       0x4
 +#define TG3_FL_NOT_5750       0x8
 +              u32 read_mask;
 +              u32 write_mask;
 +      } reg_tbl[] = {
 +              /* MAC Control Registers */
 +              { MAC_MODE, TG3_FL_NOT_5705,
 +                      0x00000000, 0x00ef6f8c },
 +              { MAC_MODE, TG3_FL_5705,
 +                      0x00000000, 0x01ef6b8c },
 +              { MAC_STATUS, TG3_FL_NOT_5705,
 +                      0x03800107, 0x00000000 },
 +              { MAC_STATUS, TG3_FL_5705,
 +                      0x03800100, 0x00000000 },
 +              { MAC_ADDR_0_HIGH, 0x0000,
 +                      0x00000000, 0x0000ffff },
 +              { MAC_ADDR_0_LOW, 0x0000,
 +                      0x00000000, 0xffffffff },
 +              { MAC_RX_MTU_SIZE, 0x0000,
 +                      0x00000000, 0x0000ffff },
 +              { MAC_TX_MODE, 0x0000,
 +                      0x00000000, 0x00000070 },
 +              { MAC_TX_LENGTHS, 0x0000,
 +                      0x00000000, 0x00003fff },
 +              { MAC_RX_MODE, TG3_FL_NOT_5705,
 +                      0x00000000, 0x000007fc },
 +              { MAC_RX_MODE, TG3_FL_5705,
 +                      0x00000000, 0x000007dc },
 +              { MAC_HASH_REG_0, 0x0000,
 +                      0x00000000, 0xffffffff },
 +              { MAC_HASH_REG_1, 0x0000,
 +                      0x00000000, 0xffffffff },
 +              { MAC_HASH_REG_2, 0x0000,
 +                      0x00000000, 0xffffffff },
 +              { MAC_HASH_REG_3, 0x0000,
 +                      0x00000000, 0xffffffff },
 +
 +              /* Receive Data and Receive BD Initiator Control Registers. */
 +              { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
 +                      0x00000000, 0x00000003 },
 +              { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { RCVDBDI_STD_BD+0, 0x0000,
 +                      0x00000000, 0xffffffff },
 +              { RCVDBDI_STD_BD+4, 0x0000,
 +                      0x00000000, 0xffffffff },
 +              { RCVDBDI_STD_BD+8, 0x0000,
 +                      0x00000000, 0xffff0002 },
 +              { RCVDBDI_STD_BD+0xc, 0x0000,
 +                      0x00000000, 0xffffffff },
 +
 +              /* Receive BD Initiator Control Registers. */
 +              { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { RCVBDI_STD_THRESH, TG3_FL_5705,
 +                      0x00000000, 0x000003ff },
 +              { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +
 +              /* Host Coalescing Control Registers. */
 +              { HOSTCC_MODE, TG3_FL_NOT_5705,
 +                      0x00000000, 0x00000004 },
 +              { HOSTCC_MODE, TG3_FL_5705,
 +                      0x00000000, 0x000000f6 },
 +              { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
 +                      0x00000000, 0x000003ff },
 +              { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
 +                      0x00000000, 0x000003ff },
 +              { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
 +                      0x00000000, 0x000000ff },
 +              { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
 +                      0x00000000, 0x000000ff },
 +              { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
 +                      0x00000000, 0x000000ff },
 +              { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
 +                      0x00000000, 0x000000ff },
 +              { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
 +                      0x00000000, 0xffffffff },
 +              { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
 +                      0xffffffff, 0x00000000 },
 +              { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
 +                      0xffffffff, 0x00000000 },
 +
 +              /* Buffer Manager Control Registers. */
 +              { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
 +                      0x00000000, 0x007fff80 },
 +              { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
 +                      0x00000000, 0x007fffff },
 +              { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
 +                      0x00000000, 0x0000003f },
 +              { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
 +                      0x00000000, 0x000001ff },
 +              { BUFMGR_MB_HIGH_WATER, 0x0000,
 +                      0x00000000, 0x000001ff },
 +              { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
 +                      0xffffffff, 0x00000000 },
 +              { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
 +                      0xffffffff, 0x00000000 },
 +
 +              /* Mailbox Registers */
 +              { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
 +                      0x00000000, 0x000001ff },
 +              { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
 +                      0x00000000, 0x000001ff },
 +              { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
 +                      0x00000000, 0x000007ff },
 +              { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
 +                      0x00000000, 0x000001ff },
 +
 +              { 0xffff, 0x0000, 0x00000000, 0x00000000 },
 +      };
 +
 +      is_5705 = is_5750 = 0;
 +      if (tg3_flag(tp, 5705_PLUS)) {
 +              is_5705 = 1;
 +              if (tg3_flag(tp, 5750_PLUS))
 +                      is_5750 = 1;
 +      }
 +
 +      for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
 +              if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
 +                      continue;
 +
 +              if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
 +                      continue;
 +
 +              if (tg3_flag(tp, IS_5788) &&
 +                  (reg_tbl[i].flags & TG3_FL_NOT_5788))
 +                      continue;
 +
 +              if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
 +                      continue;
 +
 +              offset = (u32) reg_tbl[i].offset;
 +              read_mask = reg_tbl[i].read_mask;
 +              write_mask = reg_tbl[i].write_mask;
 +
 +              /* Save the original register content */
 +              save_val = tr32(offset);
 +
 +              /* Determine the read-only value. */
 +              read_val = save_val & read_mask;
 +
 +              /* Write zero to the register, then make sure the read-only bits
 +               * are not changed and the read/write bits are all zeros.
 +               */
 +              tw32(offset, 0);
 +
 +              val = tr32(offset);
 +
 +              /* Test the read-only and read/write bits. */
 +              if (((val & read_mask) != read_val) || (val & write_mask))
 +                      goto out;
 +
 +              /* Write ones to all the bits defined by RdMask and WrMask, then
 +               * make sure the read-only bits are not changed and the
 +               * read/write bits are all ones.
 +               */
 +              tw32(offset, read_mask | write_mask);
 +
 +              val = tr32(offset);
 +
 +              /* Test the read-only bits. */
 +              if ((val & read_mask) != read_val)
 +                      goto out;
 +
 +              /* Test the read/write bits. */
 +              if ((val & write_mask) != write_mask)
 +                      goto out;
 +
 +              tw32(offset, save_val);
 +      }
 +
 +      return 0;
 +
 +out:
 +      if (netif_msg_hw(tp))
 +              netdev_err(tp->dev,
 +                         "Register test failed at offset %x\n", offset);
 +      tw32(offset, save_val);
 +      return -EIO;
 +}
 +
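 +/* Write each test pattern across the given internal memory window and
 + * read it back, failing on the first mismatch.
 + */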
 +static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
 +{
 +      static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
 +      int i;
 +      u32 j;
 +
 +      for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
 +              for (j = 0; j < len; j += 4) {
 +                      u32 val;
 +
 +                      tg3_write_mem(tp, offset + j, test_pattern[i]);
 +                      tg3_read_mem(tp, offset + j, &val);
 +                      if (val != test_pattern[i])
 +                              return -EIO;
 +              }
 +      }
 +      return 0;
 +}
 +
 +static int tg3_test_memory(struct tg3 *tp)
 +{
 +      static struct mem_entry {
 +              u32 offset;
 +              u32 len;
 +      } mem_tbl_570x[] = {
 +              { 0x00000000, 0x00b50},
 +              { 0x00002000, 0x1c000},
 +              { 0xffffffff, 0x00000}
 +      }, mem_tbl_5705[] = {
 +              { 0x00000100, 0x0000c},
 +              { 0x00000200, 0x00008},
 +              { 0x00004000, 0x00800},
 +              { 0x00006000, 0x01000},
 +              { 0x00008000, 0x02000},
 +              { 0x00010000, 0x0e000},
 +              { 0xffffffff, 0x00000}
 +      }, mem_tbl_5755[] = {
 +              { 0x00000200, 0x00008},
 +              { 0x00004000, 0x00800},
 +              { 0x00006000, 0x00800},
 +              { 0x00008000, 0x02000},
 +              { 0x00010000, 0x0c000},
 +              { 0xffffffff, 0x00000}
 +      }, mem_tbl_5906[] = {
 +              { 0x00000200, 0x00008},
 +              { 0x00004000, 0x00400},
 +              { 0x00006000, 0x00400},
 +              { 0x00008000, 0x01000},
 +              { 0x00010000, 0x01000},
 +              { 0xffffffff, 0x00000}
 +      }, mem_tbl_5717[] = {
 +              { 0x00000200, 0x00008},
 +              { 0x00010000, 0x0a000},
 +              { 0x00020000, 0x13c00},
 +              { 0xffffffff, 0x00000}
 +      }, mem_tbl_57765[] = {
 +              { 0x00000200, 0x00008},
 +              { 0x00004000, 0x00800},
 +              { 0x00006000, 0x09800},
 +              { 0x00010000, 0x0a000},
 +              { 0xffffffff, 0x00000}
 +      };
 +      struct mem_entry *mem_tbl;
 +      int err = 0;
 +      int i;
 +
 +      if (tg3_flag(tp, 5717_PLUS))
 +              mem_tbl = mem_tbl_5717;
 +      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +              mem_tbl = mem_tbl_57765;
 +      else if (tg3_flag(tp, 5755_PLUS))
 +              mem_tbl = mem_tbl_5755;
 +      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 +              mem_tbl = mem_tbl_5906;
 +      else if (tg3_flag(tp, 5705_PLUS))
 +              mem_tbl = mem_tbl_5705;
 +      else
 +              mem_tbl = mem_tbl_570x;
 +
 +      for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
 +              err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
 +              if (err)
 +                      break;
 +      }
 +
 +      return err;
 +}
 +
 +#define TG3_TSO_MSS           500
 +
 +#define TG3_TSO_IP_HDR_LEN    20
 +#define TG3_TSO_TCP_HDR_LEN   20
 +#define TG3_TSO_TCP_OPT_LEN   12
 +
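 +/* Canned packet header used by the TSO loopback test: a 2-byte Ethernet
 + * protocol field (IPv4), a 20-byte IPv4 header, and a 20-byte TCP header
 + * followed by 12 bytes of TCP options.
 + */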
 +static const u8 tg3_tso_header[] = {
 +0x08, 0x00,
 +0x45, 0x00, 0x00, 0x00,
 +0x00, 0x00, 0x40, 0x00,
 +0x40, 0x06, 0x00, 0x00,
 +0x0a, 0x00, 0x00, 0x01,
 +0x0a, 0x00, 0x00, 0x02,
 +0x0d, 0x00, 0xe0, 0x00,
 +0x00, 0x00, 0x01, 0x00,
 +0x00, 0x00, 0x02, 0x00,
 +0x80, 0x10, 0x10, 0x00,
 +0x14, 0x09, 0x00, 0x00,
 +0x01, 0x01, 0x08, 0x0a,
 +0x11, 0x11, 0x11, 0x11,
 +0x11, 0x11, 0x11, 0x11,
 +};
 +
 +static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
 +{
 +      u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
 +      u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
 +      u32 budget;
 +      struct sk_buff *skb, *rx_skb;
 +      u8 *tx_data;
 +      dma_addr_t map;
 +      int num_pkts, tx_len, rx_len, i, err;
 +      struct tg3_rx_buffer_desc *desc;
 +      struct tg3_napi *tnapi, *rnapi;
 +      struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
 +
 +      tnapi = &tp->napi[0];
 +      rnapi = &tp->napi[0];
 +      if (tp->irq_cnt > 1) {
 +              if (tg3_flag(tp, ENABLE_RSS))
 +                      rnapi = &tp->napi[1];
 +              if (tg3_flag(tp, ENABLE_TSS))
 +                      tnapi = &tp->napi[1];
 +      }
 +      coal_now = tnapi->coal_now | rnapi->coal_now;
 +
 +      err = -EIO;
 +
 +      tx_len = pktsz;
 +      skb = netdev_alloc_skb(tp->dev, tx_len);
 +      if (!skb)
 +              return -ENOMEM;
 +
 +      tx_data = skb_put(skb, tx_len);
 +      memcpy(tx_data, tp->dev->dev_addr, 6);
 +      memset(tx_data + 6, 0x0, 8);
 +
 +      tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
 +
 +      if (tso_loopback) {
 +              struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
 +
 +              u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
 +                            TG3_TSO_TCP_OPT_LEN;
 +
 +              memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
 +                     sizeof(tg3_tso_header));
 +              mss = TG3_TSO_MSS;
 +
 +              val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
 +              num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
 +
 +              /* Set the total length field in the IP header */
 +              iph->tot_len = htons((u16)(mss + hdr_len));
 +
 +              base_flags = (TXD_FLAG_CPU_PRE_DMA |
 +                            TXD_FLAG_CPU_POST_DMA);
 +
 +              if (tg3_flag(tp, HW_TSO_1) ||
 +                  tg3_flag(tp, HW_TSO_2) ||
 +                  tg3_flag(tp, HW_TSO_3)) {
 +                      struct tcphdr *th;
 +                      val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
 +                      th = (struct tcphdr *)&tx_data[val];
 +                      th->check = 0;
 +              } else
 +                      base_flags |= TXD_FLAG_TCPUDP_CSUM;
 +
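 +              /* Encode the header length into the mss and flag fields in
 +               * the format expected by this generation of TSO hardware.
 +               */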
 +              if (tg3_flag(tp, HW_TSO_3)) {
 +                      mss |= (hdr_len & 0xc) << 12;
 +                      if (hdr_len & 0x10)
 +                              base_flags |= 0x00000010;
 +                      base_flags |= (hdr_len & 0x3e0) << 5;
 +              } else if (tg3_flag(tp, HW_TSO_2))
 +                      mss |= hdr_len << 9;
 +              else if (tg3_flag(tp, HW_TSO_1) ||
 +                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 +                      mss |= (TG3_TSO_TCP_OPT_LEN << 9);
 +              } else {
 +                      base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
 +              }
 +
 +              data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
 +      } else {
 +              num_pkts = 1;
 +              data_off = ETH_HLEN;
 +      }
 +
 +      for (i = data_off; i < tx_len; i++)
 +              tx_data[i] = (u8) (i & 0xff);
 +
 +      map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
 +      if (pci_dma_mapping_error(tp->pdev, map)) {
 +              dev_kfree_skb(skb);
 +              return -EIO;
 +      }
 +
 +      val = tnapi->tx_prod;
 +      tnapi->tx_buffers[val].skb = skb;
 +      dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
 +
 +      tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 +             rnapi->coal_now);
 +
 +      udelay(10);
 +
 +      rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
 +
 +      budget = tg3_tx_avail(tnapi);
 +      if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
 +                          base_flags | TXD_FLAG_END, mss, 0)) {
 +              tnapi->tx_buffers[val].skb = NULL;
 +              dev_kfree_skb(skb);
 +              return -EIO;
 +      }
 +
 +      tnapi->tx_prod++;
 +
 +      tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
 +      tr32_mailbox(tnapi->prodmbox);
 +
 +      udelay(10);
 +
 +      /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
 +      for (i = 0; i < 35; i++) {
 +              tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 +                     coal_now);
 +
 +              udelay(10);
 +
 +              tx_idx = tnapi->hw_status->idx[0].tx_consumer;
 +              rx_idx = rnapi->hw_status->idx[0].rx_producer;
 +              if ((tx_idx == tnapi->tx_prod) &&
 +                  (rx_idx == (rx_start_idx + num_pkts)))
 +                      break;
 +      }
 +
 +      tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
 +      dev_kfree_skb(skb);
 +
 +      if (tx_idx != tnapi->tx_prod)
 +              goto out;
 +
 +      if (rx_idx != rx_start_idx + num_pkts)
 +              goto out;
 +
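 +      /* Walk the rx return ring and verify the payload of each received
 +       * packet against the pattern that was transmitted.
 +       */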
 +      val = data_off;
 +      while (rx_idx != rx_start_idx) {
 +              desc = &rnapi->rx_rcb[rx_start_idx++];
 +              desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 +              opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 +
 +              if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 +                  (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
 +                      goto out;
 +
 +              rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
 +                       - ETH_FCS_LEN;
 +
 +              if (!tso_loopback) {
 +                      if (rx_len != tx_len)
 +                              goto out;
 +
 +                      if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
 +                              if (opaque_key != RXD_OPAQUE_RING_STD)
 +                                      goto out;
 +                      } else {
 +                              if (opaque_key != RXD_OPAQUE_RING_JUMBO)
 +                                      goto out;
 +                      }
 +              } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
 +                         (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
 +                          >> RXD_TCPCSUM_SHIFT != 0xffff) {
 +                      goto out;
 +              }
 +
 +              if (opaque_key == RXD_OPAQUE_RING_STD) {
 +                      rx_skb = tpr->rx_std_buffers[desc_idx].skb;
 +                      map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
 +                                           mapping);
 +              } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 +                      rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
 +                      map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
 +                                           mapping);
 +              } else
 +                      goto out;
 +
 +              pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
 +                                          PCI_DMA_FROMDEVICE);
 +
 +              for (i = data_off; i < rx_len; i++, val++) {
 +                      if (*(rx_skb->data + i) != (u8) (val & 0xff))
 +                              goto out;
 +              }
 +      }
 +
 +      err = 0;
 +
 +      /* tg3_free_rings will unmap and free the rx_skb */
 +out:
 +      return err;
 +}
 +
 +#define TG3_STD_LOOPBACK_FAILED               1
 +#define TG3_JMB_LOOPBACK_FAILED               2
 +#define TG3_TSO_LOOPBACK_FAILED               4
 +#define TG3_LOOPBACK_FAILED \
 +      (TG3_STD_LOOPBACK_FAILED | \
 +       TG3_JMB_LOOPBACK_FAILED | \
 +       TG3_TSO_LOOPBACK_FAILED)
 +
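 +/* Run the loopback tests.  data[0] holds the MAC loopback results,
 + * data[1] the internal PHY loopback results, and data[2] the external
 + * loopback results (only exercised when do_extlpbk is set).
 + */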
 +static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
 +{
 +      int err = -EIO;
 +      u32 eee_cap;
 +
 +      eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
 +      tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
 +
 +      if (!netif_running(tp->dev)) {
 +              data[0] = TG3_LOOPBACK_FAILED;
 +              data[1] = TG3_LOOPBACK_FAILED;
 +              if (do_extlpbk)
 +                      data[2] = TG3_LOOPBACK_FAILED;
 +              goto done;
 +      }
 +
 +      err = tg3_reset_hw(tp, 1);
 +      if (err) {
 +              data[0] = TG3_LOOPBACK_FAILED;
 +              data[1] = TG3_LOOPBACK_FAILED;
 +              if (do_extlpbk)
 +                      data[2] = TG3_LOOPBACK_FAILED;
 +              goto done;
 +      }
 +
 +      if (tg3_flag(tp, ENABLE_RSS)) {
 +              int i;
 +
 +              /* Reroute all rx packets to the 1st queue */
 +              for (i = MAC_RSS_INDIR_TBL_0;
 +                   i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
 +                      tw32(i, 0x0);
 +      }
 +
 +      /* HW errata - mac loopback fails in some cases on 5780.
 +       * Normal traffic and PHY loopback are not affected by
 +       * errata.  Also, the MAC loopback test is deprecated for
 +       * all newer ASIC revisions.
 +       */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
 +          !tg3_flag(tp, CPMU_PRESENT)) {
 +              tg3_mac_loopback(tp, true);
 +
 +              if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
 +                      data[0] |= TG3_STD_LOOPBACK_FAILED;
 +
 +              if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
 +                  tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
 +                      data[0] |= TG3_JMB_LOOPBACK_FAILED;
 +
 +              tg3_mac_loopback(tp, false);
 +      }
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 +          !tg3_flag(tp, USE_PHYLIB)) {
 +              int i;
 +
 +              tg3_phy_lpbk_set(tp, 0, false);
 +
 +              /* Wait for link */
 +              for (i = 0; i < 100; i++) {
 +                      if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 +                              break;
 +                      mdelay(1);
 +              }
 +
 +              if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
 +                      data[1] |= TG3_STD_LOOPBACK_FAILED;
 +              if (tg3_flag(tp, TSO_CAPABLE) &&
 +                  tg3_run_loopback(tp, ETH_FRAME_LEN, true))
 +                      data[1] |= TG3_TSO_LOOPBACK_FAILED;
 +              if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
 +                  tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
 +                      data[1] |= TG3_JMB_LOOPBACK_FAILED;
 +
 +              if (do_extlpbk) {
 +                      tg3_phy_lpbk_set(tp, 0, true);
 +
 +                      /* All link indications report up, but the hardware
 +                       * isn't really ready for about 20 msec.  Double it
 +                       * to be sure.
 +                       */
 +                      mdelay(40);
 +
 +                      if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
 +                              data[2] |= TG3_STD_LOOPBACK_FAILED;
 +                      if (tg3_flag(tp, TSO_CAPABLE) &&
 +                          tg3_run_loopback(tp, ETH_FRAME_LEN, true))
 +                              data[2] |= TG3_TSO_LOOPBACK_FAILED;
 +                      if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
 +                          tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
 +                              data[2] |= TG3_JMB_LOOPBACK_FAILED;
 +              }
 +
 +              /* Re-enable gphy autopowerdown. */
 +              if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
 +                      tg3_phy_toggle_apd(tp, true);
 +      }
 +
 +      err = (data[0] | data[1] | data[2]) ? -EIO : 0;
 +
 +done:
 +      tp->phy_flags |= eee_cap;
 +
 +      return err;
 +}
 +
 +static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 +                        u64 *data)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
 +
 +      if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
 +          tg3_power_up(tp)) {
 +              etest->flags |= ETH_TEST_FL_FAILED;
 +              memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
 +              return;
 +      }
 +
 +      memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
 +
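 +      /* data[]: 0 = NVRAM, 1 = link, 2 = registers, 3 = memory,
 +       * 4-6 = loopback, 7 = interrupt.
 +       */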
 +      if (tg3_test_nvram(tp) != 0) {
 +              etest->flags |= ETH_TEST_FL_FAILED;
 +              data[0] = 1;
 +      }
 +      if (!doextlpbk && tg3_test_link(tp)) {
 +              etest->flags |= ETH_TEST_FL_FAILED;
 +              data[1] = 1;
 +      }
 +      if (etest->flags & ETH_TEST_FL_OFFLINE) {
 +              int err, err2 = 0, irq_sync = 0;
 +
 +              if (netif_running(dev)) {
 +                      tg3_phy_stop(tp);
 +                      tg3_netif_stop(tp);
 +                      irq_sync = 1;
 +              }
 +
 +              tg3_full_lock(tp, irq_sync);
 +
 +              tg3_halt(tp, RESET_KIND_SUSPEND, 1);
 +              err = tg3_nvram_lock(tp);
 +              tg3_halt_cpu(tp, RX_CPU_BASE);
 +              if (!tg3_flag(tp, 5705_PLUS))
 +                      tg3_halt_cpu(tp, TX_CPU_BASE);
 +              if (!err)
 +                      tg3_nvram_unlock(tp);
 +
 +              if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 +                      tg3_phy_reset(tp);
 +
 +              if (tg3_test_registers(tp) != 0) {
 +                      etest->flags |= ETH_TEST_FL_FAILED;
 +                      data[2] = 1;
 +              }
 +
 +              if (tg3_test_memory(tp) != 0) {
 +                      etest->flags |= ETH_TEST_FL_FAILED;
 +                      data[3] = 1;
 +              }
 +
 +              if (doextlpbk)
 +                      etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
 +
 +              if (tg3_test_loopback(tp, &data[4], doextlpbk))
 +                      etest->flags |= ETH_TEST_FL_FAILED;
 +
 +              tg3_full_unlock(tp);
 +
 +              if (tg3_test_interrupt(tp) != 0) {
 +                      etest->flags |= ETH_TEST_FL_FAILED;
 +                      data[7] = 1;
 +              }
 +
 +              tg3_full_lock(tp, 0);
 +
 +              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +              if (netif_running(dev)) {
 +                      tg3_flag_set(tp, INIT_COMPLETE);
 +                      err2 = tg3_restart_hw(tp, 1);
 +                      if (!err2)
 +                              tg3_netif_start(tp);
 +              }
 +
 +              tg3_full_unlock(tp);
 +
 +              if (irq_sync && !err2)
 +                      tg3_phy_start(tp);
 +      }
 +      if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 +              tg3_power_down(tp);
 +}
 +
 +static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 +{
 +      struct mii_ioctl_data *data = if_mii(ifr);
 +      struct tg3 *tp = netdev_priv(dev);
 +      int err;
 +
 +      if (tg3_flag(tp, USE_PHYLIB)) {
 +              struct phy_device *phydev;
 +              if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 +                      return -EAGAIN;
 +              phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +              return phy_mii_ioctl(phydev, ifr, cmd);
 +      }
 +
 +      switch (cmd) {
 +      case SIOCGMIIPHY:
 +              data->phy_id = tp->phy_addr;
 +
 +              /* fallthru */
 +      case SIOCGMIIREG: {
 +              u32 mii_regval;
 +
 +              if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 +                      break;                  /* We have no PHY */
 +
 +              if (!netif_running(dev))
 +                      return -EAGAIN;
 +
 +              spin_lock_bh(&tp->lock);
 +              err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
 +              spin_unlock_bh(&tp->lock);
 +
 +              data->val_out = mii_regval;
 +
 +              return err;
 +      }
 +
 +      case SIOCSMIIREG:
 +              if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 +                      break;                  /* We have no PHY */
 +
 +              if (!netif_running(dev))
 +                      return -EAGAIN;
 +
 +              spin_lock_bh(&tp->lock);
 +              err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
 +              spin_unlock_bh(&tp->lock);
 +
 +              return err;
 +
 +      default:
 +              /* do nothing */
 +              break;
 +      }
 +      return -EOPNOTSUPP;
 +}
 +
 +static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      memcpy(ec, &tp->coal, sizeof(*ec));
 +      return 0;
 +}
 +
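 +/* Validate the requested coalescing parameters against the chip limits
 + * before copying them into tp->coal and applying them to the hardware.
 + */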
 +static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +      u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
 +      u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
 +
 +      if (!tg3_flag(tp, 5705_PLUS)) {
 +              max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
 +              max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
 +              max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
 +              min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
 +      }
 +
 +      if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
 +          (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
 +          (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
 +          (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
 +          (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
 +          (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
 +          (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
 +          (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
 +          (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
 +          (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
 +              return -EINVAL;
 +
 +      /* No rx interrupts will be generated if both are zero */
 +      if ((ec->rx_coalesce_usecs == 0) &&
 +          (ec->rx_max_coalesced_frames == 0))
 +              return -EINVAL;
 +
 +      /* No tx interrupts will be generated if both are zero */
 +      if ((ec->tx_coalesce_usecs == 0) &&
 +          (ec->tx_max_coalesced_frames == 0))
 +              return -EINVAL;
 +
 +      /* Only copy relevant parameters, ignore all others. */
 +      tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
 +      tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
 +      tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
 +      tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
 +      tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
 +      tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
 +      tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
 +      tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
 +      tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
 +
 +      if (netif_running(dev)) {
 +              tg3_full_lock(tp, 0);
 +              __tg3_set_coalesce(tp, &tp->coal);
 +              tg3_full_unlock(tp);
 +      }
 +      return 0;
 +}
 +
 +static const struct ethtool_ops tg3_ethtool_ops = {
 +      .get_settings           = tg3_get_settings,
 +      .set_settings           = tg3_set_settings,
 +      .get_drvinfo            = tg3_get_drvinfo,
 +      .get_regs_len           = tg3_get_regs_len,
 +      .get_regs               = tg3_get_regs,
 +      .get_wol                = tg3_get_wol,
 +      .set_wol                = tg3_set_wol,
 +      .get_msglevel           = tg3_get_msglevel,
 +      .set_msglevel           = tg3_set_msglevel,
 +      .nway_reset             = tg3_nway_reset,
 +      .get_link               = ethtool_op_get_link,
 +      .get_eeprom_len         = tg3_get_eeprom_len,
 +      .get_eeprom             = tg3_get_eeprom,
 +      .set_eeprom             = tg3_set_eeprom,
 +      .get_ringparam          = tg3_get_ringparam,
 +      .set_ringparam          = tg3_set_ringparam,
 +      .get_pauseparam         = tg3_get_pauseparam,
 +      .set_pauseparam         = tg3_set_pauseparam,
 +      .self_test              = tg3_self_test,
 +      .get_strings            = tg3_get_strings,
 +      .set_phys_id            = tg3_set_phys_id,
 +      .get_ethtool_stats      = tg3_get_ethtool_stats,
 +      .get_coalesce           = tg3_get_coalesce,
 +      .set_coalesce           = tg3_set_coalesce,
 +      .get_sset_count         = tg3_get_sset_count,
 +};
 +
 +static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
 +{
 +      u32 cursize, val, magic;
 +
 +      tp->nvram_size = EEPROM_CHIP_SIZE;
 +
 +      if (tg3_nvram_read(tp, 0, &magic) != 0)
 +              return;
 +
 +      if ((magic != TG3_EEPROM_MAGIC) &&
 +          ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
 +          ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
 +              return;
 +
 +      /*
 +       * Size the chip by reading offsets at increasing powers of two.
 +       * When we encounter our validation signature, we know the addressing
 +       * has wrapped around, and thus have our chip size.
 +       */
 +      cursize = 0x10;
 +
 +      while (cursize < tp->nvram_size) {
 +              if (tg3_nvram_read(tp, cursize, &val) != 0)
 +                      return;
 +
 +              if (val == magic)
 +                      break;
 +
 +              cursize <<= 1;
 +      }
 +
 +      tp->nvram_size = cursize;
 +}
 +
 +static void __devinit tg3_get_nvram_size(struct tg3 *tp)
 +{
 +      u32 val;
 +
 +      if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
 +              return;
 +
 +      /* Selfboot format */
 +      if (val != TG3_EEPROM_MAGIC) {
 +              tg3_get_eeprom_size(tp);
 +              return;
 +      }
 +
 +      if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
 +              if (val != 0) {
 +                      /* This is confusing.  We want to operate on the
 +                       * 16-bit value at offset 0xf2.  The tg3_nvram_read()
 +                       * call will read from NVRAM and byteswap the data
 +                       * according to the byteswapping settings for all
 +                       * other register accesses.  This ensures the data we
 +                       * want will always reside in the lower 16-bits.
 +                       * However, the data in NVRAM is in LE format, which
 +                       * means the data from the NVRAM read will always be
 +                       * opposite the endianness of the CPU.  The 16-bit
 +                       * byteswap then brings the data to CPU endianness.
 +                       */
 +                      tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
 +                      return;
 +              }
 +      }
 +      tp->nvram_size = TG3_NVRAM_SIZE_512KB;
 +}
 +
 +static void __devinit tg3_get_nvram_info(struct tg3 *tp)
 +{
 +      u32 nvcfg1;
 +
 +      nvcfg1 = tr32(NVRAM_CFG1);
 +      if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
 +              tg3_flag_set(tp, FLASH);
 +      } else {
 +              nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
 +              tw32(NVRAM_CFG1, nvcfg1);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
 +          tg3_flag(tp, 5780_CLASS)) {
 +              switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
 +              case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
 +                      tp->nvram_jedecnum = JEDEC_ATMEL;
 +                      tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
 +                      tg3_flag_set(tp, NVRAM_BUFFERED);
 +                      break;
 +              case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
 +                      tp->nvram_jedecnum = JEDEC_ATMEL;
 +                      tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
 +                      break;
 +              case FLASH_VENDOR_ATMEL_EEPROM:
 +                      tp->nvram_jedecnum = JEDEC_ATMEL;
 +                      tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
 +                      tg3_flag_set(tp, NVRAM_BUFFERED);
 +                      break;
 +              case FLASH_VENDOR_ST:
 +                      tp->nvram_jedecnum = JEDEC_ST;
 +                      tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
 +                      tg3_flag_set(tp, NVRAM_BUFFERED);
 +                      break;
 +              case FLASH_VENDOR_SAIFUN:
 +                      tp->nvram_jedecnum = JEDEC_SAIFUN;
 +                      tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
 +                      break;
 +              case FLASH_VENDOR_SST_SMALL:
 +              case FLASH_VENDOR_SST_LARGE:
 +                      tp->nvram_jedecnum = JEDEC_SST;
 +                      tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
 +                      break;
 +              }
 +      } else {
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +      }
 +}
 +
 +static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
 +{
 +      switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
 +      case FLASH_5752PAGE_SIZE_256:
 +              tp->nvram_pagesize = 256;
 +              break;
 +      case FLASH_5752PAGE_SIZE_512:
 +              tp->nvram_pagesize = 512;
 +              break;
 +      case FLASH_5752PAGE_SIZE_1K:
 +              tp->nvram_pagesize = 1024;
 +              break;
 +      case FLASH_5752PAGE_SIZE_2K:
 +              tp->nvram_pagesize = 2048;
 +              break;
 +      case FLASH_5752PAGE_SIZE_4K:
 +              tp->nvram_pagesize = 4096;
 +              break;
 +      case FLASH_5752PAGE_SIZE_264:
 +              tp->nvram_pagesize = 264;
 +              break;
 +      case FLASH_5752PAGE_SIZE_528:
 +              tp->nvram_pagesize = 528;
 +              break;
 +      }
 +}
 +
 +static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
 +{
 +      u32 nvcfg1;
 +
 +      nvcfg1 = tr32(NVRAM_CFG1);
 +
 +      /* NVRAM protection for TPM */
 +      if (nvcfg1 & (1 << 27))
 +              tg3_flag_set(tp, PROTECTED_NVRAM);
 +
 +      switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 +      case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
 +      case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              break;
 +      case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +              break;
 +      case FLASH_5752VENDOR_ST_M45PE10:
 +      case FLASH_5752VENDOR_ST_M45PE20:
 +      case FLASH_5752VENDOR_ST_M45PE40:
 +              tp->nvram_jedecnum = JEDEC_ST;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +              break;
 +      }
 +
 +      if (tg3_flag(tp, FLASH)) {
 +              tg3_nvram_get_pagesize(tp, nvcfg1);
 +      } else {
 +              /* For eeprom, set pagesize to maximum eeprom size */
 +              tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
 +
 +              nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
 +              tw32(NVRAM_CFG1, nvcfg1);
 +      }
 +}
 +
 +static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
 +{
 +      u32 nvcfg1, protect = 0;
 +
 +      nvcfg1 = tr32(NVRAM_CFG1);
 +
 +      /* NVRAM protection for TPM */
 +      if (nvcfg1 & (1 << 27)) {
 +              tg3_flag_set(tp, PROTECTED_NVRAM);
 +              protect = 1;
 +      }
 +
 +      nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
 +      switch (nvcfg1) {
 +      case FLASH_5755VENDOR_ATMEL_FLASH_1:
 +      case FLASH_5755VENDOR_ATMEL_FLASH_2:
 +      case FLASH_5755VENDOR_ATMEL_FLASH_3:
 +      case FLASH_5755VENDOR_ATMEL_FLASH_5:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +              tp->nvram_pagesize = 264;
 +              if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
 +                  nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
 +                      tp->nvram_size = (protect ? 0x3e200 :
 +                                        TG3_NVRAM_SIZE_512KB);
 +              else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
 +                      tp->nvram_size = (protect ? 0x1f200 :
 +                                        TG3_NVRAM_SIZE_256KB);
 +              else
 +                      tp->nvram_size = (protect ? 0x1f200 :
 +                                        TG3_NVRAM_SIZE_128KB);
 +              break;
 +      case FLASH_5752VENDOR_ST_M45PE10:
 +      case FLASH_5752VENDOR_ST_M45PE20:
 +      case FLASH_5752VENDOR_ST_M45PE40:
 +              tp->nvram_jedecnum = JEDEC_ST;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +              tp->nvram_pagesize = 256;
 +              if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
 +                      tp->nvram_size = (protect ?
 +                                        TG3_NVRAM_SIZE_64KB :
 +                                        TG3_NVRAM_SIZE_128KB);
 +              else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
 +                      tp->nvram_size = (protect ?
 +                                        TG3_NVRAM_SIZE_64KB :
 +                                        TG3_NVRAM_SIZE_256KB);
 +              else
 +                      tp->nvram_size = (protect ?
 +                                        TG3_NVRAM_SIZE_128KB :
 +                                        TG3_NVRAM_SIZE_512KB);
 +              break;
 +      }
 +}
 +
 +static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
 +{
 +      u32 nvcfg1;
 +
 +      nvcfg1 = tr32(NVRAM_CFG1);
 +
 +      switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 +      case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
 +      case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
 +      case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
 +      case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
 +
 +              nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
 +              tw32(NVRAM_CFG1, nvcfg1);
 +              break;
 +      case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
 +      case FLASH_5755VENDOR_ATMEL_FLASH_1:
 +      case FLASH_5755VENDOR_ATMEL_FLASH_2:
 +      case FLASH_5755VENDOR_ATMEL_FLASH_3:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +              tp->nvram_pagesize = 264;
 +              break;
 +      case FLASH_5752VENDOR_ST_M45PE10:
 +      case FLASH_5752VENDOR_ST_M45PE20:
 +      case FLASH_5752VENDOR_ST_M45PE40:
 +              tp->nvram_jedecnum = JEDEC_ST;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +              tp->nvram_pagesize = 256;
 +              break;
 +      }
 +}
 +
 +static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
 +{
 +      u32 nvcfg1, protect = 0;
 +
 +      nvcfg1 = tr32(NVRAM_CFG1);
 +
 +      /* NVRAM protection for TPM */
 +      if (nvcfg1 & (1 << 27)) {
 +              tg3_flag_set(tp, PROTECTED_NVRAM);
 +              protect = 1;
 +      }
 +
 +      nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
 +      switch (nvcfg1) {
 +      case FLASH_5761VENDOR_ATMEL_ADB021D:
 +      case FLASH_5761VENDOR_ATMEL_ADB041D:
 +      case FLASH_5761VENDOR_ATMEL_ADB081D:
 +      case FLASH_5761VENDOR_ATMEL_ADB161D:
 +      case FLASH_5761VENDOR_ATMEL_MDB021D:
 +      case FLASH_5761VENDOR_ATMEL_MDB041D:
 +      case FLASH_5761VENDOR_ATMEL_MDB081D:
 +      case FLASH_5761VENDOR_ATMEL_MDB161D:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +              tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
 +              tp->nvram_pagesize = 256;
 +              break;
 +      case FLASH_5761VENDOR_ST_A_M45PE20:
 +      case FLASH_5761VENDOR_ST_A_M45PE40:
 +      case FLASH_5761VENDOR_ST_A_M45PE80:
 +      case FLASH_5761VENDOR_ST_A_M45PE16:
 +      case FLASH_5761VENDOR_ST_M_M45PE20:
 +      case FLASH_5761VENDOR_ST_M_M45PE40:
 +      case FLASH_5761VENDOR_ST_M_M45PE80:
 +      case FLASH_5761VENDOR_ST_M_M45PE16:
 +              tp->nvram_jedecnum = JEDEC_ST;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +              tp->nvram_pagesize = 256;
 +              break;
 +      }
 +
 +      if (protect) {
 +              tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
 +      } else {
 +              switch (nvcfg1) {
 +              case FLASH_5761VENDOR_ATMEL_ADB161D:
 +              case FLASH_5761VENDOR_ATMEL_MDB161D:
 +              case FLASH_5761VENDOR_ST_A_M45PE16:
 +              case FLASH_5761VENDOR_ST_M_M45PE16:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_2MB;
 +                      break;
 +              case FLASH_5761VENDOR_ATMEL_ADB081D:
 +              case FLASH_5761VENDOR_ATMEL_MDB081D:
 +              case FLASH_5761VENDOR_ST_A_M45PE80:
 +              case FLASH_5761VENDOR_ST_M_M45PE80:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_1MB;
 +                      break;
 +              case FLASH_5761VENDOR_ATMEL_ADB041D:
 +              case FLASH_5761VENDOR_ATMEL_MDB041D:
 +              case FLASH_5761VENDOR_ST_A_M45PE40:
 +              case FLASH_5761VENDOR_ST_M_M45PE40:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_512KB;
 +                      break;
 +              case FLASH_5761VENDOR_ATMEL_ADB021D:
 +              case FLASH_5761VENDOR_ATMEL_MDB021D:
 +              case FLASH_5761VENDOR_ST_A_M45PE20:
 +              case FLASH_5761VENDOR_ST_M_M45PE20:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_256KB;
 +                      break;
 +              }
 +      }
 +}
 +
 +static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
 +{
 +      tp->nvram_jedecnum = JEDEC_ATMEL;
 +      tg3_flag_set(tp, NVRAM_BUFFERED);
 +      tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
 +}
 +
 +static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
 +{
 +      u32 nvcfg1;
 +
 +      nvcfg1 = tr32(NVRAM_CFG1);
 +
 +      switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 +      case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
 +      case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
 +
 +              nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
 +              tw32(NVRAM_CFG1, nvcfg1);
 +              return;
 +      case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
 +      case FLASH_57780VENDOR_ATMEL_AT45DB011D:
 +      case FLASH_57780VENDOR_ATMEL_AT45DB011B:
 +      case FLASH_57780VENDOR_ATMEL_AT45DB021D:
 +      case FLASH_57780VENDOR_ATMEL_AT45DB021B:
 +      case FLASH_57780VENDOR_ATMEL_AT45DB041D:
 +      case FLASH_57780VENDOR_ATMEL_AT45DB041B:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +
 +              switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 +              case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
 +              case FLASH_57780VENDOR_ATMEL_AT45DB011D:
 +              case FLASH_57780VENDOR_ATMEL_AT45DB011B:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_128KB;
 +                      break;
 +              case FLASH_57780VENDOR_ATMEL_AT45DB021D:
 +              case FLASH_57780VENDOR_ATMEL_AT45DB021B:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_256KB;
 +                      break;
 +              case FLASH_57780VENDOR_ATMEL_AT45DB041D:
 +              case FLASH_57780VENDOR_ATMEL_AT45DB041B:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_512KB;
 +                      break;
 +              }
 +              break;
 +      case FLASH_5752VENDOR_ST_M45PE10:
 +      case FLASH_5752VENDOR_ST_M45PE20:
 +      case FLASH_5752VENDOR_ST_M45PE40:
 +              tp->nvram_jedecnum = JEDEC_ST;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +
 +              switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 +              case FLASH_5752VENDOR_ST_M45PE10:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_128KB;
 +                      break;
 +              case FLASH_5752VENDOR_ST_M45PE20:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_256KB;
 +                      break;
 +              case FLASH_5752VENDOR_ST_M45PE40:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_512KB;
 +                      break;
 +              }
 +              break;
 +      default:
 +              tg3_flag_set(tp, NO_NVRAM);
 +              return;
 +      }
 +
 +      tg3_nvram_get_pagesize(tp, nvcfg1);
 +      if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
 +              tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
 +}
 +
 +static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
 +{
 +      u32 nvcfg1;
 +
 +      nvcfg1 = tr32(NVRAM_CFG1);
 +
 +      switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 +      case FLASH_5717VENDOR_ATMEL_EEPROM:
 +      case FLASH_5717VENDOR_MICRO_EEPROM:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
 +
 +              nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
 +              tw32(NVRAM_CFG1, nvcfg1);
 +              return;
 +      case FLASH_5717VENDOR_ATMEL_MDB011D:
 +      case FLASH_5717VENDOR_ATMEL_ADB011B:
 +      case FLASH_5717VENDOR_ATMEL_ADB011D:
 +      case FLASH_5717VENDOR_ATMEL_MDB021D:
 +      case FLASH_5717VENDOR_ATMEL_ADB021B:
 +      case FLASH_5717VENDOR_ATMEL_ADB021D:
 +      case FLASH_5717VENDOR_ATMEL_45USPT:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +
 +              switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 +              case FLASH_5717VENDOR_ATMEL_MDB021D:
 +                      /* Detect size with tg3_get_nvram_size() */
 +                      break;
 +              case FLASH_5717VENDOR_ATMEL_ADB021B:
 +              case FLASH_5717VENDOR_ATMEL_ADB021D:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_256KB;
 +                      break;
 +              default:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_128KB;
 +                      break;
 +              }
 +              break;
 +      case FLASH_5717VENDOR_ST_M_M25PE10:
 +      case FLASH_5717VENDOR_ST_A_M25PE10:
 +      case FLASH_5717VENDOR_ST_M_M45PE10:
 +      case FLASH_5717VENDOR_ST_A_M45PE10:
 +      case FLASH_5717VENDOR_ST_M_M25PE20:
 +      case FLASH_5717VENDOR_ST_A_M25PE20:
 +      case FLASH_5717VENDOR_ST_M_M45PE20:
 +      case FLASH_5717VENDOR_ST_A_M45PE20:
 +      case FLASH_5717VENDOR_ST_25USPT:
 +      case FLASH_5717VENDOR_ST_45USPT:
 +              tp->nvram_jedecnum = JEDEC_ST;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +
 +              switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 +              case FLASH_5717VENDOR_ST_M_M25PE20:
 +              case FLASH_5717VENDOR_ST_M_M45PE20:
 +                      /* Detect size with tg3_get_nvram_size() */
 +                      break;
 +              case FLASH_5717VENDOR_ST_A_M25PE20:
 +              case FLASH_5717VENDOR_ST_A_M45PE20:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_256KB;
 +                      break;
 +              default:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_128KB;
 +                      break;
 +              }
 +              break;
 +      default:
 +              tg3_flag_set(tp, NO_NVRAM);
 +              return;
 +      }
 +
 +      tg3_nvram_get_pagesize(tp, nvcfg1);
 +      if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
 +              tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
 +}
 +
 +static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
 +{
 +      u32 nvcfg1, nvmpinstrp;
 +
 +      nvcfg1 = tr32(NVRAM_CFG1);
 +      nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
 +
 +      switch (nvmpinstrp) {
 +      case FLASH_5720_EEPROM_HD:
 +      case FLASH_5720_EEPROM_LD:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +
 +              nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
 +              tw32(NVRAM_CFG1, nvcfg1);
 +              if (nvmpinstrp == FLASH_5720_EEPROM_HD)
 +                      tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
 +              else
 +                      tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
 +              return;
 +      case FLASH_5720VENDOR_M_ATMEL_DB011D:
 +      case FLASH_5720VENDOR_A_ATMEL_DB011B:
 +      case FLASH_5720VENDOR_A_ATMEL_DB011D:
 +      case FLASH_5720VENDOR_M_ATMEL_DB021D:
 +      case FLASH_5720VENDOR_A_ATMEL_DB021B:
 +      case FLASH_5720VENDOR_A_ATMEL_DB021D:
 +      case FLASH_5720VENDOR_M_ATMEL_DB041D:
 +      case FLASH_5720VENDOR_A_ATMEL_DB041B:
 +      case FLASH_5720VENDOR_A_ATMEL_DB041D:
 +      case FLASH_5720VENDOR_M_ATMEL_DB081D:
 +      case FLASH_5720VENDOR_A_ATMEL_DB081D:
 +      case FLASH_5720VENDOR_ATMEL_45USPT:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +
 +              switch (nvmpinstrp) {
 +              case FLASH_5720VENDOR_M_ATMEL_DB021D:
 +              case FLASH_5720VENDOR_A_ATMEL_DB021B:
 +              case FLASH_5720VENDOR_A_ATMEL_DB021D:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_256KB;
 +                      break;
 +              case FLASH_5720VENDOR_M_ATMEL_DB041D:
 +              case FLASH_5720VENDOR_A_ATMEL_DB041B:
 +              case FLASH_5720VENDOR_A_ATMEL_DB041D:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_512KB;
 +                      break;
 +              case FLASH_5720VENDOR_M_ATMEL_DB081D:
 +              case FLASH_5720VENDOR_A_ATMEL_DB081D:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_1MB;
 +                      break;
 +              default:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_128KB;
 +                      break;
 +              }
 +              break;
 +      case FLASH_5720VENDOR_M_ST_M25PE10:
 +      case FLASH_5720VENDOR_M_ST_M45PE10:
 +      case FLASH_5720VENDOR_A_ST_M25PE10:
 +      case FLASH_5720VENDOR_A_ST_M45PE10:
 +      case FLASH_5720VENDOR_M_ST_M25PE20:
 +      case FLASH_5720VENDOR_M_ST_M45PE20:
 +      case FLASH_5720VENDOR_A_ST_M25PE20:
 +      case FLASH_5720VENDOR_A_ST_M45PE20:
 +      case FLASH_5720VENDOR_M_ST_M25PE40:
 +      case FLASH_5720VENDOR_M_ST_M45PE40:
 +      case FLASH_5720VENDOR_A_ST_M25PE40:
 +      case FLASH_5720VENDOR_A_ST_M45PE40:
 +      case FLASH_5720VENDOR_M_ST_M25PE80:
 +      case FLASH_5720VENDOR_M_ST_M45PE80:
 +      case FLASH_5720VENDOR_A_ST_M25PE80:
 +      case FLASH_5720VENDOR_A_ST_M45PE80:
 +      case FLASH_5720VENDOR_ST_25USPT:
 +      case FLASH_5720VENDOR_ST_45USPT:
 +              tp->nvram_jedecnum = JEDEC_ST;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +
 +              switch (nvmpinstrp) {
 +              case FLASH_5720VENDOR_M_ST_M25PE20:
 +              case FLASH_5720VENDOR_M_ST_M45PE20:
 +              case FLASH_5720VENDOR_A_ST_M25PE20:
 +              case FLASH_5720VENDOR_A_ST_M45PE20:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_256KB;
 +                      break;
 +              case FLASH_5720VENDOR_M_ST_M25PE40:
 +              case FLASH_5720VENDOR_M_ST_M45PE40:
 +              case FLASH_5720VENDOR_A_ST_M25PE40:
 +              case FLASH_5720VENDOR_A_ST_M45PE40:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_512KB;
 +                      break;
 +              case FLASH_5720VENDOR_M_ST_M25PE80:
 +              case FLASH_5720VENDOR_M_ST_M45PE80:
 +              case FLASH_5720VENDOR_A_ST_M25PE80:
 +              case FLASH_5720VENDOR_A_ST_M45PE80:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_1MB;
 +                      break;
 +              default:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_128KB;
 +                      break;
 +              }
 +              break;
 +      default:
 +              tg3_flag_set(tp, NO_NVRAM);
 +              return;
 +      }
 +
 +      tg3_nvram_get_pagesize(tp, nvcfg1);
 +      if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
 +              tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
 +}
 +
 +/* Chips other than 5700/5701 use the NVRAM for fetching info. */
 +static void __devinit tg3_nvram_init(struct tg3 *tp)
 +{
 +      tw32_f(GRC_EEPROM_ADDR,
 +           (EEPROM_ADDR_FSM_RESET |
 +            (EEPROM_DEFAULT_CLOCK_PERIOD <<
 +             EEPROM_ADDR_CLKPERD_SHIFT)));
 +
 +      msleep(1);
 +
 +      /* Enable seeprom accesses. */
 +      tw32_f(GRC_LOCAL_CTRL,
 +           tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
 +      udelay(100);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
 +              tg3_flag_set(tp, NVRAM);
 +
 +              if (tg3_nvram_lock(tp)) {
 +                      netdev_warn(tp->dev,
 +                                  "Cannot get nvram lock, %s failed\n",
 +                                  __func__);
 +                      return;
 +              }
 +              tg3_enable_nvram_access(tp);
 +
 +              tp->nvram_size = 0;
 +
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
 +                      tg3_get_5752_nvram_info(tp);
 +              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
 +                      tg3_get_5755_nvram_info(tp);
 +              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
 +                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 +                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 +                      tg3_get_5787_nvram_info(tp);
 +              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
 +                      tg3_get_5761_nvram_info(tp);
 +              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 +                      tg3_get_5906_nvram_info(tp);
 +              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 +                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +                      tg3_get_57780_nvram_info(tp);
 +              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +                      tg3_get_5717_nvram_info(tp);
 +              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +                      tg3_get_5720_nvram_info(tp);
 +              else
 +                      tg3_get_nvram_info(tp);
 +
 +              if (tp->nvram_size == 0)
 +                      tg3_get_nvram_size(tp);
 +
 +              tg3_disable_nvram_access(tp);
 +              tg3_nvram_unlock(tp);
 +
 +      } else {
 +              tg3_flag_clear(tp, NVRAM);
 +              tg3_flag_clear(tp, NVRAM_BUFFERED);
 +
 +              tg3_get_eeprom_size(tp);
 +      }
 +}
 +
 +static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
 +                                  u32 offset, u32 len, u8 *buf)
 +{
 +      int i, j, rc = 0;
 +      u32 val;
 +
 +      for (i = 0; i < len; i += 4) {
 +              u32 addr;
 +              __be32 data;
 +
 +              addr = offset + i;
 +
 +              memcpy(&data, buf + i, 4);
 +
 +              /*
 +               * The SEEPROM interface expects the data to always be opposite
 +               * the native endian format.  We accomplish this by reversing
 +               * all the operations that would have been performed on the
 +               * data from a call to tg3_nvram_read_be32().
 +               */
 +              tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
 +
 +              val = tr32(GRC_EEPROM_ADDR);
 +              tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
 +
 +              val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
 +                      EEPROM_ADDR_READ);
 +              tw32(GRC_EEPROM_ADDR, val |
 +                      (0 << EEPROM_ADDR_DEVID_SHIFT) |
 +                      (addr & EEPROM_ADDR_ADDR_MASK) |
 +                      EEPROM_ADDR_START |
 +                      EEPROM_ADDR_WRITE);
 +
 +              for (j = 0; j < 1000; j++) {
 +                      val = tr32(GRC_EEPROM_ADDR);
 +
 +                      if (val & EEPROM_ADDR_COMPLETE)
 +                              break;
 +                      msleep(1);
 +              }
 +              if (!(val & EEPROM_ADDR_COMPLETE)) {
 +                      rc = -EBUSY;
 +                      break;
 +              }
 +      }
 +
 +      return rc;
 +}
 +
 +/* offset and length are dword aligned */
 +static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
 +              u8 *buf)
 +{
 +      int ret = 0;
 +      u32 pagesize = tp->nvram_pagesize;
 +      u32 pagemask = pagesize - 1;
 +      u32 nvram_cmd;
 +      u8 *tmp;
 +
 +      tmp = kmalloc(pagesize, GFP_KERNEL);
 +      if (tmp == NULL)
 +              return -ENOMEM;
 +
 +      while (len) {
 +              int j;
 +              u32 phy_addr, page_off, size;
 +
 +              phy_addr = offset & ~pagemask;
 +
 +              for (j = 0; j < pagesize; j += 4) {
 +                      ret = tg3_nvram_read_be32(tp, phy_addr + j,
 +                                                (__be32 *) (tmp + j));
 +                      if (ret)
 +                              break;
 +              }
 +              if (ret)
 +                      break;
 +
 +              page_off = offset & pagemask;
 +              size = pagesize;
 +              if (len < size)
 +                      size = len;
 +
 +              len -= size;
 +
 +              memcpy(tmp + page_off, buf, size);
 +
 +              offset = offset + (pagesize - page_off);
 +
 +              tg3_enable_nvram_access(tp);
 +
 +              /*
 +               * Before we can erase the flash page, we need
 +               * to issue a special "write enable" command.
 +               */
 +              nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 +
 +              if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 +                      break;
 +
 +              /* Erase the target page */
 +              tw32(NVRAM_ADDR, phy_addr);
 +
 +              nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
 +                      NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
 +
 +              if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 +                      break;
 +
 +              /* Issue another write enable to start the write. */
 +              nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 +
 +              if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 +                      break;
 +
 +              for (j = 0; j < pagesize; j += 4) {
 +                      __be32 data;
 +
 +                      data = *((__be32 *) (tmp + j));
 +
 +                      tw32(NVRAM_WRDATA, be32_to_cpu(data));
 +
 +                      tw32(NVRAM_ADDR, phy_addr + j);
 +
 +                      nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
 +                              NVRAM_CMD_WR;
 +
 +                      if (j == 0)
 +                              nvram_cmd |= NVRAM_CMD_FIRST;
 +                      else if (j == (pagesize - 4))
 +                              nvram_cmd |= NVRAM_CMD_LAST;
 +
 +                      if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
 +                              break;
 +              }
 +              if (ret)
 +                      break;
 +      }
 +
 +      nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 +      tg3_nvram_exec_cmd(tp, nvram_cmd);
 +
 +      kfree(tmp);
 +
 +      return ret;
 +}
 +
 +/* offset and length are dword aligned */
 +static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
 +              u8 *buf)
 +{
 +      int i, ret = 0;
 +
 +      for (i = 0; i < len; i += 4, offset += 4) {
 +              u32 page_off, phy_addr, nvram_cmd;
 +              __be32 data;
 +
 +              memcpy(&data, buf + i, 4);
 +              tw32(NVRAM_WRDATA, be32_to_cpu(data));
 +
 +              page_off = offset % tp->nvram_pagesize;
 +
 +              phy_addr = tg3_nvram_phys_addr(tp, offset);
 +
 +              tw32(NVRAM_ADDR, phy_addr);
 +
 +              nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
 +
 +              if (page_off == 0 || i == 0)
 +                      nvram_cmd |= NVRAM_CMD_FIRST;
 +              if (page_off == (tp->nvram_pagesize - 4))
 +                      nvram_cmd |= NVRAM_CMD_LAST;
 +
 +              if (i == (len - 4))
 +                      nvram_cmd |= NVRAM_CMD_LAST;
 +
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
 +                  !tg3_flag(tp, 5755_PLUS) &&
 +                  (tp->nvram_jedecnum == JEDEC_ST) &&
 +                  (nvram_cmd & NVRAM_CMD_FIRST)) {
 +
 +                      if ((ret = tg3_nvram_exec_cmd(tp,
 +                              NVRAM_CMD_WREN | NVRAM_CMD_GO |
 +                              NVRAM_CMD_DONE)))
 +
 +                              break;
 +              }
 +              if (!tg3_flag(tp, FLASH)) {
 +                      /* We always do complete word writes to eeprom. */
 +                      nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
 +              }
 +
 +              if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
 +                      break;
 +      }
 +      return ret;
 +}
 +
 +/* offset and length are dword aligned */
 +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 +{
 +      int ret;
 +
 +      if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
 +              tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
 +                     ~GRC_LCLCTRL_GPIO_OUTPUT1);
 +              udelay(40);
 +      }
 +
 +      if (!tg3_flag(tp, NVRAM)) {
 +              ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
 +      } else {
 +              u32 grc_mode;
 +
 +              ret = tg3_nvram_lock(tp);
 +              if (ret)
 +                      return ret;
 +
 +              tg3_enable_nvram_access(tp);
 +              if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
 +                      tw32(NVRAM_WRITE1, 0x406);
 +
 +              grc_mode = tr32(GRC_MODE);
 +              tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
 +
 +              if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
 +                      ret = tg3_nvram_write_block_buffered(tp, offset, len,
 +                              buf);
 +              } else {
 +                      ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
 +                              buf);
 +              }
 +
 +              grc_mode = tr32(GRC_MODE);
 +              tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
 +
 +              tg3_disable_nvram_access(tp);
 +              tg3_nvram_unlock(tp);
 +      }
 +
 +      if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
 +              tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 +              udelay(40);
 +      }
 +
 +      return ret;
 +}
 +
 +struct subsys_tbl_ent {
 +      u16 subsys_vendor, subsys_devid;
 +      u32 phy_id;
 +};
 +
 +static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
 +      /* Broadcom boards. */
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
 +      { TG3PCI_SUBVENDOR_ID_BROADCOM,
 +        TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
 +
 +      /* 3com boards. */
 +      { TG3PCI_SUBVENDOR_ID_3COM,
 +        TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
 +      { TG3PCI_SUBVENDOR_ID_3COM,
 +        TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_3COM,
 +        TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
 +      { TG3PCI_SUBVENDOR_ID_3COM,
 +        TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_3COM,
 +        TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
 +
 +      /* DELL boards. */
 +      { TG3PCI_SUBVENDOR_ID_DELL,
 +        TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
 +      { TG3PCI_SUBVENDOR_ID_DELL,
 +        TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
 +      { TG3PCI_SUBVENDOR_ID_DELL,
 +        TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
 +      { TG3PCI_SUBVENDOR_ID_DELL,
 +        TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
 +
 +      /* Compaq boards. */
 +      { TG3PCI_SUBVENDOR_ID_COMPAQ,
 +        TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_COMPAQ,
 +        TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_COMPAQ,
 +        TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
 +      { TG3PCI_SUBVENDOR_ID_COMPAQ,
 +        TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
 +      { TG3PCI_SUBVENDOR_ID_COMPAQ,
 +        TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
 +
 +      /* IBM boards. */
 +      { TG3PCI_SUBVENDOR_ID_IBM,
 +        TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
 +};
 +
 +static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
 +{
 +      int i;
 +
 +      for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
 +              if ((subsys_id_to_phy_id[i].subsys_vendor ==
 +                   tp->pdev->subsystem_vendor) &&
 +                  (subsys_id_to_phy_id[i].subsys_devid ==
 +                   tp->pdev->subsystem_device))
 +                      return &subsys_id_to_phy_id[i];
 +      }
 +      return NULL;
 +}
 +
 +static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 +{
 +      u32 val;
 +
 +      tp->phy_id = TG3_PHY_ID_INVALID;
 +      tp->led_ctrl = LED_CTRL_MODE_PHY_1;
 +
 +      /* Assume an onboard, WOL-capable device by default.  */
 +      tg3_flag_set(tp, EEPROM_WRITE_PROT);
 +      tg3_flag_set(tp, WOL_CAP);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
 +                      tg3_flag_clear(tp, EEPROM_WRITE_PROT);
 +                      tg3_flag_set(tp, IS_NIC);
 +              }
 +              val = tr32(VCPU_CFGSHDW);
 +              if (val & VCPU_CFGSHDW_ASPM_DBNC)
 +                      tg3_flag_set(tp, ASPM_WORKAROUND);
 +              if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
 +                  (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
 +                      tg3_flag_set(tp, WOL_ENABLE);
 +                      device_set_wakeup_enable(&tp->pdev->dev, true);
 +              }
 +              goto done;
 +      }
 +
 +      tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 +      if (val == NIC_SRAM_DATA_SIG_MAGIC) {
 +              u32 nic_cfg, led_cfg;
 +              u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
 +              int eeprom_phy_serdes = 0;
 +
 +              tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
 +              tp->nic_sram_data_cfg = nic_cfg;
 +
 +              tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
 +              ver >>= NIC_SRAM_DATA_VER_SHIFT;
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
 +                  (ver > 0) && (ver < 0x100))
 +                      tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
 +
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 +                      tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
 +
 +              if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
 +                  NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
 +                      eeprom_phy_serdes = 1;
 +
 +              tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
 +              if (nic_phy_id != 0) {
 +                      u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
 +                      u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
 +
 +                      eeprom_phy_id  = (id1 >> 16) << 10;
 +                      eeprom_phy_id |= (id2 & 0xfc00) << 16;
 +                      eeprom_phy_id |= (id2 & 0x03ff) <<  0;
 +              } else
 +                      eeprom_phy_id = 0;
 +
 +              tp->phy_id = eeprom_phy_id;
 +              if (eeprom_phy_serdes) {
 +                      if (!tg3_flag(tp, 5705_PLUS))
 +                              tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
 +                      else
 +                              tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
 +              }
 +
 +              if (tg3_flag(tp, 5750_PLUS))
 +                      led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
 +                                  SHASTA_EXT_LED_MODE_MASK);
 +              else
 +                      led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
 +
 +              switch (led_cfg) {
 +              default:
 +              case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
 +                      tp->led_ctrl = LED_CTRL_MODE_PHY_1;
 +                      break;
 +
 +              case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
 +                      tp->led_ctrl = LED_CTRL_MODE_PHY_2;
 +                      break;
 +
 +              case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
 +                      tp->led_ctrl = LED_CTRL_MODE_MAC;
 +
 +                      /* Default to PHY_1_MODE if 0 (MAC_MODE) is
 +                       * read on some older 5700/5701 bootcode.
 +                       */
 +                      if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
 +                          ASIC_REV_5700 ||
 +                          GET_ASIC_REV(tp->pci_chip_rev_id) ==
 +                          ASIC_REV_5701)
 +                              tp->led_ctrl = LED_CTRL_MODE_PHY_1;
 +
 +                      break;
 +
 +              case SHASTA_EXT_LED_SHARED:
 +                      tp->led_ctrl = LED_CTRL_MODE_SHARED;
 +                      if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
 +                          tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
 +                              tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
 +                                               LED_CTRL_MODE_PHY_2);
 +                      break;
 +
 +              case SHASTA_EXT_LED_MAC:
 +                      tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
 +                      break;
 +
 +              case SHASTA_EXT_LED_COMBO:
 +                      tp->led_ctrl = LED_CTRL_MODE_COMBO;
 +                      if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
 +                              tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
 +                                               LED_CTRL_MODE_PHY_2);
 +                      break;
 +
 +              }
 +
 +              if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
 +                  tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
 +                      tp->led_ctrl = LED_CTRL_MODE_PHY_2;
 +
 +              if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
 +                      tp->led_ctrl = LED_CTRL_MODE_PHY_1;
 +
 +              if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
 +                      tg3_flag_set(tp, EEPROM_WRITE_PROT);
 +                      if ((tp->pdev->subsystem_vendor ==
 +                           PCI_VENDOR_ID_ARIMA) &&
 +                          (tp->pdev->subsystem_device == 0x205a ||
 +                           tp->pdev->subsystem_device == 0x2063))
 +                              tg3_flag_clear(tp, EEPROM_WRITE_PROT);
 +              } else {
 +                      tg3_flag_clear(tp, EEPROM_WRITE_PROT);
 +                      tg3_flag_set(tp, IS_NIC);
 +              }
 +
 +              if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 +                      tg3_flag_set(tp, ENABLE_ASF);
 +                      if (tg3_flag(tp, 5750_PLUS))
 +                              tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
 +              }
 +
 +              if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
 +                  tg3_flag(tp, 5750_PLUS))
 +                      tg3_flag_set(tp, ENABLE_APE);
 +
 +              if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
 +                  !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
 +                      tg3_flag_clear(tp, WOL_CAP);
 +
 +              if (tg3_flag(tp, WOL_CAP) &&
 +                  (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
 +                      tg3_flag_set(tp, WOL_ENABLE);
 +                      device_set_wakeup_enable(&tp->pdev->dev, true);
 +              }
 +
 +              if (cfg2 & (1 << 17))
 +                      tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
 +
 +              /* serdes signal pre-emphasis in register 0x590 set by */
 +              /* bootcode if bit 18 is set */
 +              if (cfg2 & (1 << 18))
 +                      tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
 +
 +              if ((tg3_flag(tp, 57765_PLUS) ||
 +                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 +                    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
 +                  (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
 +                      tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
 +
 +              if (tg3_flag(tp, PCI_EXPRESS) &&
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 +                  !tg3_flag(tp, 57765_PLUS)) {
 +                      u32 cfg3;
 +
 +                      tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
 +                      if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
 +                              tg3_flag_set(tp, ASPM_WORKAROUND);
 +              }
 +
 +              if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
 +                      tg3_flag_set(tp, RGMII_INBAND_DISABLE);
 +              if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
 +                      tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
 +              if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
 +                      tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
 +      }
 +done:
 +      if (tg3_flag(tp, WOL_CAP))
 +              device_set_wakeup_enable(&tp->pdev->dev,
 +                                       tg3_flag(tp, WOL_ENABLE));
 +      else
 +              device_set_wakeup_capable(&tp->pdev->dev, false);
 +}
 +
 +static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
 +{
 +      int i;
 +      u32 val;
 +
 +      tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
 +      tw32(OTP_CTRL, cmd);
 +
 +      /* Wait for up to 1 ms for command to execute. */
 +      for (i = 0; i < 100; i++) {
 +              val = tr32(OTP_STATUS);
 +              if (val & OTP_STATUS_CMD_DONE)
 +                      break;
 +              udelay(10);
 +      }
 +
 +      return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
 +}
 +
 +/* Read the gphy configuration from the OTP region of the chip.  The gphy
 + * configuration is a 32-bit value that straddles the alignment boundary.
 + * We do two 32-bit reads and then shift and merge the results.
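 + *
 + * For example, thalf_otp = 0xAAAABBBB and bhalf_otp = 0xCCCCDDDD would
 + * merge into ((0xBBBB << 16) | 0xCCCC) = 0xBBBBCCCC: the low half of the
 + * first read supplies the upper 16 bits and the high half of the second
 + * read supplies the lower 16 bits of the returned value.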
 + */
 +static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
 +{
 +      u32 bhalf_otp, thalf_otp;
 +
 +      tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
 +
 +      if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
 +              return 0;
 +
 +      tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
 +
 +      if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
 +              return 0;
 +
 +      thalf_otp = tr32(OTP_READ_DATA);
 +
 +      tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
 +
 +      if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
 +              return 0;
 +
 +      bhalf_otp = tr32(OTP_READ_DATA);
 +
 +      return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
 +}
 +
 +static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
 +{
 +      u32 adv = ADVERTISED_Autoneg |
 +                ADVERTISED_Pause;
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 +              adv |= ADVERTISED_1000baseT_Half |
 +                     ADVERTISED_1000baseT_Full;
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 +              adv |= ADVERTISED_100baseT_Half |
 +                     ADVERTISED_100baseT_Full |
 +                     ADVERTISED_10baseT_Half |
 +                     ADVERTISED_10baseT_Full |
 +                     ADVERTISED_TP;
 +      else
 +              adv |= ADVERTISED_FIBRE;
 +
 +      tp->link_config.advertising = adv;
 +      tp->link_config.speed = SPEED_INVALID;
 +      tp->link_config.duplex = DUPLEX_INVALID;
 +      tp->link_config.autoneg = AUTONEG_ENABLE;
 +      tp->link_config.active_speed = SPEED_INVALID;
 +      tp->link_config.active_duplex = DUPLEX_INVALID;
 +      tp->link_config.orig_speed = SPEED_INVALID;
 +      tp->link_config.orig_duplex = DUPLEX_INVALID;
 +      tp->link_config.orig_autoneg = AUTONEG_INVALID;
 +}
 +
 +static int __devinit tg3_phy_probe(struct tg3 *tp)
 +{
 +      u32 hw_phy_id_1, hw_phy_id_2;
 +      u32 hw_phy_id, hw_phy_id_masked;
 +      int err;
 +
 +      /* flow control autonegotiation is default behavior */
 +      tg3_flag_set(tp, PAUSE_AUTONEG);
 +      tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
 +
 +      if (tg3_flag(tp, USE_PHYLIB))
 +              return tg3_phy_init(tp);
 +
 +      /* Reading the PHY ID register can conflict with ASF
 +       * firmware access to the PHY hardware.
 +       */
 +      err = 0;
 +      if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
 +              hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
 +      } else {
 +              /* Now read the physical PHY_ID from the chip and verify
 +               * that it is sane.  If it doesn't look good, we fall back
 +               * to either the hard-coded table based PHY_ID or, failing
 +               * that, the value found in the eeprom area.
 +               */
 +              err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
 +              err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
 +
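 +              /* The packing below mirrors the eeprom_phy_id layout built in
 +               * tg3_get_eeprom_hw_cfg(): the 16 bits of MII_PHYSID1 land in
 +               * bits 25:10, the top six bits of MII_PHYSID2 in bits 31:26,
 +               * and the low ten bits of MII_PHYSID2 (model and revision) in
 +               * bits 9:0.
 +               */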
 +              hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
 +              hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
 +              hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
 +
 +              hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
 +      }
 +
 +      if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
 +              tp->phy_id = hw_phy_id;
 +              if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
 +                      tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
 +              else
 +                      tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
 +      } else {
 +              if (tp->phy_id != TG3_PHY_ID_INVALID) {
 +                      /* Do nothing, phy ID already set up in
 +                       * tg3_get_eeprom_hw_cfg().
 +                       */
 +              } else {
 +                      struct subsys_tbl_ent *p;
 +
 +                      /* No eeprom signature?  Try the hardcoded
 +                       * subsys device table.
 +                       */
 +                      p = tg3_lookup_by_subsys(tp);
 +                      if (!p)
 +                              return -ENODEV;
 +
 +                      tp->phy_id = p->phy_id;
 +                      if (!tp->phy_id ||
 +                          tp->phy_id == TG3_PHY_ID_BCM8002)
 +                              tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
 +              }
 +      }
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
 +          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
 +           (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
 +            tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
 +           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
 +            tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
 +              tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
 +
 +      tg3_phy_init_link_config(tp);
 +
 +      if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
 +          !tg3_flag(tp, ENABLE_APE) &&
 +          !tg3_flag(tp, ENABLE_ASF)) {
 +              u32 bmsr, mask;
 +
 +              tg3_readphy(tp, MII_BMSR, &bmsr);
 +              if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 +                  (bmsr & BMSR_LSTATUS))
 +                      goto skip_phy_reset;
 +
 +              err = tg3_phy_reset(tp);
 +              if (err)
 +                      return err;
 +
 +              tg3_phy_set_wirespeed(tp);
 +
 +              mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
 +                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
 +                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
 +              if (!tg3_copper_is_advertising_all(tp, mask)) {
 +                      tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
 +                                          tp->link_config.flowctrl);
 +
 +                      tg3_writephy(tp, MII_BMCR,
 +                                   BMCR_ANENABLE | BMCR_ANRESTART);
 +              }
 +      }
 +
 +skip_phy_reset:
 +      if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 +              err = tg3_init_5401phy_dsp(tp);
 +              if (err)
 +                      return err;
 +
 +              err = tg3_init_5401phy_dsp(tp);
 +      }
 +
 +      return err;
 +}
 +
 +static void __devinit tg3_read_vpd(struct tg3 *tp)
 +{
 +      u8 *vpd_data;
 +      unsigned int block_end, rosize, len;
 +      u32 vpdlen;
 +      int j, i = 0;
 +
 +      vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
 +      if (!vpd_data)
 +              goto out_no_vpd;
 +
 +      i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
 +      if (i < 0)
 +              goto out_not_found;
 +
 +      rosize = pci_vpd_lrdt_size(&vpd_data[i]);
 +      block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
 +      i += PCI_VPD_LRDT_TAG_SIZE;
 +
 +      if (block_end > vpdlen)
 +              goto out_not_found;
 +
 +      j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
 +                                    PCI_VPD_RO_KEYWORD_MFR_ID);
 +      if (j > 0) {
 +              len = pci_vpd_info_field_size(&vpd_data[j]);
 +
 +              j += PCI_VPD_INFO_FLD_HDR_SIZE;
 +              if (j + len > block_end || len != 4 ||
 +                  memcmp(&vpd_data[j], "1028", 4))
 +                      goto partno;
 +
 +              j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
 +                                            PCI_VPD_RO_KEYWORD_VENDOR0);
 +              if (j < 0)
 +                      goto partno;
 +
 +              len = pci_vpd_info_field_size(&vpd_data[j]);
 +
 +              j += PCI_VPD_INFO_FLD_HDR_SIZE;
 +              if (j + len > block_end)
 +                      goto partno;
 +
 +              memcpy(tp->fw_ver, &vpd_data[j], len);
 +              strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
 +      }
 +
 +partno:
 +      i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
 +                                    PCI_VPD_RO_KEYWORD_PARTNO);
 +      if (i < 0)
 +              goto out_not_found;
 +
 +      len = pci_vpd_info_field_size(&vpd_data[i]);
 +
 +      i += PCI_VPD_INFO_FLD_HDR_SIZE;
 +      if (len > TG3_BPN_SIZE ||
 +          (len + i) > vpdlen)
 +              goto out_not_found;
 +
 +      memcpy(tp->board_part_number, &vpd_data[i], len);
 +
 +out_not_found:
 +      kfree(vpd_data);
 +      if (tp->board_part_number[0])
 +              return;
 +
 +out_no_vpd:
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
 +              if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
 +                      strcpy(tp->board_part_number, "BCM5717");
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
 +                      strcpy(tp->board_part_number, "BCM5718");
 +              else
 +                      goto nomatch;
 +      } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
 +              if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
 +                      strcpy(tp->board_part_number, "BCM57780");
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
 +                      strcpy(tp->board_part_number, "BCM57760");
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
 +                      strcpy(tp->board_part_number, "BCM57790");
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
 +                      strcpy(tp->board_part_number, "BCM57788");
 +              else
 +                      goto nomatch;
 +      } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
 +              if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
 +                      strcpy(tp->board_part_number, "BCM57761");
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
 +                      strcpy(tp->board_part_number, "BCM57765");
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
 +                      strcpy(tp->board_part_number, "BCM57781");
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
 +                      strcpy(tp->board_part_number, "BCM57785");
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
 +                      strcpy(tp->board_part_number, "BCM57791");
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
 +                      strcpy(tp->board_part_number, "BCM57795");
 +              else
 +                      goto nomatch;
 +      } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              strcpy(tp->board_part_number, "BCM95906");
 +      } else {
 +nomatch:
 +              strcpy(tp->board_part_number, "none");
 +      }
 +}
 +
 +static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
 +{
 +      u32 val;
 +
 +      if (tg3_nvram_read(tp, offset, &val) ||
 +          (val & 0xfc000000) != 0x0c000000 ||
 +          tg3_nvram_read(tp, offset + 4, &val) ||
 +          val != 0)
 +              return 0;
 +
 +      return 1;
 +}
 +
 +static void __devinit tg3_read_bc_ver(struct tg3 *tp)
 +{
 +      u32 val, offset, start, ver_offset;
 +      int i, dst_off;
 +      bool newver = false;
 +
 +      if (tg3_nvram_read(tp, 0xc, &offset) ||
 +          tg3_nvram_read(tp, 0x4, &start))
 +              return;
 +
 +      offset = tg3_nvram_logical_addr(tp, offset);
 +
 +      if (tg3_nvram_read(tp, offset, &val))
 +              return;
 +
 +      if ((val & 0xfc000000) == 0x0c000000) {
 +              if (tg3_nvram_read(tp, offset + 4, &val))
 +                      return;
 +
 +              if (val == 0)
 +                      newver = true;
 +      }
 +
 +      dst_off = strlen(tp->fw_ver);
 +
 +      if (newver) {
 +              if (TG3_VER_SIZE - dst_off < 16 ||
 +                  tg3_nvram_read(tp, offset + 8, &ver_offset))
 +                      return;
 +
 +              offset = offset + ver_offset - start;
 +              for (i = 0; i < 16; i += 4) {
 +                      __be32 v;
 +                      if (tg3_nvram_read_be32(tp, offset + i, &v))
 +                              return;
 +
 +                      memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
 +              }
 +      } else {
 +              u32 major, minor;
 +
 +              if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
 +                      return;
 +
 +              major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
 +                      TG3_NVM_BCVER_MAJSFT;
 +              minor = ver_offset & TG3_NVM_BCVER_MINMSK;
 +              snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
 +                       "v%d.%02d", major, minor);
 +      }
 +}
 +
 +static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
 +{
 +      u32 val, major, minor;
 +
 +      /* Use native endian representation */
 +      if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
 +              return;
 +
 +      major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
 +              TG3_NVM_HWSB_CFG1_MAJSFT;
 +      minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
 +              TG3_NVM_HWSB_CFG1_MINSFT;
 +
 +      snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
 +}
 +
 +static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
 +{
 +      u32 offset, major, minor, build;
 +
 +      strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
 +
 +      if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
 +              return;
 +
 +      switch (val & TG3_EEPROM_SB_REVISION_MASK) {
 +      case TG3_EEPROM_SB_REVISION_0:
 +              offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
 +              break;
 +      case TG3_EEPROM_SB_REVISION_2:
 +              offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
 +              break;
 +      case TG3_EEPROM_SB_REVISION_3:
 +              offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
 +              break;
 +      case TG3_EEPROM_SB_REVISION_4:
 +              offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
 +              break;
 +      case TG3_EEPROM_SB_REVISION_5:
 +              offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
 +              break;
 +      case TG3_EEPROM_SB_REVISION_6:
 +              offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
 +              break;
 +      default:
 +              return;
 +      }
 +
 +      if (tg3_nvram_read(tp, offset, &val))
 +              return;
 +
 +      build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
 +              TG3_EEPROM_SB_EDH_BLD_SHFT;
 +      major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
 +              TG3_EEPROM_SB_EDH_MAJ_SHFT;
 +      minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
 +
 +      if (minor > 99 || build > 26)
 +              return;
 +
 +      offset = strlen(tp->fw_ver);
 +      snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
 +               " v%d.%02d", major, minor);
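 +      /* For example, if fw_ver held only "sb" at this point, major = 1,
 +       * minor = 2 and build = 3 would give "sb v1.02" here and "sb v1.02c"
 +       * once the build letter is appended below.
 +       */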
 +
 +      if (build > 0) {
 +              offset = strlen(tp->fw_ver);
 +              if (offset < TG3_VER_SIZE - 1)
 +                      tp->fw_ver[offset] = 'a' + build - 1;
 +      }
 +}
 +
 +static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
 +{
 +      u32 val, offset, start;
 +      int i, vlen;
 +
 +      for (offset = TG3_NVM_DIR_START;
 +           offset < TG3_NVM_DIR_END;
 +           offset += TG3_NVM_DIRENT_SIZE) {
 +              if (tg3_nvram_read(tp, offset, &val))
 +                      return;
 +
 +              if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
 +                      break;
 +      }
 +
 +      if (offset == TG3_NVM_DIR_END)
 +              return;
 +
 +      if (!tg3_flag(tp, 5705_PLUS))
 +              start = 0x08000000;
 +      else if (tg3_nvram_read(tp, offset - 4, &start))
 +              return;
 +
 +      if (tg3_nvram_read(tp, offset + 4, &offset) ||
 +          !tg3_fw_img_is_valid(tp, offset) ||
 +          tg3_nvram_read(tp, offset + 8, &val))
 +              return;
 +
 +      offset += val - start;
 +
 +      vlen = strlen(tp->fw_ver);
 +
 +      tp->fw_ver[vlen++] = ',';
 +      tp->fw_ver[vlen++] = ' ';
 +
 +      for (i = 0; i < 4; i++) {
 +              __be32 v;
 +              if (tg3_nvram_read_be32(tp, offset, &v))
 +                      return;
 +
 +              offset += sizeof(v);
 +
 +              if (vlen > TG3_VER_SIZE - sizeof(v)) {
 +                      memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
 +                      break;
 +              }
 +
 +              memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
 +              vlen += sizeof(v);
 +      }
 +}
 +
 +static void __devinit tg3_read_dash_ver(struct tg3 *tp)
 +{
 +      int vlen;
 +      u32 apedata;
 +      char *fwtype;
 +
 +      if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
 +              return;
 +
 +      apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
 +      if (apedata != APE_SEG_SIG_MAGIC)
 +              return;
 +
 +      apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
 +      if (!(apedata & APE_FW_STATUS_READY))
 +              return;
 +
 +      apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
 +
 +      if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
 +              tg3_flag_set(tp, APE_HAS_NCSI);
 +              fwtype = "NCSI";
 +      } else {
 +              fwtype = "DASH";
 +      }
 +
 +      vlen = strlen(tp->fw_ver);
 +
 +      snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
 +               fwtype,
 +               (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
 +               (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
 +               (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
 +               (apedata & APE_FW_VERSION_BLDMSK));
 +}
 +
 +static void __devinit tg3_read_fw_ver(struct tg3 *tp)
 +{
 +      u32 val;
 +      bool vpd_vers = false;
 +
 +      if (tp->fw_ver[0] != 0)
 +              vpd_vers = true;
 +
 +      if (tg3_flag(tp, NO_NVRAM)) {
 +              strcat(tp->fw_ver, "sb");
 +              return;
 +      }
 +
 +      if (tg3_nvram_read(tp, 0, &val))
 +              return;
 +
 +      if (val == TG3_EEPROM_MAGIC)
 +              tg3_read_bc_ver(tp);
 +      else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
 +              tg3_read_sb_ver(tp, val);
 +      else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
 +              tg3_read_hwsb_ver(tp);
 +      else
 +              return;
 +
 +      if (vpd_vers)
 +              goto done;
 +
 +      if (tg3_flag(tp, ENABLE_APE)) {
 +              if (tg3_flag(tp, ENABLE_ASF))
 +                      tg3_read_dash_ver(tp);
 +      } else if (tg3_flag(tp, ENABLE_ASF)) {
 +              tg3_read_mgmtfw_ver(tp);
 +      }
 +
 +done:
 +      tp->fw_ver[TG3_VER_SIZE - 1] = 0;
 +}
 +
 +static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
 +
 +static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
 +{
 +      if (tg3_flag(tp, LRG_PROD_RING_CAP))
 +              return TG3_RX_RET_MAX_SIZE_5717;
 +      else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
 +              return TG3_RX_RET_MAX_SIZE_5700;
 +      else
 +              return TG3_RX_RET_MAX_SIZE_5705;
 +}
 +
 +static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
 +      { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
 +      { },
 +};
 +
 +static int __devinit tg3_get_invariants(struct tg3 *tp)
 +{
 +      u32 misc_ctrl_reg;
 +      u32 pci_state_reg, grc_misc_cfg;
 +      u32 val;
 +      u16 pci_cmd;
 +      int err;
 +
 +      /* Force memory write invalidate off.  If we leave it on,
 +       * then on 5700_BX chips we have to enable a workaround.
 +       * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
 +       * to match the cacheline size.  The Broadcom driver has this
 +       * workaround but turns MWI off all the time and so never uses
 +       * it.  This seems to suggest that the workaround is insufficient.
 +       */
 +      pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
 +      pci_cmd &= ~PCI_COMMAND_INVALIDATE;
 +      pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
 +
 +      /* Important! -- Make sure register accesses are byteswapped
 +       * correctly.  Also, for those chips that require it, make
 +       * sure that indirect register accesses are enabled before
 +       * the first operation.
 +       */
 +      pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
 +                            &misc_ctrl_reg);
 +      tp->misc_host_ctrl |= (misc_ctrl_reg &
 +                             MISC_HOST_CTRL_CHIPREV);
 +      pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
 +                             tp->misc_host_ctrl);
 +
 +      tp->pci_chip_rev_id = (misc_ctrl_reg >>
 +                             MISC_HOST_CTRL_CHIPREV_SHIFT);
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
 +              u32 prod_id_asic_rev;
 +
 +              if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
 +                  tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
 +                  tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
 +                  tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
 +                      pci_read_config_dword(tp->pdev,
 +                                            TG3PCI_GEN2_PRODID_ASICREV,
 +                                            &prod_id_asic_rev);
 +              else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
 +                       tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
 +                       tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
 +                       tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
 +                       tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
 +                       tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
 +                      pci_read_config_dword(tp->pdev,
 +                                            TG3PCI_GEN15_PRODID_ASICREV,
 +                                            &prod_id_asic_rev);
 +              else
 +                      pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
 +                                            &prod_id_asic_rev);
 +
 +              tp->pci_chip_rev_id = prod_id_asic_rev;
 +      }
 +
 +      /* Wrong chip ID in 5752 A0. This code can be removed later
 +       * as A0 is not in production.
 +       */
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
 +              tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
 +
 +      /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
 +       * we need to disable memory and use config. cycles
 +       * only to access all registers. The 5702/03 chips
 +       * can mistakenly decode the special cycles from the
 +       * ICH chipsets as memory write cycles, causing corruption
 +       * of register and memory space. Only certain ICH bridges
 +       * will drive special cycles with non-zero data during the
 +       * address phase which can fall within the 5703's address
 +       * range. This is not an ICH bug as the PCI spec allows
 +       * non-zero address during special cycles. However, only
 +       * these ICH bridges are known to drive non-zero addresses
 +       * during special cycles.
 +       *
 +       * Since special cycles do not cross PCI bridges, we only
 +       * enable this workaround if the 5703 is on the secondary
 +       * bus of these ICH bridges.
 +       */
 +      if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
 +          (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
 +              static struct tg3_dev_id {
 +                      u32     vendor;
 +                      u32     device;
 +                      u32     rev;
 +              } ich_chipsets[] = {
 +                      { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
 +                        PCI_ANY_ID },
 +                      { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
 +                        PCI_ANY_ID },
 +                      { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
 +                        0xa },
 +                      { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
 +                        PCI_ANY_ID },
 +                      { },
 +              };
 +              struct tg3_dev_id *pci_id = &ich_chipsets[0];
 +              struct pci_dev *bridge = NULL;
 +
 +              while (pci_id->vendor != 0) {
 +                      bridge = pci_get_device(pci_id->vendor, pci_id->device,
 +                                              bridge);
 +                      if (!bridge) {
 +                              pci_id++;
 +                              continue;
 +                      }
 +                      if (pci_id->rev != PCI_ANY_ID) {
 +                              if (bridge->revision > pci_id->rev)
 +                                      continue;
 +                      }
 +                      if (bridge->subordinate &&
 +                          (bridge->subordinate->number ==
 +                           tp->pdev->bus->number)) {
 +                              tg3_flag_set(tp, ICH_WORKAROUND);
 +                              pci_dev_put(bridge);
 +                              break;
 +                      }
 +              }
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 +              static struct tg3_dev_id {
 +                      u32     vendor;
 +                      u32     device;
 +              } bridge_chipsets[] = {
 +                      { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
 +                      { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
 +                      { },
 +              };
 +              struct tg3_dev_id *pci_id = &bridge_chipsets[0];
 +              struct pci_dev *bridge = NULL;
 +
 +              while (pci_id->vendor != 0) {
 +                      bridge = pci_get_device(pci_id->vendor,
 +                                              pci_id->device,
 +                                              bridge);
 +                      if (!bridge) {
 +                              pci_id++;
 +                              continue;
 +                      }
 +                      if (bridge->subordinate &&
 +                          (bridge->subordinate->number <=
 +                           tp->pdev->bus->number) &&
 +                          (bridge->subordinate->subordinate >=
 +                           tp->pdev->bus->number)) {
 +                              tg3_flag_set(tp, 5701_DMA_BUG);
 +                              pci_dev_put(bridge);
 +                              break;
 +                      }
 +              }
 +      }
 +
 +      /* The EPB bridge inside 5714, 5715, and 5780 cannot support
 +       * DMA addresses > 40-bit. This bridge may have additional
 +       * 57xx devices behind it, in some 4-port NIC designs for example.
 +       * Any tg3 device found behind the bridge will also need the 40-bit
 +       * DMA workaround.
 +       */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 +              tg3_flag_set(tp, 5780_CLASS);
 +              tg3_flag_set(tp, 40BIT_DMA_BUG);
 +              tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
 +      } else {
 +              struct pci_dev *bridge = NULL;
 +
 +              do {
 +                      bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
 +                                              PCI_DEVICE_ID_SERVERWORKS_EPB,
 +                                              bridge);
 +                      if (bridge && bridge->subordinate &&
 +                          (bridge->subordinate->number <=
 +                           tp->pdev->bus->number) &&
 +                          (bridge->subordinate->subordinate >=
 +                           tp->pdev->bus->number)) {
 +                              tg3_flag_set(tp, 40BIT_DMA_BUG);
 +                              pci_dev_put(bridge);
 +                              break;
 +                      }
 +              } while (bridge);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
 +              tp->pdev_peer = tg3_find_peer(tp);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              tg3_flag_set(tp, 5717_PLUS);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
 +          tg3_flag(tp, 5717_PLUS))
 +              tg3_flag_set(tp, 57765_PLUS);
 +
 +      /* Intentionally exclude ASIC_REV_5906 */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 +          tg3_flag(tp, 57765_PLUS))
 +              tg3_flag_set(tp, 5755_PLUS);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
 +          tg3_flag(tp, 5755_PLUS) ||
 +          tg3_flag(tp, 5780_CLASS))
 +              tg3_flag_set(tp, 5750_PLUS);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
 +          tg3_flag(tp, 5750_PLUS))
 +              tg3_flag_set(tp, 5705_PLUS);
 +
 +      /* Determine TSO capabilities */
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
 +              ; /* Do nothing. HW bug. */
 +      else if (tg3_flag(tp, 57765_PLUS))
 +              tg3_flag_set(tp, HW_TSO_3);
 +      else if (tg3_flag(tp, 5755_PLUS) ||
 +               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 +              tg3_flag_set(tp, HW_TSO_2);
 +      else if (tg3_flag(tp, 5750_PLUS)) {
 +              tg3_flag_set(tp, HW_TSO_1);
 +              tg3_flag_set(tp, TSO_BUG);
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
 +                  tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
 +                      tg3_flag_clear(tp, TSO_BUG);
 +      } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
 +                 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
 +                 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
 +              tg3_flag_set(tp, TSO_BUG);
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
 +                      tp->fw_needed = FIRMWARE_TG3TSO5;
 +              else
 +                      tp->fw_needed = FIRMWARE_TG3TSO;
 +      }
 +
 +      /* Selectively allow TSO based on operating conditions */
 +      if (tg3_flag(tp, HW_TSO_1) ||
 +          tg3_flag(tp, HW_TSO_2) ||
 +          tg3_flag(tp, HW_TSO_3) ||
 +          (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
 +              tg3_flag_set(tp, TSO_CAPABLE);
 +      else {
 +              tg3_flag_clear(tp, TSO_CAPABLE);
 +              tg3_flag_clear(tp, TSO_BUG);
 +              tp->fw_needed = NULL;
 +      }
 +
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
 +              tp->fw_needed = FIRMWARE_TG3;
 +
 +      tp->irq_max = 1;
 +
 +      if (tg3_flag(tp, 5750_PLUS)) {
 +              tg3_flag_set(tp, SUPPORT_MSI);
 +              if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
 +                  GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
 +                  (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
 +                   tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
 +                   tp->pdev_peer == tp->pdev))
 +                      tg3_flag_clear(tp, SUPPORT_MSI);
 +
 +              if (tg3_flag(tp, 5755_PLUS) ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +                      tg3_flag_set(tp, 1SHOT_MSI);
 +              }
 +
 +              if (tg3_flag(tp, 57765_PLUS)) {
 +                      tg3_flag_set(tp, SUPPORT_MSIX);
 +                      tp->irq_max = TG3_IRQ_MAX_VECS;
 +              }
 +      }
 +
 +      if (tg3_flag(tp, 5755_PLUS))
 +              tg3_flag_set(tp, SHORT_DMA_BUG);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +              tg3_flag_set(tp, 4K_FIFO_LIMIT);
 +
 +      if (tg3_flag(tp, 5717_PLUS))
 +              tg3_flag_set(tp, LRG_PROD_RING_CAP);
 +
 +      if (tg3_flag(tp, 57765_PLUS) &&
 +          tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
 +              tg3_flag_set(tp, USE_JUMBO_BDFLAG);
 +
 +      if (!tg3_flag(tp, 5705_PLUS) ||
 +          tg3_flag(tp, 5780_CLASS) ||
 +          tg3_flag(tp, USE_JUMBO_BDFLAG))
 +              tg3_flag_set(tp, JUMBO_CAPABLE);
 +
 +      pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
 +                            &pci_state_reg);
 +
 +      if (pci_is_pcie(tp->pdev)) {
 +              u16 lnkctl;
 +
 +              tg3_flag_set(tp, PCI_EXPRESS);
 +
 +              tp->pcie_readrq = 4096;
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +                      tp->pcie_readrq = 2048;
 +
 +              pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 +
 +              pci_read_config_word(tp->pdev,
 +                                   pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
 +                                   &lnkctl);
 +              if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
 +                      if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
 +                          ASIC_REV_5906) {
 +                              tg3_flag_clear(tp, HW_TSO_2);
 +                              tg3_flag_clear(tp, TSO_CAPABLE);
 +                      }
 +                      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 +                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 +                          tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
 +                          tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
 +                              tg3_flag_set(tp, CLKREQ_BUG);
 +              } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
 +                      tg3_flag_set(tp, L1PLLPD_EN);
 +              }
 +      } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
 +              /* BCM5785 devices are effectively PCIe devices, and should
 +               * follow PCIe codepaths, but do not have a PCIe capabilities
 +               * section.
 +               */
 +              tg3_flag_set(tp, PCI_EXPRESS);
 +      } else if (!tg3_flag(tp, 5705_PLUS) ||
 +                 tg3_flag(tp, 5780_CLASS)) {
 +              tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
 +              if (!tp->pcix_cap) {
 +                      dev_err(&tp->pdev->dev,
 +                              "Cannot find PCI-X capability, aborting\n");
 +                      return -EIO;
 +              }
 +
 +              if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
 +                      tg3_flag_set(tp, PCIX_MODE);
 +      }
 +
  +      /* If we have an AMD 762 or VIA K8T800 chipset, reordering of
  +       * mailbox register writes by the host controller can cause
  +       * major trouble.  We read back from
 +       * every mailbox register write to force the writes to be
 +       * posted to the chip in order.
 +       */
 +      if (pci_dev_present(tg3_write_reorder_chipsets) &&
 +          !tg3_flag(tp, PCI_EXPRESS))
 +              tg3_flag_set(tp, MBOX_WRITE_REORDER);
 +
 +      pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
 +                           &tp->pci_cacheline_sz);
 +      pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
 +                           &tp->pci_lat_timer);
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
 +          tp->pci_lat_timer < 64) {
 +              tp->pci_lat_timer = 64;
 +              pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
 +                                    tp->pci_lat_timer);
 +      }
 +
 +      /* Important! -- It is critical that the PCI-X hw workaround
 +       * situation is decided before the first MMIO register access.
 +       */
 +      if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
 +              /* 5700 BX chips need to have their TX producer index
  +               * mailboxes written twice to work around a bug.
 +               */
 +              tg3_flag_set(tp, TXD_MBOX_HWBUG);
 +
 +              /* If we are in PCI-X mode, enable register write workaround.
 +               *
 +               * The workaround is to use indirect register accesses
 +               * for all chip writes not to mailbox registers.
 +               */
 +              if (tg3_flag(tp, PCIX_MODE)) {
 +                      u32 pm_reg;
 +
 +                      tg3_flag_set(tp, PCIX_TARGET_HWBUG);
 +
  +                      /* The chip can have its power management PCI config
 +                       * space registers clobbered due to this bug.
 +                       * So explicitly force the chip into D0 here.
 +                       */
 +                      pci_read_config_dword(tp->pdev,
 +                                            tp->pm_cap + PCI_PM_CTRL,
 +                                            &pm_reg);
 +                      pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
 +                      pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
 +                      pci_write_config_dword(tp->pdev,
 +                                             tp->pm_cap + PCI_PM_CTRL,
 +                                             pm_reg);
 +
 +                      /* Also, force SERR#/PERR# in PCI command. */
 +                      pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
 +                      pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
 +                      pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
 +              }
 +      }
 +
 +      if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
 +              tg3_flag_set(tp, PCI_HIGH_SPEED);
 +      if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
 +              tg3_flag_set(tp, PCI_32BIT);
 +
 +      /* Chip-specific fixup from Broadcom driver */
 +      if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
 +          (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
 +              pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
 +              pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
 +      }
 +
 +      /* Default fast path register access methods */
 +      tp->read32 = tg3_read32;
 +      tp->write32 = tg3_write32;
 +      tp->read32_mbox = tg3_read32;
 +      tp->write32_mbox = tg3_write32;
 +      tp->write32_tx_mbox = tg3_write32;
 +      tp->write32_rx_mbox = tg3_write32;
 +
 +      /* Various workaround register access methods */
 +      if (tg3_flag(tp, PCIX_TARGET_HWBUG))
 +              tp->write32 = tg3_write_indirect_reg32;
 +      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
 +               (tg3_flag(tp, PCI_EXPRESS) &&
 +                tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
 +              /*
 +               * Back to back register writes can cause problems on these
 +               * chips, the workaround is to read back all reg writes
 +               * except those to mailbox regs.
 +               *
 +               * See tg3_write_indirect_reg32().
 +               */
 +              tp->write32 = tg3_write_flush_reg32;
 +      }
 +
 +      if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
 +              tp->write32_tx_mbox = tg3_write32_tx_mbox;
 +              if (tg3_flag(tp, MBOX_WRITE_REORDER))
 +                      tp->write32_rx_mbox = tg3_write_flush_reg32;
 +      }
 +
 +      if (tg3_flag(tp, ICH_WORKAROUND)) {
 +              tp->read32 = tg3_read_indirect_reg32;
 +              tp->write32 = tg3_write_indirect_reg32;
 +              tp->read32_mbox = tg3_read_indirect_mbox;
 +              tp->write32_mbox = tg3_write_indirect_mbox;
 +              tp->write32_tx_mbox = tg3_write_indirect_mbox;
 +              tp->write32_rx_mbox = tg3_write_indirect_mbox;
 +
 +              iounmap(tp->regs);
 +              tp->regs = NULL;
 +
 +              pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
 +              pci_cmd &= ~PCI_COMMAND_MEMORY;
 +              pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
 +      }
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +              tp->read32_mbox = tg3_read32_mbox_5906;
 +              tp->write32_mbox = tg3_write32_mbox_5906;
 +              tp->write32_tx_mbox = tg3_write32_mbox_5906;
 +              tp->write32_rx_mbox = tg3_write32_mbox_5906;
 +      }
 +
 +      if (tp->write32 == tg3_write_indirect_reg32 ||
 +          (tg3_flag(tp, PCIX_MODE) &&
 +           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
 +              tg3_flag_set(tp, SRAM_USE_CONFIG);
 +
 +      /* The memory arbiter has to be enabled in order for SRAM accesses
 +       * to succeed.  Normally on powerup the tg3 chip firmware will make
 +       * sure it is enabled, but other entities such as system netboot
 +       * code might disable it.
 +       */
 +      val = tr32(MEMARB_MODE);
 +      tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
 +
 +      if (tg3_flag(tp, PCIX_MODE)) {
 +              pci_read_config_dword(tp->pdev,
 +                                    tp->pcix_cap + PCI_X_STATUS, &val);
 +              tp->pci_fn = val & 0x7;
 +      } else {
 +              tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
 +      }
 +
 +      /* Get eeprom hw config before calling tg3_set_power_state().
 +       * In particular, the TG3_FLAG_IS_NIC flag must be
 +       * determined before calling tg3_set_power_state() so that
 +       * we know whether or not to switch out of Vaux power.
 +       * When the flag is set, it means that GPIO1 is used for eeprom
 +       * write protect and also implies that it is a LOM where GPIOs
 +       * are not used to switch power.
 +       */
 +      tg3_get_eeprom_hw_cfg(tp);
 +
 +      if (tg3_flag(tp, ENABLE_APE)) {
 +              /* Allow reads and writes to the
 +               * APE register and memory space.
 +               */
 +              pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
 +                               PCISTATE_ALLOW_APE_SHMEM_WR |
 +                               PCISTATE_ALLOW_APE_PSPACE_WR;
 +              pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
 +                                     pci_state_reg);
 +
 +              tg3_ape_lock_init(tp);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 +          tg3_flag(tp, 57765_PLUS))
 +              tg3_flag_set(tp, CPMU_PRESENT);
 +
 +      /* Set up tp->grc_local_ctrl before calling
 +       * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
 +       * will bring 5700's external PHY out of reset.
 +       * It is also used as eeprom write protect on LOMs.
 +       */
 +      tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +          tg3_flag(tp, EEPROM_WRITE_PROT))
 +              tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
 +                                     GRC_LCLCTRL_GPIO_OUTPUT1);
 +      /* Unused GPIO3 must be driven as output on 5752 because there
 +       * are no pull-up resistors on unused GPIO pins.
 +       */
 +      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
 +              tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 +              tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
 +
 +      if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
 +              /* Turn off the debug UART. */
 +              tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
 +              if (tg3_flag(tp, IS_NIC))
 +                      /* Keep VMain power. */
 +                      tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
 +                                            GRC_LCLCTRL_GPIO_OUTPUT0;
 +      }
 +
 +      /* Switch out of Vaux if it is a NIC */
 +      tg3_pwrsrc_switch_to_vmain(tp);
 +
 +      /* Derive initial jumbo mode from MTU assigned in
 +       * ether_setup() via the alloc_etherdev() call
 +       */
 +      if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
 +              tg3_flag_set(tp, JUMBO_RING_ENABLE);
 +
 +      /* Determine WakeOnLan speed to use. */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
 +              tg3_flag_clear(tp, WOL_SPEED_100MB);
 +      } else {
 +              tg3_flag_set(tp, WOL_SPEED_100MB);
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 +              tp->phy_flags |= TG3_PHYFLG_IS_FET;
 +
 +      /* A few boards don't want Ethernet@WireSpeed phy feature */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
 +           (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
 +           (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
 +          (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
 +          (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 +              tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
 +
 +      if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
 +          GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
 +              tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
 +              tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
 +
 +      if (tg3_flag(tp, 5705_PLUS) &&
 +          !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
 +          !tg3_flag(tp, 57765_PLUS)) {
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
 +                      if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
 +                          tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
 +                              tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
 +                      if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
 +                              tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
 +              } else
 +                      tp->phy_flags |= TG3_PHYFLG_BER_BUG;
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 +          GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
 +              tp->phy_otp = tg3_read_otp_phycfg(tp);
 +              if (tp->phy_otp == 0)
 +                      tp->phy_otp = TG3_OTP_DEFAULT;
 +      }
 +
 +      if (tg3_flag(tp, CPMU_PRESENT))
 +              tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
 +      else
 +              tp->mi_mode = MAC_MI_MODE_BASE;
 +
 +      tp->coalesce_mode = 0;
 +      if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
 +          GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
 +              tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
 +
 +      /* Set these bits to enable statistics workaround. */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
 +              tp->coalesce_mode |= HOSTCC_MODE_ATTN;
 +              tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 +              tg3_flag_set(tp, USE_PHYLIB);
 +
 +      err = tg3_mdio_init(tp);
 +      if (err)
 +              return err;
 +
 +      /* Initialize data/descriptor byte/word swapping. */
 +      val = tr32(GRC_MODE);
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
 +                      GRC_MODE_WORD_SWAP_B2HRX_DATA |
 +                      GRC_MODE_B2HRX_ENABLE |
 +                      GRC_MODE_HTX2B_ENABLE |
 +                      GRC_MODE_HOST_STACKUP);
 +      else
 +              val &= GRC_MODE_HOST_STACKUP;
 +
 +      tw32(GRC_MODE, val | tp->grc_mode);
 +
 +      tg3_switch_clocks(tp);
 +
 +      /* Clear this out for sanity. */
 +      tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
 +
 +      pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
 +                            &pci_state_reg);
 +      if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
 +          !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
 +              u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
 +
 +              if (chiprevid == CHIPREV_ID_5701_A0 ||
 +                  chiprevid == CHIPREV_ID_5701_B0 ||
 +                  chiprevid == CHIPREV_ID_5701_B2 ||
 +                  chiprevid == CHIPREV_ID_5701_B5) {
 +                      void __iomem *sram_base;
 +
 +                      /* Write some dummy words into the SRAM status block
 +                       * area, see if it reads back correctly.  If the return
 +                       * value is bad, force enable the PCIX workaround.
 +                       */
 +                      sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
 +
 +                      writel(0x00000000, sram_base);
 +                      writel(0x00000000, sram_base + 4);
 +                      writel(0xffffffff, sram_base + 4);
 +                      if (readl(sram_base) != 0x00000000)
 +                              tg3_flag_set(tp, PCIX_TARGET_HWBUG);
 +              }
 +      }
 +
 +      udelay(50);
 +      tg3_nvram_init(tp);
 +
 +      grc_misc_cfg = tr32(GRC_MISC_CFG);
 +      grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
 +          (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
 +           grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
 +              tg3_flag_set(tp, IS_5788);
 +
 +      if (!tg3_flag(tp, IS_5788) &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
 +              tg3_flag_set(tp, TAGGED_STATUS);
 +      if (tg3_flag(tp, TAGGED_STATUS)) {
 +              tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
 +                                    HOSTCC_MODE_CLRTICK_TXBD);
 +
 +              tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
 +              pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
 +                                     tp->misc_host_ctrl);
 +      }
 +
 +      /* Preserve the APE MAC_MODE bits */
 +      if (tg3_flag(tp, ENABLE_APE))
 +              tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
 +      else
 +              tp->mac_mode = 0;
 +
 +      /* these are limited to 10/100 only */
 +      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
 +           (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
 +          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
 +           tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
 +           (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
 +            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
 +            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
 +          (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
 +           (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
 +            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
 +            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
 +          (tp->phy_flags & TG3_PHYFLG_IS_FET))
 +              tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
 +
 +      err = tg3_phy_probe(tp);
 +      if (err) {
 +              dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
 +              /* ... but do not return immediately ... */
 +              tg3_mdio_fini(tp);
 +      }
 +
 +      tg3_read_vpd(tp);
 +      tg3_read_fw_ver(tp);
 +
 +      if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 +              tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
 +      } else {
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
 +                      tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
 +              else
 +                      tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
 +      }
 +
 +      /* 5700 {AX,BX} chips have a broken status block link
 +       * change bit implementation, so we must use the
 +       * status register in those cases.
 +       */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
 +              tg3_flag_set(tp, USE_LINKCHG_REG);
 +      else
 +              tg3_flag_clear(tp, USE_LINKCHG_REG);
 +
  +      /* The led_ctrl is set during tg3_phy_probe; here we might
 +       * have to force the link status polling mechanism based
 +       * upon subsystem IDs.
 +       */
 +      if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
 +          !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 +              tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
 +              tg3_flag_set(tp, USE_LINKCHG_REG);
 +      }
 +
 +      /* For all SERDES we poll the MAC status register. */
 +      if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 +              tg3_flag_set(tp, POLL_SERDES);
 +      else
 +              tg3_flag_clear(tp, POLL_SERDES);
 +
 +      tp->rx_offset = NET_IP_ALIGN;
 +      tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
 +          tg3_flag(tp, PCIX_MODE)) {
 +              tp->rx_offset = 0;
 +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 +              tp->rx_copy_thresh = ~(u16)0;
 +#endif
 +      }
 +
 +      tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
 +      tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
 +      tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
 +
 +      tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
 +
 +      /* Increment the rx prod index on the rx std ring by at most
  +       * 8 for these chips to work around hw errata.
 +       */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
 +              tp->rx_std_max_post = 8;
 +
 +      if (tg3_flag(tp, ASPM_WORKAROUND))
 +              tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
 +                                   PCIE_PWR_MGMT_L1_THRESH_MSK;
 +
 +      return err;
 +}
 +
 +#ifdef CONFIG_SPARC
 +static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
 +{
 +      struct net_device *dev = tp->dev;
 +      struct pci_dev *pdev = tp->pdev;
 +      struct device_node *dp = pci_device_to_OF_node(pdev);
 +      const unsigned char *addr;
 +      int len;
 +
 +      addr = of_get_property(dp, "local-mac-address", &len);
 +      if (addr && len == 6) {
 +              memcpy(dev->dev_addr, addr, 6);
 +              memcpy(dev->perm_addr, dev->dev_addr, 6);
 +              return 0;
 +      }
 +      return -ENODEV;
 +}
 +
 +static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
 +{
 +      struct net_device *dev = tp->dev;
 +
 +      memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
 +      memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
 +      return 0;
 +}
 +#endif
 +
 +static int __devinit tg3_get_device_address(struct tg3 *tp)
 +{
 +      struct net_device *dev = tp->dev;
 +      u32 hi, lo, mac_offset;
 +      int addr_ok = 0;
 +
 +#ifdef CONFIG_SPARC
 +      if (!tg3_get_macaddr_sparc(tp))
 +              return 0;
 +#endif
 +
 +      mac_offset = 0x7c;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 +          tg3_flag(tp, 5780_CLASS)) {
 +              if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
 +                      mac_offset = 0xcc;
 +              if (tg3_nvram_lock(tp))
 +                      tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
 +              else
 +                      tg3_nvram_unlock(tp);
 +      } else if (tg3_flag(tp, 5717_PLUS)) {
 +              if (tp->pci_fn & 1)
 +                      mac_offset = 0xcc;
 +              if (tp->pci_fn > 1)
 +                      mac_offset += 0x18c;
 +      } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 +              mac_offset = 0x10;
 +
 +      /* First try to get it from MAC address mailbox. */
 +      tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
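  +      /* A value of 0x484b in the upper 16 bits marks a MAC address
  +       * that the bootcode has stored in this mailbox.
  +       */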
 +      if ((hi >> 16) == 0x484b) {
 +              dev->dev_addr[0] = (hi >>  8) & 0xff;
 +              dev->dev_addr[1] = (hi >>  0) & 0xff;
 +
 +              tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
 +              dev->dev_addr[2] = (lo >> 24) & 0xff;
 +              dev->dev_addr[3] = (lo >> 16) & 0xff;
 +              dev->dev_addr[4] = (lo >>  8) & 0xff;
 +              dev->dev_addr[5] = (lo >>  0) & 0xff;
 +
 +              /* Some old bootcode may report a 0 MAC address in SRAM */
 +              addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
 +      }
 +      if (!addr_ok) {
 +              /* Next, try NVRAM. */
 +              if (!tg3_flag(tp, NO_NVRAM) &&
 +                  !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
 +                  !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
 +                      memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
 +                      memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
 +              }
 +              /* Finally just fetch it out of the MAC control regs. */
 +              else {
 +                      hi = tr32(MAC_ADDR_0_HIGH);
 +                      lo = tr32(MAC_ADDR_0_LOW);
 +
 +                      dev->dev_addr[5] = lo & 0xff;
 +                      dev->dev_addr[4] = (lo >> 8) & 0xff;
 +                      dev->dev_addr[3] = (lo >> 16) & 0xff;
 +                      dev->dev_addr[2] = (lo >> 24) & 0xff;
 +                      dev->dev_addr[1] = hi & 0xff;
 +                      dev->dev_addr[0] = (hi >> 8) & 0xff;
 +              }
 +      }
 +
 +      if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 +#ifdef CONFIG_SPARC
 +              if (!tg3_get_default_macaddr_sparc(tp))
 +                      return 0;
 +#endif
 +              return -EINVAL;
 +      }
 +      memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 +      return 0;
 +}
 +
 +#define BOUNDARY_SINGLE_CACHELINE     1
 +#define BOUNDARY_MULTI_CACHELINE      2
 +
 +static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
 +{
 +      int cacheline_size;
 +      u8 byte;
 +      int goal;
 +
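  +      /* PCI_CACHE_LINE_SIZE is programmed in units of 32-bit words;
  +       * a value of zero means it was never set, so fall back to the
  +       * largest (1024-byte) boundary.
  +       */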
 +      pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
 +      if (byte == 0)
 +              cacheline_size = 1024;
 +      else
 +              cacheline_size = (int) byte * 4;
 +
 +      /* On 5703 and later chips, the boundary bits have no
 +       * effect.
 +       */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
 +          !tg3_flag(tp, PCI_EXPRESS))
 +              goto out;
 +
 +#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
 +      goal = BOUNDARY_MULTI_CACHELINE;
 +#else
 +#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
 +      goal = BOUNDARY_SINGLE_CACHELINE;
 +#else
 +      goal = 0;
 +#endif
 +#endif
 +
 +      if (tg3_flag(tp, 57765_PLUS)) {
 +              val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
 +              goto out;
 +      }
 +
 +      if (!goal)
 +              goto out;
 +
 +      /* PCI controllers on most RISC systems tend to disconnect
 +       * when a device tries to burst across a cache-line boundary.
 +       * Therefore, letting tg3 do so just wastes PCI bandwidth.
 +       *
 +       * Unfortunately, for PCI-E there are only limited
 +       * write-side controls for this, and thus for reads
 +       * we will still get the disconnects.  We'll also waste
 +       * these PCI cycles for both read and write for chips
 +       * other than 5700 and 5701 which do not implement the
 +       * boundary bits.
 +       */
 +      if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
 +              switch (cacheline_size) {
 +              case 16:
 +              case 32:
 +              case 64:
 +              case 128:
 +                      if (goal == BOUNDARY_SINGLE_CACHELINE) {
 +                              val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
 +                                      DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
 +                      } else {
 +                              val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
 +                                      DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
 +                      }
 +                      break;
 +
 +              case 256:
 +                      val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
 +                              DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
 +                      break;
 +
 +              default:
 +                      val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
 +                              DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
 +                      break;
 +              }
 +      } else if (tg3_flag(tp, PCI_EXPRESS)) {
 +              switch (cacheline_size) {
 +              case 16:
 +              case 32:
 +              case 64:
 +                      if (goal == BOUNDARY_SINGLE_CACHELINE) {
 +                              val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
 +                              val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
 +                              break;
 +                      }
 +                      /* fallthrough */
 +              case 128:
 +              default:
 +                      val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
 +                      val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
 +                      break;
 +              }
 +      } else {
 +              switch (cacheline_size) {
 +              case 16:
 +                      if (goal == BOUNDARY_SINGLE_CACHELINE) {
 +                              val |= (DMA_RWCTRL_READ_BNDRY_16 |
 +                                      DMA_RWCTRL_WRITE_BNDRY_16);
 +                              break;
 +                      }
 +                      /* fallthrough */
 +              case 32:
 +                      if (goal == BOUNDARY_SINGLE_CACHELINE) {
 +                              val |= (DMA_RWCTRL_READ_BNDRY_32 |
 +                                      DMA_RWCTRL_WRITE_BNDRY_32);
 +                              break;
 +                      }
 +                      /* fallthrough */
 +              case 64:
 +                      if (goal == BOUNDARY_SINGLE_CACHELINE) {
 +                              val |= (DMA_RWCTRL_READ_BNDRY_64 |
 +                                      DMA_RWCTRL_WRITE_BNDRY_64);
 +                              break;
 +                      }
 +                      /* fallthrough */
 +              case 128:
 +                      if (goal == BOUNDARY_SINGLE_CACHELINE) {
 +                              val |= (DMA_RWCTRL_READ_BNDRY_128 |
 +                                      DMA_RWCTRL_WRITE_BNDRY_128);
 +                              break;
 +                      }
 +                      /* fallthrough */
 +              case 256:
 +                      val |= (DMA_RWCTRL_READ_BNDRY_256 |
 +                              DMA_RWCTRL_WRITE_BNDRY_256);
 +                      break;
 +              case 512:
 +                      val |= (DMA_RWCTRL_READ_BNDRY_512 |
 +                              DMA_RWCTRL_WRITE_BNDRY_512);
 +                      break;
 +              case 1024:
 +              default:
 +                      val |= (DMA_RWCTRL_READ_BNDRY_1024 |
 +                              DMA_RWCTRL_WRITE_BNDRY_1024);
 +                      break;
 +              }
 +      }
 +
 +out:
 +      return val;
 +}
 +
 +static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
 +{
 +      struct tg3_internal_buffer_desc test_desc;
 +      u32 sram_dma_descs;
 +      int i, ret;
 +
 +      sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
 +
 +      tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
 +      tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
 +      tw32(RDMAC_STATUS, 0);
 +      tw32(WDMAC_STATUS, 0);
 +
 +      tw32(BUFMGR_MODE, 0);
 +      tw32(FTQ_RESET, 0);
 +
 +      test_desc.addr_hi = ((u64) buf_dma) >> 32;
 +      test_desc.addr_lo = buf_dma & 0xffffffff;
 +      test_desc.nic_mbuf = 0x00002100;
 +      test_desc.len = size;
 +
 +      /*
  +       * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
 +       * the *second* time the tg3 driver was getting loaded after an
 +       * initial scan.
 +       *
 +       * Broadcom tells me:
 +       *   ...the DMA engine is connected to the GRC block and a DMA
 +       *   reset may affect the GRC block in some unpredictable way...
 +       *   The behavior of resets to individual blocks has not been tested.
 +       *
 +       * Broadcom noted the GRC reset will also reset all sub-components.
 +       */
 +      if (to_device) {
 +              test_desc.cqid_sqid = (13 << 8) | 2;
 +
 +              tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
 +              udelay(40);
 +      } else {
 +              test_desc.cqid_sqid = (16 << 8) | 7;
 +
 +              tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
 +              udelay(40);
 +      }
 +      test_desc.flags = 0x00000005;
 +
 +      for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
 +              u32 val;
 +
 +              val = *(((u32 *)&test_desc) + i);
 +              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
 +                                     sram_dma_descs + (i * sizeof(u32)));
 +              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 +      }
 +      pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
 +
 +      if (to_device)
 +              tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
 +      else
 +              tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
 +
 +      ret = -ENODEV;
 +      for (i = 0; i < 40; i++) {
 +              u32 val;
 +
 +              if (to_device)
 +                      val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
 +              else
 +                      val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
 +              if ((val & 0xffff) == sram_dma_descs) {
 +                      ret = 0;
 +                      break;
 +              }
 +
 +              udelay(100);
 +      }
 +
 +      return ret;
 +}
 +
 +#define TEST_BUFFER_SIZE      0x2000
 +
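  +/* Host bridges known to need the conservative 16-byte DMA write
  + * boundary even though they pass the DMA test below.
  + */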
 +static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
 +      { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
 +      { },
 +};
 +
 +static int __devinit tg3_test_dma(struct tg3 *tp)
 +{
 +      dma_addr_t buf_dma;
 +      u32 *buf, saved_dma_rwctrl;
 +      int ret = 0;
 +
 +      buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
 +                               &buf_dma, GFP_KERNEL);
 +      if (!buf) {
 +              ret = -ENOMEM;
 +              goto out_nofree;
 +      }
 +
 +      tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
 +                        (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
 +
 +      tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 +
 +      if (tg3_flag(tp, 57765_PLUS))
 +              goto out;
 +
 +      if (tg3_flag(tp, PCI_EXPRESS)) {
 +              /* DMA read watermark not used on PCIE */
 +              tp->dma_rwctrl |= 0x00180000;
 +      } else if (!tg3_flag(tp, PCIX_MODE)) {
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
 +                      tp->dma_rwctrl |= 0x003f0000;
 +              else
 +                      tp->dma_rwctrl |= 0x003f000f;
 +      } else {
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 +                      u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
 +                      u32 read_water = 0x7;
 +
 +                      /* If the 5704 is behind the EPB bridge, we can
 +                       * do the less restrictive ONE_DMA workaround for
 +                       * better performance.
 +                       */
 +                      if (tg3_flag(tp, 40BIT_DMA_BUG) &&
 +                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
 +                              tp->dma_rwctrl |= 0x8000;
 +                      else if (ccval == 0x6 || ccval == 0x7)
 +                              tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 +
 +                      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
 +                              read_water = 4;
 +                      /* Set bit 23 to enable PCIX hw bug fix */
 +                      tp->dma_rwctrl |=
 +                              (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
 +                              (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
 +                              (1 << 23);
 +              } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
 +                      /* 5780 always in PCIX mode */
 +                      tp->dma_rwctrl |= 0x00144000;
 +              } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 +                      /* 5714 always in PCIX mode */
 +                      tp->dma_rwctrl |= 0x00148000;
 +              } else {
 +                      tp->dma_rwctrl |= 0x001b000f;
 +              }
 +      }
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
 +              tp->dma_rwctrl &= 0xfffffff0;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 +              /* Remove this if it causes problems for some boards. */
 +              tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
 +
 +              /* On 5700/5701 chips, we need to set this bit.
 +               * Otherwise the chip will issue cacheline transactions
 +               * to streamable DMA memory with not all the byte
 +               * enables turned on.  This is an error on several
 +               * RISC PCI controllers, in particular sparc64.
 +               *
 +               * On 5703/5704 chips, this bit has been reassigned
 +               * a different meaning.  In particular, it is used
 +               * on those chips to enable a PCI-X workaround.
 +               */
 +              tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
 +      }
 +
 +      tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 +
 +#if 0
 +      /* Unneeded, already done by tg3_get_invariants.  */
 +      tg3_switch_clocks(tp);
 +#endif
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
 +          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
 +              goto out;
 +
  +      /* It is best to perform the DMA test with maximum write burst size
 +       * to expose the 5700/5701 write DMA bug.
 +       */
 +      saved_dma_rwctrl = tp->dma_rwctrl;
 +      tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
 +      tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 +
 +      while (1) {
 +              u32 *p = buf, i;
 +
 +              for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
 +                      p[i] = i;
 +
 +              /* Send the buffer to the chip. */
 +              ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
 +              if (ret) {
 +                      dev_err(&tp->pdev->dev,
 +                              "%s: Buffer write failed. err = %d\n",
 +                              __func__, ret);
 +                      break;
 +              }
 +
 +#if 0
 +              /* validate data reached card RAM correctly. */
 +              for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
 +                      u32 val;
 +                      tg3_read_mem(tp, 0x2100 + (i*4), &val);
 +                      if (le32_to_cpu(val) != p[i]) {
 +                              dev_err(&tp->pdev->dev,
 +                                      "%s: Buffer corrupted on device! "
 +                                      "(%d != %d)\n", __func__, val, i);
 +                              /* ret = -ENODEV here? */
 +                      }
 +                      p[i] = 0;
 +              }
 +#endif
 +              /* Now read it back. */
 +              ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
 +              if (ret) {
 +                      dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
 +                              "err = %d\n", __func__, ret);
 +                      break;
 +              }
 +
 +              /* Verify it. */
 +              for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
 +                      if (p[i] == i)
 +                              continue;
 +
 +                      if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
 +                          DMA_RWCTRL_WRITE_BNDRY_16) {
 +                              tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
 +                              tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
 +                              tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 +                              break;
 +                      } else {
 +                              dev_err(&tp->pdev->dev,
 +                                      "%s: Buffer corrupted on read back! "
 +                                      "(%d != %d)\n", __func__, p[i], i);
 +                              ret = -ENODEV;
 +                              goto out;
 +                      }
 +              }
 +
 +              if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
 +                      /* Success. */
 +                      ret = 0;
 +                      break;
 +              }
 +      }
 +      if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
 +          DMA_RWCTRL_WRITE_BNDRY_16) {
 +              /* DMA test passed without adjusting DMA boundary,
 +               * now look for chipsets that are known to expose the
 +               * DMA bug without failing the test.
 +               */
 +              if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
 +                      tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
 +                      tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
 +              } else {
 +                      /* Safe to use the calculated DMA boundary. */
 +                      tp->dma_rwctrl = saved_dma_rwctrl;
 +              }
 +
 +              tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 +      }
 +
 +out:
 +      dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
 +out_nofree:
 +      return ret;
 +}
 +
 +static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 +{
 +      if (tg3_flag(tp, 57765_PLUS)) {
 +              tp->bufmgr_config.mbuf_read_dma_low_water =
 +                      DEFAULT_MB_RDMA_LOW_WATER_5705;
 +              tp->bufmgr_config.mbuf_mac_rx_low_water =
 +                      DEFAULT_MB_MACRX_LOW_WATER_57765;
 +              tp->bufmgr_config.mbuf_high_water =
 +                      DEFAULT_MB_HIGH_WATER_57765;
 +
 +              tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
 +                      DEFAULT_MB_RDMA_LOW_WATER_5705;
 +              tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
 +                      DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
 +              tp->bufmgr_config.mbuf_high_water_jumbo =
 +                      DEFAULT_MB_HIGH_WATER_JUMBO_57765;
 +      } else if (tg3_flag(tp, 5705_PLUS)) {
 +              tp->bufmgr_config.mbuf_read_dma_low_water =
 +                      DEFAULT_MB_RDMA_LOW_WATER_5705;
 +              tp->bufmgr_config.mbuf_mac_rx_low_water =
 +                      DEFAULT_MB_MACRX_LOW_WATER_5705;
 +              tp->bufmgr_config.mbuf_high_water =
 +                      DEFAULT_MB_HIGH_WATER_5705;
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 +                      tp->bufmgr_config.mbuf_mac_rx_low_water =
 +                              DEFAULT_MB_MACRX_LOW_WATER_5906;
 +                      tp->bufmgr_config.mbuf_high_water =
 +                              DEFAULT_MB_HIGH_WATER_5906;
 +              }
 +
 +              tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
 +                      DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
 +              tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
 +                      DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
 +              tp->bufmgr_config.mbuf_high_water_jumbo =
 +                      DEFAULT_MB_HIGH_WATER_JUMBO_5780;
 +      } else {
 +              tp->bufmgr_config.mbuf_read_dma_low_water =
 +                      DEFAULT_MB_RDMA_LOW_WATER;
 +              tp->bufmgr_config.mbuf_mac_rx_low_water =
 +                      DEFAULT_MB_MACRX_LOW_WATER;
 +              tp->bufmgr_config.mbuf_high_water =
 +                      DEFAULT_MB_HIGH_WATER;
 +
 +              tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
 +                      DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
 +              tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
 +                      DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
 +              tp->bufmgr_config.mbuf_high_water_jumbo =
 +                      DEFAULT_MB_HIGH_WATER_JUMBO;
 +      }
 +
 +      tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
 +      tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
 +}
 +
 +static char * __devinit tg3_phy_string(struct tg3 *tp)
 +{
 +      switch (tp->phy_id & TG3_PHY_ID_MASK) {
 +      case TG3_PHY_ID_BCM5400:        return "5400";
 +      case TG3_PHY_ID_BCM5401:        return "5401";
 +      case TG3_PHY_ID_BCM5411:        return "5411";
 +      case TG3_PHY_ID_BCM5701:        return "5701";
 +      case TG3_PHY_ID_BCM5703:        return "5703";
 +      case TG3_PHY_ID_BCM5704:        return "5704";
 +      case TG3_PHY_ID_BCM5705:        return "5705";
 +      case TG3_PHY_ID_BCM5750:        return "5750";
 +      case TG3_PHY_ID_BCM5752:        return "5752";
 +      case TG3_PHY_ID_BCM5714:        return "5714";
 +      case TG3_PHY_ID_BCM5780:        return "5780";
 +      case TG3_PHY_ID_BCM5755:        return "5755";
 +      case TG3_PHY_ID_BCM5787:        return "5787";
 +      case TG3_PHY_ID_BCM5784:        return "5784";
 +      case TG3_PHY_ID_BCM5756:        return "5722/5756";
 +      case TG3_PHY_ID_BCM5906:        return "5906";
 +      case TG3_PHY_ID_BCM5761:        return "5761";
 +      case TG3_PHY_ID_BCM5718C:       return "5718C";
 +      case TG3_PHY_ID_BCM5718S:       return "5718S";
 +      case TG3_PHY_ID_BCM57765:       return "57765";
 +      case TG3_PHY_ID_BCM5719C:       return "5719C";
 +      case TG3_PHY_ID_BCM5720C:       return "5720C";
 +      case TG3_PHY_ID_BCM8002:        return "8002/serdes";
 +      case 0:                 return "serdes";
 +      default:                return "unknown";
 +      }
 +}
 +
 +static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
 +{
 +      if (tg3_flag(tp, PCI_EXPRESS)) {
 +              strcpy(str, "PCI Express");
 +              return str;
 +      } else if (tg3_flag(tp, PCIX_MODE)) {
 +              u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
 +
 +              strcpy(str, "PCIX:");
 +
 +              if ((clock_ctrl == 7) ||
 +                  ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
 +                   GRC_MISC_CFG_BOARD_ID_5704CIOBE))
 +                      strcat(str, "133MHz");
 +              else if (clock_ctrl == 0)
 +                      strcat(str, "33MHz");
 +              else if (clock_ctrl == 2)
 +                      strcat(str, "50MHz");
 +              else if (clock_ctrl == 4)
 +                      strcat(str, "66MHz");
 +              else if (clock_ctrl == 6)
 +                      strcat(str, "100MHz");
 +      } else {
 +              strcpy(str, "PCI:");
 +              if (tg3_flag(tp, PCI_HIGH_SPEED))
 +                      strcat(str, "66MHz");
 +              else
 +                      strcat(str, "33MHz");
 +      }
 +      if (tg3_flag(tp, PCI_32BIT))
 +              strcat(str, ":32-bit");
 +      else
 +              strcat(str, ":64-bit");
 +      return str;
 +}
 +
 +static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
 +{
 +      struct pci_dev *peer;
 +      unsigned int func, devnr = tp->pdev->devfn & ~7;
 +
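  +      /* The low three bits of devfn encode the PCI function number;
  +       * masking them off gives function 0 of this slot so we can scan
  +       * every sibling function for the other port.
  +       */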
 +      for (func = 0; func < 8; func++) {
 +              peer = pci_get_slot(tp->pdev->bus, devnr | func);
 +              if (peer && peer != tp->pdev)
 +                      break;
 +              pci_dev_put(peer);
 +      }
  +      /* 5704 can be configured in single-port mode; set peer to
 +       * tp->pdev in that case.
 +       */
 +      if (!peer) {
 +              peer = tp->pdev;
 +              return peer;
 +      }
 +
 +      /*
 +       * We don't need to keep the refcount elevated; there's no way
 +       * to remove one half of this device without removing the other
 +       */
 +      pci_dev_put(peer);
 +
 +      return peer;
 +}
 +
 +static void __devinit tg3_init_coal(struct tg3 *tp)
 +{
 +      struct ethtool_coalesce *ec = &tp->coal;
 +
 +      memset(ec, 0, sizeof(*ec));
 +      ec->cmd = ETHTOOL_GCOALESCE;
 +      ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
 +      ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
 +      ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
 +      ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
 +      ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
 +      ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
 +      ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
 +      ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
 +      ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
 +
 +      if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
 +                               HOSTCC_MODE_CLRTICK_TXBD)) {
 +              ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
 +              ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
 +              ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
 +              ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
 +      }
 +
 +      if (tg3_flag(tp, 5705_PLUS)) {
 +              ec->rx_coalesce_usecs_irq = 0;
 +              ec->tx_coalesce_usecs_irq = 0;
 +              ec->stats_block_coalesce_usecs = 0;
 +      }
 +}
 +
 +static const struct net_device_ops tg3_netdev_ops = {
 +      .ndo_open               = tg3_open,
 +      .ndo_stop               = tg3_close,
 +      .ndo_start_xmit         = tg3_start_xmit,
 +      .ndo_get_stats64        = tg3_get_stats64,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_set_rx_mode        = tg3_set_rx_mode,
 +      .ndo_set_mac_address    = tg3_set_mac_addr,
 +      .ndo_do_ioctl           = tg3_ioctl,
 +      .ndo_tx_timeout         = tg3_tx_timeout,
 +      .ndo_change_mtu         = tg3_change_mtu,
 +      .ndo_fix_features       = tg3_fix_features,
 +      .ndo_set_features       = tg3_set_features,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = tg3_poll_controller,
 +#endif
 +};
 +
 +static int __devinit tg3_init_one(struct pci_dev *pdev,
 +                                const struct pci_device_id *ent)
 +{
 +      struct net_device *dev;
 +      struct tg3 *tp;
 +      int i, err, pm_cap;
 +      u32 sndmbx, rcvmbx, intmbx;
 +      char str[40];
 +      u64 dma_mask, persist_dma_mask;
 +      u32 features = 0;
 +
 +      printk_once(KERN_INFO "%s\n", version);
 +
 +      err = pci_enable_device(pdev);
 +      if (err) {
 +              dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 +              return err;
 +      }
 +
 +      err = pci_request_regions(pdev, DRV_MODULE_NAME);
 +      if (err) {
 +              dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 +              goto err_out_disable_pdev;
 +      }
 +
 +      pci_set_master(pdev);
 +
 +      /* Find power-management capability. */
 +      pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 +      if (pm_cap == 0) {
 +              dev_err(&pdev->dev,
 +                      "Cannot find Power Management capability, aborting\n");
 +              err = -EIO;
 +              goto err_out_free_res;
 +      }
 +
 +      err = pci_set_power_state(pdev, PCI_D0);
 +      if (err) {
 +              dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
 +              goto err_out_free_res;
 +      }
 +
 +      dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
 +      if (!dev) {
 +              dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
 +              err = -ENOMEM;
 +              goto err_out_power_down;
 +      }
 +
 +      SET_NETDEV_DEV(dev, &pdev->dev);
 +
 +      tp = netdev_priv(dev);
 +      tp->pdev = pdev;
 +      tp->dev = dev;
 +      tp->pm_cap = pm_cap;
 +      tp->rx_mode = TG3_DEF_RX_MODE;
 +      tp->tx_mode = TG3_DEF_TX_MODE;
 +
 +      if (tg3_debug > 0)
 +              tp->msg_enable = tg3_debug;
 +      else
 +              tp->msg_enable = TG3_DEF_MSG_ENABLE;
 +
 +      /* The word/byte swap controls here control register access byte
 +       * swapping.  DMA data byte swapping is controlled in the GRC_MODE
 +       * setting below.
 +       */
 +      tp->misc_host_ctrl =
 +              MISC_HOST_CTRL_MASK_PCI_INT |
 +              MISC_HOST_CTRL_WORD_SWAP |
 +              MISC_HOST_CTRL_INDIR_ACCESS |
 +              MISC_HOST_CTRL_PCISTATE_RW;
 +
 +      /* The NONFRM (non-frame) byte/word swap controls take effect
 +       * on descriptor entries, anything which isn't packet data.
 +       *
 +       * The StrongARM chips on the board (one for tx, one for rx)
 +       * are running in big-endian mode.
 +       */
 +      tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
 +                      GRC_MODE_WSWAP_NONFRM_DATA);
 +#ifdef __BIG_ENDIAN
 +      tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 +#endif
 +      spin_lock_init(&tp->lock);
 +      spin_lock_init(&tp->indirect_lock);
 +      INIT_WORK(&tp->reset_task, tg3_reset_task);
 +
 +      tp->regs = pci_ioremap_bar(pdev, BAR_0);
 +      if (!tp->regs) {
 +              dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 +              err = -ENOMEM;
 +              goto err_out_free_dev;
 +      }
 +
 +      if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
 +          tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
 +          tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
 +              tg3_flag_set(tp, ENABLE_APE);
 +              tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
 +              if (!tp->aperegs) {
 +                      dev_err(&pdev->dev,
 +                              "Cannot map APE registers, aborting\n");
 +                      err = -ENOMEM;
 +                      goto err_out_iounmap;
 +              }
 +      }
 +
 +      tp->rx_pending = TG3_DEF_RX_RING_PENDING;
 +      tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
 +
 +      dev->ethtool_ops = &tg3_ethtool_ops;
 +      dev->watchdog_timeo = TG3_TX_TIMEOUT;
 +      dev->netdev_ops = &tg3_netdev_ops;
 +      dev->irq = pdev->irq;
 +
 +      err = tg3_get_invariants(tp);
 +      if (err) {
 +              dev_err(&pdev->dev,
 +                      "Problem fetching invariants of chip, aborting\n");
 +              goto err_out_apeunmap;
 +      }
 +
 +      /* The EPB bridge inside 5714, 5715, and 5780 and any
 +       * device behind the EPB cannot support DMA addresses > 40-bit.
 +       * On 64-bit systems with IOMMU, use 40-bit dma_mask.
 +       * On 64-bit systems without IOMMU, use 64-bit dma_mask and
 +       * do DMA address check in tg3_start_xmit().
 +       */
 +      if (tg3_flag(tp, IS_5788))
 +              persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
 +      else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
 +              persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
 +#ifdef CONFIG_HIGHMEM
 +              dma_mask = DMA_BIT_MASK(64);
 +#endif
 +      } else
 +              persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
 +
 +      /* Configure DMA attributes. */
 +      if (dma_mask > DMA_BIT_MASK(32)) {
 +              err = pci_set_dma_mask(pdev, dma_mask);
 +              if (!err) {
 +                      features |= NETIF_F_HIGHDMA;
 +                      err = pci_set_consistent_dma_mask(pdev,
 +                                                        persist_dma_mask);
 +                      if (err < 0) {
 +                              dev_err(&pdev->dev, "Unable to obtain 64 bit "
 +                                      "DMA for consistent allocations\n");
 +                              goto err_out_apeunmap;
 +                      }
 +              }
 +      }
 +      if (err || dma_mask == DMA_BIT_MASK(32)) {
 +              err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +              if (err) {
 +                      dev_err(&pdev->dev,
 +                              "No usable DMA configuration, aborting\n");
 +                      goto err_out_apeunmap;
 +              }
 +      }
 +
 +      tg3_init_bufmgr_config(tp);
 +
 +      features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 +
 +      /* 5700 B0 chips do not support checksumming correctly due
 +       * to hardware bugs.
 +       */
 +      if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
 +              features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
 +
 +              if (tg3_flag(tp, 5755_PLUS))
 +                      features |= NETIF_F_IPV6_CSUM;
 +      }
 +
 +      /* TSO is on by default on chips that support hardware TSO.
 +       * Firmware TSO on older chips gives lower performance, so it
 +       * is off by default, but can be enabled using ethtool.
 +       */
 +      if ((tg3_flag(tp, HW_TSO_1) ||
 +           tg3_flag(tp, HW_TSO_2) ||
 +           tg3_flag(tp, HW_TSO_3)) &&
 +          (features & NETIF_F_IP_CSUM))
 +              features |= NETIF_F_TSO;
 +      if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
 +              if (features & NETIF_F_IPV6_CSUM)
 +                      features |= NETIF_F_TSO6;
 +              if (tg3_flag(tp, HW_TSO_3) ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 +                  (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 +                   GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 +                      features |= NETIF_F_TSO_ECN;
 +      }
 +
 +      dev->features |= features;
 +      dev->vlan_features |= features;
 +
 +      /*
 +       * Add loopback capability only for a subset of devices that support
 +       * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
 +       * loopback for the remaining devices.
 +       */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
 +          !tg3_flag(tp, CPMU_PRESENT))
 +              /* Add the loopback capability */
 +              features |= NETIF_F_LOOPBACK;
 +
 +      dev->hw_features |= features;
 +
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
 +          !tg3_flag(tp, TSO_CAPABLE) &&
 +          !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
 +              tg3_flag_set(tp, MAX_RXPEND_64);
 +              tp->rx_pending = 63;
 +      }
 +
 +      err = tg3_get_device_address(tp);
 +      if (err) {
 +              dev_err(&pdev->dev,
 +                      "Could not obtain valid ethernet address, aborting\n");
 +              goto err_out_apeunmap;
 +      }
 +
 +      /*
 +       * Reset chip in case UNDI or EFI driver did not shut down DMA;
 +       * the DMA self test will enable WDMAC and we'll see (spurious)
 +       * pending DMA on the PCI bus at that point.
 +       */
 +      if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
 +          (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 +              tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
 +              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +      }
 +
 +      err = tg3_test_dma(tp);
 +      if (err) {
 +              dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
 +              goto err_out_apeunmap;
 +      }
 +
 +      intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
 +      rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
 +      sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
 +      for (i = 0; i < tp->irq_max; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +
 +              tnapi->tp = tp;
 +              tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
 +
 +              tnapi->int_mbox = intmbx;
 +              if (i <= 4)
 +                      intmbx += 0x8;
 +              else
 +                      intmbx += 0x4;
 +
 +              tnapi->consmbox = rcvmbx;
 +              tnapi->prodmbox = sndmbx;
 +
 +              if (i)
 +                      tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
 +              else
 +                      tnapi->coal_now = HOSTCC_MODE_NOW;
 +
 +              if (!tg3_flag(tp, SUPPORT_MSIX))
 +                      break;
 +
 +              /*
 +               * If we support MSIX, we'll be using RSS.  If we're using
 +               * RSS, the first vector only handles link interrupts and the
 +               * remaining vectors handle rx and tx interrupts.  Reuse the
 +               * mailbox values for the next iteration.  The values we setup
 +               * above are still useful for the single vectored mode.
 +               */
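 +              /*
 +               * Illustrative note, derived from the offset arithmetic below
 +               * rather than from the hardware manual: vector 0 handles only
 +               * link interrupts, so the first RSS vector reuses its rcvmbx
 +               * and sndmbx values; after that the receive-return mailbox
 +               * advances by 0x8 per vector while the send producer mailbox
 +               * alternates -0x4/+0xc (0x8 net every two vectors), and the
 +               * interrupt mailboxes are spaced 0x8 apart for the first few
 +               * vectors and 0x4 apart beyond that.
 +               */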
 +              if (!i)
 +                      continue;
 +
 +              rcvmbx += 0x8;
 +
 +              if (sndmbx & 0x4)
 +                      sndmbx -= 0x4;
 +              else
 +                      sndmbx += 0xc;
 +      }
 +
 +      tg3_init_coal(tp);
 +
 +      pci_set_drvdata(pdev, dev);
 +
 +      if (tg3_flag(tp, 5717_PLUS)) {
 +              /* Resume a low-power mode */
 +              tg3_frob_aux_power(tp, false);
 +      }
 +
 +      err = register_netdev(dev);
 +      if (err) {
 +              dev_err(&pdev->dev, "Cannot register net device, aborting\n");
 +              goto err_out_apeunmap;
 +      }
 +
 +      netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
 +                  tp->board_part_number,
 +                  tp->pci_chip_rev_id,
 +                  tg3_bus_string(tp, str),
 +                  dev->dev_addr);
 +
 +      if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 +              struct phy_device *phydev;
 +              phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 +              netdev_info(dev,
 +                          "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
 +                          phydev->drv->name, dev_name(&phydev->dev));
 +      } else {
 +              char *ethtype;
 +
 +              if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 +                      ethtype = "10/100Base-TX";
 +              else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 +                      ethtype = "1000Base-SX";
 +              else
 +                      ethtype = "10/100/1000Base-T";
 +
 +              netdev_info(dev, "attached PHY is %s (%s Ethernet) "
 +                          "(WireSpeed[%d], EEE[%d])\n",
 +                          tg3_phy_string(tp), ethtype,
 +                          (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
 +                          (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
 +      }
 +
 +      netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
 +                  (dev->features & NETIF_F_RXCSUM) != 0,
 +                  tg3_flag(tp, USE_LINKCHG_REG) != 0,
 +                  (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
 +                  tg3_flag(tp, ENABLE_ASF) != 0,
 +                  tg3_flag(tp, TSO_CAPABLE) != 0);
 +      netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
 +                  tp->dma_rwctrl,
 +                  pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
 +                  ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
 +
 +      pci_save_state(pdev);
 +
 +      return 0;
 +
 +err_out_apeunmap:
 +      if (tp->aperegs) {
 +              iounmap(tp->aperegs);
 +              tp->aperegs = NULL;
 +      }
 +
 +err_out_iounmap:
 +      if (tp->regs) {
 +              iounmap(tp->regs);
 +              tp->regs = NULL;
 +      }
 +
 +err_out_free_dev:
 +      free_netdev(dev);
 +
 +err_out_power_down:
 +      pci_set_power_state(pdev, PCI_D3hot);
 +
 +err_out_free_res:
 +      pci_release_regions(pdev);
 +
 +err_out_disable_pdev:
 +      pci_disable_device(pdev);
 +      pci_set_drvdata(pdev, NULL);
 +      return err;
 +}
 +
 +static void __devexit tg3_remove_one(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +
 +      if (dev) {
 +              struct tg3 *tp = netdev_priv(dev);
 +
 +              if (tp->fw)
 +                      release_firmware(tp->fw);
 +
 +              cancel_work_sync(&tp->reset_task);
 +
 +              if (!tg3_flag(tp, USE_PHYLIB)) {
 +                      tg3_phy_fini(tp);
 +                      tg3_mdio_fini(tp);
 +              }
 +
 +              unregister_netdev(dev);
 +              if (tp->aperegs) {
 +                      iounmap(tp->aperegs);
 +                      tp->aperegs = NULL;
 +              }
 +              if (tp->regs) {
 +                      iounmap(tp->regs);
 +                      tp->regs = NULL;
 +              }
 +              free_netdev(dev);
 +              pci_release_regions(pdev);
 +              pci_disable_device(pdev);
 +              pci_set_drvdata(pdev, NULL);
 +      }
 +}
 +
 +#ifdef CONFIG_PM_SLEEP
 +static int tg3_suspend(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct tg3 *tp = netdev_priv(dev);
 +      int err;
 +
 +      if (!netif_running(dev))
 +              return 0;
 +
 +      flush_work_sync(&tp->reset_task);
 +      tg3_phy_stop(tp);
 +      tg3_netif_stop(tp);
 +
 +      del_timer_sync(&tp->timer);
 +
 +      tg3_full_lock(tp, 1);
 +      tg3_disable_ints(tp);
 +      tg3_full_unlock(tp);
 +
 +      netif_device_detach(dev);
 +
 +      tg3_full_lock(tp, 0);
 +      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 +      tg3_flag_clear(tp, INIT_COMPLETE);
 +      tg3_full_unlock(tp);
 +
 +      err = tg3_power_down_prepare(tp);
 +      if (err) {
 +              int err2;
 +
 +              tg3_full_lock(tp, 0);
 +
 +              tg3_flag_set(tp, INIT_COMPLETE);
 +              err2 = tg3_restart_hw(tp, 1);
 +              if (err2)
 +                      goto out;
 +
 +              tp->timer.expires = jiffies + tp->timer_offset;
 +              add_timer(&tp->timer);
 +
 +              netif_device_attach(dev);
 +              tg3_netif_start(tp);
 +
 +out:
 +              tg3_full_unlock(tp);
 +
 +              if (!err2)
 +                      tg3_phy_start(tp);
 +      }
 +
 +      return err;
 +}
 +
 +static int tg3_resume(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct tg3 *tp = netdev_priv(dev);
 +      int err;
 +
 +      if (!netif_running(dev))
 +              return 0;
 +
 +      netif_device_attach(dev);
 +
 +      tg3_full_lock(tp, 0);
 +
 +      tg3_flag_set(tp, INIT_COMPLETE);
 +      err = tg3_restart_hw(tp, 1);
 +      if (err)
 +              goto out;
 +
 +      tp->timer.expires = jiffies + tp->timer_offset;
 +      add_timer(&tp->timer);
 +
 +      tg3_netif_start(tp);
 +
 +out:
 +      tg3_full_unlock(tp);
 +
 +      if (!err)
 +              tg3_phy_start(tp);
 +
 +      return err;
 +}
 +
 +static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
 +#define TG3_PM_OPS (&tg3_pm_ops)
 +
 +#else
 +
 +#define TG3_PM_OPS NULL
 +
 +#endif /* CONFIG_PM_SLEEP */
 +
 +/**
 + * tg3_io_error_detected - called when PCI error is detected
 + * @pdev: Pointer to PCI device
 + * @state: The current pci connection state
 + *
 + * This function is called after a PCI bus error affecting
 + * this device has been detected.
 + */
 +static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 +                                            pci_channel_state_t state)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct tg3 *tp = netdev_priv(netdev);
 +      pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
 +
 +      netdev_info(netdev, "PCI I/O error detected\n");
 +
 +      rtnl_lock();
 +
 +      if (!netif_running(netdev))
 +              goto done;
 +
 +      tg3_phy_stop(tp);
 +
 +      tg3_netif_stop(tp);
 +
 +      del_timer_sync(&tp->timer);
 +      tg3_flag_clear(tp, RESTART_TIMER);
 +
 +      /* Want to make sure that the reset task doesn't run */
 +      cancel_work_sync(&tp->reset_task);
 +      tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 +      tg3_flag_clear(tp, RESTART_TIMER);
 +
 +      netif_device_detach(netdev);
 +
 +      /* Clean up software state, even if MMIO is blocked */
 +      tg3_full_lock(tp, 0);
 +      tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 +      tg3_full_unlock(tp);
 +
 +done:
 +      if (state == pci_channel_io_perm_failure)
 +              err = PCI_ERS_RESULT_DISCONNECT;
 +      else
 +              pci_disable_device(pdev);
 +
 +      rtnl_unlock();
 +
 +      return err;
 +}
 +
 +/**
 + * tg3_io_slot_reset - called after the pci bus has been reset.
 + * @pdev: Pointer to PCI device
 + *
 + * Restart the card from scratch, as if from a cold-boot.
 + * At this point, the card has experienced a hard reset,
 + * followed by fixups by BIOS, and has its config space
 + * set up identically to what it was at cold boot.
 + */
 +static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct tg3 *tp = netdev_priv(netdev);
 +      pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
 +      int err;
 +
 +      rtnl_lock();
 +
 +      if (pci_enable_device(pdev)) {
 +              netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
 +              goto done;
 +      }
 +
 +      pci_set_master(pdev);
 +      pci_restore_state(pdev);
 +      pci_save_state(pdev);
 +
 +      if (!netif_running(netdev)) {
 +              rc = PCI_ERS_RESULT_RECOVERED;
 +              goto done;
 +      }
 +
 +      err = tg3_power_up(tp);
 +      if (err)
 +              goto done;
 +
 +      rc = PCI_ERS_RESULT_RECOVERED;
 +
 +done:
 +      rtnl_unlock();
 +
 +      return rc;
 +}
 +
 +/**
 + * tg3_io_resume - called when traffic can start flowing again.
 + * @pdev: Pointer to PCI device
 + *
 + * This callback is called when the error recovery driver tells
 + * us that it's OK to resume normal operation.
 + */
 +static void tg3_io_resume(struct pci_dev *pdev)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct tg3 *tp = netdev_priv(netdev);
 +      int err;
 +
 +      rtnl_lock();
 +
 +      if (!netif_running(netdev))
 +              goto done;
 +
 +      tg3_full_lock(tp, 0);
 +      tg3_flag_set(tp, INIT_COMPLETE);
 +      err = tg3_restart_hw(tp, 1);
 +      tg3_full_unlock(tp);
 +      if (err) {
 +              netdev_err(netdev, "Cannot restart hardware after reset.\n");
 +              goto done;
 +      }
 +
 +      netif_device_attach(netdev);
 +
 +      tp->timer.expires = jiffies + tp->timer_offset;
 +      add_timer(&tp->timer);
 +
 +      tg3_netif_start(tp);
 +
 +      tg3_phy_start(tp);
 +
 +done:
 +      rtnl_unlock();
 +}
 +
 +static struct pci_error_handlers tg3_err_handler = {
 +      .error_detected = tg3_io_error_detected,
 +      .slot_reset     = tg3_io_slot_reset,
 +      .resume         = tg3_io_resume
 +};
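 +
 +/* Note on ordering (the generic PCI error-recovery flow, not something
 + * tg3-specific): the core invokes .error_detected first, then resets the
 + * slot and calls .slot_reset, and only calls .resume once recovery has
 + * succeeded, which is why tg3_io_resume() is where the timer and NAPI are
 + * restarted.
 + */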
 +
 +static struct pci_driver tg3_driver = {
 +      .name           = DRV_MODULE_NAME,
 +      .id_table       = tg3_pci_tbl,
 +      .probe          = tg3_init_one,
 +      .remove         = __devexit_p(tg3_remove_one),
 +      .err_handler    = &tg3_err_handler,
 +      .driver.pm      = TG3_PM_OPS,
 +};
 +
 +static int __init tg3_init(void)
 +{
 +      return pci_register_driver(&tg3_driver);
 +}
 +
 +static void __exit tg3_cleanup(void)
 +{
 +      pci_unregister_driver(&tg3_driver);
 +}
 +
 +module_init(tg3_init);
 +module_exit(tg3_cleanup);
index f30b96f,0000000..212736b
mode 100644,000000..100644
--- /dev/null
@@@ -1,1764 -1,0 +1,1764 @@@
-               if (i <= cmd->rule_cnt) {
-                       rule_locs[i] = comp->fs.location;
-                       i++;
-               }
 +/*
 + *  drivers/net/gianfar_ethtool.c
 + *
 + *  Gianfar Ethernet Driver
 + *  Ethtool support for Gianfar Enet
 + *  Based on e1000 ethtool support
 + *
 + *  Author: Andy Fleming
 + *  Maintainer: Kumar Gala
 + *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 + *
 + *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 + *
 + *  This software may be used and distributed according to
 + *  the terms of the GNU Public License, Version 2, incorporated herein
 + *  by reference.
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/kernel.h>
 +#include <linux/string.h>
 +#include <linux/errno.h>
 +#include <linux/interrupt.h>
 +#include <linux/init.h>
 +#include <linux/delay.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/skbuff.h>
 +#include <linux/spinlock.h>
 +#include <linux/mm.h>
 +
 +#include <asm/io.h>
 +#include <asm/irq.h>
 +#include <asm/uaccess.h>
 +#include <linux/module.h>
 +#include <linux/crc32.h>
 +#include <asm/types.h>
 +#include <linux/ethtool.h>
 +#include <linux/mii.h>
 +#include <linux/phy.h>
 +#include <linux/sort.h>
 +#include <linux/if_vlan.h>
 +
 +#include "gianfar.h"
 +
 +extern void gfar_start(struct net_device *dev);
 +extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 +
 +#define GFAR_MAX_COAL_USECS 0xffff
 +#define GFAR_MAX_COAL_FRAMES 0xff
 +static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
 +                   u64 * buf);
 +static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
 +static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
 +static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
 +static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
 +static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
 +static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
 +
 +static char stat_gstrings[][ETH_GSTRING_LEN] = {
 +      "rx-dropped-by-kernel",
 +      "rx-large-frame-errors",
 +      "rx-short-frame-errors",
 +      "rx-non-octet-errors",
 +      "rx-crc-errors",
 +      "rx-overrun-errors",
 +      "rx-busy-errors",
 +      "rx-babbling-errors",
 +      "rx-truncated-frames",
 +      "ethernet-bus-error",
 +      "tx-babbling-errors",
 +      "tx-underrun-errors",
 +      "rx-skb-missing-errors",
 +      "tx-timeout-errors",
 +      "tx-rx-64-frames",
 +      "tx-rx-65-127-frames",
 +      "tx-rx-128-255-frames",
 +      "tx-rx-256-511-frames",
 +      "tx-rx-512-1023-frames",
 +      "tx-rx-1024-1518-frames",
 +      "tx-rx-1519-1522-good-vlan",
 +      "rx-bytes",
 +      "rx-packets",
 +      "rx-fcs-errors",
 +      "receive-multicast-packet",
 +      "receive-broadcast-packet",
 +      "rx-control-frame-packets",
 +      "rx-pause-frame-packets",
 +      "rx-unknown-op-code",
 +      "rx-alignment-error",
 +      "rx-frame-length-error",
 +      "rx-code-error",
 +      "rx-carrier-sense-error",
 +      "rx-undersize-packets",
 +      "rx-oversize-packets",
 +      "rx-fragmented-frames",
 +      "rx-jabber-frames",
 +      "rx-dropped-frames",
 +      "tx-byte-counter",
 +      "tx-packets",
 +      "tx-multicast-packets",
 +      "tx-broadcast-packets",
 +      "tx-pause-control-frames",
 +      "tx-deferral-packets",
 +      "tx-excessive-deferral-packets",
 +      "tx-single-collision-packets",
 +      "tx-multiple-collision-packets",
 +      "tx-late-collision-packets",
 +      "tx-excessive-collision-packets",
 +      "tx-total-collision",
 +      "reserved",
 +      "tx-dropped-frames",
 +      "tx-jabber-frames",
 +      "tx-fcs-errors",
 +      "tx-control-frames",
 +      "tx-oversize-frames",
 +      "tx-undersize-frames",
 +      "tx-fragmented-frames",
 +};
 +
 +/* Fill in a buffer with the strings which correspond to the
 + * stats */
 +static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +
 +      if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
 +              memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
 +      else
 +              memcpy(buf, stat_gstrings,
 +                              GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
 +}
 +
 +/* Fill in an array of 64-bit statistics from various sources.
 + * This array will be appended to the end of the ethtool_stats
 + * structure, and returned to user space
 + */
 +static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
 +{
 +      int i;
 +      struct gfar_private *priv = netdev_priv(dev);
 +      struct gfar __iomem *regs = priv->gfargrp[0].regs;
 +      u64 *extra = (u64 *) & priv->extra_stats;
 +
 +      if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
 +              u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
 +              struct gfar_stats *stats = (struct gfar_stats *) buf;
 +
 +              for (i = 0; i < GFAR_RMON_LEN; i++)
 +                      stats->rmon[i] = (u64) gfar_read(&rmon[i]);
 +
 +              for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
 +                      stats->extra[i] = extra[i];
 +      } else
 +              for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
 +                      buf[i] = extra[i];
 +}
 +
 +static int gfar_sset_count(struct net_device *dev, int sset)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +
 +      switch (sset) {
 +      case ETH_SS_STATS:
 +              if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
 +                      return GFAR_STATS_LEN;
 +              else
 +                      return GFAR_EXTRA_STATS_LEN;
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
 +/* Fills in the drvinfo structure with some basic info */
 +static void gfar_gdrvinfo(struct net_device *dev, struct
 +            ethtool_drvinfo *drvinfo)
 +{
 +      strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
 +      strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
 +      strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
 +      strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
 +      drvinfo->regdump_len = 0;
 +      drvinfo->eedump_len = 0;
 +}
 +
 +
 +static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      struct phy_device *phydev = priv->phydev;
 +
 +      if (NULL == phydev)
 +              return -ENODEV;
 +
 +      return phy_ethtool_sset(phydev, cmd);
 +}
 +
 +
 +/* Return the current settings in the ethtool_cmd structure */
 +static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      struct phy_device *phydev = priv->phydev;
 +      struct gfar_priv_rx_q *rx_queue = NULL;
 +      struct gfar_priv_tx_q *tx_queue = NULL;
 +
 +      if (NULL == phydev)
 +              return -ENODEV;
 +      tx_queue = priv->tx_queue[0];
 +      rx_queue = priv->rx_queue[0];
 +
 +      /* etsec-1.7 and older versions have only one pair of txic
 +       * and rxic regs although they support multiple queues */
 +      cmd->maxtxpkt = get_icft_value(tx_queue->txic);
 +      cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
 +
 +      return phy_ethtool_gset(phydev, cmd);
 +}
 +
 +/* Return the length of the register structure */
 +static int gfar_reglen(struct net_device *dev)
 +{
 +      return sizeof (struct gfar);
 +}
 +
 +/* Return a dump of the GFAR register space */
 +static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
 +{
 +      int i;
 +      struct gfar_private *priv = netdev_priv(dev);
 +      u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
 +      u32 *buf = (u32 *) regbuf;
 +
 +      for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
 +              buf[i] = gfar_read(&theregs[i]);
 +}
 +
 +/* Convert microseconds to ethernet clock ticks, which changes
 + * depending on what speed the controller is running at */
 +static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
 +{
 +      unsigned int count;
 +
 +      /* The timer is different, depending on the interface speed */
 +      switch (priv->phydev->speed) {
 +      case SPEED_1000:
 +              count = GFAR_GBIT_TIME;
 +              break;
 +      case SPEED_100:
 +              count = GFAR_100_TIME;
 +              break;
 +      case SPEED_10:
 +      default:
 +              count = GFAR_10_TIME;
 +              break;
 +      }
 +
 +      /* Make sure we return a number greater than 0
 +       * if usecs > 0 */
 +      return (usecs * 1000 + count - 1) / count;
 +}
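 +
 +/* Worked example of the conversion above (a sketch derived from the
 + * formula, with the per-speed tick lengths left symbolic): a request of
 + * usecs = 30 becomes ceil(30 * 1000 / count) ticks, so at SPEED_1000 with
 + * count = GFAR_GBIT_TIME the round-up guarantees that any nonzero request
 + * maps to at least one tick; gfar_ticks2usecs() below performs the inverse,
 + * (ticks * count) / 1000, without rounding up.
 + */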
 +
 +/* Convert ethernet clock ticks to microseconds */
 +static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
 +{
 +      unsigned int count;
 +
 +      /* The timer is different, depending on the interface speed */
 +      switch (priv->phydev->speed) {
 +      case SPEED_1000:
 +              count = GFAR_GBIT_TIME;
 +              break;
 +      case SPEED_100:
 +              count = GFAR_100_TIME;
 +              break;
 +      case SPEED_10:
 +      default:
 +              count = GFAR_10_TIME;
 +              break;
 +      }
 +
 +      /* Make sure we return a number greater than 0 if ticks > 0 */
 +      return (ticks * count) / 1000;
 +}
 +
 +/* Get the coalescing parameters, and put them in the cvals
 + * structure.  */
 +static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      struct gfar_priv_rx_q *rx_queue = NULL;
 +      struct gfar_priv_tx_q *tx_queue = NULL;
 +      unsigned long rxtime;
 +      unsigned long rxcount;
 +      unsigned long txtime;
 +      unsigned long txcount;
 +
 +      if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 +              return -EOPNOTSUPP;
 +
 +      if (NULL == priv->phydev)
 +              return -ENODEV;
 +
 +      rx_queue = priv->rx_queue[0];
 +      tx_queue = priv->tx_queue[0];
 +
 +      rxtime  = get_ictt_value(rx_queue->rxic);
 +      rxcount = get_icft_value(rx_queue->rxic);
 +      txtime  = get_ictt_value(tx_queue->txic);
 +      txcount = get_icft_value(tx_queue->txic);
 +      cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
 +      cvals->rx_max_coalesced_frames = rxcount;
 +
 +      cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
 +      cvals->tx_max_coalesced_frames = txcount;
 +
 +      cvals->use_adaptive_rx_coalesce = 0;
 +      cvals->use_adaptive_tx_coalesce = 0;
 +
 +      cvals->pkt_rate_low = 0;
 +      cvals->rx_coalesce_usecs_low = 0;
 +      cvals->rx_max_coalesced_frames_low = 0;
 +      cvals->tx_coalesce_usecs_low = 0;
 +      cvals->tx_max_coalesced_frames_low = 0;
 +
 +      /* When the packet rate is below pkt_rate_high but above
 +       * pkt_rate_low (both measured in packets per second) the
 +       * normal {rx,tx}_* coalescing parameters are used.
 +       */
 +
 +      /* When the packet rate (measured in packets per second)
 +       * is above pkt_rate_high, the {rx,tx}_*_high parameters are
 +       * used.
 +       */
 +      cvals->pkt_rate_high = 0;
 +      cvals->rx_coalesce_usecs_high = 0;
 +      cvals->rx_max_coalesced_frames_high = 0;
 +      cvals->tx_coalesce_usecs_high = 0;
 +      cvals->tx_max_coalesced_frames_high = 0;
 +
 +      /* How often to do adaptive coalescing packet rate sampling,
 +       * measured in seconds.  Must not be zero.
 +       */
 +      cvals->rate_sample_interval = 0;
 +
 +      return 0;
 +}
 +
 +/* Change the coalescing values.
 + * Both cvals->*_usecs and cvals->*_frames have to be > 0
 + * in order for coalescing to be active
 + */
 +static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      int i = 0;
 +
 +      if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 +              return -EOPNOTSUPP;
 +
 +      /* Set up rx coalescing */
 +      /* As of now, we will enable/disable coalescing for all
 +       * queues together in case of eTSEC2; this will be modified
 +       * along with the ethtool interface */
 +      if ((cvals->rx_coalesce_usecs == 0) ||
 +          (cvals->rx_max_coalesced_frames == 0)) {
 +              for (i = 0; i < priv->num_rx_queues; i++)
 +                      priv->rx_queue[i]->rxcoalescing = 0;
 +      } else {
 +              for (i = 0; i < priv->num_rx_queues; i++)
 +                      priv->rx_queue[i]->rxcoalescing = 1;
 +      }
 +
 +      if (NULL == priv->phydev)
 +              return -ENODEV;
 +
 +      /* Check the bounds of the values */
 +      if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
 +              pr_info("Coalescing is limited to %d microseconds\n",
 +                      GFAR_MAX_COAL_USECS);
 +              return -EINVAL;
 +      }
 +
 +      if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
 +              pr_info("Coalescing is limited to %d frames\n",
 +                      GFAR_MAX_COAL_FRAMES);
 +              return -EINVAL;
 +      }
 +
 +      for (i = 0; i < priv->num_rx_queues; i++) {
 +              priv->rx_queue[i]->rxic = mk_ic_value(
 +                      cvals->rx_max_coalesced_frames,
 +                      gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
 +      }
 +
 +      /* Set up tx coalescing */
 +      if ((cvals->tx_coalesce_usecs == 0) ||
 +          (cvals->tx_max_coalesced_frames == 0)) {
 +              for (i = 0; i < priv->num_tx_queues; i++)
 +                      priv->tx_queue[i]->txcoalescing = 0;
 +      } else {
 +              for (i = 0; i < priv->num_tx_queues; i++)
 +                      priv->tx_queue[i]->txcoalescing = 1;
 +      }
 +
 +      /* Check the bounds of the values */
 +      if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
 +              pr_info("Coalescing is limited to %d microseconds\n",
 +                      GFAR_MAX_COAL_USECS);
 +              return -EINVAL;
 +      }
 +
 +      if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
 +              pr_info("Coalescing is limited to %d frames\n",
 +                      GFAR_MAX_COAL_FRAMES);
 +              return -EINVAL;
 +      }
 +
 +      for (i = 0; i < priv->num_tx_queues; i++) {
 +              priv->tx_queue[i]->txic = mk_ic_value(
 +                      cvals->tx_max_coalesced_frames,
 +                      gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
 +      }
 +
 +      gfar_configure_coalescing(priv, 0xFF, 0xFF);
 +
 +      return 0;
 +}
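 +
 +/* Hedged usage sketch (hypothetical interface name): given the rule above
 + * that both the usecs and frames values must be nonzero for coalescing to
 + * be active, the parameters are typically tuned from userspace with
 + * something like
 + *
 + *   ethtool -C eth0 rx-usecs 30 rx-frames 16 tx-usecs 30 tx-frames 16
 + *
 + * whereas passing 0 for either rx value disables rx coalescing entirely,
 + * as handled at the top of gfar_scoalesce().
 + */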
 +
 +/* Fills in rvals with the current ring parameters.  Currently,
 + * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 + * jumbo are ignored by the driver */
 +static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      struct gfar_priv_tx_q *tx_queue = NULL;
 +      struct gfar_priv_rx_q *rx_queue = NULL;
 +
 +      tx_queue = priv->tx_queue[0];
 +      rx_queue = priv->rx_queue[0];
 +
 +      rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
 +      rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
 +      rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
 +      rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
 +
 +      /* Values changeable by the user.  The valid values are
 +       * in the range 1 to the "*_max_pending" counterpart above.
 +       */
 +      rvals->rx_pending = rx_queue->rx_ring_size;
 +      rvals->rx_mini_pending = rx_queue->rx_ring_size;
 +      rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
 +      rvals->tx_pending = tx_queue->tx_ring_size;
 +}
 +
 +/* Change the current ring parameters, stopping the controller if
 + * necessary so that we don't mess things up while we're in
 + * motion.  We wait for the ring to be clean before reallocating
 + * the rings. */
 +static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      int err = 0, i = 0;
 +
 +      if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
 +              return -EINVAL;
 +
 +      if (!is_power_of_2(rvals->rx_pending)) {
 +              netdev_err(dev, "Ring sizes must be a power of 2\n");
 +              return -EINVAL;
 +      }
 +
 +      if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
 +              return -EINVAL;
 +
 +      if (!is_power_of_2(rvals->tx_pending)) {
 +              netdev_err(dev, "Ring sizes must be a power of 2\n");
 +              return -EINVAL;
 +      }
 +
 +
 +      if (dev->flags & IFF_UP) {
 +              unsigned long flags;
 +
 +              /* Halt TX and RX, and process the frames which
 +               * have already been received */
 +              local_irq_save(flags);
 +              lock_tx_qs(priv);
 +              lock_rx_qs(priv);
 +
 +              gfar_halt(dev);
 +
 +              unlock_rx_qs(priv);
 +              unlock_tx_qs(priv);
 +              local_irq_restore(flags);
 +
 +              for (i = 0; i < priv->num_rx_queues; i++)
 +                      gfar_clean_rx_ring(priv->rx_queue[i],
 +                                      priv->rx_queue[i]->rx_ring_size);
 +
 +              /* Now we take down the rings to rebuild them */
 +              stop_gfar(dev);
 +      }
 +
 +      /* Change the size */
 +      for (i = 0; i < priv->num_rx_queues; i++) {
 +              priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
 +              priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
 +              priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
 +      }
 +
 +      /* Rebuild the rings with the new size */
 +      if (dev->flags & IFF_UP) {
 +              err = startup_gfar(dev);
 +              netif_tx_wake_all_queues(dev);
 +      }
 +      return err;
 +}
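 +
 +/* Hedged usage sketch (hypothetical interface name): because of the
 + * power-of-two check above, a resize request along the lines of
 + *
 + *   ethtool -G eth0 rx 256 tx 256
 + *
 + * passes the validation (subject to GFAR_RX/TX_MAX_RING_SIZE), while a size
 + * such as 500 is rejected with -EINVAL before the interface is ever stopped
 + * and rebuilt.
 + */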
 +
 +int gfar_set_features(struct net_device *dev, u32 features)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      unsigned long flags;
 +      int err = 0, i = 0;
 +      u32 changed = dev->features ^ features;
 +
 +      if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
 +              gfar_vlan_mode(dev, features);
 +
 +      if (!(changed & NETIF_F_RXCSUM))
 +              return 0;
 +
 +      if (dev->flags & IFF_UP) {
 +              /* Halt TX and RX, and process the frames which
 +               * have already been received */
 +              local_irq_save(flags);
 +              lock_tx_qs(priv);
 +              lock_rx_qs(priv);
 +
 +              gfar_halt(dev);
 +
 +              unlock_tx_qs(priv);
 +              unlock_rx_qs(priv);
 +              local_irq_restore(flags);
 +
 +              for (i = 0; i < priv->num_rx_queues; i++)
 +                      gfar_clean_rx_ring(priv->rx_queue[i],
 +                                      priv->rx_queue[i]->rx_ring_size);
 +
 +              /* Now we take down the rings to rebuild them */
 +              stop_gfar(dev);
 +
 +              dev->features = features;
 +
 +              err = startup_gfar(dev);
 +              netif_tx_wake_all_queues(dev);
 +      }
 +      return err;
 +}
 +
 +static uint32_t gfar_get_msglevel(struct net_device *dev)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      return priv->msg_enable;
 +}
 +
 +static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      priv->msg_enable = data;
 +}
 +
 +#ifdef CONFIG_PM
 +static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +
 +      if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
 +              wol->supported = WAKE_MAGIC;
 +              wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
 +      } else {
 +              wol->supported = wol->wolopts = 0;
 +      }
 +}
 +
 +static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      unsigned long flags;
 +
 +      if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
 +          wol->wolopts != 0)
 +              return -EINVAL;
 +
 +      if (wol->wolopts & ~WAKE_MAGIC)
 +              return -EINVAL;
 +
 +      device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
 +
 +      spin_lock_irqsave(&priv->bflock, flags);
 +      priv->wol_en =  !!device_may_wakeup(&dev->dev);
 +      spin_unlock_irqrestore(&priv->bflock, flags);
 +
 +      return 0;
 +}
 +#endif
 +
 +static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
 +{
 +      u32 fcr = 0x0, fpr = FPR_FILER_MASK;
 +
 +      if (ethflow & RXH_L2DA) {
 +              fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
 +                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
 +              priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 +              priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 +              gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 +              priv->cur_filer_idx = priv->cur_filer_idx - 1;
 +
 +              fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
 +                              RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
 +              priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 +              priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 +              gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 +              priv->cur_filer_idx = priv->cur_filer_idx - 1;
 +      }
 +
 +      if (ethflow & RXH_VLAN) {
 +              fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 +                              RQFCR_AND | RQFCR_HASHTBL_0;
 +              gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 +              priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 +              priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 +              priv->cur_filer_idx = priv->cur_filer_idx - 1;
 +      }
 +
 +      if (ethflow & RXH_IP_SRC) {
 +              fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 +                      RQFCR_AND | RQFCR_HASHTBL_0;
 +              priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 +              priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 +              gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 +              priv->cur_filer_idx = priv->cur_filer_idx - 1;
 +      }
 +
 +      if (ethflow & (RXH_IP_DST)) {
 +              fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 +                      RQFCR_AND | RQFCR_HASHTBL_0;
 +              priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 +              priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 +              gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 +              priv->cur_filer_idx = priv->cur_filer_idx - 1;
 +      }
 +
 +      if (ethflow & RXH_L3_PROTO) {
 +              fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 +                      RQFCR_AND | RQFCR_HASHTBL_0;
 +              priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 +              priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 +              gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 +              priv->cur_filer_idx = priv->cur_filer_idx - 1;
 +      }
 +
 +      if (ethflow & RXH_L4_B_0_1) {
 +              fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 +                      RQFCR_AND | RQFCR_HASHTBL_0;
 +              priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 +              priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 +              gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 +              priv->cur_filer_idx = priv->cur_filer_idx - 1;
 +      }
 +
 +      if (ethflow & RXH_L4_B_2_3) {
 +              fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 +                      RQFCR_AND | RQFCR_HASHTBL_0;
 +              priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 +              priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 +              gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 +              priv->cur_filer_idx = priv->cur_filer_idx - 1;
 +      }
 +}
 +
 +static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
 +{
 +      unsigned int last_rule_idx = priv->cur_filer_idx;
 +      unsigned int cmp_rqfpr;
 +      unsigned int *local_rqfpr;
 +      unsigned int *local_rqfcr;
 +      int i = 0x0, k = 0x0;
 +      int j = MAX_FILER_IDX, l = 0x0;
 +      int ret = 1;
 +
 +      local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
 +              GFP_KERNEL);
 +      local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
 +              GFP_KERNEL);
 +      if (!local_rqfpr || !local_rqfcr) {
 +              pr_err("Out of memory\n");
 +              ret = 0;
 +              goto err;
 +      }
 +
 +      switch (class) {
 +      case TCP_V4_FLOW:
 +              cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
 +              break;
 +      case UDP_V4_FLOW:
 +              cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
 +              break;
 +      case TCP_V6_FLOW:
 +              cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
 +              break;
 +      case UDP_V6_FLOW:
 +              cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
 +              break;
 +      default:
 +              pr_err("Right now this class is not supported\n");
 +              ret = 0;
 +              goto err;
 +      }
 +
 +      for (i = 0; i < MAX_FILER_IDX + 1; i++) {
 +              local_rqfpr[j] = priv->ftp_rqfpr[i];
 +              local_rqfcr[j] = priv->ftp_rqfcr[i];
 +              j--;
 +              if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
 +                      RQFCR_CLE |RQFCR_AND)) &&
 +                      (priv->ftp_rqfpr[i] == cmp_rqfpr))
 +                      break;
 +      }
 +
 +      if (i == MAX_FILER_IDX + 1) {
 +              pr_err("No parse rule found, can't create hash rules\n");
 +              ret = 0;
 +              goto err;
 +      }
 +
 +      /* If a match was found, it marks the start of a cluster rule;
 +       * if one was already programmed, we need to overwrite those rules
 +       */
 +      for (l = i+1; l < MAX_FILER_IDX; l++) {
 +              if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
 +                      !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
 +                      priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
 +                              RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
 +                      priv->ftp_rqfpr[l] = FPR_FILER_MASK;
 +                      gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
 +                              priv->ftp_rqfpr[l]);
 +                      break;
 +              }
 +
 +              if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
 +                      (priv->ftp_rqfcr[l] & RQFCR_AND))
 +                      continue;
 +              else {
 +                      local_rqfpr[j] = priv->ftp_rqfpr[l];
 +                      local_rqfcr[j] = priv->ftp_rqfcr[l];
 +                      j--;
 +              }
 +      }
 +
 +      priv->cur_filer_idx = l - 1;
 +      last_rule_idx = l;
 +
 +      /* hash rules */
 +      ethflow_to_filer_rules(priv, ethflow);
 +
 +      /* Write back the popped out rules again */
 +      for (k = j+1; k < MAX_FILER_IDX; k++) {
 +              priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
 +              priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
 +              gfar_write_filer(priv, priv->cur_filer_idx,
 +                              local_rqfcr[k], local_rqfpr[k]);
 +              if (!priv->cur_filer_idx)
 +                      break;
 +              priv->cur_filer_idx = priv->cur_filer_idx - 1;
 +      }
 +
 +err:
 +      kfree(local_rqfcr);
 +      kfree(local_rqfpr);
 +      return ret;
 +}
 +
 +static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
 +{
 +      /* write the filer rules here */
 +      if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
 +              return -EINVAL;
 +
 +      return 0;
 +}
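 +
 +/* Hedged usage sketch (hypothetical interface name): the ethflow bits
 + * consumed above correspond to a userspace request along the lines of
 + *
 + *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
 + *
 + * i.e. hash TCP/IPv4 flows on source IP (RXH_IP_SRC), destination IP
 + * (RXH_IP_DST) and both halves of the L4 header (RXH_L4_B_0_1,
 + * RXH_L4_B_2_3), which ethflow_to_filer_rules() turns into hash-table
 + * filer entries.
 + */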
 +
 +static int gfar_check_filer_hardware(struct gfar_private *priv)
 +{
 +      struct gfar __iomem *regs = NULL;
 +      u32 i;
 +
 +      regs = priv->gfargrp[0].regs;
 +
 +      /* Check if we are in FIFO mode */
 +      i = gfar_read(&regs->ecntrl);
 +      i &= ECNTRL_FIFM;
 +      if (i == ECNTRL_FIFM) {
 +              netdev_notice(priv->ndev, "Interface in FIFO mode\n");
 +              i = gfar_read(&regs->rctrl);
 +              i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
 +              if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
 +                      netdev_info(priv->ndev,
 +                                      "Receive Queue Filtering enabled\n");
 +              } else {
 +                      netdev_warn(priv->ndev,
 +                                      "Receive Queue Filtering disabled\n");
 +                      return -EOPNOTSUPP;
 +              }
 +      }
 +      /* Or in standard mode */
 +      else {
 +              i = gfar_read(&regs->rctrl);
 +              i &= RCTRL_PRSDEP_MASK;
 +              if (i == RCTRL_PRSDEP_MASK) {
 +                      netdev_info(priv->ndev,
 +                                      "Receive Queue Filtering enabled\n");
 +              } else {
 +                      netdev_warn(priv->ndev,
 +                                      "Receive Queue Filtering disabled\n");
 +                      return -EOPNOTSUPP;
 +              }
 +      }
 +
 +      /* Sets the properties for the arbitrary filer rule
 +       * to the first 4 Layer 4 bytes */
 +      gfar_write(&regs->rbifx, 0xC0C1C2C3);
 +      return 0;
 +}
 +
 +static int gfar_comp_asc(const void *a, const void *b)
 +{
 +      return memcmp(a, b, 4);
 +}
 +
 +static int gfar_comp_desc(const void *a, const void *b)
 +{
 +      return -memcmp(a, b, 4);
 +}
 +
 +static void gfar_swap(void *a, void *b, int size)
 +{
 +      u32 *_a = a;
 +      u32 *_b = b;
 +
 +      swap(_a[0], _b[0]);
 +      swap(_a[1], _b[1]);
 +      swap(_a[2], _b[2]);
 +      swap(_a[3], _b[3]);
 +}
 +
 +/* Write a mask to filer cache */
 +static void gfar_set_mask(u32 mask, struct filer_table *tab)
 +{
 +      tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
 +      tab->fe[tab->index].prop = mask;
 +      tab->index++;
 +}
 +
 +/* Sets parse bits (e.g. IP or TCP) */
 +static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
 +{
 +      gfar_set_mask(mask, tab);
 +      tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
 +                      | RQFCR_AND;
 +      tab->fe[tab->index].prop = value;
 +      tab->index++;
 +}
 +
 +static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
 +              struct filer_table *tab)
 +{
 +      gfar_set_mask(mask, tab);
 +      tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
 +      tab->fe[tab->index].prop = value;
 +      tab->index++;
 +}
 +
 +/*
 + * Sets a tuple of value and mask of type flag.
 + * Example:
 + * IP-Src = 10.0.0.0/255.0.0.0
 + * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 + *
 + * Ethtool gives us value=0 and mask=~0 for a don't-care tuple;
 + * for a don't-care mask it gives us 0.
 + *
 + * The don't-care check and the mask adjustment for mask=0 are done for VLAN
 + * and MAC entries at an upper level (due to missing information on this
 + * level). Those entries can be discarded if they are value=0 and mask=0.
 + *
 + * Furthermore, all masks are one-padded for better hardware efficiency.
 + */
 +static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
 +              struct filer_table *tab)
 +{
 +      switch (flag) {
 +              /* 3bit */
 +      case RQFCR_PID_PRI:
 +              if (!(value | mask))
 +                      return;
 +              mask |= RQFCR_PID_PRI_MASK;
 +              break;
 +              /* 8bit */
 +      case RQFCR_PID_L4P:
 +      case RQFCR_PID_TOS:
 +              if (!~(mask | RQFCR_PID_L4P_MASK))
 +                      return;
 +              if (!mask)
 +                      mask = ~0;
 +              else
 +                      mask |= RQFCR_PID_L4P_MASK;
 +              break;
 +              /* 12bit */
 +      case RQFCR_PID_VID:
 +              if (!(value | mask))
 +                      return;
 +              mask |= RQFCR_PID_VID_MASK;
 +              break;
 +              /* 16bit */
 +      case RQFCR_PID_DPT:
 +      case RQFCR_PID_SPT:
 +      case RQFCR_PID_ETY:
 +              if (!~(mask | RQFCR_PID_PORT_MASK))
 +                      return;
 +              if (!mask)
 +                      mask = ~0;
 +              else
 +                      mask |= RQFCR_PID_PORT_MASK;
 +              break;
 +              /* 24bit */
 +      case RQFCR_PID_DAH:
 +      case RQFCR_PID_DAL:
 +      case RQFCR_PID_SAH:
 +      case RQFCR_PID_SAL:
 +              if (!(value | mask))
 +                      return;
 +              mask |= RQFCR_PID_MAC_MASK;
 +              break;
 +              /* for all real 32bit masks */
 +      default:
 +              if (!~mask)
 +                      return;
 +              if (!mask)
 +                      mask = ~0;
 +              break;
 +      }
 +      gfar_set_general_attribute(value, mask, flag, tab);
 +}
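 +
 +/* Illustration derived from the helpers above (the values are symbolic, not
 + * taken from a register manual): a call such as
 + *
 + *   gfar_set_attribute(5, 0, RQFCR_PID_VID, tab);
 + *
 + * first widens the all-zero mask with RQFCR_PID_VID_MASK and then, via
 + * gfar_set_general_attribute(), emits two consecutive cache entries: an
 + * AND'ed exact-match mask entry followed by the VID value entry itself.
 + */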
 +
 +/* Translates value and mask for UDP, TCP or SCTP */
 +static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
 +              struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
 +{
 +      gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
 +      gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
 +      gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
 +      gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
 +      gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
 +}
 +
 +/* Translates value and mask for RAW-IP4 */
 +static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
 +              struct ethtool_usrip4_spec *mask, struct filer_table *tab)
 +{
 +      gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
 +      gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
 +      gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
 +      gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
 +      gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
 +                      tab);
 +
 +}
 +
 +/* Translates value and mask for ETHER spec */
 +static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
 +              struct filer_table *tab)
 +{
 +      u32 upper_temp_mask = 0;
 +      u32 lower_temp_mask = 0;
 +      /* Source address */
 +      if (!is_broadcast_ether_addr(mask->h_source)) {
 +
 +              if (is_zero_ether_addr(mask->h_source)) {
 +                      upper_temp_mask = 0xFFFFFFFF;
 +                      lower_temp_mask = 0xFFFFFFFF;
 +              } else {
 +                      upper_temp_mask = mask->h_source[0] << 16
 +                                      | mask->h_source[1] << 8
 +                                      | mask->h_source[2];
 +                      lower_temp_mask = mask->h_source[3] << 16
 +                                      | mask->h_source[4] << 8
 +                                      | mask->h_source[5];
 +              }
 +              /* Upper 24bit */
 +              gfar_set_attribute(
 +                              value->h_source[0] << 16 | value->h_source[1]
 +                                              << 8 | value->h_source[2],
 +                              upper_temp_mask, RQFCR_PID_SAH, tab);
 +              /* And the same for the lower part */
 +              gfar_set_attribute(
 +                              value->h_source[3] << 16 | value->h_source[4]
 +                                              << 8 | value->h_source[5],
 +                              lower_temp_mask, RQFCR_PID_SAL, tab);
 +      }
 +      /* Destination address */
 +      if (!is_broadcast_ether_addr(mask->h_dest)) {
 +
 +              /* Special case: destination is the limited broadcast address */
 +              if ((is_broadcast_ether_addr(value->h_dest)
 +                              && is_zero_ether_addr(mask->h_dest))) {
 +                      gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
 +              } else {
 +
 +                      if (is_zero_ether_addr(mask->h_dest)) {
 +                              upper_temp_mask = 0xFFFFFFFF;
 +                              lower_temp_mask = 0xFFFFFFFF;
 +                      } else {
 +                              upper_temp_mask = mask->h_dest[0] << 16
 +                                              | mask->h_dest[1] << 8
 +                                              | mask->h_dest[2];
 +                              lower_temp_mask = mask->h_dest[3] << 16
 +                                              | mask->h_dest[4] << 8
 +                                              | mask->h_dest[5];
 +                      }
 +
 +                      /* Upper 24bit */
 +                      gfar_set_attribute(
 +                                      value->h_dest[0] << 16
 +                                                      | value->h_dest[1] << 8
 +                                                      | value->h_dest[2],
 +                                      upper_temp_mask, RQFCR_PID_DAH, tab);
 +                      /* And the same for the lower part */
 +                      gfar_set_attribute(
 +                                      value->h_dest[3] << 16
 +                                                      | value->h_dest[4] << 8
 +                                                      | value->h_dest[5],
 +                                      lower_temp_mask, RQFCR_PID_DAL, tab);
 +              }
 +      }
 +
 +      gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
 +
 +}
 +
 +/* Convert a rule to binary filter format of gianfar */
 +static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
 +              struct filer_table *tab)
 +{
 +      u32 vlan = 0, vlan_mask = 0;
 +      u32 id = 0, id_mask = 0;
 +      u32 cfi = 0, cfi_mask = 0;
 +      u32 prio = 0, prio_mask = 0;
 +
 +      u32 old_index = tab->index;
 +
 +      /* Check if vlan is wanted */
 +      if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
 +              if (!rule->m_ext.vlan_tci)
 +                      rule->m_ext.vlan_tci = 0xFFFF;
 +
 +              vlan = RQFPR_VLN;
 +              vlan_mask = RQFPR_VLN;
 +
 +              /* Separate the fields */
 +              id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
 +              id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
 +              cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
 +              cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
 +              prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 +              prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 +
 +              if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
 +                      vlan |= RQFPR_CFI;
 +                      vlan_mask |= RQFPR_CFI;
 +              } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
 +                      vlan_mask |= RQFPR_CFI;
 +              }
 +      }
 +
 +      switch (rule->flow_type & ~FLOW_EXT) {
 +      case TCP_V4_FLOW:
 +              gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
 +                              RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
 +              gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
 +                              &rule->m_u.tcp_ip4_spec, tab);
 +              break;
 +      case UDP_V4_FLOW:
 +              gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
 +                              RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
 +              gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
 +                              &rule->m_u.udp_ip4_spec, tab);
 +              break;
 +      case SCTP_V4_FLOW:
 +              gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
 +                              tab);
 +              gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
 +              gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
 +                              (struct ethtool_tcpip4_spec *) &rule->m_u, tab);
 +              break;
 +      case IP_USER_FLOW:
 +              gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
 +                              tab);
 +              gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
 +                              (struct ethtool_usrip4_spec *) &rule->m_u, tab);
 +              break;
 +      case ETHER_FLOW:
 +              if (vlan)
 +                      gfar_set_parse_bits(vlan, vlan_mask, tab);
 +              gfar_set_ether((struct ethhdr *) &rule->h_u,
 +                              (struct ethhdr *) &rule->m_u, tab);
 +              break;
 +      default:
 +              return -1;
 +      }
 +
 +      /* Set the VLAN attributes at the end */
 +      if (vlan) {
 +              gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
 +              gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
 +      }
 +
 +      /* If there has been nothing written till now, it must be a default */
 +      if (tab->index == old_index) {
 +              gfar_set_mask(0xFFFFFFFF, tab);
 +              tab->fe[tab->index].ctrl = 0x20;
 +              tab->fe[tab->index].prop = 0x0;
 +              tab->index++;
 +      }
 +
 +      /* Remove last AND */
 +      tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
 +
 +      /* Specify which queue to use or to drop */
 +      if (rule->ring_cookie == RX_CLS_FLOW_DISC)
 +              tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
 +      else
 +              tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
 +
 +      /* Only entries that are big enough can be clustered */
 +      if (tab->index > (old_index + 2)) {
 +              tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
 +              tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
 +      }
 +
 +      /* In rare cases the cache can be full while there is free space in hw */
 +      if (tab->index > MAX_FILER_CACHE_IDX - 1)
 +              return -EBUSY;
 +
 +      return 0;
 +}
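/*
 * Illustrative sketch -- not part of this diff, values invented. It shows the
 * kind of ethtool_rx_flow_spec rule that gfar_convert_to_filer() consumes.
 * In the driver the rule arrives via ETHTOOL_SRXCLSRLINS (see gfar_set_nfc()
 * below) and its masks are first normalized by gfar_invert_masks().
 */
static int example_convert_one_rule(void)
{
	struct filer_table *tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	struct ethtool_rx_flow_spec rule = {
		.flow_type   = TCP_V4_FLOW,
		.ring_cookie = 1,	/* deliver matches to RX queue 1 */
		.location    = 0,
	};
	int ret;

	if (!tab)
		return -ENOMEM;

	/* Match TCP destination port 80; mask fields (m_u) are omitted here */
	rule.h_u.tcp_ip4_spec.pdst = htons(80);

	ret = gfar_convert_to_filer(&rule, tab);
	if (ret == 0)
		pr_debug("rule compiled into %u filer entries\n", tab->index);

	kfree(tab);
	return ret;
}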
 +
 +/* Copy 'size' filer entries */
 +static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
 +              struct gfar_filer_entry src[0], s32 size)
 +{
 +      while (size > 0) {
 +              size--;
 +              dst[size].ctrl = src[size].ctrl;
 +              dst[size].prop = src[size].prop;
 +      }
 +}
 +
 +/* Delete the contents of the filer-table between start and end
 + * and collapse them */
 +static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
 +{
 +      int length;
 +      if (end > MAX_FILER_CACHE_IDX || end < begin)
 +              return -EINVAL;
 +
 +      end++;
 +      length = end - begin;
 +
 +      /* Copy */
 +      while (end < tab->index) {
 +              tab->fe[begin].ctrl = tab->fe[end].ctrl;
 +              tab->fe[begin++].prop = tab->fe[end++].prop;
 +
 +      }
 +      /* Fill up with don't cares */
 +      while (begin < tab->index) {
 +              tab->fe[begin].ctrl = 0x60;
 +              tab->fe[begin].prop = 0xFFFFFFFF;
 +              begin++;
 +      }
 +
 +      tab->index -= length;
 +      return 0;
 +}
 +
 +/* Make space at the wanted location */
 +static int gfar_expand_filer_entries(u32 begin, u32 length,
 +              struct filer_table *tab)
 +{
 +      if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin
 +                      > MAX_FILER_CACHE_IDX)
 +              return -EINVAL;
 +
 +      gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
 +                      tab->index - length + 1);
 +
 +      tab->index += length;
 +      return 0;
 +}
 +
 +static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
 +{
 +      for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
 +              if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
 +                              == (RQFCR_AND | RQFCR_CLE))
 +                      return start;
 +      }
 +      return -1;
 +}
 +
 +static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
 +{
 +      for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
 +              if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
 +                              == (RQFCR_CLE))
 +                      return start;
 +      }
 +      return -1;
 +}
 +
 +/*
 + * Uses the hardware's clustering option to reduce
 + * the number of filer table entries
 + */
 +static void gfar_cluster_filer(struct filer_table *tab)
 +{
 +      s32 i = -1, j, iend, jend;
 +
 +      while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
 +              j = i;
 +              while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
 +                      /*
 +                       * The cluster entry itself and the previous one
 +                       * (a mask) must be identical!
 +                       */
 +                      if (tab->fe[i].ctrl != tab->fe[j].ctrl)
 +                              break;
 +                      if (tab->fe[i].prop != tab->fe[j].prop)
 +                              break;
 +                      if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
 +                              break;
 +                      if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
 +                              break;
 +                      iend = gfar_get_next_cluster_end(i, tab);
 +                      jend = gfar_get_next_cluster_end(j, tab);
 +                      if (jend == -1 || iend == -1)
 +                              break;
 +                      /*
 +                       * First we make some free space where our cluster
 +                       * element should be. Then we copy it there and finally
 +                       * delete it from its old location.
 +                       */
 +
 +                      if (gfar_expand_filer_entries(iend, (jend - j), tab)
 +                                      == -EINVAL)
 +                              break;
 +
 +                      gfar_copy_filer_entries(&(tab->fe[iend + 1]),
 +                                      &(tab->fe[jend + 1]), jend - j);
 +
 +                      if (gfar_trim_filer_entries(jend - 1,
 +                                      jend + (jend - j), tab) == -EINVAL)
 +                              return;
 +
 +                      /* Mask out cluster bit */
 +                      tab->fe[iend].ctrl &= ~(RQFCR_CLE);
 +              }
 +      }
 +}
 +
 +/* Swaps the masked bits of a1<>a2 and b1<>b2 */
 +static void gfar_swap_bits(struct gfar_filer_entry *a1,
 +              struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
 +              struct gfar_filer_entry *b2, u32 mask)
 +{
 +      u32 temp[4];
 +      temp[0] = a1->ctrl & mask;
 +      temp[1] = a2->ctrl & mask;
 +      temp[2] = b1->ctrl & mask;
 +      temp[3] = b2->ctrl & mask;
 +
 +      a1->ctrl &= ~mask;
 +      a2->ctrl &= ~mask;
 +      b1->ctrl &= ~mask;
 +      b2->ctrl &= ~mask;
 +
 +      a1->ctrl |= temp[1];
 +      a2->ctrl |= temp[0];
 +      b1->ctrl |= temp[3];
 +      b2->ctrl |= temp[2];
 +}
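/*
 * Worked example (invented values, not part of this diff): with mask = 0x0F00,
 * a1->ctrl = 0x1234 and a2->ctrl = 0x5678, gfar_swap_bits() exchanges only the
 * masked nibble, giving a1->ctrl = 0x1634 and a2->ctrl = 0x5278; b1/b2 are
 * swapped the same way.
 */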
 +
 +/*
 + * Generate a list of mask values, each with its start and end of
 + * validity and a block number marking the parts that belong
 + * together (glued by ANDs), in mask_table
 + */
 +static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
 +              struct filer_table *tab)
 +{
 +      u32 i, and_index = 0, block_index = 1;
 +
 +      for (i = 0; i < tab->index; i++) {
 +
 +              /* LSByte of control = 0 sets a mask */
 +              if (!(tab->fe[i].ctrl & 0xF)) {
 +                      mask_table[and_index].mask = tab->fe[i].prop;
 +                      mask_table[and_index].start = i;
 +                      mask_table[and_index].block = block_index;
 +                      if (and_index >= 1)
 +                              mask_table[and_index - 1].end = i - 1;
 +                      and_index++;
 +              }
 +              /* Cluster starts and ends are put into separate blocks because
 +               * they must hold their position */
 +              if (tab->fe[i].ctrl & RQFCR_CLE)
 +                      block_index++;
 +              /* A cleared AND bit indicates the end of a dependent block */
 +              if (!(tab->fe[i].ctrl & RQFCR_AND))
 +                      block_index++;
 +
 +      }
 +
 +      mask_table[and_index - 1].end = i - 1;
 +
 +      return and_index;
 +}
 +
 +/*
 + * Sorts the entries of mask_table by the values of the masks.
 + * Important: The 0xFF80 flags of the first and last entry of a
 + * block must hold their position (which queue, CLusterEnable, ReJEct,
 + * AND)
 + */
 +static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
 +              struct filer_table *temp_table, u32 and_index)
 +{
 +      /* Pointer to compare function (_asc or _desc) */
 +      int (*gfar_comp)(const void *, const void *);
 +
 +      u32 i, size = 0, start = 0, prev = 1;
 +      u32 old_first, old_last, new_first, new_last;
 +
 +      gfar_comp = &gfar_comp_desc;
 +
 +      for (i = 0; i < and_index; i++) {
 +
 +              if (prev != mask_table[i].block) {
 +                      old_first = mask_table[start].start + 1;
 +                      old_last = mask_table[i - 1].end;
 +                      sort(mask_table + start, size,
 +                                      sizeof(struct gfar_mask_entry),
 +                                      gfar_comp, &gfar_swap);
 +
 +                      /* Toggle the sort order for every block, so that equal
 +                       * masks of adjacent blocks end up next to each other
 +                       * and can later be dropped as duplicates */
 +                      if (gfar_comp == gfar_comp_desc)
 +                              gfar_comp = &gfar_comp_asc;
 +                      else
 +                              gfar_comp = &gfar_comp_desc;
 +
 +                      new_first = mask_table[start].start + 1;
 +                      new_last = mask_table[i - 1].end;
 +
 +                      gfar_swap_bits(&temp_table->fe[new_first],
 +                                      &temp_table->fe[old_first],
 +                                      &temp_table->fe[new_last],
 +                                      &temp_table->fe[old_last],
 +                                      RQFCR_QUEUE | RQFCR_CLE |
 +                                              RQFCR_RJE | RQFCR_AND
 +                                      );
 +
 +                      start = i;
 +                      size = 0;
 +              }
 +              size++;
 +              prev = mask_table[i].block;
 +      }
 +
 +}
 +
 +/*
 + * Reduces the number of masks needed in the filer table to save entries.
 + * This is done by sorting the masks of a dependent block. A dependent block
 + * is identified by gluing ANDs or by CLE. The sort order toggles after every
 + * block. Of course, entries in the scope of a mask must change their location with
 + * it.
 + */
 +static int gfar_optimize_filer_masks(struct filer_table *tab)
 +{
 +      struct filer_table *temp_table;
 +      struct gfar_mask_entry *mask_table;
 +
 +      u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
 +      s32 ret = 0;
 +
 +      /* We need a copy of the filer table because
 +       * we want to change its order */
 +      temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
 +      if (temp_table == NULL)
 +              return -ENOMEM;
 +      memcpy(temp_table, tab, sizeof(*temp_table));
 +
 +      mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
 +                      sizeof(struct gfar_mask_entry), GFP_KERNEL);
 +
 +      if (mask_table == NULL) {
 +              ret = -ENOMEM;
 +              goto end;
 +      }
 +
 +      and_index = gfar_generate_mask_table(mask_table, tab);
 +
 +      gfar_sort_mask_table(mask_table, temp_table, and_index);
 +
 +      /* Now we can copy the data from our duplicated filer table to
 +       * the real one in the order the mask table says */
 +      for (i = 0; i < and_index; i++) {
 +              size = mask_table[i].end - mask_table[i].start + 1;
 +              gfar_copy_filer_entries(&(tab->fe[j]),
 +                              &(temp_table->fe[mask_table[i].start]), size);
 +              j += size;
 +      }
 +
 +      /* And finally we just have to check for duplicated masks and drop the
 +       * second ones */
 +      for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
 +              if (tab->fe[i].ctrl == 0x80) {
 +                      previous_mask = i++;
 +                      break;
 +              }
 +      }
 +      for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
 +              if (tab->fe[i].ctrl == 0x80) {
 +                      if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
 +                              /* Two identical ones found!
 +                               * So drop the second one! */
 +                              gfar_trim_filer_entries(i, i, tab);
 +                      } else
 +                              /* Not identical! */
 +                              previous_mask = i;
 +              }
 +      }
 +
 +      kfree(mask_table);
 +end:  kfree(temp_table);
 +      return ret;
 +}
 +
 +/* Write the bit-pattern from software's buffer to hardware registers */
 +static int gfar_write_filer_table(struct gfar_private *priv,
 +              struct filer_table *tab)
 +{
 +      u32 i = 0;
 +      if (tab->index > MAX_FILER_IDX - 1)
 +              return -EBUSY;
 +
 +      /* Avoid processing an inconsistent filer table */
 +      lock_rx_qs(priv);
 +
 +      /* Fill regular entries */
 +      for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
 +              gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
 +      /* Fill the rest with fall-throughs */
 +      for (; i < MAX_FILER_IDX - 1; i++)
 +              gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
 +      /* Last entry must be default accept
 +       * because that's what people expect */
 +      gfar_write_filer(priv, i, 0x20, 0x0);
 +
 +      unlock_rx_qs(priv);
 +
 +      return 0;
 +}
 +
 +static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
 +              struct gfar_private *priv)
 +{
 +
 +      if (flow->flow_type & FLOW_EXT) {
 +              if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
 +                      netdev_warn(priv->ndev,
 +                                      "User-specific data not supported!\n");
 +              if (~flow->m_ext.vlan_etype)
 +                      netdev_warn(priv->ndev,
 +                                      "VLAN-etype not supported!\n");
 +      }
 +      if (flow->flow_type == IP_USER_FLOW)
 +              if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
 +                      netdev_warn(priv->ndev,
 +                                      "IP-Version differing from IPv4 not supported!\n");
 +
 +      return 0;
 +}
 +
 +static int gfar_process_filer_changes(struct gfar_private *priv)
 +{
 +      struct ethtool_flow_spec_container *j;
 +      struct filer_table *tab;
 +      s32 i = 0;
 +      s32 ret = 0;
 +
 +      /* So index is set to zero, too! */
 +      tab = kzalloc(sizeof(*tab), GFP_KERNEL);
 +      if (tab == NULL)
 +              return -ENOMEM;
 +
 +      /* Now convert the existing filer data from flow_spec into
 +       * the filer table's binary format */
 +      list_for_each_entry(j, &priv->rx_list.list, list) {
 +              ret = gfar_convert_to_filer(&j->fs, tab);
 +              if (ret == -EBUSY) {
 +                      netdev_err(priv->ndev, "Rule not added: No free space!\n");
 +                      goto end;
 +              }
 +              if (ret == -1) {
 +                      netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n");
 +                      goto end;
 +              }
 +      }
 +
 +      i = tab->index;
 +
 +      /* Optimizations to save entries */
 +      gfar_cluster_filer(tab);
 +      gfar_optimize_filer_masks(tab);
 +
 +      pr_debug("\n\tSummary:\n"
 +              "\tData on hardware: %d\n"
 +              "\tCompression rate: %d%%\n",
 +              tab->index, 100 - (100 * tab->index) / i);
 +
 +      /* Write everything to hardware */
 +      ret = gfar_write_filer_table(priv, tab);
 +      if (ret == -EBUSY) {
 +              netdev_err(priv->ndev, "Rule not added: No free space!\n");
 +              goto end;
 +      }
 +
 +end:  kfree(tab);
 +      return ret;
 +}
 +
 +static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
 +{
 +      u32 i = 0;
 +
 +      for (i = 0; i < sizeof(flow->m_u); i++)
 +              flow->m_u.hdata[i] ^= 0xFF;
 +
 +      flow->m_ext.vlan_etype ^= 0xFFFF;
 +      flow->m_ext.vlan_tci ^= 0xFFFF;
 +      flow->m_ext.data[0] ^= ~0;
 +      flow->m_ext.data[1] ^= ~0;
 +}
 +
 +static int gfar_add_cls(struct gfar_private *priv,
 +              struct ethtool_rx_flow_spec *flow)
 +{
 +      struct ethtool_flow_spec_container *temp, *comp;
 +      int ret = 0;
 +
 +      temp = kmalloc(sizeof(*temp), GFP_KERNEL);
 +      if (temp == NULL)
 +              return -ENOMEM;
 +      memcpy(&temp->fs, flow, sizeof(temp->fs));
 +
 +      gfar_invert_masks(&temp->fs);
 +      ret = gfar_check_capability(&temp->fs, priv);
 +      if (ret)
 +              goto clean_mem;
 +      /* Link in the new element at the right @location */
 +      if (list_empty(&priv->rx_list.list)) {
 +              ret = gfar_check_filer_hardware(priv);
 +              if (ret != 0)
 +                      goto clean_mem;
 +              list_add(&temp->list, &priv->rx_list.list);
 +              goto process;
 +      } else {
 +
 +              list_for_each_entry(comp, &priv->rx_list.list, list) {
 +                      if (comp->fs.location > flow->location) {
 +                              list_add_tail(&temp->list, &comp->list);
 +                              goto process;
 +                      }
 +                      if (comp->fs.location == flow->location) {
 +                              netdev_err(priv->ndev,
 +                                              "Rule not added: ID %d not free!\n",
 +                                      flow->location);
 +                              ret = -EBUSY;
 +                              goto clean_mem;
 +                      }
 +              }
 +              list_add_tail(&temp->list, &priv->rx_list.list);
 +      }
 +
 +process:
 +      ret = gfar_process_filer_changes(priv);
 +      if (ret)
 +              goto clean_list;
 +      priv->rx_list.count++;
 +      return ret;
 +
 +clean_list:
 +      list_del(&temp->list);
 +clean_mem:
 +      kfree(temp);
 +      return ret;
 +}
 +
 +static int gfar_del_cls(struct gfar_private *priv, u32 loc)
 +{
 +      struct ethtool_flow_spec_container *comp;
 +      u32 ret = -EINVAL;
 +
 +      if (list_empty(&priv->rx_list.list))
 +              return ret;
 +
 +      list_for_each_entry(comp, &priv->rx_list.list, list) {
 +              if (comp->fs.location == loc) {
 +                      list_del(&comp->list);
 +                      kfree(comp);
 +                      priv->rx_list.count--;
 +                      gfar_process_filer_changes(priv);
 +                      ret = 0;
 +                      break;
 +              }
 +      }
 +
 +      return ret;
 +
 +}
 +
 +static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
 +{
 +      struct ethtool_flow_spec_container *comp;
 +      u32 ret = -EINVAL;
 +
 +      list_for_each_entry(comp, &priv->rx_list.list, list) {
 +              if (comp->fs.location == cmd->fs.location) {
 +                      memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
 +                      gfar_invert_masks(&cmd->fs);
 +                      ret = 0;
 +                      break;
 +              }
 +      }
 +
 +      return ret;
 +}
 +
 +static int gfar_get_cls_all(struct gfar_private *priv,
 +              struct ethtool_rxnfc *cmd, u32 *rule_locs)
 +{
 +      struct ethtool_flow_spec_container *comp;
 +      u32 i = 0;
 +
 +      list_for_each_entry(comp, &priv->rx_list.list, list) {
++              if (i == cmd->rule_cnt)
++                      return -EMSGSIZE;
++              rule_locs[i] = comp->fs.location;
++              i++;
 +      }
 +
 +      cmd->data = MAX_FILER_IDX;
 +      cmd->rule_cnt = i;
 +
 +      return 0;
 +}
 +
 +static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      int ret = 0;
 +
 +      mutex_lock(&priv->rx_queue_access);
 +
 +      switch (cmd->cmd) {
 +      case ETHTOOL_SRXFH:
 +              ret = gfar_set_hash_opts(priv, cmd);
 +              break;
 +      case ETHTOOL_SRXCLSRLINS:
 +              if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
 +                      cmd->fs.ring_cookie >= priv->num_rx_queues) {
 +                      ret = -EINVAL;
 +                      break;
 +              }
 +              ret = gfar_add_cls(priv, &cmd->fs);
 +              break;
 +      case ETHTOOL_SRXCLSRLDEL:
 +              ret = gfar_del_cls(priv, cmd->fs.location);
 +              break;
 +      default:
 +              ret = -EINVAL;
 +      }
 +
 +      mutex_unlock(&priv->rx_queue_access);
 +
 +      return ret;
 +}
 +
 +static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 +              u32 *rule_locs)
 +{
 +      struct gfar_private *priv = netdev_priv(dev);
 +      int ret = 0;
 +
 +      switch (cmd->cmd) {
 +      case ETHTOOL_GRXRINGS:
 +              cmd->data = priv->num_rx_queues;
 +              break;
 +      case ETHTOOL_GRXCLSRLCNT:
 +              cmd->rule_cnt = priv->rx_list.count;
 +              break;
 +      case ETHTOOL_GRXCLSRULE:
 +              ret = gfar_get_cls(priv, cmd);
 +              break;
 +      case ETHTOOL_GRXCLSRLALL:
 +              ret = gfar_get_cls_all(priv, cmd, rule_locs);
 +              break;
 +      default:
 +              ret = -EINVAL;
 +              break;
 +      }
 +
 +      return ret;
 +}
 +
 +const struct ethtool_ops gfar_ethtool_ops = {
 +      .get_settings = gfar_gsettings,
 +      .set_settings = gfar_ssettings,
 +      .get_drvinfo = gfar_gdrvinfo,
 +      .get_regs_len = gfar_reglen,
 +      .get_regs = gfar_get_regs,
 +      .get_link = ethtool_op_get_link,
 +      .get_coalesce = gfar_gcoalesce,
 +      .set_coalesce = gfar_scoalesce,
 +      .get_ringparam = gfar_gringparam,
 +      .set_ringparam = gfar_sringparam,
 +      .get_strings = gfar_gstrings,
 +      .get_sset_count = gfar_sset_count,
 +      .get_ethtool_stats = gfar_fill_stats,
 +      .get_msglevel = gfar_get_msglevel,
 +      .set_msglevel = gfar_set_msglevel,
 +#ifdef CONFIG_PM
 +      .get_wol = gfar_get_wol,
 +      .set_wol = gfar_set_wol,
 +#endif
 +      .set_rxnfc = gfar_set_nfc,
 +      .get_rxnfc = gfar_get_nfc,
 +};
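/*
 * Sketch, not part of this diff: an ethtool_ops table like the one above is
 * normally attached to the net_device from the driver's probe path; gianfar
 * wires this up from its own init code (not shown in this hunk). From user
 * space the new rxnfc hooks are then reachable through "ethtool -N/-n".
 */
static void example_attach_ethtool_ops(struct net_device *dev)
{
	SET_ETHTOOL_OPS(dev, &gfar_ethtool_ops);
}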
index 8cca4a6,0000000..72b84de
mode 100644,000000..100644
--- /dev/null
@@@ -1,1618 -1,0 +1,1638 @@@
- static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 +/*
 + * IBM Power Virtual Ethernet Device Driver
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation; either version 2 of the License, or
 + * (at your option) any later version.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 + *
 + * Copyright (C) IBM Corporation, 2003, 2010
 + *
 + * Authors: Dave Larson <larson1@us.ibm.com>
 + *        Santiago Leon <santil@linux.vnet.ibm.com>
 + *        Brian King <brking@linux.vnet.ibm.com>
 + *        Robert Jennings <rcj@linux.vnet.ibm.com>
 + *        Anton Blanchard <anton@au.ibm.com>
 + */
 +
 +#include <linux/module.h>
 +#include <linux/moduleparam.h>
 +#include <linux/types.h>
 +#include <linux/errno.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/kernel.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/skbuff.h>
 +#include <linux/init.h>
 +#include <linux/interrupt.h>
 +#include <linux/mm.h>
 +#include <linux/pm.h>
 +#include <linux/ethtool.h>
 +#include <linux/in.h>
 +#include <linux/ip.h>
 +#include <linux/ipv6.h>
 +#include <linux/slab.h>
 +#include <asm/hvcall.h>
 +#include <linux/atomic.h>
 +#include <asm/vio.h>
 +#include <asm/iommu.h>
 +#include <asm/firmware.h>
 +
 +#include "ibmveth.h"
 +
 +static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
 +static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
 +static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 +
 +static struct kobj_type ktype_veth_pool;
 +
 +
 +static const char ibmveth_driver_name[] = "ibmveth";
 +static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
 +#define ibmveth_driver_version "1.04"
 +
 +MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
 +MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(ibmveth_driver_version);
 +
 +static unsigned int tx_copybreak __read_mostly = 128;
 +module_param(tx_copybreak, uint, 0644);
 +MODULE_PARM_DESC(tx_copybreak,
 +      "Maximum size of packet that is copied to a new buffer on transmit");
 +
 +static unsigned int rx_copybreak __read_mostly = 128;
 +module_param(rx_copybreak, uint, 0644);
 +MODULE_PARM_DESC(rx_copybreak,
 +      "Maximum size of packet that is copied to a new buffer on receive");
 +
 +static unsigned int rx_flush __read_mostly = 0;
 +module_param(rx_flush, uint, 0644);
 +MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
 +
 +struct ibmveth_stat {
 +      char name[ETH_GSTRING_LEN];
 +      int offset;
 +};
 +
 +#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
 +#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
 +
 +struct ibmveth_stat ibmveth_stats[] = {
 +      { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
 +      { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
 +      { "replenish_add_buff_failure",
 +                      IBMVETH_STAT_OFF(replenish_add_buff_failure) },
 +      { "replenish_add_buff_success",
 +                      IBMVETH_STAT_OFF(replenish_add_buff_success) },
 +      { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
 +      { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
 +      { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
 +      { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
 +      { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
 +      { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
 +};
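/*
 * Sketch, not part of this diff: how the IBMVETH_STAT_OFF()/IBMVETH_GET_STAT()
 * pair above resolves a counter -- the member offset into ibmveth_adapter is
 * recorded at compile time and a u64 is read back at that offset at runtime.
 * ibmveth_get_ethtool_stats() further below does the equivalent by index.
 */
static u64 example_read_stat(struct ibmveth_adapter *adapter, const char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		if (!strcmp(ibmveth_stats[i].name, name))
			return IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);

	return 0;
}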
 +
 +/* simple methods of getting data from the current rxq entry */
 +static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
 +{
 +      return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
 +}
 +
 +static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
 +{
 +      return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
 +                      IBMVETH_RXQ_TOGGLE_SHIFT;
 +}
 +
 +static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
 +{
 +      return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
 +}
 +
 +static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
 +{
 +      return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
 +}
 +
 +static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
 +{
 +      return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
 +}
 +
 +static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 +{
 +      return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
 +}
 +
 +static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
 +{
 +      return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
 +}
 +
 +/* setup the initial settings for a buffer pool */
 +static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
 +                                   u32 pool_index, u32 pool_size,
 +                                   u32 buff_size, u32 pool_active)
 +{
 +      pool->size = pool_size;
 +      pool->index = pool_index;
 +      pool->buff_size = buff_size;
 +      pool->threshold = pool_size * 7 / 8;
 +      pool->active = pool_active;
 +}
 +
 +/* allocate and set up a buffer pool - called during open */
 +static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 +{
 +      int i;
 +
 +      pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
 +
 +      if (!pool->free_map)
 +              return -1;
 +
 +      pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
 +      if (!pool->dma_addr) {
 +              kfree(pool->free_map);
 +              pool->free_map = NULL;
 +              return -1;
 +      }
 +
 +      pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
 +
 +      if (!pool->skbuff) {
 +              kfree(pool->dma_addr);
 +              pool->dma_addr = NULL;
 +
 +              kfree(pool->free_map);
 +              pool->free_map = NULL;
 +              return -1;
 +      }
 +
 +      memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
 +
 +      for (i = 0; i < pool->size; ++i)
 +              pool->free_map[i] = i;
 +
 +      atomic_set(&pool->available, 0);
 +      pool->producer_index = 0;
 +      pool->consumer_index = 0;
 +
 +      return 0;
 +}
 +
 +static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
 +{
 +      unsigned long offset;
 +
 +      for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
 +              asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
 +}
 +
 +/* replenish the buffers for a pool.  note that we don't need to
 + * skb_reserve these since they are used for incoming...
 + */
 +static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
 +                                        struct ibmveth_buff_pool *pool)
 +{
 +      u32 i;
 +      u32 count = pool->size - atomic_read(&pool->available);
 +      u32 buffers_added = 0;
 +      struct sk_buff *skb;
 +      unsigned int free_index, index;
 +      u64 correlator;
 +      unsigned long lpar_rc;
 +      dma_addr_t dma_addr;
 +
 +      mb();
 +
 +      for (i = 0; i < count; ++i) {
 +              union ibmveth_buf_desc desc;
 +
 +              skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
 +
 +              if (!skb) {
 +                      netdev_dbg(adapter->netdev,
 +                                 "replenish: unable to allocate skb\n");
 +                      adapter->replenish_no_mem++;
 +                      break;
 +              }
 +
 +              free_index = pool->consumer_index;
 +              pool->consumer_index++;
 +              if (pool->consumer_index >= pool->size)
 +                      pool->consumer_index = 0;
 +              index = pool->free_map[free_index];
 +
 +              BUG_ON(index == IBM_VETH_INVALID_MAP);
 +              BUG_ON(pool->skbuff[index] != NULL);
 +
 +              dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 +                              pool->buff_size, DMA_FROM_DEVICE);
 +
 +              if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 +                      goto failure;
 +
 +              pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 +              pool->dma_addr[index] = dma_addr;
 +              pool->skbuff[index] = skb;
 +
 +              correlator = ((u64)pool->index << 32) | index;
 +              *(u64 *)skb->data = correlator;
 +
 +              desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
 +              desc.fields.address = dma_addr;
 +
 +              if (rx_flush) {
 +                      unsigned int len = min(pool->buff_size,
 +                                              adapter->netdev->mtu +
 +                                              IBMVETH_BUFF_OH);
 +                      ibmveth_flush_buffer(skb->data, len);
 +              }
 +              lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
 +                                                 desc.desc);
 +
 +              if (lpar_rc != H_SUCCESS) {
 +                      goto failure;
 +              } else {
 +                      buffers_added++;
 +                      adapter->replenish_add_buff_success++;
 +              }
 +      }
 +
 +      mb();
 +      atomic_add(buffers_added, &(pool->available));
 +      return;
 +
 +failure:
 +      pool->free_map[free_index] = index;
 +      pool->skbuff[index] = NULL;
 +      if (pool->consumer_index == 0)
 +              pool->consumer_index = pool->size - 1;
 +      else
 +              pool->consumer_index--;
 +      if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
 +              dma_unmap_single(&adapter->vdev->dev,
 +                               pool->dma_addr[index], pool->buff_size,
 +                               DMA_FROM_DEVICE);
 +      dev_kfree_skb_any(skb);
 +      adapter->replenish_add_buff_failure++;
 +
 +      mb();
 +      atomic_add(buffers_added, &(pool->available));
 +}
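/*
 * Worked example, not part of this diff: the correlator written into each
 * buffer above packs the pool number into the upper 32 bits and the buffer
 * index into the lower 32 bits. For pool 2, index 5 it is 0x0000000200000005;
 * ibmveth_remove_buffer_from_pool() below recovers the two halves with
 * "correlator >> 32" and "correlator & 0xffffffffUL".
 */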
 +
 +/* replenish routine */
 +static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 +{
 +      int i;
 +
 +      adapter->replenish_task_cycles++;
 +
 +      for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
 +              struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
 +
 +              if (pool->active &&
 +                  (atomic_read(&pool->available) < pool->threshold))
 +                      ibmveth_replenish_buffer_pool(adapter, pool);
 +      }
 +
 +      adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
 +                                              4096 - 8);
 +}
 +
 +/* empty and free a buffer pool - also used to do cleanup in error paths */
 +static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
 +                                   struct ibmveth_buff_pool *pool)
 +{
 +      int i;
 +
 +      kfree(pool->free_map);
 +      pool->free_map = NULL;
 +
 +      if (pool->skbuff && pool->dma_addr) {
 +              for (i = 0; i < pool->size; ++i) {
 +                      struct sk_buff *skb = pool->skbuff[i];
 +                      if (skb) {
 +                              dma_unmap_single(&adapter->vdev->dev,
 +                                               pool->dma_addr[i],
 +                                               pool->buff_size,
 +                                               DMA_FROM_DEVICE);
 +                              dev_kfree_skb_any(skb);
 +                              pool->skbuff[i] = NULL;
 +                      }
 +              }
 +      }
 +
 +      if (pool->dma_addr) {
 +              kfree(pool->dma_addr);
 +              pool->dma_addr = NULL;
 +      }
 +
 +      if (pool->skbuff) {
 +              kfree(pool->skbuff);
 +              pool->skbuff = NULL;
 +      }
 +}
 +
 +/* remove a buffer from a pool */
 +static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
 +                                          u64 correlator)
 +{
 +      unsigned int pool  = correlator >> 32;
 +      unsigned int index = correlator & 0xffffffffUL;
 +      unsigned int free_index;
 +      struct sk_buff *skb;
 +
 +      BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
 +      BUG_ON(index >= adapter->rx_buff_pool[pool].size);
 +
 +      skb = adapter->rx_buff_pool[pool].skbuff[index];
 +
 +      BUG_ON(skb == NULL);
 +
 +      adapter->rx_buff_pool[pool].skbuff[index] = NULL;
 +
 +      dma_unmap_single(&adapter->vdev->dev,
 +                       adapter->rx_buff_pool[pool].dma_addr[index],
 +                       adapter->rx_buff_pool[pool].buff_size,
 +                       DMA_FROM_DEVICE);
 +
 +      free_index = adapter->rx_buff_pool[pool].producer_index;
 +      adapter->rx_buff_pool[pool].producer_index++;
 +      if (adapter->rx_buff_pool[pool].producer_index >=
 +          adapter->rx_buff_pool[pool].size)
 +              adapter->rx_buff_pool[pool].producer_index = 0;
 +      adapter->rx_buff_pool[pool].free_map[free_index] = index;
 +
 +      mb();
 +
 +      atomic_dec(&(adapter->rx_buff_pool[pool].available));
 +}
 +
 +/* get the current buffer on the rx queue */
 +static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
 +{
 +      u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
 +      unsigned int pool = correlator >> 32;
 +      unsigned int index = correlator & 0xffffffffUL;
 +
 +      BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
 +      BUG_ON(index >= adapter->rx_buff_pool[pool].size);
 +
 +      return adapter->rx_buff_pool[pool].skbuff[index];
 +}
 +
 +/* recycle the current buffer on the rx queue */
-               return;
++static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 +{
 +      u32 q_index = adapter->rx_queue.index;
 +      u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
 +      unsigned int pool = correlator >> 32;
 +      unsigned int index = correlator & 0xffffffffUL;
 +      union ibmveth_buf_desc desc;
 +      unsigned long lpar_rc;
++      int ret = 1;
 +
 +      BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
 +      BUG_ON(index >= adapter->rx_buff_pool[pool].size);
 +
 +      if (!adapter->rx_buff_pool[pool].active) {
 +              ibmveth_rxq_harvest_buffer(adapter);
 +              ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
-       long ret, ret6;
++              goto out;
 +      }
 +
 +      desc.fields.flags_len = IBMVETH_BUF_VALID |
 +              adapter->rx_buff_pool[pool].buff_size;
 +      desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
 +
 +      lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 +
 +      if (lpar_rc != H_SUCCESS) {
 +              netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
 +                         "during recycle rc=%ld", lpar_rc);
 +              ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
++              ret = 0;
 +      }
 +
 +      if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 +              adapter->rx_queue.index = 0;
 +              adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 +      }
++
++out:
++      return ret;
 +}
 +
 +static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 +{
 +      ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
 +
 +      if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 +              adapter->rx_queue.index = 0;
 +              adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 +      }
 +}
 +
 +static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 +{
 +      int i;
 +      struct device *dev = &adapter->vdev->dev;
 +
 +      if (adapter->buffer_list_addr != NULL) {
 +              if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
 +                      dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
 +                                      DMA_BIDIRECTIONAL);
 +                      adapter->buffer_list_dma = DMA_ERROR_CODE;
 +              }
 +              free_page((unsigned long)adapter->buffer_list_addr);
 +              adapter->buffer_list_addr = NULL;
 +      }
 +
 +      if (adapter->filter_list_addr != NULL) {
 +              if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
 +                      dma_unmap_single(dev, adapter->filter_list_dma, 4096,
 +                                      DMA_BIDIRECTIONAL);
 +                      adapter->filter_list_dma = DMA_ERROR_CODE;
 +              }
 +              free_page((unsigned long)adapter->filter_list_addr);
 +              adapter->filter_list_addr = NULL;
 +      }
 +
 +      if (adapter->rx_queue.queue_addr != NULL) {
 +              if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
 +                      dma_unmap_single(dev,
 +                                      adapter->rx_queue.queue_dma,
 +                                      adapter->rx_queue.queue_len,
 +                                      DMA_BIDIRECTIONAL);
 +                      adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 +              }
 +              kfree(adapter->rx_queue.queue_addr);
 +              adapter->rx_queue.queue_addr = NULL;
 +      }
 +
 +      for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 +              if (adapter->rx_buff_pool[i].active)
 +                      ibmveth_free_buffer_pool(adapter,
 +                                               &adapter->rx_buff_pool[i]);
 +
 +      if (adapter->bounce_buffer != NULL) {
 +              if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 +                      dma_unmap_single(&adapter->vdev->dev,
 +                                      adapter->bounce_buffer_dma,
 +                                      adapter->netdev->mtu + IBMVETH_BUFF_OH,
 +                                      DMA_BIDIRECTIONAL);
 +                      adapter->bounce_buffer_dma = DMA_ERROR_CODE;
 +              }
 +              kfree(adapter->bounce_buffer);
 +              adapter->bounce_buffer = NULL;
 +      }
 +}
 +
 +static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
 +        union ibmveth_buf_desc rxq_desc, u64 mac_address)
 +{
 +      int rc, try_again = 1;
 +
 +      /*
 +       * After a kexec the adapter will still be open, so our attempt to
 +       * open it will fail. So if we get a failure we free the adapter and
 +       * try again, but only once.
 +       */
 +retry:
 +      rc = h_register_logical_lan(adapter->vdev->unit_address,
 +                                  adapter->buffer_list_dma, rxq_desc.desc,
 +                                  adapter->filter_list_dma, mac_address);
 +
 +      if (rc != H_SUCCESS && try_again) {
 +              do {
 +                      rc = h_free_logical_lan(adapter->vdev->unit_address);
 +              } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
 +
 +              try_again = 0;
 +              goto retry;
 +      }
 +
 +      return rc;
 +}
 +
 +static int ibmveth_open(struct net_device *netdev)
 +{
 +      struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +      u64 mac_address = 0;
 +      int rxq_entries = 1;
 +      unsigned long lpar_rc;
 +      int rc;
 +      union ibmveth_buf_desc rxq_desc;
 +      int i;
 +      struct device *dev;
 +
 +      netdev_dbg(netdev, "open starting\n");
 +
 +      napi_enable(&adapter->napi);
 +
 +      for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 +              rxq_entries += adapter->rx_buff_pool[i].size;
 +
 +      adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 +      adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 +
 +      if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
 +              netdev_err(netdev, "unable to allocate filter or buffer list "
 +                         "pages\n");
 +              rc = -ENOMEM;
 +              goto err_out;
 +      }
 +
 +      adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
 +                                              rxq_entries;
 +      adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
 +                                              GFP_KERNEL);
 +
 +      if (!adapter->rx_queue.queue_addr) {
 +              netdev_err(netdev, "unable to allocate rx queue pages\n");
 +              rc = -ENOMEM;
 +              goto err_out;
 +      }
 +
 +      dev = &adapter->vdev->dev;
 +
 +      adapter->buffer_list_dma = dma_map_single(dev,
 +                      adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
 +      adapter->filter_list_dma = dma_map_single(dev,
 +                      adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
 +      adapter->rx_queue.queue_dma = dma_map_single(dev,
 +                      adapter->rx_queue.queue_addr,
 +                      adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 +
 +      if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
 +          (dma_mapping_error(dev, adapter->filter_list_dma)) ||
 +          (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
 +              netdev_err(netdev, "unable to map filter or buffer list "
 +                         "pages\n");
 +              rc = -ENOMEM;
 +              goto err_out;
 +      }
 +
 +      adapter->rx_queue.index = 0;
 +      adapter->rx_queue.num_slots = rxq_entries;
 +      adapter->rx_queue.toggle = 1;
 +
 +      memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
 +      mac_address = mac_address >> 16;
 +
 +      rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
 +                                      adapter->rx_queue.queue_len;
 +      rxq_desc.fields.address = adapter->rx_queue.queue_dma;
 +
 +      netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
 +      netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
 +      netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);
 +
 +      h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 +
 +      lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
 +
 +      if (lpar_rc != H_SUCCESS) {
 +              netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
 +                         lpar_rc);
 +              netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
 +                         "desc:0x%llx MAC:0x%llx\n",
 +                                   adapter->buffer_list_dma,
 +                                   adapter->filter_list_dma,
 +                                   rxq_desc.desc,
 +                                   mac_address);
 +              rc = -ENONET;
 +              goto err_out;
 +      }
 +
 +      for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +              if (!adapter->rx_buff_pool[i].active)
 +                      continue;
 +              if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
 +                      netdev_err(netdev, "unable to alloc pool\n");
 +                      adapter->rx_buff_pool[i].active = 0;
 +                      rc = -ENOMEM;
 +                      goto err_out;
 +              }
 +      }
 +
 +      netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
 +      rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
 +                       netdev);
 +      if (rc != 0) {
 +              netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
 +                         netdev->irq, rc);
 +              do {
 +                      rc = h_free_logical_lan(adapter->vdev->unit_address);
 +              } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
 +
 +              goto err_out;
 +      }
 +
 +      adapter->bounce_buffer =
 +          kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
 +      if (!adapter->bounce_buffer) {
 +              netdev_err(netdev, "unable to allocate bounce buffer\n");
 +              rc = -ENOMEM;
 +              goto err_out_free_irq;
 +      }
 +      adapter->bounce_buffer_dma =
 +          dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 +                         netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 +      if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 +              netdev_err(netdev, "unable to map bounce buffer\n");
 +              rc = -ENOMEM;
 +              goto err_out_free_irq;
 +      }
 +
 +      netdev_dbg(netdev, "initial replenish cycle\n");
 +      ibmveth_interrupt(netdev->irq, netdev);
 +
 +      netif_start_queue(netdev);
 +
 +      netdev_dbg(netdev, "open complete\n");
 +
 +      return 0;
 +
 +err_out_free_irq:
 +      free_irq(netdev->irq, netdev);
 +err_out:
 +      ibmveth_cleanup(adapter);
 +      napi_disable(&adapter->napi);
 +      return rc;
 +}
 +
 +static int ibmveth_close(struct net_device *netdev)
 +{
 +      struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +      long lpar_rc;
 +
 +      netdev_dbg(netdev, "close starting\n");
 +
 +      napi_disable(&adapter->napi);
 +
 +      if (!adapter->pool_config)
 +              netif_stop_queue(netdev);
 +
 +      h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 +
 +      do {
 +              lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 +      } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 +
 +      if (lpar_rc != H_SUCCESS) {
 +              netdev_err(netdev, "h_free_logical_lan failed with %lx, "
 +                         "continuing with close\n", lpar_rc);
 +      }
 +
 +      free_irq(netdev->irq, netdev);
 +
 +      adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
 +                                              4096 - 8);
 +
 +      ibmveth_cleanup(adapter);
 +
 +      netdev_dbg(netdev, "close complete\n");
 +
 +      return 0;
 +}
 +
 +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
 +                              SUPPORTED_FIBRE);
 +      cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
 +                              ADVERTISED_FIBRE);
 +      ethtool_cmd_speed_set(cmd, SPEED_1000);
 +      cmd->duplex = DUPLEX_FULL;
 +      cmd->port = PORT_FIBRE;
 +      cmd->phy_address = 0;
 +      cmd->transceiver = XCVR_INTERNAL;
 +      cmd->autoneg = AUTONEG_ENABLE;
 +      cmd->maxtxpkt = 0;
 +      cmd->maxrxpkt = 1;
 +      return 0;
 +}
 +
 +static void netdev_get_drvinfo(struct net_device *dev,
 +                             struct ethtool_drvinfo *info)
 +{
 +      strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
 +      strncpy(info->version, ibmveth_driver_version,
 +              sizeof(info->version) - 1);
 +}
 +
 +static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
 +{
 +      /*
 +       * Since the ibmveth firmware interface does not have the
 +       * concept of separate tx/rx checksum offload enable, if rx
 +       * checksum is disabled we also have to disable tx checksum
 +       * offload. Once we disable rx checksum offload, we are no
 +       * longer allowed to send tx buffers that are not properly
 +       * checksummed.
 +       */
 +
 +      if (!(features & NETIF_F_RXCSUM))
 +              features &= ~NETIF_F_ALL_CSUM;
 +
 +      return features;
 +}
 +
 +static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 +{
 +      struct ibmveth_adapter *adapter = netdev_priv(dev);
 +      unsigned long set_attr, clr_attr, ret_attr;
 +      unsigned long set_attr6, clr_attr6;
-               ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
++      long ret, ret4, ret6;
 +      int rc1 = 0, rc2 = 0;
 +      int restart = 0;
 +
 +      if (netif_running(dev)) {
 +              restart = 1;
 +              adapter->pool_config = 1;
 +              ibmveth_close(dev);
 +              adapter->pool_config = 0;
 +      }
 +
 +      set_attr = 0;
 +      clr_attr = 0;
++      set_attr6 = 0;
++      clr_attr6 = 0;
 +
 +      if (data) {
 +              set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
 +              set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
 +      } else {
 +              clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
 +              clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
 +      }
 +
 +      ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
 +
 +      if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
 +          !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
 +          (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
-               if (ret != H_SUCCESS) {
++              ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
 +                                       set_attr, &ret_attr);
 +
-                                       data, ret);
++              if (ret4 != H_SUCCESS) {
 +                      netdev_err(dev, "unable to change IPv4 checksum "
 +                                      "offload settings. %d rc=%ld\n",
-                       ret = h_illan_attributes(adapter->vdev->unit_address,
-                                                set_attr, clr_attr, &ret_attr);
++                                      data, ret4);
++
++                      h_illan_attributes(adapter->vdev->unit_address,
++                                         set_attr, clr_attr, &ret_attr);
++
++                      if (data == 1)
++                              dev->features &= ~NETIF_F_IP_CSUM;
 +
-                                       data, ret);
 +              } else {
 +                      adapter->fw_ipv4_csum_support = data;
 +              }
 +
 +              ret6 = h_illan_attributes(adapter->vdev->unit_address,
 +                                       clr_attr6, set_attr6, &ret_attr);
 +
 +              if (ret6 != H_SUCCESS) {
 +                      netdev_err(dev, "unable to change IPv6 checksum "
 +                                      "offload settings. %d rc=%ld\n",
-                       ret = h_illan_attributes(adapter->vdev->unit_address,
-                                                set_attr6, clr_attr6,
-                                                &ret_attr);
++                                      data, ret6);
++
++                      h_illan_attributes(adapter->vdev->unit_address,
++                                         set_attr6, clr_attr6, &ret_attr);
++
++                      if (data == 1)
++                              dev->features &= ~NETIF_F_IPV6_CSUM;
 +
-               if (ret != H_SUCCESS || ret6 != H_SUCCESS)
 +              } else
 +                      adapter->fw_ipv6_csum_support = data;
 +
-       descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-                                                skb_headlen(skb),
-                                                DMA_TO_DEVICE);
-       if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
++              if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
 +                      adapter->rx_csum = data;
 +              else
 +                      rc1 = -EIO;
 +      } else {
 +              rc1 = -EIO;
 +              netdev_err(dev, "unable to change checksum offload settings."
 +                                   " %d rc=%ld ret_attr=%lx\n", data, ret,
 +                                   ret_attr);
 +      }
 +
 +      if (restart)
 +              rc2 = ibmveth_open(dev);
 +
 +      return rc1 ? rc1 : rc2;
 +}
 +
 +static int ibmveth_set_features(struct net_device *dev, u32 features)
 +{
 +      struct ibmveth_adapter *adapter = netdev_priv(dev);
 +      int rx_csum = !!(features & NETIF_F_RXCSUM);
 +      int rc;
 +
 +      if (rx_csum == adapter->rx_csum)
 +              return 0;
 +
 +      rc = ibmveth_set_csum_offload(dev, rx_csum);
 +      if (rc && !adapter->rx_csum)
 +              dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
 +
 +      return rc;
 +}
 +
 +static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 +{
 +      int i;
 +
 +      if (stringset != ETH_SS_STATS)
 +              return;
 +
 +      for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
 +              memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
 +}
 +
 +static int ibmveth_get_sset_count(struct net_device *dev, int sset)
 +{
 +      switch (sset) {
 +      case ETH_SS_STATS:
 +              return ARRAY_SIZE(ibmveth_stats);
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
 +static void ibmveth_get_ethtool_stats(struct net_device *dev,
 +                                    struct ethtool_stats *stats, u64 *data)
 +{
 +      int i;
 +      struct ibmveth_adapter *adapter = netdev_priv(dev);
 +
 +      for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
 +              data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
 +}
 +
 +static const struct ethtool_ops netdev_ethtool_ops = {
 +      .get_drvinfo            = netdev_get_drvinfo,
 +      .get_settings           = netdev_get_settings,
 +      .get_link               = ethtool_op_get_link,
 +      .get_strings            = ibmveth_get_strings,
 +      .get_sset_count         = ibmveth_get_sset_count,
 +      .get_ethtool_stats      = ibmveth_get_ethtool_stats,
 +};
 +
 +static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 +{
 +      return -EOPNOTSUPP;
 +}
 +
 +#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
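 +/* page_offset() above gives an address's offset within a 4 KiB page,
 + * e.g. page_offset(0x12345) == 0x345. */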
 +
 +static int ibmveth_send(struct ibmveth_adapter *adapter,
 +                      union ibmveth_buf_desc *descs)
 +{
 +      unsigned long correlator;
 +      unsigned int retry_count;
 +      unsigned long ret;
 +
 +      /*
 +       * The retry count sets a maximum for the number of broadcast and
 +       * multicast destinations within the system.
 +       */
 +      retry_count = 1024;
 +      correlator = 0;
 +      do {
 +              ret = h_send_logical_lan(adapter->vdev->unit_address,
 +                                           descs[0].desc, descs[1].desc,
 +                                           descs[2].desc, descs[3].desc,
 +                                           descs[4].desc, descs[5].desc,
 +                                           correlator, &correlator);
 +      } while ((ret == H_BUSY) && (retry_count--));
 +
 +      if (ret != H_SUCCESS && ret != H_DROPPED) {
 +              netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
 +                         "with rc=%ld\n", ret);
 +              return 1;
 +      }
 +
 +      return 0;
 +}
 +
 +static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 +                                    struct net_device *netdev)
 +{
 +      struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +      unsigned int desc_flags;
 +      union ibmveth_buf_desc descs[6];
 +      int last, i;
 +      int force_bounce = 0;
++      dma_addr_t dma_addr;
 +
 +      /*
 +       * veth handles a maximum of 6 segments including the header, so
 +       * we have to linearize the skb if there are more than this.
 +       */
 +      if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
 +              netdev->stats.tx_dropped++;
 +              goto out;
 +      }
 +
 +      /* veth can't checksum offload UDP */
 +      if (skb->ip_summed == CHECKSUM_PARTIAL &&
 +          ((skb->protocol == htons(ETH_P_IP) &&
 +            ip_hdr(skb)->protocol != IPPROTO_TCP) ||
 +           (skb->protocol == htons(ETH_P_IPV6) &&
 +            ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
 +          skb_checksum_help(skb)) {
 +
 +              netdev_err(netdev, "tx: failed to checksum packet\n");
 +              netdev->stats.tx_dropped++;
 +              goto out;
 +      }
 +
 +      desc_flags = IBMVETH_BUF_VALID;
 +
 +      if (skb->ip_summed == CHECKSUM_PARTIAL) {
 +              unsigned char *buf = skb_transport_header(skb) +
 +                                              skb->csum_offset;
 +
 +              desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
 +
 +              /* Need to zero out the checksum */
 +              buf[0] = 0;
 +              buf[1] = 0;
 +      }
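 +      /* The two bytes zeroed above are the transport-layer checksum field
 +       * itself: skb->csum_offset is e.g. offsetof(struct tcphdr, check),
 +       * i.e. 16 bytes into the TCP header. */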
 +
 +retry_bounce:
 +      memset(descs, 0, sizeof(descs));
 +
 +      /*
 +       * If a linear packet is below the tx copybreak threshold then
 +       * copy it into the static bounce buffer. This avoids the
 +       * cost of a TCE insert and remove.
 +       */
 +      if (force_bounce || (!skb_is_nonlinear(skb) &&
 +                              (skb->len < tx_copybreak))) {
 +              skb_copy_from_linear_data(skb, adapter->bounce_buffer,
 +                                        skb->len);
 +
 +              descs[0].fields.flags_len = desc_flags | skb->len;
 +              descs[0].fields.address = adapter->bounce_buffer_dma;
 +
 +              if (ibmveth_send(adapter, descs)) {
 +                      adapter->tx_send_failed++;
 +                      netdev->stats.tx_dropped++;
 +              } else {
 +                      netdev->stats.tx_packets++;
 +                      netdev->stats.tx_bytes += skb->len;
 +              }
 +
 +              goto out;
 +      }
 +
 +      /* Map the header */
-               unsigned long dma_addr;
++      dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
++                                skb_headlen(skb), DMA_TO_DEVICE);
++      if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 +              goto map_failed;
 +
 +      descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
++      descs[0].fields.address = dma_addr;
 +
 +      /* Map the frags */
 +      for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-       for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
 +              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +
 +              dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
 +                                          frag->size, DMA_TO_DEVICE);
 +
 +              if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 +                      goto map_failed_frags;
 +
 +              descs[i+1].fields.flags_len = desc_flags | frag->size;
 +              descs[i+1].fields.address = dma_addr;
 +      }
 +
 +      if (ibmveth_send(adapter, descs)) {
 +              adapter->tx_send_failed++;
 +              netdev->stats.tx_dropped++;
 +      } else {
 +              netdev->stats.tx_packets++;
 +              netdev->stats.tx_bytes += skb->len;
 +      }
 +
-                               ibmveth_rxq_recycle_buffer(adapter);
++      dma_unmap_single(&adapter->vdev->dev,
++                       descs[0].fields.address,
++                       descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
++                       DMA_TO_DEVICE);
++
++      for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
 +              dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
 +                             descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
 +                             DMA_TO_DEVICE);
 +
 +out:
 +      dev_kfree_skb(skb);
 +      return NETDEV_TX_OK;
 +
 +map_failed_frags:
 +      last = i+1;
 +      for (i = 0; i < last; i++)
 +              dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
 +                             descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
 +                             DMA_TO_DEVICE);
 +
 +map_failed:
 +      if (!firmware_has_feature(FW_FEATURE_CMO))
 +              netdev_err(netdev, "tx: unable to map xmit buffer\n");
 +      adapter->tx_map_failed++;
 +      skb_linearize(skb);
 +      force_bounce = 1;
 +      goto retry_bounce;
 +}
 +
 +static int ibmveth_poll(struct napi_struct *napi, int budget)
 +{
 +      struct ibmveth_adapter *adapter =
 +                      container_of(napi, struct ibmveth_adapter, napi);
 +      struct net_device *netdev = adapter->netdev;
 +      int frames_processed = 0;
 +      unsigned long lpar_rc;
 +
 +restart_poll:
 +      do {
 +              if (!ibmveth_rxq_pending_buffer(adapter))
 +                      break;
 +
 +              smp_rmb();
 +              if (!ibmveth_rxq_buffer_valid(adapter)) {
 +                      wmb(); /* suggested by larson1 */
 +                      adapter->rx_invalid_buffer++;
 +                      netdev_dbg(netdev, "recycling invalid buffer\n");
 +                      ibmveth_rxq_recycle_buffer(adapter);
 +              } else {
 +                      struct sk_buff *skb, *new_skb;
 +                      int length = ibmveth_rxq_frame_length(adapter);
 +                      int offset = ibmveth_rxq_frame_offset(adapter);
 +                      int csum_good = ibmveth_rxq_csum_good(adapter);
 +
 +                      skb = ibmveth_rxq_get_buffer(adapter);
 +
 +                      new_skb = NULL;
 +                      if (length < rx_copybreak)
 +                              new_skb = netdev_alloc_skb(netdev, length);
 +
 +                      if (new_skb) {
 +                              skb_copy_to_linear_data(new_skb,
 +                                                      skb->data + offset,
 +                                                      length);
 +                              if (rx_flush)
 +                                      ibmveth_flush_buffer(skb->data,
 +                                              length + offset);
++                              if (!ibmveth_rxq_recycle_buffer(adapter))
++                                      kfree_skb(skb);
 +                              skb = new_skb;
 +                      } else {
 +                              ibmveth_rxq_harvest_buffer(adapter);
 +                              skb_reserve(skb, offset);
 +                      }
 +
 +                      skb_put(skb, length);
 +                      skb->protocol = eth_type_trans(skb, netdev);
 +
 +                      if (csum_good)
 +                              skb->ip_summed = CHECKSUM_UNNECESSARY;
 +
 +                      netif_receive_skb(skb); /* send it up */
 +
 +                      netdev->stats.rx_packets++;
 +                      netdev->stats.rx_bytes += length;
 +                      frames_processed++;
 +              }
 +      } while (frames_processed < budget);
 +
 +      ibmveth_replenish_task(adapter);
 +
 +      if (frames_processed < budget) {
 +              /* We think we are done - reenable interrupts,
 +               * then check once more to make sure we are done.
 +               */
 +              lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 +                                     VIO_IRQ_ENABLE);
 +
 +              BUG_ON(lpar_rc != H_SUCCESS);
 +
 +              napi_complete(napi);
 +
 +              if (ibmveth_rxq_pending_buffer(adapter) &&
 +                  napi_reschedule(napi)) {
 +                      lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 +                                             VIO_IRQ_DISABLE);
 +                      goto restart_poll;
 +              }
 +      }
 +
 +      return frames_processed;
 +}
 +
 +static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
 +{
 +      struct net_device *netdev = dev_instance;
 +      struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +      unsigned long lpar_rc;
 +
 +      if (napi_schedule_prep(&adapter->napi)) {
 +              lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 +                                     VIO_IRQ_DISABLE);
 +              BUG_ON(lpar_rc != H_SUCCESS);
 +              __napi_schedule(&adapter->napi);
 +      }
 +      return IRQ_HANDLED;
 +}
 +
 +static void ibmveth_set_multicast_list(struct net_device *netdev)
 +{
 +      struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +      unsigned long lpar_rc;
 +
 +      if ((netdev->flags & IFF_PROMISC) ||
 +          (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
 +              lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 +                                         IbmVethMcastEnableRecv |
 +                                         IbmVethMcastDisableFiltering,
 +                                         0);
 +              if (lpar_rc != H_SUCCESS) {
 +                      netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
 +                                 "entering promisc mode\n", lpar_rc);
 +              }
 +      } else {
 +              struct netdev_hw_addr *ha;
 +              /* clear the filter table & disable filtering */
 +              lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 +                                         IbmVethMcastEnableRecv |
 +                                         IbmVethMcastDisableFiltering |
 +                                         IbmVethMcastClearFilterTable,
 +                                         0);
 +              if (lpar_rc != H_SUCCESS) {
 +                      netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
 +                                 "attempting to clear filter table\n",
 +                                 lpar_rc);
 +              }
 +              /* add the addresses to the filter table */
 +              netdev_for_each_mc_addr(ha, netdev) {
 +                      /* add the multicast address to the filter table */
 +                      unsigned long mcast_addr = 0;
 +                      memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
 +                      lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 +                                                 IbmVethMcastAddFilter,
 +                                                 mcast_addr);
 +                      if (lpar_rc != H_SUCCESS) {
 +                              netdev_err(netdev, "h_multicast_ctrl rc=%ld "
 +                                         "when adding an entry to the filter "
 +                                         "table\n", lpar_rc);
 +                      }
 +              }
 +
 +              /* re-enable filtering */
 +              lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 +                                         IbmVethMcastEnableFiltering,
 +                                         0);
 +              if (lpar_rc != H_SUCCESS) {
 +                      netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
 +                                 "enabling filtering\n", lpar_rc);
 +              }
 +      }
 +}
 +
 +static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +      struct ibmveth_adapter *adapter = netdev_priv(dev);
 +      struct vio_dev *viodev = adapter->vdev;
 +      int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
 +      int i, rc;
 +      int need_restart = 0;
 +
 +      if (new_mtu < IBMVETH_MIN_MTU)
 +              return -EINVAL;
 +
 +      for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 +              if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
 +                      break;
 +
 +      if (i == IBMVETH_NUM_BUFF_POOLS)
 +              return -EINVAL;
 +
 +      /* Deactivate all the buffer pools so that the next loop can activate
 +       * only the buffer pools necessary to hold the new MTU */
 +      if (netif_running(adapter->netdev)) {
 +              need_restart = 1;
 +              adapter->pool_config = 1;
 +              ibmveth_close(adapter->netdev);
 +              adapter->pool_config = 0;
 +      }
 +
 +      /* Look for an active buffer pool that can hold the new MTU */
 +      for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +              adapter->rx_buff_pool[i].active = 1;
 +
 +              if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
 +                      dev->mtu = new_mtu;
 +                      vio_cmo_set_dev_desired(viodev,
 +                                              ibmveth_get_desired_dma
 +                                              (viodev));
 +                      if (need_restart) {
 +                              return ibmveth_open(adapter->netdev);
 +                      }
 +                      return 0;
 +              }
 +      }
 +
 +      if (need_restart && (rc = ibmveth_open(adapter->netdev)))
 +              return rc;
 +
 +      return -EINVAL;
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void ibmveth_poll_controller(struct net_device *dev)
 +{
 +      ibmveth_replenish_task(netdev_priv(dev));
 +      ibmveth_interrupt(dev->irq, dev);
 +}
 +#endif
 +
 +/**
 + * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 + *
 + * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 + *
 + * Return value:
 + *    Number of bytes of IO data the driver will need to perform well.
 + */
 +static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
 +{
 +      struct net_device *netdev = dev_get_drvdata(&vdev->dev);
 +      struct ibmveth_adapter *adapter;
 +      unsigned long ret;
 +      int i;
 +      int rxqentries = 1;
 +
 +      /* netdev inits at probe time along with the structures we need below */
 +      if (netdev == NULL)
 +              return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
 +
 +      adapter = netdev_priv(netdev);
 +
 +      ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
 +      ret += IOMMU_PAGE_ALIGN(netdev->mtu);
 +
 +      for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +              /* add the size of the active receive buffers */
 +              if (adapter->rx_buff_pool[i].active)
 +                      ret +=
 +                          adapter->rx_buff_pool[i].size *
 +                          IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
 +                                  buff_size);
 +              rxqentries += adapter->rx_buff_pool[i].size;
 +      }
 +      /* add the size of the receive queue entries */
 +      ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
 +
 +      return ret;
 +}
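 +/* In short, the entitlement is: buffer list + filter list + one MTU-sized
 + * mapping + (for each active pool) size * page-aligned buff_size + the
 + * page-aligned receive queue, all rounded up to IOMMU pages. */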
 +
 +static const struct net_device_ops ibmveth_netdev_ops = {
 +      .ndo_open               = ibmveth_open,
 +      .ndo_stop               = ibmveth_close,
 +      .ndo_start_xmit         = ibmveth_start_xmit,
 +      .ndo_set_rx_mode        = ibmveth_set_multicast_list,
 +      .ndo_do_ioctl           = ibmveth_ioctl,
 +      .ndo_change_mtu         = ibmveth_change_mtu,
 +      .ndo_fix_features       = ibmveth_fix_features,
 +      .ndo_set_features       = ibmveth_set_features,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_set_mac_address    = eth_mac_addr,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = ibmveth_poll_controller,
 +#endif
 +};
 +
 +static int __devinit ibmveth_probe(struct vio_dev *dev,
 +                                 const struct vio_device_id *id)
 +{
 +      int rc, i;
 +      struct net_device *netdev;
 +      struct ibmveth_adapter *adapter;
 +      unsigned char *mac_addr_p;
 +      unsigned int *mcastFilterSize_p;
 +
 +      dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
 +              dev->unit_address);
 +
 +      mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
 +                                                      NULL);
 +      if (!mac_addr_p) {
 +              dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
 +              return -EINVAL;
 +      }
 +
 +      mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
 +                                              VETH_MCAST_FILTER_SIZE, NULL);
 +      if (!mcastFilterSize_p) {
 +              dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
 +                      "attribute\n");
 +              return -EINVAL;
 +      }
 +
 +      netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
 +
 +      if (!netdev)
 +              return -ENOMEM;
 +
 +      adapter = netdev_priv(netdev);
 +      dev_set_drvdata(&dev->dev, netdev);
 +
 +      adapter->vdev = dev;
 +      adapter->netdev = netdev;
 +      adapter->mcastFilterSize = *mcastFilterSize_p;
 +      adapter->pool_config = 0;
 +
 +      netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
 +
 +      /*
 +       * Some older boxes running PHYP non-natively have an OF that returns
 +       * an 8-byte local-mac-address field (and the first 2 bytes have to be
 +       * ignored) while newer boxes' OF returns a 6-byte field. Note that
 +       * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
 +       * The RPA doc specifies that the first byte must be 10b, so we'll
 +       * just look for it to solve this 8 vs. 6 byte field issue
 +       */
 +      if ((*mac_addr_p & 0x3) != 0x02)
 +              mac_addr_p += 2;
 +
 +      adapter->mac_addr = 0;
 +      memcpy(&adapter->mac_addr, mac_addr_p, 6);
 +
 +      netdev->irq = dev->irq;
 +      netdev->netdev_ops = &ibmveth_netdev_ops;
 +      netdev->ethtool_ops = &netdev_ethtool_ops;
 +      SET_NETDEV_DEV(netdev, &dev->dev);
 +      netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
 +              NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 +      netdev->features |= netdev->hw_features;
 +
 +      memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 +
 +      for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +              struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
 +              int error;
 +
 +              ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
 +                                       pool_count[i], pool_size[i],
 +                                       pool_active[i]);
 +              error = kobject_init_and_add(kobj, &ktype_veth_pool,
 +                                           &dev->dev.kobj, "pool%d", i);
 +              if (!error)
 +                      kobject_uevent(kobj, KOBJ_ADD);
 +      }
 +
 +      netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
 +
 +      adapter->buffer_list_dma = DMA_ERROR_CODE;
 +      adapter->filter_list_dma = DMA_ERROR_CODE;
 +      adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 +
 +      netdev_dbg(netdev, "registering netdev...\n");
 +
 +      ibmveth_set_features(netdev, netdev->features);
 +
 +      rc = register_netdev(netdev);
 +
 +      if (rc) {
 +              netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
 +              free_netdev(netdev);
 +              return rc;
 +      }
 +
 +      netdev_dbg(netdev, "registered\n");
 +
 +      return 0;
 +}
 +
 +static int __devexit ibmveth_remove(struct vio_dev *dev)
 +{
 +      struct net_device *netdev = dev_get_drvdata(&dev->dev);
 +      struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +      int i;
 +
 +      for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 +              kobject_put(&adapter->rx_buff_pool[i].kobj);
 +
 +      unregister_netdev(netdev);
 +
 +      free_netdev(netdev);
 +      dev_set_drvdata(&dev->dev, NULL);
 +
 +      return 0;
 +}
 +
 +static struct attribute veth_active_attr;
 +static struct attribute veth_num_attr;
 +static struct attribute veth_size_attr;
 +
 +static ssize_t veth_pool_show(struct kobject *kobj,
 +                            struct attribute *attr, char *buf)
 +{
 +      struct ibmveth_buff_pool *pool = container_of(kobj,
 +                                                    struct ibmveth_buff_pool,
 +                                                    kobj);
 +
 +      if (attr == &veth_active_attr)
 +              return sprintf(buf, "%d\n", pool->active);
 +      else if (attr == &veth_num_attr)
 +              return sprintf(buf, "%d\n", pool->size);
 +      else if (attr == &veth_size_attr)
 +              return sprintf(buf, "%d\n", pool->buff_size);
 +      return 0;
 +}
 +
 +static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
 +                             const char *buf, size_t count)
 +{
 +      struct ibmveth_buff_pool *pool = container_of(kobj,
 +                                                    struct ibmveth_buff_pool,
 +                                                    kobj);
 +      struct net_device *netdev = dev_get_drvdata(
 +          container_of(kobj->parent, struct device, kobj));
 +      struct ibmveth_adapter *adapter = netdev_priv(netdev);
 +      long value = simple_strtol(buf, NULL, 10);
 +      long rc;
 +
 +      if (attr == &veth_active_attr) {
 +              if (value && !pool->active) {
 +                      if (netif_running(netdev)) {
 +                              if (ibmveth_alloc_buffer_pool(pool)) {
 +                                      netdev_err(netdev,
 +                                                 "unable to alloc pool\n");
 +                                      return -ENOMEM;
 +                              }
 +                              pool->active = 1;
 +                              adapter->pool_config = 1;
 +                              ibmveth_close(netdev);
 +                              adapter->pool_config = 0;
 +                              if ((rc = ibmveth_open(netdev)))
 +                                      return rc;
 +                      } else {
 +                              pool->active = 1;
 +                      }
 +              } else if (!value && pool->active) {
 +                      int mtu = netdev->mtu + IBMVETH_BUFF_OH;
 +                      int i;
 +                      /* Make sure there is a buffer pool with buffers that
 +                       * can hold an MTU-sized packet */
 +                      for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 +                              if (pool == &adapter->rx_buff_pool[i])
 +                                      continue;
 +                              if (!adapter->rx_buff_pool[i].active)
 +                                      continue;
 +                              if (mtu <= adapter->rx_buff_pool[i].buff_size)
 +                                      break;
 +                      }
 +
 +                      if (i == IBMVETH_NUM_BUFF_POOLS) {
 +                              netdev_err(netdev, "no active pool >= MTU\n");
 +                              return -EPERM;
 +                      }
 +
 +                      if (netif_running(netdev)) {
 +                              adapter->pool_config = 1;
 +                              ibmveth_close(netdev);
 +                              pool->active = 0;
 +                              adapter->pool_config = 0;
 +                              if ((rc = ibmveth_open(netdev)))
 +                                      return rc;
 +                      }
 +                      pool->active = 0;
 +              }
 +      } else if (attr == &veth_num_attr) {
 +              if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
 +                      return -EINVAL;
 +              } else {
 +                      if (netif_running(netdev)) {
 +                              adapter->pool_config = 1;
 +                              ibmveth_close(netdev);
 +                              adapter->pool_config = 0;
 +                              pool->size = value;
 +                              if ((rc = ibmveth_open(netdev)))
 +                                      return rc;
 +                      } else {
 +                              pool->size = value;
 +                      }
 +              }
 +      } else if (attr == &veth_size_attr) {
 +              if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
 +                      return -EINVAL;
 +              } else {
 +                      if (netif_running(netdev)) {
 +                              adapter->pool_config = 1;
 +                              ibmveth_close(netdev);
 +                              adapter->pool_config = 0;
 +                              pool->buff_size = value;
 +                              if ((rc = ibmveth_open(netdev)))
 +                                      return rc;
 +                      } else {
 +                              pool->buff_size = value;
 +                      }
 +              }
 +      }
 +
 +      /* kick the interrupt handler to allocate/deallocate pools */
 +      ibmveth_interrupt(netdev->irq, netdev);
 +      return count;
 +}
 +
 +
 +#define ATTR(_name, _mode)                            \
 +      struct attribute veth_##_name##_attr = {        \
 +      .name = __stringify(_name), .mode = _mode,      \
 +      };
 +
 +static ATTR(active, 0644);
 +static ATTR(num, 0644);
 +static ATTR(size, 0644);
 +
 +static struct attribute *veth_pool_attrs[] = {
 +      &veth_active_attr,
 +      &veth_num_attr,
 +      &veth_size_attr,
 +      NULL,
 +};
 +
 +static const struct sysfs_ops veth_pool_ops = {
 +      .show   = veth_pool_show,
 +      .store  = veth_pool_store,
 +};
 +
 +static struct kobj_type ktype_veth_pool = {
 +      .release        = NULL,
 +      .sysfs_ops      = &veth_pool_ops,
 +      .default_attrs  = veth_pool_attrs,
 +};
 +
 +static int ibmveth_resume(struct device *dev)
 +{
 +      struct net_device *netdev = dev_get_drvdata(dev);
 +      ibmveth_interrupt(netdev->irq, netdev);
 +      return 0;
 +}
 +
 +static struct vio_device_id ibmveth_device_table[] __devinitdata = {
 +      { "network", "IBM,l-lan"},
 +      { "", "" }
 +};
 +MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
 +
 +static struct dev_pm_ops ibmveth_pm_ops = {
 +      .resume = ibmveth_resume
 +};
 +
 +static struct vio_driver ibmveth_driver = {
 +      .id_table       = ibmveth_device_table,
 +      .probe          = ibmveth_probe,
 +      .remove         = ibmveth_remove,
 +      .get_desired_dma = ibmveth_get_desired_dma,
 +      .driver         = {
 +              .name   = ibmveth_driver_name,
 +              .owner  = THIS_MODULE,
 +              .pm = &ibmveth_pm_ops,
 +      }
 +};
 +
 +static int __init ibmveth_module_init(void)
 +{
 +      printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
 +             ibmveth_driver_string, ibmveth_driver_version);
 +
 +      return vio_register_driver(&ibmveth_driver);
 +}
 +
 +static void __exit ibmveth_module_exit(void)
 +{
 +      vio_unregister_driver(&ibmveth_driver);
 +}
 +
 +module_init(ibmveth_module_init);
 +module_exit(ibmveth_module_exit);
index 49e82de,0000000..08439ca
mode 100644,000000..100644
--- /dev/null
@@@ -1,7757 -1,0 +1,7757 @@@
-               /* if this is a skb from previous receive DMA will be 0 */
-               if (rx_buffer_info->dma) {
 +/*******************************************************************************
 +
 +  Intel 10 Gigabit PCI Express Linux driver
 +  Copyright(c) 1999 - 2011 Intel Corporation.
 +
 +  This program is free software; you can redistribute it and/or modify it
 +  under the terms and conditions of the GNU General Public License,
 +  version 2, as published by the Free Software Foundation.
 +
 +  This program is distributed in the hope it will be useful, but WITHOUT
 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 +  more details.
 +
 +  You should have received a copy of the GNU General Public License along with
 +  this program; if not, write to the Free Software Foundation, Inc.,
 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 +
 +  The full GNU General Public License is included in this distribution in
 +  the file called "COPYING".
 +
 +  Contact Information:
 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 +
 +*******************************************************************************/
 +
 +#include <linux/types.h>
 +#include <linux/module.h>
 +#include <linux/pci.h>
 +#include <linux/netdevice.h>
 +#include <linux/vmalloc.h>
 +#include <linux/string.h>
 +#include <linux/in.h>
 +#include <linux/interrupt.h>
 +#include <linux/ip.h>
 +#include <linux/tcp.h>
 +#include <linux/sctp.h>
 +#include <linux/pkt_sched.h>
 +#include <linux/ipv6.h>
 +#include <linux/slab.h>
 +#include <net/checksum.h>
 +#include <net/ip6_checksum.h>
 +#include <linux/ethtool.h>
 +#include <linux/if.h>
 +#include <linux/if_vlan.h>
 +#include <linux/prefetch.h>
 +#include <scsi/fc/fc_fcoe.h>
 +
 +#include "ixgbe.h"
 +#include "ixgbe_common.h"
 +#include "ixgbe_dcb_82599.h"
 +#include "ixgbe_sriov.h"
 +
 +char ixgbe_driver_name[] = "ixgbe";
 +static const char ixgbe_driver_string[] =
 +                            "Intel(R) 10 Gigabit PCI Express Network Driver";
 +#define MAJ 3
 +#define MIN 4
 +#define BUILD 8
 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 +      __stringify(BUILD) "-k"
 +const char ixgbe_driver_version[] = DRV_VERSION;
 +static const char ixgbe_copyright[] =
 +                              "Copyright (c) 1999-2011 Intel Corporation.";
 +
 +static const struct ixgbe_info *ixgbe_info_tbl[] = {
 +      [board_82598] = &ixgbe_82598_info,
 +      [board_82599] = &ixgbe_82599_info,
 +      [board_X540] = &ixgbe_X540_info,
 +};
 +
 +/* ixgbe_pci_tbl - PCI Device ID Table
 + *
 + * Wildcard entries (PCI_ANY_ID) should come last
 + * Last entry must be all 0s
 + *
 + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 + *   Class, Class Mask, private data (not used) }
 + */
 +static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
 +      /* required last entry */
 +      {0, }
 +};
 +MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 +
 +#ifdef CONFIG_IXGBE_DCA
 +static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
 +                          void *p);
 +static struct notifier_block dca_notifier = {
 +      .notifier_call = ixgbe_notify_dca,
 +      .next          = NULL,
 +      .priority      = 0
 +};
 +#endif
 +
 +#ifdef CONFIG_PCI_IOV
 +static unsigned int max_vfs;
 +module_param(max_vfs, uint, 0);
 +MODULE_PARM_DESC(max_vfs,
 +               "Maximum number of virtual functions to allocate per physical function");
 +#endif /* CONFIG_PCI_IOV */
 +
 +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 +MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(DRV_VERSION);
 +
 +#define DEFAULT_DEBUG_LEVEL_SHIFT 3
 +
 +static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 gcr;
 +      u32 gpie;
 +      u32 vmdctl;
 +
 +#ifdef CONFIG_PCI_IOV
 +      /* disable iov and allow time for transactions to clear */
 +      pci_disable_sriov(adapter->pdev);
 +#endif
 +
 +      /* turn off device IOV mode */
 +      gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
 +      gcr &= ~(IXGBE_GCR_EXT_SRIOV);
 +      IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
 +      gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
 +      gpie &= ~IXGBE_GPIE_VTMODE_MASK;
 +      IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 +
 +      /* set default pool back to 0 */
 +      vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
 +      vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
 +      IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
 +      IXGBE_WRITE_FLUSH(hw);
 +
 +      /* take a breather then clean up driver data */
 +      msleep(100);
 +
 +      kfree(adapter->vfinfo);
 +      adapter->vfinfo = NULL;
 +
 +      adapter->num_vfs = 0;
 +      adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 +}
 +
 +static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
 +{
 +      if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
 +          !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
 +              schedule_work(&adapter->service_task);
 +}
 +
 +static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
 +{
 +      BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
 +
 +      /* flush memory to make sure state is correct before next watchdog */
 +      smp_mb__before_clear_bit();
 +      clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
 +}
 +
 +struct ixgbe_reg_info {
 +      u32 ofs;
 +      char *name;
 +};
 +
 +static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
 +
 +      /* General Registers */
 +      {IXGBE_CTRL, "CTRL"},
 +      {IXGBE_STATUS, "STATUS"},
 +      {IXGBE_CTRL_EXT, "CTRL_EXT"},
 +
 +      /* Interrupt Registers */
 +      {IXGBE_EICR, "EICR"},
 +
 +      /* RX Registers */
 +      {IXGBE_SRRCTL(0), "SRRCTL"},
 +      {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
 +      {IXGBE_RDLEN(0), "RDLEN"},
 +      {IXGBE_RDH(0), "RDH"},
 +      {IXGBE_RDT(0), "RDT"},
 +      {IXGBE_RXDCTL(0), "RXDCTL"},
 +      {IXGBE_RDBAL(0), "RDBAL"},
 +      {IXGBE_RDBAH(0), "RDBAH"},
 +
 +      /* TX Registers */
 +      {IXGBE_TDBAL(0), "TDBAL"},
 +      {IXGBE_TDBAH(0), "TDBAH"},
 +      {IXGBE_TDLEN(0), "TDLEN"},
 +      {IXGBE_TDH(0), "TDH"},
 +      {IXGBE_TDT(0), "TDT"},
 +      {IXGBE_TXDCTL(0), "TXDCTL"},
 +
 +      /* List Terminator */
 +      {}
 +};
 +
 +
 +/*
 + * ixgbe_regdump - register printout routine
 + */
 +static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
 +{
 +      int i = 0, j = 0;
 +      char rname[16];
 +      u32 regs[64];
 +
 +      switch (reginfo->ofs) {
 +      case IXGBE_SRRCTL(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
 +              break;
 +      case IXGBE_DCA_RXCTRL(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
 +              break;
 +      case IXGBE_RDLEN(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
 +              break;
 +      case IXGBE_RDH(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
 +              break;
 +      case IXGBE_RDT(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
 +              break;
 +      case IXGBE_RXDCTL(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
 +              break;
 +      case IXGBE_RDBAL(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
 +              break;
 +      case IXGBE_RDBAH(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
 +              break;
 +      case IXGBE_TDBAL(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
 +              break;
 +      case IXGBE_TDBAH(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
 +              break;
 +      case IXGBE_TDLEN(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
 +              break;
 +      case IXGBE_TDH(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
 +              break;
 +      case IXGBE_TDT(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
 +              break;
 +      case IXGBE_TXDCTL(0):
 +              for (i = 0; i < 64; i++)
 +                      regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
 +              break;
 +      default:
 +              pr_info("%-15s %08x\n", reginfo->name,
 +                      IXGBE_READ_REG(hw, reginfo->ofs));
 +              return;
 +      }
 +
 +      for (i = 0; i < 8; i++) {
 +              snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
 +              pr_err("%-15s", rname);
 +              for (j = 0; j < 8; j++)
 +                      pr_cont(" %08x", regs[i*8+j]);
 +              pr_cont("\n");
 +      }
 +
 +}
 +
 +/*
 + * ixgbe_dump - Print registers, tx-rings and rx-rings
 + */
 +static void ixgbe_dump(struct ixgbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct ixgbe_reg_info *reginfo;
 +      int n = 0;
 +      struct ixgbe_ring *tx_ring;
 +      struct ixgbe_tx_buffer *tx_buffer_info;
 +      union ixgbe_adv_tx_desc *tx_desc;
 +      struct my_u0 { u64 a; u64 b; } *u0;
 +      struct ixgbe_ring *rx_ring;
 +      union ixgbe_adv_rx_desc *rx_desc;
 +      struct ixgbe_rx_buffer *rx_buffer_info;
 +      u32 staterr;
 +      int i = 0;
 +
 +      if (!netif_msg_hw(adapter))
 +              return;
 +
 +      /* Print netdevice Info */
 +      if (netdev) {
 +              dev_info(&adapter->pdev->dev, "Net device Info\n");
 +              pr_info("Device Name     state            "
 +                      "trans_start      last_rx\n");
 +              pr_info("%-15s %016lX %016lX %016lX\n",
 +                      netdev->name,
 +                      netdev->state,
 +                      netdev->trans_start,
 +                      netdev->last_rx);
 +      }
 +
 +      /* Print Registers */
 +      dev_info(&adapter->pdev->dev, "Register Dump\n");
 +      pr_info(" Register Name   Value\n");
 +      for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
 +           reginfo->name; reginfo++) {
 +              ixgbe_regdump(hw, reginfo);
 +      }
 +
 +      /* Print TX Ring Summary */
 +      if (!netdev || !netif_running(netdev))
 +              goto exit;
 +
 +      dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
 +      pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
 +      for (n = 0; n < adapter->num_tx_queues; n++) {
 +              tx_ring = adapter->tx_ring[n];
 +              tx_buffer_info =
 +                      &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
 +              pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
 +                         n, tx_ring->next_to_use, tx_ring->next_to_clean,
 +                         (u64)tx_buffer_info->dma,
 +                         tx_buffer_info->length,
 +                         tx_buffer_info->next_to_watch,
 +                         (u64)tx_buffer_info->time_stamp);
 +      }
 +
 +      /* Print TX Rings */
 +      if (!netif_msg_tx_done(adapter))
 +              goto rx_ring_summary;
 +
 +      dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
 +
 +      /* Transmit Descriptor Formats
 +       *
 +       * Advanced Transmit Descriptor
 +       *   +--------------------------------------------------------------+
 +       * 0 |         Buffer Address [63:0]                                |
 +       *   +--------------------------------------------------------------+
 +       * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
 +       *   +--------------------------------------------------------------+
 +       *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
 +       */
 +
 +      for (n = 0; n < adapter->num_tx_queues; n++) {
 +              tx_ring = adapter->tx_ring[n];
 +              pr_info("------------------------------------\n");
 +              pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
 +              pr_info("------------------------------------\n");
 +              pr_info("T [desc]     [address 63:0  ] "
 +                      "[PlPOIdStDDt Ln] [bi->dma       ] "
 +                      "leng  ntw timestamp        bi->skb\n");
 +
 +              for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 +                      tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 +                      tx_buffer_info = &tx_ring->tx_buffer_info[i];
 +                      u0 = (struct my_u0 *)tx_desc;
 +                      pr_info("T [0x%03X]    %016llX %016llX %016llX"
 +                              " %04X  %p %016llX %p", i,
 +                              le64_to_cpu(u0->a),
 +                              le64_to_cpu(u0->b),
 +                              (u64)tx_buffer_info->dma,
 +                              tx_buffer_info->length,
 +                              tx_buffer_info->next_to_watch,
 +                              (u64)tx_buffer_info->time_stamp,
 +                              tx_buffer_info->skb);
 +                      if (i == tx_ring->next_to_use &&
 +                              i == tx_ring->next_to_clean)
 +                              pr_cont(" NTC/U\n");
 +                      else if (i == tx_ring->next_to_use)
 +                              pr_cont(" NTU\n");
 +                      else if (i == tx_ring->next_to_clean)
 +                              pr_cont(" NTC\n");
 +                      else
 +                              pr_cont("\n");
 +
 +                      if (netif_msg_pktdata(adapter) &&
 +                              tx_buffer_info->dma != 0)
 +                              print_hex_dump(KERN_INFO, "",
 +                                      DUMP_PREFIX_ADDRESS, 16, 1,
 +                                      phys_to_virt(tx_buffer_info->dma),
 +                                      tx_buffer_info->length, true);
 +              }
 +      }
 +
 +      /* Print RX Rings Summary */
 +rx_ring_summary:
 +      dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
 +      pr_info("Queue [NTU] [NTC]\n");
 +      for (n = 0; n < adapter->num_rx_queues; n++) {
 +              rx_ring = adapter->rx_ring[n];
 +              pr_info("%5d %5X %5X\n",
 +                      n, rx_ring->next_to_use, rx_ring->next_to_clean);
 +      }
 +
 +      /* Print RX Rings */
 +      if (!netif_msg_rx_status(adapter))
 +              goto exit;
 +
 +      dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
 +
 +      /* Advanced Receive Descriptor (Read) Format
 +       *    63                                           1        0
 +       *    +-----------------------------------------------------+
 +       *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
 +       *    +----------------------------------------------+------+
 +       *  8 |       Header Buffer Address [63:1]           |  DD  |
 +       *    +-----------------------------------------------------+
 +       *
 +       *
 +       * Advanced Receive Descriptor (Write-Back) Format
 +       *
 +       *   63       48 47    32 31  30      21 20 16 15   4 3     0
 +       *   +------------------------------------------------------+
 +       * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
 +       *   | Checksum   Ident  |   |           |    | Type | Type |
 +       *   +------------------------------------------------------+
 +       * 8 | VLAN Tag | Length | Extended Error | Extended Status |
 +       *   +------------------------------------------------------+
 +       *   63       48 47    32 31            20 19               0
 +       */
 +      for (n = 0; n < adapter->num_rx_queues; n++) {
 +              rx_ring = adapter->rx_ring[n];
 +              pr_info("------------------------------------\n");
 +              pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
 +              pr_info("------------------------------------\n");
 +              pr_info("R  [desc]      [ PktBuf     A0] "
 +                      "[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
 +                      "<-- Adv Rx Read format\n");
 +              pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
 +                      "[vl er S cks ln] ---------------- [bi->skb] "
 +                      "<-- Adv Rx Write-Back format\n");
 +
 +              for (i = 0; i < rx_ring->count; i++) {
 +                      rx_buffer_info = &rx_ring->rx_buffer_info[i];
 +                      rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 +                      u0 = (struct my_u0 *)rx_desc;
 +                      staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 +                      if (staterr & IXGBE_RXD_STAT_DD) {
 +                              /* Descriptor Done */
 +                              pr_info("RWB[0x%03X]     %016llX "
 +                                      "%016llX ---------------- %p", i,
 +                                      le64_to_cpu(u0->a),
 +                                      le64_to_cpu(u0->b),
 +                                      rx_buffer_info->skb);
 +                      } else {
 +                              pr_info("R  [0x%03X]     %016llX "
 +                                      "%016llX %016llX %p", i,
 +                                      le64_to_cpu(u0->a),
 +                                      le64_to_cpu(u0->b),
 +                                      (u64)rx_buffer_info->dma,
 +                                      rx_buffer_info->skb);
 +
 +                              if (netif_msg_pktdata(adapter)) {
 +                                      print_hex_dump(KERN_INFO, "",
 +                                         DUMP_PREFIX_ADDRESS, 16, 1,
 +                                         phys_to_virt(rx_buffer_info->dma),
 +                                         rx_ring->rx_buf_len, true);
 +
 +                                      if (rx_ring->rx_buf_len
 +                                              < IXGBE_RXBUFFER_2K)
 +                                              print_hex_dump(KERN_INFO, "",
 +                                                DUMP_PREFIX_ADDRESS, 16, 1,
 +                                                phys_to_virt(
 +                                                  rx_buffer_info->page_dma +
 +                                                  rx_buffer_info->page_offset
 +                                                ),
 +                                                PAGE_SIZE/2, true);
 +                              }
 +                      }
 +
 +                      if (i == rx_ring->next_to_use)
 +                              pr_cont(" NTU\n");
 +                      else if (i == rx_ring->next_to_clean)
 +                              pr_cont(" NTC\n");
 +                      else
 +                              pr_cont("\n");
 +
 +              }
 +      }
 +
 +exit:
 +      return;
 +}
 +
 +static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 +{
 +      u32 ctrl_ext;
 +
 +      /* Let firmware take over control of h/w */
 +      ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
 +      IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
 +                      ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
 +}
 +
 +static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
 +{
 +      u32 ctrl_ext;
 +
 +      /* Let firmware know the driver has taken over */
 +      ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
 +      IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
 +                      ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
 +}
 +
 +/*
 + * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 + * @adapter: pointer to adapter struct
 + * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 + * @queue: queue to map the corresponding interrupt to
 + * @msix_vector: the vector to map to the corresponding queue
 + *
 + */
 +static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
 +                         u8 queue, u8 msix_vector)
 +{
 +      u32 ivar, index;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              msix_vector |= IXGBE_IVAR_ALLOC_VAL;
 +              if (direction == -1)
 +                      direction = 0;
 +              index = (((direction * 64) + queue) >> 2) & 0x1F;
 +              ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
 +              ivar &= ~(0xFF << (8 * (queue & 0x3)));
 +              ivar |= (msix_vector << (8 * (queue & 0x3)));
 +              IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              if (direction == -1) {
 +                      /* other causes */
 +                      msix_vector |= IXGBE_IVAR_ALLOC_VAL;
 +                      index = ((queue & 1) * 8);
 +                      ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
 +                      ivar &= ~(0xFF << index);
 +                      ivar |= (msix_vector << index);
 +                      IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
 +                      break;
 +              } else {
 +                      /* tx or rx causes */
 +                      msix_vector |= IXGBE_IVAR_ALLOC_VAL;
 +                      index = ((16 * (queue & 1)) + (8 * direction));
 +                      ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
 +                      ivar &= ~(0xFF << index);
 +                      ivar |= (msix_vector << index);
 +                      IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
 +                      break;
 +              }
 +      default:
 +              break;
 +      }
 +}
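 +/* Example of the 82599/X540 mapping above: for an Rx cause (direction 0)
 + * on queue 5, index = 16 * (5 & 1) + 8 * 0 = 16, so the vector (with
 + * IXGBE_IVAR_ALLOC_VAL set) is written to bits 23:16 of IVAR(5 >> 1),
 + * i.e. IVAR(2). */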
 +
 +static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 +                                        u64 qmask)
 +{
 +      u32 mask;
 +
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
 +              mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              mask = (qmask & 0xFFFFFFFF);
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
 +              mask = (qmask >> 32);
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
 +                                         struct ixgbe_tx_buffer *tx_buffer)
 +{
 +      if (tx_buffer->dma) {
 +              if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
 +                      dma_unmap_page(ring->dev,
 +                                     tx_buffer->dma,
 +                                     tx_buffer->length,
 +                                     DMA_TO_DEVICE);
 +              else
 +                      dma_unmap_single(ring->dev,
 +                                       tx_buffer->dma,
 +                                       tx_buffer->length,
 +                                       DMA_TO_DEVICE);
 +      }
 +      tx_buffer->dma = 0;
 +}
 +
 +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
 +                                    struct ixgbe_tx_buffer *tx_buffer_info)
 +{
 +      ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
 +      if (tx_buffer_info->skb)
 +              dev_kfree_skb_any(tx_buffer_info->skb);
 +      tx_buffer_info->skb = NULL;
 +      /* tx_buffer_info must be completely set up in the transmit path */
 +}
 +
 +static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct ixgbe_hw_stats *hwstats = &adapter->stats;
 +      u32 data = 0;
 +      u32 xoff[8] = {0};
 +      int i;
 +
 +      if ((hw->fc.current_mode == ixgbe_fc_full) ||
 +          (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
 +              switch (hw->mac.type) {
 +              case ixgbe_mac_82598EB:
 +                      data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
 +                      break;
 +              default:
 +                      data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
 +              }
 +              hwstats->lxoffrxc += data;
 +
 +              /* refill credits (no tx hang) if we received xoff */
 +              if (!data)
 +                      return;
 +
 +              for (i = 0; i < adapter->num_tx_queues; i++)
 +                      clear_bit(__IXGBE_HANG_CHECK_ARMED,
 +                                &adapter->tx_ring[i]->state);
 +              return;
 +      } else if (!(adapter->dcb_cfg.pfc_mode_enable))
 +              return;
 +
 +      /* update stats for each tc, only valid with PFC enabled */
 +      for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
 +              switch (hw->mac.type) {
 +              case ixgbe_mac_82598EB:
 +                      xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
 +                      break;
 +              default:
 +                      xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
 +              }
 +              hwstats->pxoffrxc[i] += xoff[i];
 +      }
 +
 +      /* disarm tx queues that have received xoff frames */
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
 +              u8 tc = tx_ring->dcb_tc;
 +
 +              if (xoff[tc])
 +                      clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
 +      }
 +}
 +
 +static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
 +{
 +      return ring->tx_stats.completed;
 +}
 +
 +static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
 +      u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
 +
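  +      /* report how many descriptors the hardware still has to process,
  +       * accounting for wraparound of the ring
  +       */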
 +      if (head != tail)
 +              return (head < tail) ?
 +                      tail - head : (tail + ring->count - head);
 +
 +      return 0;
 +}
 +
 +static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
 +{
 +      u32 tx_done = ixgbe_get_tx_completed(tx_ring);
 +      u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
 +      u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
 +      bool ret = false;
 +
 +      clear_check_for_tx_hang(tx_ring);
 +
 +      /*
 +       * Check for a hung queue, but be thorough. This verifies
 +       * that a transmit has been completed since the previous
 +       * check AND there is at least one packet pending. The
 +       * ARMED bit is set to indicate a potential hang. The
 +       * bit is cleared if a pause frame is received to remove
 +       * false hang detection due to PFC or 802.3x frames. By
 +       * requiring this to fail twice we avoid races with
 +       * pfc clearing the ARMED bit and conditions where we
 +       * run the check_tx_hang logic with a transmit completion
 +       * pending but without time to complete it yet.
 +       */
 +      if ((tx_done_old == tx_done) && tx_pending) {
 +              /* make sure it is true for two checks in a row */
 +              ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
 +                                     &tx_ring->state);
 +      } else {
 +              /* update completed stats and continue */
 +              tx_ring->tx_stats.tx_done_old = tx_done;
 +              /* reset the countdown */
 +              clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
 +      }
 +
 +      return ret;
 +}
 +
 +/**
 + * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 + * @adapter: driver private struct
 + **/
 +static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
 +{
 +
 +      /* Do the reset outside of interrupt context */
 +      if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 +              adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
 +              ixgbe_service_event_schedule(adapter);
 +      }
 +}
 +
 +/**
 + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 + * @q_vector: structure containing interrupt and ring information
 + * @tx_ring: tx ring to clean
 + **/
 +static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 +                             struct ixgbe_ring *tx_ring)
 +{
 +      struct ixgbe_adapter *adapter = q_vector->adapter;
 +      struct ixgbe_tx_buffer *tx_buffer;
 +      union ixgbe_adv_tx_desc *tx_desc;
 +      unsigned int total_bytes = 0, total_packets = 0;
 +      unsigned int budget = q_vector->tx.work_limit;
 +      u16 i = tx_ring->next_to_clean;
 +
 +      tx_buffer = &tx_ring->tx_buffer_info[i];
 +      tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 +
 +      for (; budget; budget--) {
 +              union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
 +
 +              /* if next_to_watch is not set then there is no work pending */
 +              if (!eop_desc)
 +                      break;
 +
 +              /* if DD is not set pending work has not been completed */
 +              if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
 +                      break;
 +
 +              /* count the packet as being completed */
 +              tx_ring->tx_stats.completed++;
 +
 +              /* clear next_to_watch to prevent false hangs */
 +              tx_buffer->next_to_watch = NULL;
 +
 +              /* prevent any other reads prior to eop_desc being verified */
 +              rmb();
 +
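  +              /* walk every descriptor of this packet, unmapping its
  +               * buffers, until the EOP descriptor has been handled
  +               */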
 +              do {
 +                      ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
 +                      tx_desc->wb.status = 0;
 +                      if (likely(tx_desc == eop_desc)) {
 +                              eop_desc = NULL;
 +                              dev_kfree_skb_any(tx_buffer->skb);
 +                              tx_buffer->skb = NULL;
 +
 +                              total_bytes += tx_buffer->bytecount;
 +                              total_packets += tx_buffer->gso_segs;
 +                      }
 +
 +                      tx_buffer++;
 +                      tx_desc++;
 +                      i++;
 +                      if (unlikely(i == tx_ring->count)) {
 +                              i = 0;
 +
 +                              tx_buffer = tx_ring->tx_buffer_info;
 +                              tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
 +                      }
 +
 +              } while (eop_desc);
 +      }
 +
 +      tx_ring->next_to_clean = i;
 +      u64_stats_update_begin(&tx_ring->syncp);
 +      tx_ring->stats.bytes += total_bytes;
 +      tx_ring->stats.packets += total_packets;
 +      u64_stats_update_end(&tx_ring->syncp);
 +      q_vector->tx.total_bytes += total_bytes;
 +      q_vector->tx.total_packets += total_packets;
 +
 +      if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
  +              /* dump the hang state and stop the queue before resetting */
 +              struct ixgbe_hw *hw = &adapter->hw;
 +              tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 +              e_err(drv, "Detected Tx Unit Hang\n"
 +                      "  Tx Queue             <%d>\n"
 +                      "  TDH, TDT             <%x>, <%x>\n"
 +                      "  next_to_use          <%x>\n"
 +                      "  next_to_clean        <%x>\n"
 +                      "tx_buffer_info[next_to_clean]\n"
 +                      "  time_stamp           <%lx>\n"
 +                      "  jiffies              <%lx>\n",
 +                      tx_ring->queue_index,
 +                      IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
 +                      IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
 +                      tx_ring->next_to_use, i,
 +                      tx_ring->tx_buffer_info[i].time_stamp, jiffies);
 +
 +              netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 +
 +              e_info(probe,
 +                     "tx hang %d detected on queue %d, resetting adapter\n",
 +                      adapter->tx_timeout_count + 1, tx_ring->queue_index);
 +
 +              /* schedule immediate reset if we believe we hung */
 +              ixgbe_tx_timeout_reset(adapter);
 +
 +              /* the adapter is about to reset, no point in enabling stuff */
 +              return true;
 +      }
 +
 +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 +      if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
 +                   (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 +              /* Make sure that anybody stopping the queue after this
 +               * sees the new next_to_clean.
 +               */
 +              smp_mb();
 +              if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
 +                  !test_bit(__IXGBE_DOWN, &adapter->state)) {
 +                      netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
 +                      ++tx_ring->tx_stats.restart_queue;
 +              }
 +      }
 +
 +      return !!budget;
 +}
 +
 +#ifdef CONFIG_IXGBE_DCA
 +static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 +                              struct ixgbe_ring *rx_ring,
 +                              int cpu)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 rxctrl;
 +      u8 reg_idx = rx_ring->reg_idx;
 +
 +      rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
 +              rxctrl |= dca3_get_tag(rx_ring->dev, cpu);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
 +              rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) <<
 +                         IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
 +              break;
 +      default:
 +              break;
 +      }
 +      rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
 +      rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
 +      rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
 +      IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
 +}
 +
 +static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 +                              struct ixgbe_ring *tx_ring,
 +                              int cpu)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 txctrl;
 +      u8 reg_idx = tx_ring->reg_idx;
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
 +              txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
 +              txctrl |= dca3_get_tag(tx_ring->dev, cpu);
 +              txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 +              IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
 +              txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
 +              txctrl |= (dca3_get_tag(tx_ring->dev, cpu) <<
 +                         IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
 +              txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 +              IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
 +{
 +      struct ixgbe_adapter *adapter = q_vector->adapter;
 +      struct ixgbe_ring *ring;
 +      int cpu = get_cpu();
 +
 +      if (q_vector->cpu == cpu)
 +              goto out_no_update;
 +
 +      for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
 +              ixgbe_update_tx_dca(adapter, ring, cpu);
 +
 +      for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
 +              ixgbe_update_rx_dca(adapter, ring, cpu);
 +
 +      q_vector->cpu = cpu;
 +out_no_update:
 +      put_cpu();
 +}
 +
 +static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 +{
 +      int num_q_vectors;
 +      int i;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
 +              return;
 +
 +      /* always use CB2 mode, difference is masked in the CB driver */
 +      IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 +
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 +              num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +      else
 +              num_q_vectors = 1;
 +
 +      for (i = 0; i < num_q_vectors; i++) {
 +              adapter->q_vector[i]->cpu = -1;
 +              ixgbe_update_dca(adapter->q_vector[i]);
 +      }
 +}
 +
 +static int __ixgbe_notify_dca(struct device *dev, void *data)
 +{
 +      struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
 +      unsigned long event = *(unsigned long *)data;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
 +              return 0;
 +
 +      switch (event) {
 +      case DCA_PROVIDER_ADD:
 +              /* if we're already enabled, don't do it again */
 +              if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 +                      break;
 +              if (dca_add_requester(dev) == 0) {
 +                      adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
 +                      ixgbe_setup_dca(adapter);
 +                      break;
 +              }
 +              /* Fall Through since DCA is disabled. */
 +      case DCA_PROVIDER_REMOVE:
 +              if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 +                      dca_remove_requester(dev);
 +                      adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
 +                      IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
 +              }
 +              break;
 +      }
 +
 +      return 0;
 +}
 +#endif /* CONFIG_IXGBE_DCA */
 +
 +static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
 +                               struct sk_buff *skb)
 +{
 +      skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 +}
 +
 +/**
 + * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 + * @adapter: address of board private structure
 + * @rx_desc: advanced rx descriptor
 + *
 + * Returns : true if it is FCoE pkt
 + */
 +static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
 +                                  union ixgbe_adv_rx_desc *rx_desc)
 +{
 +      __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 +
 +      return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
 +             ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
 +              (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
 +                           IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
 +}
 +
 +/**
 + * ixgbe_receive_skb - Send a completed packet up the stack
  + * @q_vector: structure containing interrupt and ring information
  + * @skb: packet to send up
  + * @status: hardware indication of status of receive
  + * @ring: rx descriptor ring (for a specific queue) the packet arrived on
  + * @rx_desc: rx descriptor
 + **/
 +static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
 +                            struct sk_buff *skb, u8 status,
 +                            struct ixgbe_ring *ring,
 +                            union ixgbe_adv_rx_desc *rx_desc)
 +{
 +      struct ixgbe_adapter *adapter = q_vector->adapter;
 +      struct napi_struct *napi = &q_vector->napi;
 +      bool is_vlan = (status & IXGBE_RXD_STAT_VP);
 +      u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 +
 +      if (is_vlan && (tag & VLAN_VID_MASK))
 +              __vlan_hwaccel_put_tag(skb, tag);
 +
 +      if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
 +              napi_gro_receive(napi, skb);
 +      else
 +              netif_rx(skb);
 +}
 +
 +/**
 + * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 + * @adapter: address of board private structure
  + * @rx_desc: advanced rx descriptor
  + * @skb: skb currently being received and modified
  + * @status_err: status error value of last descriptor in packet
 + **/
 +static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 +                                   union ixgbe_adv_rx_desc *rx_desc,
 +                                   struct sk_buff *skb,
 +                                   u32 status_err)
 +{
 +      skb->ip_summed = CHECKSUM_NONE;
 +
 +      /* Rx csum disabled */
 +      if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
 +              return;
 +
 +      /* if IP and error */
 +      if ((status_err & IXGBE_RXD_STAT_IPCS) &&
 +          (status_err & IXGBE_RXDADV_ERR_IPE)) {
 +              adapter->hw_csum_rx_error++;
 +              return;
 +      }
 +
 +      if (!(status_err & IXGBE_RXD_STAT_L4CS))
 +              return;
 +
 +      if (status_err & IXGBE_RXDADV_ERR_TCPE) {
 +              u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 +
 +              /*
 +               * 82599 errata, UDP frames with a 0 checksum can be marked as
 +               * checksum errors.
 +               */
 +              if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
 +                  (adapter->hw.mac.type == ixgbe_mac_82599EB))
 +                      return;
 +
 +              adapter->hw_csum_rx_error++;
 +              return;
 +      }
 +
 +      /* It must be a TCP or UDP packet with a valid checksum */
 +      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +}
 +
 +static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 +{
 +      /*
 +       * Force memory writes to complete before letting h/w
 +       * know there are new descriptors to fetch.  (Only
 +       * applicable for weak-ordered memory model archs,
 +       * such as IA-64).
 +       */
 +      wmb();
 +      writel(val, rx_ring->tail);
 +}
 +
 +/**
 + * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 + * @rx_ring: ring to place buffers on
 + * @cleaned_count: number of buffers to replace
 + **/
 +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 +{
 +      union ixgbe_adv_rx_desc *rx_desc;
 +      struct ixgbe_rx_buffer *bi;
 +      struct sk_buff *skb;
 +      u16 i = rx_ring->next_to_use;
 +
 +      /* do nothing if no valid netdev defined */
 +      if (!rx_ring->netdev)
 +              return;
 +
 +      while (cleaned_count--) {
 +              rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 +              bi = &rx_ring->rx_buffer_info[i];
 +              skb = bi->skb;
 +
 +              if (!skb) {
 +                      skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
 +                                                      rx_ring->rx_buf_len);
 +                      if (!skb) {
 +                              rx_ring->rx_stats.alloc_rx_buff_failed++;
 +                              goto no_buffers;
 +                      }
 +                      /* initialize queue mapping */
 +                      skb_record_rx_queue(skb, rx_ring->queue_index);
 +                      bi->skb = skb;
 +              }
 +
 +              if (!bi->dma) {
 +                      bi->dma = dma_map_single(rx_ring->dev,
 +                                               skb->data,
 +                                               rx_ring->rx_buf_len,
 +                                               DMA_FROM_DEVICE);
 +                      if (dma_mapping_error(rx_ring->dev, bi->dma)) {
 +                              rx_ring->rx_stats.alloc_rx_buff_failed++;
 +                              bi->dma = 0;
 +                              goto no_buffers;
 +                      }
 +              }
 +
 +              if (ring_is_ps_enabled(rx_ring)) {
 +                      if (!bi->page) {
 +                              bi->page = netdev_alloc_page(rx_ring->netdev);
 +                              if (!bi->page) {
 +                                      rx_ring->rx_stats.alloc_rx_page_failed++;
 +                                      goto no_buffers;
 +                              }
 +                      }
 +
 +                      if (!bi->page_dma) {
 +                              /* use a half page if we're re-using */
 +                              bi->page_offset ^= PAGE_SIZE / 2;
 +                              bi->page_dma = dma_map_page(rx_ring->dev,
 +                                                          bi->page,
 +                                                          bi->page_offset,
 +                                                          PAGE_SIZE / 2,
 +                                                          DMA_FROM_DEVICE);
 +                              if (dma_mapping_error(rx_ring->dev,
 +                                                    bi->page_dma)) {
 +                                      rx_ring->rx_stats.alloc_rx_page_failed++;
 +                                      bi->page_dma = 0;
 +                                      goto no_buffers;
 +                              }
 +                      }
 +
 +                      /* Refresh the desc even if buffer_addrs didn't change
 +                       * because each write-back erases this info. */
 +                      rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
 +                      rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 +              } else {
 +                      rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 +                      rx_desc->read.hdr_addr = 0;
 +              }
 +
 +              i++;
 +              if (i == rx_ring->count)
 +                      i = 0;
 +      }
 +
 +no_buffers:
 +      if (rx_ring->next_to_use != i) {
 +              rx_ring->next_to_use = i;
 +              ixgbe_release_rx_desc(rx_ring, i);
 +      }
 +}
 +
 +static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
 +{
 +      /* HW will not DMA in data larger than the given buffer, even if it
 +       * parses the (NFS, of course) header to be larger.  In that case, it
 +       * fills the header buffer and spills the rest into the page.
 +       */
 +      u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
 +      u16 hlen = (hdr_info &  IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 +                  IXGBE_RXDADV_HDRBUFLEN_SHIFT;
 +      if (hlen > IXGBE_RX_HDR_SIZE)
 +              hlen = IXGBE_RX_HDR_SIZE;
 +      return hlen;
 +}
 +
 +/**
 + * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 + * @skb: pointer to the last skb in the rsc queue
 + *
 + * This function changes a queue full of hw rsc buffers into a completed
 + * packet.  It uses the ->prev pointers to find the first packet and then
 + * turns it into the frag list owner.
 + **/
 +static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
 +{
 +      unsigned int frag_list_size = 0;
 +      unsigned int skb_cnt = 1;
 +
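  +      /* walk back through ->prev to the first skb of the chain,
  +       * totalling the bytes carried by the later skbs
  +       */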
 +      while (skb->prev) {
 +              struct sk_buff *prev = skb->prev;
 +              frag_list_size += skb->len;
 +              skb->prev = NULL;
 +              skb = prev;
 +              skb_cnt++;
 +      }
 +
 +      skb_shinfo(skb)->frag_list = skb->next;
 +      skb->next = NULL;
 +      skb->len += frag_list_size;
 +      skb->data_len += frag_list_size;
 +      skb->truesize += frag_list_size;
 +      IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
 +
 +      return skb;
 +}
 +
 +static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
 +{
 +      return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
 +              IXGBE_RXDADV_RSCCNT_MASK);
 +}
 +
 +static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 +                             struct ixgbe_ring *rx_ring,
 +                             int budget)
 +{
 +      struct ixgbe_adapter *adapter = q_vector->adapter;
 +      union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
 +      struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
 +      struct sk_buff *skb;
 +      unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 +      const int current_node = numa_node_id();
 +#ifdef IXGBE_FCOE
 +      int ddp_bytes = 0;
 +#endif /* IXGBE_FCOE */
 +      u32 staterr;
 +      u16 i;
 +      u16 cleaned_count = 0;
 +      bool pkt_is_rsc = false;
 +
 +      i = rx_ring->next_to_clean;
 +      rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 +      staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 +
 +      while (staterr & IXGBE_RXD_STAT_DD) {
 +              u32 upper_len = 0;
 +
 +              rmb(); /* read descriptor and rx_buffer_info after status DD */
 +
 +              rx_buffer_info = &rx_ring->rx_buffer_info[i];
 +
 +              skb = rx_buffer_info->skb;
 +              rx_buffer_info->skb = NULL;
 +              prefetch(skb->data);
 +
 +              if (ring_is_rsc_enabled(rx_ring))
 +                      pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
 +
 ++              /* a linear skb means we have not yet pulled in the header from this buffer */
++              if (!skb_is_nonlinear(skb)) {
 +                      u16 hlen;
 +                      if (pkt_is_rsc &&
 +                          !(staterr & IXGBE_RXD_STAT_EOP) &&
 +                          !skb->prev) {
 +                              /*
 +                               * When HWRSC is enabled, delay unmapping
 +                               * of the first packet. It carries the
 +                               * header information, HW may still
 +                               * access the header after the writeback.
 +                               * Only unmap it when EOP is reached
 +                               */
 +                              IXGBE_RSC_CB(skb)->delay_unmap = true;
 +                              IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
 +                      } else {
 +                              dma_unmap_single(rx_ring->dev,
 +                                               rx_buffer_info->dma,
 +                                               rx_ring->rx_buf_len,
 +                                               DMA_FROM_DEVICE);
 +                      }
 +                      rx_buffer_info->dma = 0;
 +
 +                      if (ring_is_ps_enabled(rx_ring)) {
 +                              hlen = ixgbe_get_hlen(rx_desc);
 +                              upper_len = le16_to_cpu(rx_desc->wb.upper.length);
 +                      } else {
 +                              hlen = le16_to_cpu(rx_desc->wb.upper.length);
 +                      }
 +
 +                      skb_put(skb, hlen);
 +              } else {
 +                      /* assume packet split since header is unmapped */
 +                      upper_len = le16_to_cpu(rx_desc->wb.upper.length);
 +              }
 +
 +              if (upper_len) {
 +                      dma_unmap_page(rx_ring->dev,
 +                                     rx_buffer_info->page_dma,
 +                                     PAGE_SIZE / 2,
 +                                     DMA_FROM_DEVICE);
 +                      rx_buffer_info->page_dma = 0;
 +                      skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 +                                         rx_buffer_info->page,
 +                                         rx_buffer_info->page_offset,
 +                                         upper_len);
 +
 +                      if ((page_count(rx_buffer_info->page) == 1) &&
 +                          (page_to_nid(rx_buffer_info->page) == current_node))
 +                              get_page(rx_buffer_info->page);
 +                      else
 +                              rx_buffer_info->page = NULL;
 +
 +                      skb->len += upper_len;
 +                      skb->data_len += upper_len;
 +                      skb->truesize += upper_len;
 +              }
 +
 +              i++;
 +              if (i == rx_ring->count)
 +                      i = 0;
 +
 +              next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
 +              prefetch(next_rxd);
 +              cleaned_count++;
 +
 +              if (pkt_is_rsc) {
 +                      u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
 +                                   IXGBE_RXDADV_NEXTP_SHIFT;
 +                      next_buffer = &rx_ring->rx_buffer_info[nextp];
 +              } else {
 +                      next_buffer = &rx_ring->rx_buffer_info[i];
 +              }
 +
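  +              /* not the last descriptor of the packet: pass the
  +               * accumulating skb on to the next buffer (packet split)
  +               * or chain the skbs together (non packet split)
  +               */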
 +              if (!(staterr & IXGBE_RXD_STAT_EOP)) {
 +                      if (ring_is_ps_enabled(rx_ring)) {
 +                              rx_buffer_info->skb = next_buffer->skb;
 +                              rx_buffer_info->dma = next_buffer->dma;
 +                              next_buffer->skb = skb;
 +                              next_buffer->dma = 0;
 +                      } else {
 +                              skb->next = next_buffer->skb;
 +                              skb->next->prev = skb;
 +                      }
 +                      rx_ring->rx_stats.non_eop_descs++;
 +                      goto next_desc;
 +              }
 +
 +              if (skb->prev) {
 +                      skb = ixgbe_transform_rsc_queue(skb);
 +                      /* if we got here without RSC the packet is invalid */
 +                      if (!pkt_is_rsc) {
 +                              __pskb_trim(skb, 0);
 +                              rx_buffer_info->skb = skb;
 +                              goto next_desc;
 +                      }
 +              }
 +
 +              if (ring_is_rsc_enabled(rx_ring)) {
 +                      if (IXGBE_RSC_CB(skb)->delay_unmap) {
 +                              dma_unmap_single(rx_ring->dev,
 +                                               IXGBE_RSC_CB(skb)->dma,
 +                                               rx_ring->rx_buf_len,
 +                                               DMA_FROM_DEVICE);
 +                              IXGBE_RSC_CB(skb)->dma = 0;
 +                              IXGBE_RSC_CB(skb)->delay_unmap = false;
 +                      }
 +              }
 +              if (pkt_is_rsc) {
 +                      if (ring_is_ps_enabled(rx_ring))
 +                              rx_ring->rx_stats.rsc_count +=
 +                                      skb_shinfo(skb)->nr_frags;
 +                      else
 +                              rx_ring->rx_stats.rsc_count +=
 +                                      IXGBE_RSC_CB(skb)->skb_cnt;
 +                      rx_ring->rx_stats.rsc_flush++;
 +              }
 +
 +              /* ERR_MASK will only have valid bits if EOP set */
 +              if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
 +                      dev_kfree_skb_any(skb);
 +                      goto next_desc;
 +              }
 +
 +              ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
 +              if (adapter->netdev->features & NETIF_F_RXHASH)
 +                      ixgbe_rx_hash(rx_desc, skb);
 +
 +              /* probably a little skewed due to removing CRC */
 +              total_rx_bytes += skb->len;
 +              total_rx_packets++;
 +
 +              skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 +#ifdef IXGBE_FCOE
 +              /* if ddp, not passing to ULD unless for FCP_RSP or error */
 +              if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
 +                      ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
 +                                                 staterr);
 +                      if (!ddp_bytes) {
 +                              dev_kfree_skb_any(skb);
 +                              goto next_desc;
 +                      }
 +              }
 +#endif /* IXGBE_FCOE */
 +              ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 +
 +              budget--;
 +next_desc:
 +              rx_desc->wb.upper.status_error = 0;
 +
 +              if (!budget)
 +                      break;
 +
 +              /* return some buffers to hardware, one at a time is too slow */
 +              if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
 +                      ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 +                      cleaned_count = 0;
 +              }
 +
 +              /* use prefetched values */
 +              rx_desc = next_rxd;
 +              staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 +      }
 +
 +      rx_ring->next_to_clean = i;
 +      cleaned_count = ixgbe_desc_unused(rx_ring);
 +
 +      if (cleaned_count)
 +              ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 +
 +#ifdef IXGBE_FCOE
 +      /* include DDPed FCoE data */
 +      if (ddp_bytes > 0) {
 +              unsigned int mss;
 +
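  +              /* derive an approximate FC payload size from the MTU and
  +               * round it down to a multiple of 512 before estimating
  +               * how many frames the DDPed bytes represent
  +               */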
 +              mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
 +                      sizeof(struct fc_frame_header) -
 +                      sizeof(struct fcoe_crc_eof);
 +              if (mss > 512)
 +                      mss &= ~511;
 +              total_rx_bytes += ddp_bytes;
 +              total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
 +      }
 +#endif /* IXGBE_FCOE */
 +
 +      u64_stats_update_begin(&rx_ring->syncp);
 +      rx_ring->stats.packets += total_rx_packets;
 +      rx_ring->stats.bytes += total_rx_bytes;
 +      u64_stats_update_end(&rx_ring->syncp);
 +      q_vector->rx.total_packets += total_rx_packets;
 +      q_vector->rx.total_bytes += total_rx_bytes;
 +
 +      return !!budget;
 +}
 +
 +/**
 + * ixgbe_configure_msix - Configure MSI-X hardware
 + * @adapter: board private structure
 + *
 + * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 + * interrupts.
 + **/
 +static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_q_vector *q_vector;
 +      int q_vectors, v_idx;
 +      u32 mask;
 +
 +      q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +
 +      /* Populate MSIX to EITR Select */
 +      if (adapter->num_vfs > 32) {
 +              u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
 +      }
 +
 +      /*
 +       * Populate the IVAR table and set the ITR values to the
 +       * corresponding register.
 +       */
 +      for (v_idx = 0; v_idx < q_vectors; v_idx++) {
 +              struct ixgbe_ring *ring;
 +              q_vector = adapter->q_vector[v_idx];
 +
 +              for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
 +                      ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
 +
 +              for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
 +                      ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
 +
 +              if (q_vector->tx.ring && !q_vector->rx.ring)
 +                      /* tx only */
 +                      q_vector->eitr = adapter->tx_eitr_param;
 +              else if (q_vector->rx.ring)
 +                      /* rx or mixed */
 +                      q_vector->eitr = adapter->rx_eitr_param;
 +
 +              ixgbe_write_eitr(q_vector);
 +      }
 +
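  +      /* the one remaining vector (v_idx == q_vectors) is used for the
  +       * non-queue "other" interrupt causes
  +       */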
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
 +              ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
 +                             v_idx);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              ixgbe_set_ivar(adapter, -1, 1, v_idx);
 +              break;
 +
 +      default:
 +              break;
 +      }
 +      IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
 +
 +      /* set up to autoclear timer, and the vectors */
 +      mask = IXGBE_EIMS_ENABLE_MASK;
 +      if (adapter->num_vfs)
 +              mask &= ~(IXGBE_EIMS_OTHER |
 +                        IXGBE_EIMS_MAILBOX |
 +                        IXGBE_EIMS_LSC);
 +      else
 +              mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
 +      IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 +}
 +
 +enum latency_range {
 +      lowest_latency = 0,
 +      low_latency = 1,
 +      bulk_latency = 2,
 +      latency_invalid = 255
 +};
 +
 +/**
 + * ixgbe_update_itr - update the dynamic ITR value based on statistics
 + * @q_vector: structure containing interrupt and ring information
 + * @ring_container: structure containing ring performance data
 + *
 + *      Stores a new ITR value based on packets and byte
 + *      counts during the last interrupt.  The advantage of per interrupt
 + *      computation is faster updates and more accurate ITR for the current
 + *      traffic pattern.  Constants in this function were computed
 + *      based on theoretical maximum wire speed and thresholds were set based
 + *      on testing data as well as attempting to minimize response time
 + *      while increasing bulk throughput.
  + *      This functionality is controlled by the InterruptThrottleRate module
 + *      parameter (see ixgbe_param.c)
 + **/
 +static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
 +                           struct ixgbe_ring_container *ring_container)
 +{
 +      u64 bytes_perint;
 +      struct ixgbe_adapter *adapter = q_vector->adapter;
 +      int bytes = ring_container->total_bytes;
 +      int packets = ring_container->total_packets;
 +      u32 timepassed_us;
 +      u8 itr_setting = ring_container->itr;
 +
 +      if (packets == 0)
 +              return;
 +
 +      /* simple throttlerate management
 +       *    0-20MB/s lowest (100000 ints/s)
 +       *   20-100MB/s low   (20000 ints/s)
 +       *  100-1249MB/s bulk (8000 ints/s)
 +       */
 +      /* what was last interrupt timeslice? */
 +      timepassed_us = 1000000/q_vector->eitr;
 +      bytes_perint = bytes / timepassed_us; /* bytes/usec */
 +
 +      switch (itr_setting) {
 +      case lowest_latency:
 +              if (bytes_perint > adapter->eitr_low)
 +                      itr_setting = low_latency;
 +              break;
 +      case low_latency:
 +              if (bytes_perint > adapter->eitr_high)
 +                      itr_setting = bulk_latency;
 +              else if (bytes_perint <= adapter->eitr_low)
 +                      itr_setting = lowest_latency;
 +              break;
 +      case bulk_latency:
 +              if (bytes_perint <= adapter->eitr_high)
 +                      itr_setting = low_latency;
 +              break;
 +      }
 +
 +      /* clear work counters since we have the values we need */
 +      ring_container->total_bytes = 0;
 +      ring_container->total_packets = 0;
 +
 +      /* write updated itr to ring container */
 +      ring_container->itr = itr_setting;
 +}
 +
 +/**
 + * ixgbe_write_eitr - write EITR register in hardware specific way
 + * @q_vector: structure containing interrupt and ring information
 + *
 + * This function is made to be called by ethtool and by the driver
 + * when it needs to update EITR registers at runtime.  Hardware
 + * specific quirks/differences are taken care of here.
 + */
 +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 +{
 +      struct ixgbe_adapter *adapter = q_vector->adapter;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int v_idx = q_vector->v_idx;
 +      u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
 +
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
 +              /* must write high and low 16 bits to reset counter */
 +              itr_reg |= (itr_reg << 16);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              /*
 +               * 82599 and X540 can support a value of zero, so allow it for
 +               * max interrupt rate, but there is an errata where it can
 +               * not be zero with RSC
 +               */
 +              if (itr_reg == 8 &&
 +                  !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
 +                      itr_reg = 0;
 +
 +              /*
 +               * set the WDIS bit to not clear the timer bits and cause an
 +               * immediate assertion of the interrupt
 +               */
 +              itr_reg |= IXGBE_EITR_CNT_WDIS;
 +              break;
 +      default:
 +              break;
 +      }
 +      IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
 +}
 +
 +static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
 +{
 +      u32 new_itr = q_vector->eitr;
 +      u8 current_itr;
 +
 +      ixgbe_update_itr(q_vector, &q_vector->tx);
 +      ixgbe_update_itr(q_vector, &q_vector->rx);
 +
 +      current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
 +
 +      switch (current_itr) {
 +      /* counts and packets in update_itr are dependent on these numbers */
 +      case lowest_latency:
 +              new_itr = 100000;
 +              break;
 +      case low_latency:
 +              new_itr = 20000; /* aka hwitr = ~200 */
 +              break;
 +      case bulk_latency:
 +              new_itr = 8000;
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      if (new_itr != q_vector->eitr) {
 +              /* do an exponential smoothing */
 +              new_itr = ((q_vector->eitr * 9) + new_itr)/10;
 +
 +              /* save the algorithm value here */
 +              q_vector->eitr = new_itr;
 +
 +              ixgbe_write_eitr(q_vector);
 +      }
 +}
 +
 +/**
  + * ixgbe_check_overtemp_subtask - check for over temperature
 + * @adapter: pointer to adapter
 + **/
 +static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 eicr = adapter->interrupt_event;
 +
 +      if (test_bit(__IXGBE_DOWN, &adapter->state))
 +              return;
 +
 +      if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
 +          !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
 +              return;
 +
 +      adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
 +
 +      switch (hw->device_id) {
 +      case IXGBE_DEV_ID_82599_T3_LOM:
 +              /*
 +               * Since the warning interrupt is for both ports
 +               * we don't have to check if:
 +               *  - This interrupt wasn't for our port.
  +               *  - We may have missed the interrupt, so we always have to
  +               *    check if we got an LSC
 +               */
 +              if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
 +                  !(eicr & IXGBE_EICR_LSC))
 +                      return;
 +
 +              if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
 +                      u32 autoneg;
 +                      bool link_up = false;
 +
 +                      hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
 +
 +                      if (link_up)
 +                              return;
 +              }
 +
 +              /* Check if this is not due to overtemp */
 +              if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
 +                      return;
 +
 +              break;
 +      default:
 +              if (!(eicr & IXGBE_EICR_GPI_SDP0))
 +                      return;
 +              break;
 +      }
 +      e_crit(drv,
  +             "Network adapter has been stopped because it has overheated. "
 +             "Restart the computer. If the problem persists, "
 +             "power off the system and replace the adapter\n");
 +
 +      adapter->interrupt_event = 0;
 +}
 +
 +static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
 +          (eicr & IXGBE_EICR_GPI_SDP1)) {
 +              e_crit(probe, "Fan has stopped, replace the adapter\n");
 +              /* write to clear the interrupt */
 +              IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
 +      }
 +}
 +
 +static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      if (eicr & IXGBE_EICR_GPI_SDP2) {
 +              /* Clear the interrupt */
 +              IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
 +              if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 +                      adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
 +                      ixgbe_service_event_schedule(adapter);
 +              }
 +      }
 +
 +      if (eicr & IXGBE_EICR_GPI_SDP1) {
 +              /* Clear the interrupt */
 +              IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
 +              if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 +                      adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
 +                      ixgbe_service_event_schedule(adapter);
 +              }
 +      }
 +}
 +
 +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      adapter->lsc_int++;
 +      adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 +      adapter->link_check_timeout = jiffies;
 +      if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 +              IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
 +              IXGBE_WRITE_FLUSH(hw);
 +              ixgbe_service_event_schedule(adapter);
 +      }
 +}
 +
 +static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
 +                                         u64 qmask)
 +{
 +      u32 mask;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
 +              IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              mask = (qmask & 0xFFFFFFFF);
 +              if (mask)
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
 +              mask = (qmask >> 32);
 +              if (mask)
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
 +              break;
 +      default:
 +              break;
 +      }
 +      /* skip the flush */
 +}
 +
 +static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
 +                                          u64 qmask)
 +{
 +      u32 mask;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
 +              IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              mask = (qmask & 0xFFFFFFFF);
 +              if (mask)
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
 +              mask = (qmask >> 32);
 +              if (mask)
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
 +              break;
 +      default:
 +              break;
 +      }
 +      /* skip the flush */
 +}
 +
 +/**
 + * ixgbe_irq_enable - Enable default interrupt generation settings
 + * @adapter: board private structure
 + **/
 +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
 +                                  bool flush)
 +{
 +      u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
 +
 +      /* don't reenable LSC while waiting for link */
 +      if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
 +              mask &= ~IXGBE_EIMS_LSC;
 +
 +      if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
 +              mask |= IXGBE_EIMS_GPI_SDP0;
 +      if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
 +              mask |= IXGBE_EIMS_GPI_SDP1;
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              mask |= IXGBE_EIMS_ECC;
 +              mask |= IXGBE_EIMS_GPI_SDP1;
 +              mask |= IXGBE_EIMS_GPI_SDP2;
 +              mask |= IXGBE_EIMS_MAILBOX;
 +              break;
 +      default:
 +              break;
 +      }
 +      if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
 +          !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
 +              mask |= IXGBE_EIMS_FLOW_DIR;
 +
 +      IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 +      if (queues)
 +              ixgbe_irq_enable_queues(adapter, ~0);
 +      if (flush)
 +              IXGBE_WRITE_FLUSH(&adapter->hw);
 +}
 +
 +static irqreturn_t ixgbe_msix_other(int irq, void *data)
 +{
 +      struct ixgbe_adapter *adapter = data;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 eicr;
 +
 +      /*
 +       * Workaround for Silicon errata.  Use clear-by-write instead
 +       * of clear-by-read.  Reading with EICS will return the
  +       * interrupt causes without clearing, which will later be done
 +       * with the write to EICR.
 +       */
 +      eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
 +      IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
 +
 +      if (eicr & IXGBE_EICR_LSC)
 +              ixgbe_check_lsc(adapter);
 +
 +      if (eicr & IXGBE_EICR_MAILBOX)
 +              ixgbe_msg_task(adapter);
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              if (eicr & IXGBE_EICR_ECC)
 +                      e_info(link, "Received unrecoverable ECC Err, please "
 +                             "reboot\n");
 +              /* Handle Flow Director Full threshold interrupt */
 +              if (eicr & IXGBE_EICR_FLOW_DIR) {
 +                      int reinit_count = 0;
 +                      int i;
 +                      for (i = 0; i < adapter->num_tx_queues; i++) {
 +                              struct ixgbe_ring *ring = adapter->tx_ring[i];
 +                              if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
 +                                                     &ring->state))
 +                                      reinit_count++;
 +                      }
 +                      if (reinit_count) {
 +                              /* no more flow director interrupts until after init */
 +                              IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
 +                              adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
 +                              ixgbe_service_event_schedule(adapter);
 +                      }
 +              }
 +              ixgbe_check_sfp_event(adapter, eicr);
 +              if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
 +                  ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
 +                      if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 +                              adapter->interrupt_event = eicr;
 +                              adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
 +                              ixgbe_service_event_schedule(adapter);
 +                      }
 +              }
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      ixgbe_check_fan_failure(adapter, eicr);
 +
 +      /* re-enable the original interrupt state, no lsc, no queues */
 +      if (!test_bit(__IXGBE_DOWN, &adapter->state))
 +              ixgbe_irq_enable(adapter, false, false);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
 +{
 +      struct ixgbe_q_vector *q_vector = data;
 +
 +      /* EIAM disabled interrupts (on this vector) for us */
 +
 +      if (q_vector->rx.ring || q_vector->tx.ring)
 +              napi_schedule(&q_vector->napi);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 +                                   int r_idx)
 +{
 +      struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 +      struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
 +
 +      rx_ring->q_vector = q_vector;
 +      rx_ring->next = q_vector->rx.ring;
 +      q_vector->rx.ring = rx_ring;
 +      q_vector->rx.count++;
 +}
 +
 +static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
 +                                   int t_idx)
 +{
 +      struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 +      struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
 +
 +      tx_ring->q_vector = q_vector;
 +      tx_ring->next = q_vector->tx.ring;
 +      q_vector->tx.ring = tx_ring;
 +      q_vector->tx.count++;
 +      q_vector->tx.work_limit = a->tx_work_limit;
 +}
 +
 +/**
 + * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 + * @adapter: board private structure to initialize
 + *
 + * This function maps descriptor rings to the queue-specific vectors
 + * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 + * one vector per ring/queue, but on a constrained vector budget, we
 + * group the rings as "efficiently" as possible.  You would add new
 + * mapping configurations in here.
 + **/
 +static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
 +{
 +      int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +      int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
 +      int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
 +      int v_start = 0;
 +
 +      /* only one q_vector if MSI-X is disabled. */
 +      if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 +              q_vectors = 1;
 +
 +      /*
 +       * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
 +       * group them so there are multiple queues per vector.
 +       *
 +       * Re-adjusting *qpv takes care of the remainder.
 +       */
 +      for (; v_start < q_vectors && rxr_remaining; v_start++) {
 +              int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
 +              for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
 +                      map_vector_to_rxq(adapter, v_start, rxr_idx);
 +      }
 +
 +      /*
  +       * If there are not enough q_vectors for each ring to have its own
  +       * vector, then we must pair up Rx/Tx on each vector
 +       */
 +      if ((v_start + txr_remaining) > q_vectors)
 +              v_start = 0;
 +
 +      for (; v_start < q_vectors && txr_remaining; v_start++) {
 +              int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
 +              for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
 +                      map_vector_to_txq(adapter, v_start, txr_idx);
 +      }
 +}
 +
 +/**
 + * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 + * @adapter: board private structure
 + *
 + * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 + * interrupts from the kernel.
 + **/
 +static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +      int vector, err;
 +      int ri = 0, ti = 0;
 +
 +      for (vector = 0; vector < q_vectors; vector++) {
 +              struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
 +              struct msix_entry *entry = &adapter->msix_entries[vector];
 +
 +              if (q_vector->tx.ring && q_vector->rx.ring) {
 +                      snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 +                               "%s-%s-%d", netdev->name, "TxRx", ri++);
 +                      ti++;
 +              } else if (q_vector->rx.ring) {
 +                      snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 +                               "%s-%s-%d", netdev->name, "rx", ri++);
 +              } else if (q_vector->tx.ring) {
 +                      snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 +                               "%s-%s-%d", netdev->name, "tx", ti++);
 +              } else {
 +                      /* skip this unused q_vector */
 +                      continue;
 +              }
 +              err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
 +                                q_vector->name, q_vector);
 +              if (err) {
 +                      e_err(probe, "request_irq failed for MSIX interrupt "
 +                            "Error: %d\n", err);
 +                      goto free_queue_irqs;
 +              }
 +              /* If Flow Director is enabled, set interrupt affinity */
 +              if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 +                      /* assign the mask for this irq */
 +                      irq_set_affinity_hint(entry->vector,
 +                                            q_vector->affinity_mask);
 +              }
 +      }
 +
 +      err = request_irq(adapter->msix_entries[vector].vector,
 +                        ixgbe_msix_other, 0, netdev->name, adapter);
 +      if (err) {
 +              e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
 +              goto free_queue_irqs;
 +      }
 +
 +      return 0;
 +
 +free_queue_irqs:
 +      while (vector) {
 +              vector--;
 +              irq_set_affinity_hint(adapter->msix_entries[vector].vector,
 +                                    NULL);
 +              free_irq(adapter->msix_entries[vector].vector,
 +                       adapter->q_vector[vector]);
 +      }
 +      adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 +      pci_disable_msix(adapter->pdev);
 +      kfree(adapter->msix_entries);
 +      adapter->msix_entries = NULL;
 +      return err;
 +}
 +
 +/**
 + * ixgbe_intr - legacy mode Interrupt Handler
 + * @irq: interrupt number
 + * @data: pointer to a network interface device structure
 + **/
 +static irqreturn_t ixgbe_intr(int irq, void *data)
 +{
 +      struct ixgbe_adapter *adapter = data;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 +      u32 eicr;
 +
 +      /*
 +       * Workaround for silicon errata on 82598.  Mask the interrupts
 +       * before the read of EICR.
 +       */
 +      IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
 +
 +      /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
  +       * therefore no explicit interrupt disable is necessary */
 +      eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 +      if (!eicr) {
 +              /*
 +               * shared interrupt alert!
 +               * make sure interrupts are enabled because the read will
 +               * have disabled interrupts due to EIAM
 +               * finish the workaround of silicon errata on 82598.  Unmask
 +               * the interrupt that we masked before the EICR read.
 +               */
 +              if (!test_bit(__IXGBE_DOWN, &adapter->state))
 +                      ixgbe_irq_enable(adapter, true, true);
 +              return IRQ_NONE;        /* Not our interrupt */
 +      }
 +
 +      if (eicr & IXGBE_EICR_LSC)
 +              ixgbe_check_lsc(adapter);
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82599EB:
 +              ixgbe_check_sfp_event(adapter, eicr);
 +              if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
 +                  ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
 +                      if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 +                              adapter->interrupt_event = eicr;
 +                              adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
 +                              ixgbe_service_event_schedule(adapter);
 +                      }
 +              }
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      ixgbe_check_fan_failure(adapter, eicr);
 +
 +      if (napi_schedule_prep(&(q_vector->napi))) {
 +              /* would disable interrupts here but EIAM disabled it */
 +              __napi_schedule(&(q_vector->napi));
 +      }
 +
 +      /*
  +       * re-enable link (maybe) and non-queue interrupts, no flush.
 +       * ixgbe_poll will re-enable the queue interrupts
 +       */
 +
 +      if (!test_bit(__IXGBE_DOWN, &adapter->state))
 +              ixgbe_irq_enable(adapter, false, false);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
 +{
 +      int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +      int i;
 +
 +      /* legacy and MSI only use one vector */
 +      if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 +              q_vectors = 1;
 +
 +      for (i = 0; i < adapter->num_rx_queues; i++) {
 +              adapter->rx_ring[i]->q_vector = NULL;
 +              adapter->rx_ring[i]->next = NULL;
 +      }
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              adapter->tx_ring[i]->q_vector = NULL;
 +              adapter->tx_ring[i]->next = NULL;
 +      }
 +
 +      for (i = 0; i < q_vectors; i++) {
 +              struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
 +              memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
 +              memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
 +      }
 +}
 +
 +/**
 + * ixgbe_request_irq - initialize interrupts
 + * @adapter: board private structure
 + *
 + * Attempts to configure interrupts using the best available
 + * capabilities of the hardware and kernel.
 + **/
 +static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      int err;
 +
 +      /* map all of the rings to the q_vectors */
 +      ixgbe_map_rings_to_vectors(adapter);
 +
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 +              err = ixgbe_request_msix_irqs(adapter);
 +      else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
 +              err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
 +                                netdev->name, adapter);
 +      else
 +              err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
 +                                netdev->name, adapter);
 +
 +      if (err) {
 +              e_err(probe, "request_irq failed, Error %d\n", err);
 +
 +              /* place q_vectors and rings back into a known good state */
 +              ixgbe_reset_q_vectors(adapter);
 +      }
 +
 +      return err;
 +}
 +
 +static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 +{
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 +              int i, q_vectors;
 +
 +              q_vectors = adapter->num_msix_vectors;
 +              i = q_vectors - 1;
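  +              /*
  +               * the last entry is the non-queue vector, which was requested
  +               * with 'adapter' itself as the cookie
  +               */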
 +              free_irq(adapter->msix_entries[i].vector, adapter);
 +              i--;
 +
 +              for (; i >= 0; i--) {
 +                      /* free only the irqs that were actually requested */
 +                      if (!adapter->q_vector[i]->rx.ring &&
 +                          !adapter->q_vector[i]->tx.ring)
 +                              continue;
 +
 +                      /* clear the affinity_mask in the IRQ descriptor */
 +                      irq_set_affinity_hint(adapter->msix_entries[i].vector,
 +                                            NULL);
 +
 +                      free_irq(adapter->msix_entries[i].vector,
 +                               adapter->q_vector[i]);
 +              }
 +      } else {
 +              free_irq(adapter->pdev->irq, adapter);
 +      }
 +
 +      /* clear q_vector state information */
 +      ixgbe_reset_q_vectors(adapter);
 +}
 +
 +/**
 + * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 + * @adapter: board private structure
 + **/
 +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 +{
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
 +              break;
 +      default:
 +              break;
 +      }
 +      IXGBE_WRITE_FLUSH(&adapter->hw);
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 +              int i;
 +              for (i = 0; i < adapter->num_msix_vectors; i++)
 +                      synchronize_irq(adapter->msix_entries[i].vector);
 +      } else {
 +              synchronize_irq(adapter->pdev->irq);
 +      }
 +}
 +
 +/**
  + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
  + * @adapter: board private structure
 + **/
 +static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
 +                      EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
 +
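  +      /* point the IVAR entries for Rx/Tx queue 0 at vector 0 */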
 +      ixgbe_set_ivar(adapter, 0, 0, 0);
 +      ixgbe_set_ivar(adapter, 1, 0, 0);
 +
 +      e_info(hw, "Legacy interrupt IVAR setup done\n");
 +}
 +
 +/**
 + * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 + * @adapter: board private structure
 + * @ring: structure containing ring specific data
 + *
 + * Configure the Tx descriptor ring after a reset.
 + **/
 +void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 +                           struct ixgbe_ring *ring)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u64 tdba = ring->dma;
 +      int wait_loop = 10;
 +      u32 txdctl = IXGBE_TXDCTL_ENABLE;
 +      u8 reg_idx = ring->reg_idx;
 +
 +      /* disable queue to avoid issues while updating state */
 +      IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
 +      IXGBE_WRITE_FLUSH(hw);
 +
 +      IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
 +                      (tdba & DMA_BIT_MASK(32)));
 +      IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
 +      IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
 +                      ring->count * sizeof(union ixgbe_adv_tx_desc));
 +      IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
 +      IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
 +      ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
 +
 +      /*
 +       * set WTHRESH to encourage burst writeback, it should not be set
 +       * higher than 1 when ITR is 0 as it could cause false TX hangs
 +       *
 +       * In order to avoid issues WTHRESH + PTHRESH should always be equal
 +       * to or less than the number of on chip descriptors, which is
 +       * currently 40.
 +       */
 +      if (!adapter->tx_itr_setting || !adapter->rx_itr_setting)
 +              txdctl |= (1 << 16);    /* WTHRESH = 1 */
 +      else
 +              txdctl |= (8 << 16);    /* WTHRESH = 8 */
 +
 +      /* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. */
 +      txdctl |= (1 << 8) |    /* HTHRESH = 1 */
 +                 32;          /* PTHRESH = 32 */
 +
 +      /* reinitialize flowdirector state */
 +      if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
 +          adapter->atr_sample_rate) {
 +              ring->atr_sample_rate = adapter->atr_sample_rate;
 +              ring->atr_count = 0;
 +              set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
 +      } else {
 +              ring->atr_sample_rate = 0;
 +      }
 +
 +      clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
 +
 +      /* enable queue */
 +      IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
 +
 +      /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
 +      if (hw->mac.type == ixgbe_mac_82598EB &&
 +          !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
 +              return;
 +
 +      /* poll to verify queue is enabled */
 +      do {
 +              usleep_range(1000, 2000);
 +              txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
 +      } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
 +      if (!wait_loop)
 +              e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
 +}
 +
 +static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 rttdcs;
 +      u32 reg;
 +      u8 tcs = netdev_get_num_tc(adapter->netdev);
 +
 +      if (hw->mac.type == ixgbe_mac_82598EB)
 +              return;
 +
 +      /* disable the arbiter while setting MTQC */
 +      rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
 +      rttdcs |= IXGBE_RTTDCS_ARBDIS;
 +      IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 +
 +      /* set transmit pool layout */
 +      switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 +      case (IXGBE_FLAG_SRIOV_ENABLED):
 +              IXGBE_WRITE_REG(hw, IXGBE_MTQC,
 +                              (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
 +              break;
 +      default:
 +              if (!tcs)
 +                      reg = IXGBE_MTQC_64Q_1PB;
 +              else if (tcs <= 4)
 +                      reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
 +              else
 +                      reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
 +
 +              IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
 +
 +              /* Enable Security TX Buffer IFG for multiple pb */
 +              if (tcs) {
 +                      reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
 +                      reg |= IXGBE_SECTX_DCB;
 +                      IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
 +              }
 +              break;
 +      }
 +
 +      /* re-enable the arbiter */
 +      rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
 +      IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 +}
 +
 +/**
 + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 + * @adapter: board private structure
 + *
 + * Configure the Tx unit of the MAC after a reset.
 + **/
 +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 dmatxctl;
 +      u32 i;
 +
 +      ixgbe_setup_mtqc(adapter);
 +
 +      if (hw->mac.type != ixgbe_mac_82598EB) {
 +              /* DMATXCTL.EN must be before Tx queues are enabled */
 +              dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
 +              dmatxctl |= IXGBE_DMATXCTL_TE;
 +              IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
 +      }
 +
 +      /* Setup the HW Tx Head and Tail descriptor pointers */
 +      for (i = 0; i < adapter->num_tx_queues; i++)
 +              ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
 +}
 +
 +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
 +
 +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 +                                 struct ixgbe_ring *rx_ring)
 +{
 +      u32 srrctl;
 +      u8 reg_idx = rx_ring->reg_idx;
 +
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB: {
 +              struct ixgbe_ring_feature *feature = adapter->ring_feature;
 +              const int mask = feature[RING_F_RSS].mask;
 +              reg_idx = reg_idx & mask;
 +      }
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +      default:
 +              break;
 +      }
 +
 +      srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
 +
 +      srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
 +      srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 +      if (adapter->num_vfs)
 +              srrctl |= IXGBE_SRRCTL_DROP_EN;
 +
 +      srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 +                IXGBE_SRRCTL_BSIZEHDR_MASK;
 +
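  +      /*
  +       * BSIZEPKT is programmed in 1KB units: half a page per buffer in
  +       * packet-split mode, otherwise rx_buf_len rounded up to 1KB
  +       */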
 +      if (ring_is_ps_enabled(rx_ring)) {
 +#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
 +              srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 +#else
 +              srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 +#endif
 +              srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
 +      } else {
 +              srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
 +                        IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 +              srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 +      }
 +
 +      IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
 +}
 +
 +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
 +                        0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
 +                        0x6A3E67EA, 0x14364D17, 0x3BED200D};
 +      u32 mrqc = 0, reta = 0;
 +      u32 rxcsum;
 +      int i, j;
 +      u8 tcs = netdev_get_num_tc(adapter->netdev);
 +      int maxq = adapter->ring_feature[RING_F_RSS].indices;
 +
 +      if (tcs)
 +              maxq = min(maxq, adapter->num_tx_queues / tcs);
 +
 +      /* Fill out hash function seeds */
 +      for (i = 0; i < 10; i++)
 +              IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
 +
 +      /* Fill out redirection table */
 +      for (i = 0, j = 0; i < 128; i++, j++) {
 +              if (j == maxq)
 +                      j = 0;
 +              /* reta = 4-byte sliding window of
 +               * 0x00..(indices-1)(indices-1)00..etc. */
 +              reta = (reta << 8) | (j * 0x11);
 +              if ((i & 3) == 3)
 +                      IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
 +      }
 +
 +      /* Disable indicating checksum in descriptor, enables RSS hash */
 +      rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 +      rxcsum |= IXGBE_RXCSUM_PCSD;
 +      IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 +
 +      if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
 +          (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
 +              mrqc = IXGBE_MRQC_RSSEN;
 +      } else {
 +              int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
 +                                           | IXGBE_FLAG_SRIOV_ENABLED);
 +
 +              switch (mask) {
 +              case (IXGBE_FLAG_RSS_ENABLED):
 +                      if (!tcs)
 +                              mrqc = IXGBE_MRQC_RSSEN;
 +                      else if (tcs <= 4)
 +                              mrqc = IXGBE_MRQC_RTRSS4TCEN;
 +                      else
 +                              mrqc = IXGBE_MRQC_RTRSS8TCEN;
 +                      break;
 +              case (IXGBE_FLAG_SRIOV_ENABLED):
 +                      mrqc = IXGBE_MRQC_VMDQEN;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      }
 +
 +      /* Perform hash on these packet types */
 +      mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
 +            | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
 +            | IXGBE_MRQC_RSS_FIELD_IPV6
 +            | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
 +
 +      IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 +}
 +
 +/**
 + * ixgbe_configure_rscctl - enable RSC for the indicated ring
 + * @adapter:    address of board private structure
  + * @ring:       structure containing ring specific data
 + **/
 +static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 +                                 struct ixgbe_ring *ring)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 rscctrl;
 +      int rx_buf_len;
 +      u8 reg_idx = ring->reg_idx;
 +
 +      if (!ring_is_rsc_enabled(ring))
 +              return;
 +
 +      rx_buf_len = ring->rx_buf_len;
 +      rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
 +      rscctrl |= IXGBE_RSCCTL_RSCEN;
 +      /*
 +       * we must limit the number of descriptors so that the
 +       * total size of max desc * buf_len is not greater
 +       * than 65535
 +       */
 +      if (ring_is_ps_enabled(ring)) {
 +#if (MAX_SKB_FRAGS > 16)
 +              rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
 +#elif (MAX_SKB_FRAGS > 8)
 +              rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
 +#elif (MAX_SKB_FRAGS > 4)
 +              rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
 +#else
 +              rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
 +#endif
 +      } else {
 +              if (rx_buf_len < IXGBE_RXBUFFER_4K)
 +                      rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
 +              else if (rx_buf_len < IXGBE_RXBUFFER_8K)
 +                      rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
 +              else
 +                      rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
 +      }
 +      IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
 +}
 +
 +/**
 + *  ixgbe_set_uta - Set unicast filter table address
 + *  @adapter: board private structure
 + *
 + *  The unicast table address is a register array of 32-bit registers.
 + *  The table is meant to be used in a way similar to how the MTA is used
 + *  however due to certain limitations in the hardware it is necessary to
 + *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 + *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
 + **/
 +static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int i;
 +
 +      /* The UTA table only exists on 82599 hardware and newer */
 +      if (hw->mac.type < ixgbe_mac_82599EB)
 +              return;
 +
 +      /* we only need to do this if VMDq is enabled */
 +      if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 +              return;
 +
 +      for (i = 0; i < 128; i++)
 +              IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
 +}
 +
 +#define IXGBE_MAX_RX_DESC_POLL 10
 +static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 +                                     struct ixgbe_ring *ring)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int wait_loop = IXGBE_MAX_RX_DESC_POLL;
 +      u32 rxdctl;
 +      u8 reg_idx = ring->reg_idx;
 +
 +      /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
 +      if (hw->mac.type == ixgbe_mac_82598EB &&
 +          !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
 +              return;
 +
 +      do {
 +              usleep_range(1000, 2000);
 +              rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
 +      } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 +
 +      if (!wait_loop) {
 +              e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
 +                    "the polling period\n", reg_idx);
 +      }
 +}
 +
 +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
 +                          struct ixgbe_ring *ring)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int wait_loop = IXGBE_MAX_RX_DESC_POLL;
 +      u32 rxdctl;
 +      u8 reg_idx = ring->reg_idx;
 +
 +      rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
 +      rxdctl &= ~IXGBE_RXDCTL_ENABLE;
 +
 +      /* write value back with RXDCTL.ENABLE bit cleared */
 +      IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 +
 +      if (hw->mac.type == ixgbe_mac_82598EB &&
 +          !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
 +              return;
 +
 +      /* the hardware may take up to 100us to really disable the rx queue */
 +      do {
 +              udelay(10);
 +              rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
 +      } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
 +
 +      if (!wait_loop) {
 +              e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
 +                    "the polling period\n", reg_idx);
 +      }
 +}
 +
 +void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 +                           struct ixgbe_ring *ring)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u64 rdba = ring->dma;
 +      u32 rxdctl;
 +      u8 reg_idx = ring->reg_idx;
 +
 +      /* disable queue to avoid issues while updating state */
 +      rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
 +      ixgbe_disable_rx_queue(adapter, ring);
 +
 +      IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
 +      IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
 +      IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
 +                      ring->count * sizeof(union ixgbe_adv_rx_desc));
 +      IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
 +      IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
 +      ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
 +
 +      ixgbe_configure_srrctl(adapter, ring);
 +      ixgbe_configure_rscctl(adapter, ring);
 +
 +      /* If operating in IOV mode set RLPML for X540 */
 +      if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
 +          hw->mac.type == ixgbe_mac_X540) {
 +              rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
 +              rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
 +                          ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
 +      }
 +
 +      if (hw->mac.type == ixgbe_mac_82598EB) {
 +              /*
 +               * enable cache line friendly hardware writes:
 +               * PTHRESH=32 descriptors (half the internal cache),
 +               * this also removes ugly rx_no_buffer_count increment
 +               * HTHRESH=4 descriptors (to minimize latency on fetch)
 +               * WTHRESH=8 burst writeback up to two cache lines
 +               */
 +              rxdctl &= ~0x3FFFFF;
 +              rxdctl |=  0x080420;
 +      }
 +
 +      /* enable receive descriptor ring */
 +      rxdctl |= IXGBE_RXDCTL_ENABLE;
 +      IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 +
 +      ixgbe_rx_desc_queue_enable(adapter, ring);
 +      ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
 +}
 +
 +static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int p;
 +
 +      /* PSRTYPE must be initialized in non 82598 adapters */
 +      u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
 +                    IXGBE_PSRTYPE_UDPHDR |
 +                    IXGBE_PSRTYPE_IPV4HDR |
 +                    IXGBE_PSRTYPE_L2HDR |
 +                    IXGBE_PSRTYPE_IPV6HDR;
 +
 +      if (hw->mac.type == ixgbe_mac_82598EB)
 +              return;
 +
 +      if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
 +              psrtype |= (adapter->num_rx_queues_per_pool << 29);
 +
 +      for (p = 0; p < adapter->num_rx_pools; p++)
 +              IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
 +                              psrtype);
 +}
 +
 +static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 gcr_ext;
 +      u32 vt_reg_bits;
 +      u32 reg_offset, vf_shift;
 +      u32 vmdctl;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 +              return;
 +
 +      vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
 +      vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
 +      vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
 +      IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
 +
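  +      /*
  +       * the PF pool follows the VF pools, so its enable bit is bit
  +       * (num_vfs % 32) of VFRE/VFTE register 0 or 1
  +       */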
 +      vf_shift = adapter->num_vfs % 32;
 +      reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
 +
 +      /* Enable only the PF's pool for Tx/Rx */
 +      IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
 +      IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
 +      IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
 +      IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
 +      IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 +
 +      /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
 +      hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
 +
 +      /*
 +       * Set up VF register offsets for selected VT Mode,
 +       * i.e. 32 or 64 VFs for SR-IOV
 +       */
 +      gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
 +      gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
 +      gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
 +      IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 +
 +      /* enable Tx loopback for VF/PF communication */
 +      IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 +      /* Enable MAC Anti-Spoofing */
 +      hw->mac.ops.set_mac_anti_spoofing(hw,
 +                                        (adapter->antispoofing_enabled =
 +                                         (adapter->num_vfs != 0)),
 +                                        adapter->num_vfs);
 +}
 +
 +static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct net_device *netdev = adapter->netdev;
 +      int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 +      int rx_buf_len;
 +      struct ixgbe_ring *rx_ring;
 +      int i;
 +      u32 mhadd, hlreg0;
 +
 +      /* Decide whether to use packet split mode or not */
 +      /* On by default */
 +      adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 +
 +      /* Do not use packet split if we're in SR-IOV Mode */
 +      if (adapter->num_vfs)
 +              adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 +
 +      /* Disable packet split due to 82599 erratum #45 */
 +      if (hw->mac.type == ixgbe_mac_82599EB)
 +              adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 +
 +#ifdef IXGBE_FCOE
 +      /* adjust max frame to be able to do baby jumbo for FCoE */
 +      if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
 +          (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
 +              max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
 +
 +#endif /* IXGBE_FCOE */
 +      mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
 +      if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
 +              mhadd &= ~IXGBE_MHADD_MFS_MASK;
 +              mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
 +
 +              IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
 +      }
 +
 +      /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
 +      max_frame += VLAN_HLEN;
 +
 +      /* Set the RX buffer length according to the mode */
 +      if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
 +              rx_buf_len = IXGBE_RX_HDR_SIZE;
 +      } else {
 +              if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
 +                  (netdev->mtu <= ETH_DATA_LEN))
 +                      rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 +              /*
 +               * Make best use of allocation by using all but 1K of a
 +               * power of 2 allocation that will be used for skb->head.
 +               */
 +              else if (max_frame <= IXGBE_RXBUFFER_3K)
 +                      rx_buf_len = IXGBE_RXBUFFER_3K;
 +              else if (max_frame <= IXGBE_RXBUFFER_7K)
 +                      rx_buf_len = IXGBE_RXBUFFER_7K;
 +              else if (max_frame <= IXGBE_RXBUFFER_15K)
 +                      rx_buf_len = IXGBE_RXBUFFER_15K;
 +              else
 +                      rx_buf_len = IXGBE_MAX_RXBUFFER;
 +      }
 +
 +      hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 +      /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
 +      hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 +      IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 +
 +      /*
 +       * Setup the HW Rx Head and Tail Descriptor Pointers and
 +       * the Base and Length of the Rx Descriptor Ring
 +       */
 +      for (i = 0; i < adapter->num_rx_queues; i++) {
 +              rx_ring = adapter->rx_ring[i];
 +              rx_ring->rx_buf_len = rx_buf_len;
 +
 +              if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
 +                      set_ring_ps_enabled(rx_ring);
 +              else
 +                      clear_ring_ps_enabled(rx_ring);
 +
 +              if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
 +                      set_ring_rsc_enabled(rx_ring);
 +              else
 +                      clear_ring_rsc_enabled(rx_ring);
 +
 +#ifdef IXGBE_FCOE
 +              if (netdev->features & NETIF_F_FCOE_MTU) {
 +                      struct ixgbe_ring_feature *f;
 +                      f = &adapter->ring_feature[RING_F_FCOE];
 +                      if ((i >= f->mask) && (i < f->mask + f->indices)) {
 +                              clear_ring_ps_enabled(rx_ring);
 +                              if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
 +                                      rx_ring->rx_buf_len =
 +                                              IXGBE_FCOE_JUMBO_FRAME_SIZE;
 +                      } else if (!ring_is_rsc_enabled(rx_ring) &&
 +                                 !ring_is_ps_enabled(rx_ring)) {
 +                              rx_ring->rx_buf_len =
 +                                              IXGBE_FCOE_JUMBO_FRAME_SIZE;
 +                      }
 +              }
 +#endif /* IXGBE_FCOE */
 +      }
 +}
 +
 +static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              /*
 +               * For VMDq support of different descriptor types or
 +               * buffer sizes through the use of multiple SRRCTL
 +               * registers, RDRXCTL.MVMEN must be set to 1
 +               *
 +               * also, the manual doesn't mention it clearly but DCA hints
 +               * will only use queue 0's tags unless this bit is set.  Side
 +               * effects of setting this bit are only that SRRCTL must be
 +               * fully programmed [0..15]
 +               */
 +              rdrxctl |= IXGBE_RDRXCTL_MVMEN;
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              /* Disable RSC for ACK packets */
 +              IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
 +                 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
 +              rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
 +              /* hardware requires some bits to be set by default */
 +              rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
 +              rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
 +              break;
 +      default:
 +              /* We should do nothing since we don't know this hardware */
 +              return;
 +      }
 +
 +      IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
 +}
 +
 +/**
 + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 + * @adapter: board private structure
 + *
 + * Configure the Rx unit of the MAC after a reset.
 + **/
 +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int i;
 +      u32 rxctrl;
 +
 +      /* disable receives while setting up the descriptors */
 +      rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 +      IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 +
 +      ixgbe_setup_psrtype(adapter);
 +      ixgbe_setup_rdrxctl(adapter);
 +
 +      /* Program registers for the distribution of queues */
 +      ixgbe_setup_mrqc(adapter);
 +
 +      ixgbe_set_uta(adapter);
 +
 +      /* set_rx_buffer_len must be called before ring initialization */
 +      ixgbe_set_rx_buffer_len(adapter);
 +
 +      /*
 +       * Setup the HW Rx Head and Tail Descriptor Pointers and
 +       * the Base and Length of the Rx Descriptor Ring
 +       */
 +      for (i = 0; i < adapter->num_rx_queues; i++)
 +              ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
 +
 +      /* disable drop enable for 82598 parts */
 +      if (hw->mac.type == ixgbe_mac_82598EB)
 +              rxctrl |= IXGBE_RXCTRL_DMBYPS;
 +
 +      /* enable all receives */
 +      rxctrl |= IXGBE_RXCTRL_RXEN;
 +      hw->mac.ops.enable_rx_dma(hw, rxctrl);
 +}
 +
 +static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int pool_ndx = adapter->num_vfs;
 +
 +      /* add VID to filter table */
 +      hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
 +      set_bit(vid, adapter->active_vlans);
 +}
 +
 +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int pool_ndx = adapter->num_vfs;
 +
 +      /* remove VID from filter table */
 +      hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
 +      clear_bit(vid, adapter->active_vlans);
 +}
 +
 +/**
 + * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
 + * @adapter: driver data
 + */
 +static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 vlnctrl;
 +
 +      vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 +      vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
 +      IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 +}
 +
 +/**
 + * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
 + * @adapter: driver data
 + */
 +static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 vlnctrl;
 +
 +      vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 +      vlnctrl |= IXGBE_VLNCTRL_VFE;
 +      vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
 +      IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 +}
 +
 +/**
 + * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 + * @adapter: driver data
 + */
 +static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 vlnctrl;
 +      int i, j;
 +
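  +      /*
  +       * 82598 controls VLAN stripping globally via VLNCTRL, while
  +       * 82599 and X540 control it per ring via RXDCTL.VME
  +       */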
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 +              vlnctrl &= ~IXGBE_VLNCTRL_VME;
 +              IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              for (i = 0; i < adapter->num_rx_queues; i++) {
 +                      j = adapter->rx_ring[i]->reg_idx;
 +                      vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 +                      vlnctrl &= ~IXGBE_RXDCTL_VME;
 +                      IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
 +              }
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +/**
 + * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 + * @adapter: driver data
 + */
 +static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 vlnctrl;
 +      int i, j;
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 +              vlnctrl |= IXGBE_VLNCTRL_VME;
 +              IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              for (i = 0; i < adapter->num_rx_queues; i++) {
 +                      j = adapter->rx_ring[i]->reg_idx;
 +                      vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 +                      vlnctrl |= IXGBE_RXDCTL_VME;
 +                      IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
 +              }
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 +{
 +      u16 vid;
 +
 +      ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
 +
 +      for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
 +              ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
 +}
 +
 +/**
 + * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 + * @netdev: network interface device structure
 + *
 + * Writes unicast address list to the RAR table.
 + * Returns: -ENOMEM on failure/insufficient address space
 + *                0 on no addresses written
 + *                X on writing X addresses to the RAR table
 + **/
 +static int ixgbe_write_uc_addr_list(struct net_device *netdev)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      unsigned int vfn = adapter->num_vfs;
 +      unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
 +      int count = 0;
 +
 +      /* return ENOMEM indicating insufficient memory for addresses */
 +      if (netdev_uc_count(netdev) > rar_entries)
 +              return -ENOMEM;
 +
 +      if (!netdev_uc_empty(netdev) && rar_entries) {
 +              struct netdev_hw_addr *ha;
 +              /* return error if we do not support writing to RAR table */
 +              if (!hw->mac.ops.set_rar)
 +                      return -ENOMEM;
 +
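  +              /* fill RAR entries from the top down; leftovers are cleared below */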
 +              netdev_for_each_uc_addr(ha, netdev) {
 +                      if (!rar_entries)
 +                              break;
 +                      hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
 +                                          vfn, IXGBE_RAH_AV);
 +                      count++;
 +              }
 +      }
  +      /* clear the unused RAR entries, in reverse order to avoid write combining */
 +      for (; rar_entries > 0 ; rar_entries--)
 +              hw->mac.ops.clear_rar(hw, rar_entries);
 +
 +      return count;
 +}
 +
 +/**
 + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 + * @netdev: network interface device structure
 + *
  + * The set_rx_mode entry point is called whenever the unicast/multicast
 + * address list or the network interface flags are updated.  This routine is
 + * responsible for configuring the hardware for proper unicast, multicast and
 + * promiscuous mode.
 + **/
 +void ixgbe_set_rx_mode(struct net_device *netdev)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
 +      int count;
 +
 +      /* Check for Promiscuous and All Multicast modes */
 +
 +      fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 +
 +      /* set all bits that we expect to always be set */
 +      fctrl |= IXGBE_FCTRL_BAM;
 +      fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
 +      fctrl |= IXGBE_FCTRL_PMCF;
 +
 +      /* clear the bits we are changing the status of */
 +      fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 +
 +      if (netdev->flags & IFF_PROMISC) {
 +              hw->addr_ctrl.user_set_promisc = true;
 +              fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 +              vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
 +              /* don't hardware filter vlans in promisc mode */
 +              ixgbe_vlan_filter_disable(adapter);
 +      } else {
 +              if (netdev->flags & IFF_ALLMULTI) {
 +                      fctrl |= IXGBE_FCTRL_MPE;
 +                      vmolr |= IXGBE_VMOLR_MPE;
 +              } else {
 +                      /*
 +                       * Write addresses to the MTA, if the attempt fails
 +                       * then we should just turn on promiscuous mode so
 +                       * that we can at least receive multicast traffic
 +                       */
 +                      hw->mac.ops.update_mc_addr_list(hw, netdev);
 +                      vmolr |= IXGBE_VMOLR_ROMPE;
 +              }
 +              ixgbe_vlan_filter_enable(adapter);
 +              hw->addr_ctrl.user_set_promisc = false;
 +              /*
 +               * Write addresses to available RAR registers, if there is not
 +               * sufficient space to store all the addresses then enable
 +               * unicast promiscuous mode
 +               */
 +              count = ixgbe_write_uc_addr_list(netdev);
 +              if (count < 0) {
 +                      fctrl |= IXGBE_FCTRL_UPE;
 +                      vmolr |= IXGBE_VMOLR_ROPE;
 +              }
 +      }
 +
 +      if (adapter->num_vfs) {
 +              ixgbe_restore_vf_multicasts(adapter);
 +              vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
 +                       ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
 +                         IXGBE_VMOLR_ROPE);
 +              IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
 +      }
 +
 +      IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 +
 +      if (netdev->features & NETIF_F_HW_VLAN_RX)
 +              ixgbe_vlan_strip_enable(adapter);
 +      else
 +              ixgbe_vlan_strip_disable(adapter);
 +}
 +
 +static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 +{
 +      int q_idx;
 +      struct ixgbe_q_vector *q_vector;
 +      int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +
 +      /* legacy and MSI only use one vector */
 +      if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 +              q_vectors = 1;
 +
 +      for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 +              q_vector = adapter->q_vector[q_idx];
 +              napi_enable(&q_vector->napi);
 +      }
 +}
 +
 +static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 +{
 +      int q_idx;
 +      struct ixgbe_q_vector *q_vector;
 +      int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +
 +      /* legacy and MSI only use one vector */
 +      if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 +              q_vectors = 1;
 +
 +      for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 +              q_vector = adapter->q_vector[q_idx];
 +              napi_disable(&q_vector->napi);
 +      }
 +}
 +
 +#ifdef CONFIG_IXGBE_DCB
 +/*
 + * ixgbe_configure_dcb - Configure DCB hardware
 + * @adapter: ixgbe adapter struct
 + *
 + * This is called by the driver on open to configure the DCB hardware.
 + * This is also called by the gennetlink interface when reconfiguring
 + * the DCB state.
 + */
 +static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
 +              if (hw->mac.type == ixgbe_mac_82598EB)
 +                      netif_set_gso_max_size(adapter->netdev, 65536);
 +              return;
 +      }
 +
 +      if (hw->mac.type == ixgbe_mac_82598EB)
 +              netif_set_gso_max_size(adapter->netdev, 32768);
  +
 +      /* Enable VLAN tag insert/strip */
 +      adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
 +
 +      hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 +
 +      /* reconfigure the hardware */
 +      if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
 +#ifdef IXGBE_FCOE
 +              if (adapter->netdev->features & NETIF_F_FCOE_MTU)
 +                      max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 +#endif
 +              ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
 +                                              DCB_TX_CONFIG);
 +              ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
 +                                              DCB_RX_CONFIG);
 +              ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
 +      } else {
 +              struct net_device *dev = adapter->netdev;
 +
 +              if (adapter->ixgbe_ieee_ets)
 +                      dev->dcbnl_ops->ieee_setets(dev,
 +                                                  adapter->ixgbe_ieee_ets);
 +              if (adapter->ixgbe_ieee_pfc)
 +                      dev->dcbnl_ops->ieee_setpfc(dev,
 +                                                  adapter->ixgbe_ieee_pfc);
 +      }
 +
 +      /* Enable RSS Hash per TC */
 +      if (hw->mac.type != ixgbe_mac_82598EB) {
 +              int i;
 +              u32 reg = 0;
 +
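  +              /*
  +               * the per-TC field in RQTC is log2 of the queue count, so
  +               * derive floor(log2(count)) by shifting
  +               */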
 +              for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 +                      u8 msb = 0;
 +                      u8 cnt = adapter->netdev->tc_to_txq[i].count;
 +
 +                      while (cnt >>= 1)
 +                              msb++;
 +
 +                      reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
 +              }
 +              IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
 +      }
 +}
 +
 +#endif
 +
 +static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int hdrm;
 +      u8 tc = netdev_get_num_tc(adapter->netdev);
 +
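  +      /* reserve packet buffer headroom for the flow director filter tables */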
 +      if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
 +          adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
 +              hdrm = 32 << adapter->fdir_pballoc;
 +      else
 +              hdrm = 0;
 +
 +      hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
 +}
 +
 +static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct hlist_node *node, *node2;
 +      struct ixgbe_fdir_filter *filter;
 +
 +      spin_lock(&adapter->fdir_perfect_lock);
 +
 +      if (!hlist_empty(&adapter->fdir_filter_list))
 +              ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
 +
 +      hlist_for_each_entry_safe(filter, node, node2,
 +                                &adapter->fdir_filter_list, fdir_node) {
 +              ixgbe_fdir_write_perfect_filter_82599(hw,
 +                              &filter->filter,
 +                              filter->sw_idx,
 +                              (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
 +                              IXGBE_FDIR_DROP_QUEUE :
 +                              adapter->rx_ring[filter->action]->reg_idx);
 +      }
 +
 +      spin_unlock(&adapter->fdir_perfect_lock);
 +}
 +
 +static void ixgbe_configure(struct ixgbe_adapter *adapter)
 +{
 +      ixgbe_configure_pb(adapter);
 +#ifdef CONFIG_IXGBE_DCB
 +      ixgbe_configure_dcb(adapter);
 +#endif
 +
 +      ixgbe_set_rx_mode(adapter->netdev);
 +      ixgbe_restore_vlan(adapter);
 +
 +#ifdef IXGBE_FCOE
 +      if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 +              ixgbe_configure_fcoe(adapter);
 +
 +#endif /* IXGBE_FCOE */
 +      if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 +              ixgbe_init_fdir_signature_82599(&adapter->hw,
 +                                              adapter->fdir_pballoc);
 +      } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
 +              ixgbe_init_fdir_perfect_82599(&adapter->hw,
 +                                            adapter->fdir_pballoc);
 +              ixgbe_fdir_filter_restore(adapter);
 +      }
 +
 +      ixgbe_configure_virtualization(adapter);
 +
 +      ixgbe_configure_tx(adapter);
 +      ixgbe_configure_rx(adapter);
 +}
 +
 +static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
 +{
 +      switch (hw->phy.type) {
 +      case ixgbe_phy_sfp_avago:
 +      case ixgbe_phy_sfp_ftl:
 +      case ixgbe_phy_sfp_intel:
 +      case ixgbe_phy_sfp_unknown:
 +      case ixgbe_phy_sfp_passive_tyco:
 +      case ixgbe_phy_sfp_passive_unknown:
 +      case ixgbe_phy_sfp_active_unknown:
 +      case ixgbe_phy_sfp_ftl_active:
 +              return true;
 +      case ixgbe_phy_nl:
 +              if (hw->mac.type == ixgbe_mac_82598EB)
 +                      return true;
 +      default:
 +              return false;
 +      }
 +}
 +
 +/**
 + * ixgbe_sfp_link_config - set up SFP+ link
 + * @adapter: pointer to private adapter struct
 + **/
 +static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
 +{
 +      /*
  +       * We are assuming the worst case scenario here, and that
 +       * is that an SFP was inserted/removed after the reset
 +       * but before SFP detection was enabled.  As such the best
 +       * solution is to just start searching as soon as we start
 +       */
 +      if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 +              adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 +
 +      adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
 +}
 +
 +/**
 + * ixgbe_non_sfp_link_config - set up non-SFP+ link
 + * @hw: pointer to private hardware struct
 + *
 + * Returns 0 on success, negative on failure
 + **/
 +static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
 +{
 +      u32 autoneg;
 +      bool negotiation, link_up = false;
 +      u32 ret = IXGBE_ERR_LINK_SETUP;
 +
 +      if (hw->mac.ops.check_link)
 +              ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
 +
 +      if (ret)
 +              goto link_cfg_out;
 +
 +      autoneg = hw->phy.autoneg_advertised;
 +      if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
 +              ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
 +                                                      &negotiation);
 +      if (ret)
 +              goto link_cfg_out;
 +
 +      if (hw->mac.ops.setup_link)
 +              ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
 +link_cfg_out:
 +      return ret;
 +}
 +
 +static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 gpie = 0;
 +
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 +              gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
 +                     IXGBE_GPIE_OCD;
 +              gpie |= IXGBE_GPIE_EIAME;
 +              /*
 +               * use EIAM to auto-mask when MSI-X interrupt is asserted
 +               * this saves a register write for every interrupt
 +               */
 +              switch (hw->mac.type) {
 +              case ixgbe_mac_82598EB:
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
 +                      break;
 +              case ixgbe_mac_82599EB:
 +              case ixgbe_mac_X540:
 +              default:
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
 +                      break;
 +              }
 +      } else {
 +              /* legacy interrupts, use EIAM to auto-mask when reading EICR,
 +               * specifically only auto mask tx and rx interrupts */
 +              IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
 +      }
 +
 +      /* XXX: to interrupt immediately for EICS writes, enable this */
 +      /* gpie |= IXGBE_GPIE_EIMEN; */
 +
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 +              gpie &= ~IXGBE_GPIE_VTMODE_MASK;
 +              gpie |= IXGBE_GPIE_VTMODE_64;
 +      }
 +
  +      /* Enable the thermal overheat sensor interrupt */
 +      if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
 +              gpie |= IXGBE_SDP0_GPIEN;
 +
 +      /* Enable fan failure interrupt */
 +      if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
 +              gpie |= IXGBE_SDP1_GPIEN;
 +
 +      if (hw->mac.type == ixgbe_mac_82599EB) {
 +              gpie |= IXGBE_SDP1_GPIEN;
 +              gpie |= IXGBE_SDP2_GPIEN;
 +      }
 +
 +      IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 +}
 +
 +static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int err;
 +      u32 ctrl_ext;
 +
 +      ixgbe_get_hw_control(adapter);
 +      ixgbe_setup_gpie(adapter);
 +
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 +              ixgbe_configure_msix(adapter);
 +      else
 +              ixgbe_configure_msi_and_legacy(adapter);
 +
  +      /* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
 +      if (hw->mac.ops.enable_tx_laser &&
 +          ((hw->phy.multispeed_fiber) ||
 +           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
 +            (hw->mac.type == ixgbe_mac_82599EB))))
 +              hw->mac.ops.enable_tx_laser(hw);
 +
 +      clear_bit(__IXGBE_DOWN, &adapter->state);
 +      ixgbe_napi_enable_all(adapter);
 +
 +      if (ixgbe_is_sfp(hw)) {
 +              ixgbe_sfp_link_config(adapter);
 +      } else {
 +              err = ixgbe_non_sfp_link_config(hw);
 +              if (err)
 +                      e_err(probe, "link_config FAILED %d\n", err);
 +      }
 +
 +      /* clear any pending interrupts, may auto mask */
 +      IXGBE_READ_REG(hw, IXGBE_EICR);
 +      ixgbe_irq_enable(adapter, true, true);
 +
 +      /*
 +       * If this adapter has a fan, check to see if we had a failure
 +       * before we enabled the interrupt.
 +       */
 +      if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
 +              u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
 +              if (esdp & IXGBE_ESDP_SDP1)
 +                      e_crit(drv, "Fan has stopped, replace the adapter\n");
 +      }
 +
 +      /* enable transmits */
 +      netif_tx_start_all_queues(adapter->netdev);
 +
 +      /* bring the link up in the watchdog, this could race with our first
 +       * link up interrupt but shouldn't be a problem */
 +      adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 +      adapter->link_check_timeout = jiffies;
 +      mod_timer(&adapter->service_timer, jiffies);
 +
 +      /* Set PF Reset Done bit so PF/VF Mail Ops can work */
 +      ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
 +      ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
 +      IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
 +}
 +
 +void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
 +{
 +      WARN_ON(in_interrupt());
 +      /* put off any impending NetWatchDogTimeout */
 +      adapter->netdev->trans_start = jiffies;
 +
 +      while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 +              usleep_range(1000, 2000);
 +      ixgbe_down(adapter);
 +      /*
 +       * If SR-IOV enabled then wait a bit before bringing the adapter
 +       * back up to give the VFs time to respond to the reset.  The
 +       * two second wait is based upon the watchdog timer cycle in
 +       * the VF driver.
 +       */
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              msleep(2000);
 +      ixgbe_up(adapter);
 +      clear_bit(__IXGBE_RESETTING, &adapter->state);
 +}
 +
 +void ixgbe_up(struct ixgbe_adapter *adapter)
 +{
 +      /* hardware has been reset, we need to reload some things */
 +      ixgbe_configure(adapter);
 +
 +      ixgbe_up_complete(adapter);
 +}
 +
 +void ixgbe_reset(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int err;
 +
 +      /* lock SFP init bit to prevent race conditions with the watchdog */
 +      while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
 +              usleep_range(1000, 2000);
 +
 +      /* clear all SFP and link config related flags while holding SFP_INIT */
 +      adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
 +                           IXGBE_FLAG2_SFP_NEEDS_RESET);
 +      adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
 +
 +      err = hw->mac.ops.init_hw(hw);
 +      switch (err) {
 +      case 0:
 +      case IXGBE_ERR_SFP_NOT_PRESENT:
 +      case IXGBE_ERR_SFP_NOT_SUPPORTED:
 +              break;
 +      case IXGBE_ERR_MASTER_REQUESTS_PENDING:
 +              e_dev_err("master disable timed out\n");
 +              break;
 +      case IXGBE_ERR_EEPROM_VERSION:
 +              /* We are running on a pre-production device, log a warning */
 +              e_dev_warn("This device is a pre-production adapter/LOM. "
 +                         "Please be aware there may be issues associated with "
 +                         "your hardware.  If you are experiencing problems "
 +                         "please contact your Intel or hardware "
 +                         "representative who provided you with this "
 +                         "hardware.\n");
 +              break;
 +      default:
 +              e_dev_err("Hardware Error: %d\n", err);
 +      }
 +
 +      clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 +
 +      /* reprogram the RAR[0] in case user changed it. */
 +      hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
 +                          IXGBE_RAH_AV);
 +}
 +
 +/**
 + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 + * @rx_ring: ring to free buffers from
 + **/
 +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 +{
 +      struct device *dev = rx_ring->dev;
 +      unsigned long size;
 +      u16 i;
 +
 +      /* ring already cleared, nothing to do */
 +      if (!rx_ring->rx_buffer_info)
 +              return;
 +
 +      /* Free all the Rx ring sk_buffs */
 +      for (i = 0; i < rx_ring->count; i++) {
 +              struct ixgbe_rx_buffer *rx_buffer_info;
 +
 +              rx_buffer_info = &rx_ring->rx_buffer_info[i];
 +              if (rx_buffer_info->dma) {
 +                      dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
 +                                       rx_ring->rx_buf_len,
 +                                       DMA_FROM_DEVICE);
 +                      rx_buffer_info->dma = 0;
 +              }
 +              if (rx_buffer_info->skb) {
 +                      struct sk_buff *skb = rx_buffer_info->skb;
 +                      rx_buffer_info->skb = NULL;
 +                      do {
 +                              struct sk_buff *this = skb;
 +                              if (IXGBE_RSC_CB(this)->delay_unmap) {
 +                                      dma_unmap_single(dev,
 +                                                       IXGBE_RSC_CB(this)->dma,
 +                                                       rx_ring->rx_buf_len,
 +                                                       DMA_FROM_DEVICE);
 +                                      IXGBE_RSC_CB(this)->dma = 0;
 +                                      IXGBE_RSC_CB(skb)->delay_unmap = false;
 +                              }
 +                              skb = skb->prev;
 +                              dev_kfree_skb(this);
 +                      } while (skb);
 +              }
 +              if (!rx_buffer_info->page)
 +                      continue;
 +              if (rx_buffer_info->page_dma) {
 +                      dma_unmap_page(dev, rx_buffer_info->page_dma,
 +                                     PAGE_SIZE / 2, DMA_FROM_DEVICE);
 +                      rx_buffer_info->page_dma = 0;
 +              }
 +              put_page(rx_buffer_info->page);
 +              rx_buffer_info->page = NULL;
 +              rx_buffer_info->page_offset = 0;
 +      }
 +
 +      size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 +      memset(rx_ring->rx_buffer_info, 0, size);
 +
 +      /* Zero out the descriptor ring */
 +      memset(rx_ring->desc, 0, rx_ring->size);
 +
 +      rx_ring->next_to_clean = 0;
 +      rx_ring->next_to_use = 0;
 +}
 +
 +/**
 + * ixgbe_clean_tx_ring - Free Tx Buffers
 + * @tx_ring: ring to be cleaned
 + **/
 +static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 +{
 +      struct ixgbe_tx_buffer *tx_buffer_info;
 +      unsigned long size;
 +      u16 i;
 +
 +      /* ring already cleared, nothing to do */
 +      if (!tx_ring->tx_buffer_info)
 +              return;
 +
 +      /* Free all the Tx ring sk_buffs */
 +      for (i = 0; i < tx_ring->count; i++) {
 +              tx_buffer_info = &tx_ring->tx_buffer_info[i];
 +              ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 +      }
 +
 +      size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 +      memset(tx_ring->tx_buffer_info, 0, size);
 +
 +      /* Zero out the descriptor ring */
 +      memset(tx_ring->desc, 0, tx_ring->size);
 +
 +      tx_ring->next_to_use = 0;
 +      tx_ring->next_to_clean = 0;
 +}
 +
 +/**
 + * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 + * @adapter: board private structure
 + **/
 +static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
 +{
 +      int i;
 +
 +      for (i = 0; i < adapter->num_rx_queues; i++)
 +              ixgbe_clean_rx_ring(adapter->rx_ring[i]);
 +}
 +
 +/**
 + * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 + * @adapter: board private structure
 + **/
 +static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 +{
 +      int i;
 +
 +      for (i = 0; i < adapter->num_tx_queues; i++)
 +              ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 +}
 +
 +static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
 +{
 +      struct hlist_node *node, *node2;
 +      struct ixgbe_fdir_filter *filter;
 +
 +      spin_lock(&adapter->fdir_perfect_lock);
 +
 +      hlist_for_each_entry_safe(filter, node, node2,
 +                                &adapter->fdir_filter_list, fdir_node) {
 +              hlist_del(&filter->fdir_node);
 +              kfree(filter);
 +      }
 +      adapter->fdir_filter_count = 0;
 +
 +      spin_unlock(&adapter->fdir_perfect_lock);
 +}
 +
 +void ixgbe_down(struct ixgbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 rxctrl;
 +      int i;
 +
 +      /* signal that we are down to the interrupt handler */
 +      set_bit(__IXGBE_DOWN, &adapter->state);
 +
 +      /* disable receives */
 +      rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 +      IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 +
 +      /* disable all enabled rx queues */
 +      for (i = 0; i < adapter->num_rx_queues; i++)
 +              /* this call also flushes the previous write */
 +              ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
 +
 +      usleep_range(10000, 20000);
 +
 +      netif_tx_stop_all_queues(netdev);
 +
 +      /* call carrier off first to avoid false dev_watchdog timeouts */
 +      netif_carrier_off(netdev);
 +      netif_tx_disable(netdev);
 +
 +      ixgbe_irq_disable(adapter);
 +
 +      ixgbe_napi_disable_all(adapter);
 +
 +      adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
 +                           IXGBE_FLAG2_RESET_REQUESTED);
 +      adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
 +
 +      del_timer_sync(&adapter->service_timer);
 +
 +      if (adapter->num_vfs) {
 +              /* Clear EITR Select mapping */
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
 +
 +              /* Mark all the VFs as inactive */
 +              for (i = 0 ; i < adapter->num_vfs; i++)
 +                      adapter->vfinfo[i].clear_to_send = 0;
 +
 +              /* ping all the active vfs to let them know we are going down */
 +              ixgbe_ping_all_vfs(adapter);
 +
 +              /* Disable all VFTE/VFRE TX/RX */
 +              ixgbe_disable_tx_rx(adapter);
 +      }
 +
 +      /* disable transmits in the hardware now that interrupts are off */
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              u8 reg_idx = adapter->tx_ring[i]->reg_idx;
 +              IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
 +      }
 +
 +      /* Disable the Tx DMA engine on 82599 and X540 */
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
 +                              (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
 +                               ~IXGBE_DMATXCTL_TE));
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      if (!pci_channel_offline(adapter->pdev))
 +              ixgbe_reset(adapter);
 +
 +      /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
 +      if (hw->mac.ops.disable_tx_laser &&
 +          ((hw->phy.multispeed_fiber) ||
 +           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
 +            (hw->mac.type == ixgbe_mac_82599EB))))
 +              hw->mac.ops.disable_tx_laser(hw);
 +
 +      ixgbe_clean_all_tx_rings(adapter);
 +      ixgbe_clean_all_rx_rings(adapter);
 +
 +#ifdef CONFIG_IXGBE_DCA
 +      /* since we reset the hardware, DCA settings were cleared */
 +      ixgbe_setup_dca(adapter);
 +#endif
 +}
 +
 +/**
 + * ixgbe_poll - NAPI Rx polling callback
 + * @napi: structure for representing this polling device
 + * @budget: how many packets driver is allowed to clean
 + *
 + * This function is used for legacy and MSI, NAPI mode
 + **/
 +static int ixgbe_poll(struct napi_struct *napi, int budget)
 +{
 +      struct ixgbe_q_vector *q_vector =
 +                              container_of(napi, struct ixgbe_q_vector, napi);
 +      struct ixgbe_adapter *adapter = q_vector->adapter;
 +      struct ixgbe_ring *ring;
 +      int per_ring_budget;
 +      bool clean_complete = true;
 +
 +#ifdef CONFIG_IXGBE_DCA
 +      if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 +              ixgbe_update_dca(q_vector);
 +#endif
 +
 +      for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
 +              clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
 +
 +      /* attempt to distribute budget to each queue fairly, but don't allow
 +       * the budget to go below 1 because we'll exit polling */
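 +      /* illustrative example: with the default NAPI weight of 64 and three
 +       * Rx rings, each ring is given max(64/3, 1) = 21 packets of budget
 +       * per poll */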
 +      if (q_vector->rx.count > 1)
 +              per_ring_budget = max(budget/q_vector->rx.count, 1);
 +      else
 +              per_ring_budget = budget;
 +
 +      for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
 +              clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
 +                                                   per_ring_budget);
 +
 +      /* If all work not completed, return budget and keep polling */
 +      if (!clean_complete)
 +              return budget;
 +
 +      /* all work done, exit the polling mode */
 +      napi_complete(napi);
 +      if (adapter->rx_itr_setting & 1)
 +              ixgbe_set_itr(q_vector);
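 +      /* illustrative: the qmask passed below is (1 << v_idx), so e.g.
 +       * v_idx 5 re-arms only bit 5 of the queue interrupt enable mask */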
 +      if (!test_bit(__IXGBE_DOWN, &adapter->state))
 +              ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
 +
 +      return 0;
 +}
 +
 +/**
 + * ixgbe_tx_timeout - Respond to a Tx Hang
 + * @netdev: network interface device structure
 + **/
 +static void ixgbe_tx_timeout(struct net_device *netdev)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +
 +      /* Do the reset outside of interrupt context */
 +      ixgbe_tx_timeout_reset(adapter);
 +}
 +
 +/**
 + * ixgbe_set_rss_queues: Allocate queues for RSS
 + * @adapter: board private structure to initialize
 + *
 + * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 + *
 + **/
 +static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 +{
 +      bool ret = false;
 +      struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
 +
 +      if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 +              f->mask = 0xF;
 +              adapter->num_rx_queues = f->indices;
 +              adapter->num_tx_queues = f->indices;
 +              ret = true;
 +      } else {
 +              ret = false;
 +      }
 +
 +      return ret;
 +}
 +
 +/**
 + * ixgbe_set_fdir_queues: Allocate queues for Flow Director
 + * @adapter: board private structure to initialize
 + *
 + * Flow Director is an advanced Rx filter, attempting to get Rx flows back
 + * to the original CPU that initiated the Tx session.  This runs in addition
 + * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
 + * Rx load across CPUs using RSS.
 + *
 + **/
 +static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
 +{
 +      bool ret = false;
 +      struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
 +
 +      f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
 +      f_fdir->mask = 0;
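 +      /* illustrative: assuming the default of 64 FDIR indices, a 16-CPU
 +       * system ends up with min(16, 64) = 16 Tx/Rx queue pairs here */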
 +
 +      /* Flow Director must have RSS enabled */
 +      if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
 +          (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
 +              adapter->num_tx_queues = f_fdir->indices;
 +              adapter->num_rx_queues = f_fdir->indices;
 +              ret = true;
 +      } else {
 +              adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 +      }
 +      return ret;
 +}
 +
 +#ifdef IXGBE_FCOE
 +/**
 + * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
 + * @adapter: board private structure to initialize
 + *
 + * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
 + * The ring feature mask is not used as a mask for FCoE; since FCoE can take
 + * any 8 rx queues out of the maximum number of rx queues, the mask is instead
 + * used as the index of the first rx queue used by FCoE.
 + *
 + **/
 +static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
 +
 +      if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 +              return false;
 +
 +      f->indices = min((int)num_online_cpus(), f->indices);
 +
 +      adapter->num_rx_queues = 1;
 +      adapter->num_tx_queues = 1;
 +
 +      if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 +              e_info(probe, "FCoE enabled with RSS\n");
 +              if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
 +                      ixgbe_set_fdir_queues(adapter);
 +              else
 +                      ixgbe_set_rss_queues(adapter);
 +      }
 +
 +      /* adding FCoE rx rings to the end */
 +      f->mask = adapter->num_rx_queues;
 +      adapter->num_rx_queues += f->indices;
 +      adapter->num_tx_queues += f->indices;
 +
 +      return true;
 +}
 +#endif /* IXGBE_FCOE */
 +
 +/* Artificial max queue cap per traffic class in DCB mode */
 +#define DCB_QUEUE_CAP 8
 +
 +#ifdef CONFIG_IXGBE_DCB
 +static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 +{
 +      int per_tc_q, q, i, offset = 0;
 +      struct net_device *dev = adapter->netdev;
 +      int tcs = netdev_get_num_tc(dev);
 +
 +      if (!tcs)
 +              return false;
 +
 +      /* Map queue offset and counts onto allocated tx queues */
 +      per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
 +      q = min((int)num_online_cpus(), per_tc_q);
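 +      /* illustrative: 64 tx queues, 8 TCs and 16 CPUs give
 +       * per_tc_q = min(64/8, 8) = 8 and q = min(16, 8) = 8, so each
 +       * traffic class is mapped to 8 queue pairs */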
 +
 +      for (i = 0; i < tcs; i++) {
 +              netdev_set_prio_tc_map(dev, i, i);
 +              netdev_set_tc_queue(dev, i, q, offset);
 +              offset += q;
 +      }
 +
 +      adapter->num_tx_queues = q * tcs;
 +      adapter->num_rx_queues = q * tcs;
 +
 +#ifdef IXGBE_FCOE
 +      /* FCoE-enabled queues require special configuration indexed
 +       * by feature-specific indices and mask. Here we map FCoE
 +       * indices onto the DCB queue pairs, allowing FCoE to own
 +       * configuration later.
 +       */
 +      if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 +              int tc;
 +              struct ixgbe_ring_feature *f =
 +                                      &adapter->ring_feature[RING_F_FCOE];
 +
 +              tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
 +              f->indices = dev->tc_to_txq[tc].count;
 +              f->mask = dev->tc_to_txq[tc].offset;
 +      }
 +#endif
 +
 +      return true;
 +}
 +#endif
 +
 +/**
 + * ixgbe_set_sriov_queues: Allocate queues for IOV use
 + * @adapter: board private structure to initialize
 + *
 + * IOV doesn't actually use anything, so just NAK the
 + * request for now and let the other queue routines
 + * figure out what to do.
 + */
 +static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 +{
 +      return false;
 +}
 +
 +/**
 + * ixgbe_set_num_queues: Allocate queues for device, feature dependent
 + * @adapter: board private structure to initialize
 + *
 + * This is the top level queue allocation routine.  The order here is very
 + * important, starting with the largest set of features turned on at once,
 + * and ending with the smallest set of features.  This way large combinations
 + * can be allocated if they're turned on, and smaller combinations are the
 + * fallthrough conditions.
 + *
 + **/
 +static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 +{
 +      /* Start with base case */
 +      adapter->num_rx_queues = 1;
 +      adapter->num_tx_queues = 1;
 +      adapter->num_rx_pools = adapter->num_rx_queues;
 +      adapter->num_rx_queues_per_pool = 1;
 +
 +      if (ixgbe_set_sriov_queues(adapter))
 +              goto done;
 +
 +#ifdef CONFIG_IXGBE_DCB
 +      if (ixgbe_set_dcb_queues(adapter))
 +              goto done;
 +
 +#endif
 +#ifdef IXGBE_FCOE
 +      if (ixgbe_set_fcoe_queues(adapter))
 +              goto done;
 +
 +#endif /* IXGBE_FCOE */
 +      if (ixgbe_set_fdir_queues(adapter))
 +              goto done;
 +
 +      if (ixgbe_set_rss_queues(adapter))
 +              goto done;
 +
 +      /* fallback to base case */
 +      adapter->num_rx_queues = 1;
 +      adapter->num_tx_queues = 1;
 +
 +done:
 +      /* Notify the stack of the (possibly) reduced queue counts. */
 +      netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
 +      return netif_set_real_num_rx_queues(adapter->netdev,
 +                                          adapter->num_rx_queues);
 +}
 +
 +static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 +                                     int vectors)
 +{
 +      int err, vector_threshold;
 +
 +      /* We'll want at least 3 (vector_threshold):
 +       * 1) TxQ[0] Cleanup
 +       * 2) RxQ[0] Cleanup
 +       * 3) Other (Link Status Change, etc.)
 +       * 4) TCP Timer (optional)
 +       */
 +      vector_threshold = MIN_MSIX_COUNT;
 +
 +      /* The more we get, the more we will assign to Tx/Rx Cleanup
 +       * for the separate queues...where Rx Cleanup >= Tx Cleanup.
 +       * Right now, we simply care about how many we'll get; we'll
 +       * set them up later while requesting irq's.
 +       */
 +      while (vectors >= vector_threshold) {
 +              err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
 +                                    vectors);
 +              if (!err) /* Success in acquiring all requested vectors. */
 +                      break;
 +              else if (err < 0)
 +                      vectors = 0; /* Nasty failure, quit now */
 +              else /* err == number of vectors we should try again with */
 +                      vectors = err;
 +      }
 +
 +      if (vectors < vector_threshold) {
 +              /* Can't allocate enough MSI-X interrupts?  Oh well.
 +               * This just means we'll go with either a single MSI
 +               * vector or fall back to legacy interrupts.
 +               */
 +              netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
 +                           "Unable to allocate MSI-X interrupts\n");
 +              adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 +              kfree(adapter->msix_entries);
 +              adapter->msix_entries = NULL;
 +      } else {
 +              adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
 +              /*
 +               * Adjust for only the vectors we'll use, which is minimum
 +               * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
 +               * vectors we were allocated.
 +               */
 +              adapter->num_msix_vectors = min(vectors,
 +                                 adapter->max_msix_q_vectors + NON_Q_VECTORS);
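 +              /* illustrative: if 10 vectors were granted and
 +               * max_msix_q_vectors + NON_Q_VECTORS is 9, we keep 9 */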
 +      }
 +}
 +
 +/**
 + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 + * @adapter: board private structure to initialize
 + *
 + * Cache the descriptor ring offsets for RSS to the assigned rings.
 + *
 + **/
 +static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 +{
 +      int i;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
 +              return false;
 +
 +      for (i = 0; i < adapter->num_rx_queues; i++)
 +              adapter->rx_ring[i]->reg_idx = i;
 +      for (i = 0; i < adapter->num_tx_queues; i++)
 +              adapter->tx_ring[i]->reg_idx = i;
 +
 +      return true;
 +}
 +
 +#ifdef CONFIG_IXGBE_DCB
 +
 +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
 +static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
 +                                  unsigned int *tx, unsigned int *rx)
 +{
 +      struct net_device *dev = adapter->netdev;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u8 num_tcs = netdev_get_num_tc(dev);
 +
 +      *tx = 0;
 +      *rx = 0;
 +
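 +      /* illustrative: on 82598, tc 2 yields a tx base of 8 (2 << 2) and an
 +       * rx base of 16 (2 << 3); 82599/X540 use the larger strides below */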
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              *tx = tc << 2;
 +              *rx = tc << 3;
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              if (num_tcs > 4) {
 +                      if (tc < 3) {
 +                              *tx = tc << 5;
 +                              *rx = tc << 4;
 +                      } else if (tc <  5) {
 +                              *tx = ((tc + 2) << 4);
 +                              *rx = tc << 4;
 +                      } else if (tc < num_tcs) {
 +                              *tx = ((tc + 8) << 3);
 +                              *rx = tc << 4;
 +                      }
 +              } else {
 +                      *rx =  tc << 5;
 +                      switch (tc) {
 +                      case 0:
 +                              *tx =  0;
 +                              break;
 +                      case 1:
 +                              *tx = 64;
 +                              break;
 +                      case 2:
 +                              *tx = 96;
 +                              break;
 +                      case 3:
 +                              *tx = 112;
 +                              break;
 +                      default:
 +                              break;
 +                      }
 +              }
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +/**
 + * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 + * @adapter: board private structure to initialize
 + *
 + * Cache the descriptor ring offsets for DCB to the assigned rings.
 + *
 + **/
 +static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 +{
 +      struct net_device *dev = adapter->netdev;
 +      int i, j, k;
 +      u8 num_tcs = netdev_get_num_tc(dev);
 +
 +      if (!num_tcs)
 +              return false;
 +
 +      for (i = 0, k = 0; i < num_tcs; i++) {
 +              unsigned int tx_s, rx_s;
 +              u16 count = dev->tc_to_txq[i].count;
 +
 +              ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
 +              for (j = 0; j < count; j++, k++) {
 +                      adapter->tx_ring[k]->reg_idx = tx_s + j;
 +                      adapter->rx_ring[k]->reg_idx = rx_s + j;
 +                      adapter->tx_ring[k]->dcb_tc = i;
 +                      adapter->rx_ring[k]->dcb_tc = i;
 +              }
 +      }
 +
 +      return true;
 +}
 +#endif
 +
 +/**
 + * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
 + * @adapter: board private structure to initialize
 + *
 + * Cache the descriptor ring offsets for Flow Director to the assigned rings.
 + *
 + **/
 +static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
 +{
 +      int i;
 +      bool ret = false;
 +
 +      if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
 +          (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
 +              for (i = 0; i < adapter->num_rx_queues; i++)
 +                      adapter->rx_ring[i]->reg_idx = i;
 +              for (i = 0; i < adapter->num_tx_queues; i++)
 +                      adapter->tx_ring[i]->reg_idx = i;
 +              ret = true;
 +      }
 +
 +      return ret;
 +}
 +
 +#ifdef IXGBE_FCOE
 +/**
 + * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE
 + * @adapter: board private structure to initialize
 + *
 + * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
 + *
 + */
 +static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
 +      int i;
 +      u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 +              return false;
 +
 +      if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 +              if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
 +                      ixgbe_cache_ring_fdir(adapter);
 +              else
 +                      ixgbe_cache_ring_rss(adapter);
 +
 +              fcoe_rx_i = f->mask;
 +              fcoe_tx_i = f->mask;
 +      }
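 +      /* illustrative: if RSS owns rx queues 0..15 then f->mask is 16 and
 +       * the FCoE rings get register indices starting at 16 */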
 +      for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
 +              adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
 +              adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
 +      }
 +      return true;
 +}
 +
 +#endif /* IXGBE_FCOE */
 +/**
 + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 + * @adapter: board private structure to initialize
 + *
 + * SR-IOV doesn't use any descriptor rings but changes the default if
 + * no other mapping is used.
 + *
 + */
 +static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 +{
 +      adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
 +      adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
 +      if (adapter->num_vfs)
 +              return true;
 +      else
 +              return false;
 +}
 +
 +/**
 + * ixgbe_cache_ring_register - Descriptor ring to register mapping
 + * @adapter: board private structure to initialize
 + *
 + * Once we know the feature-set enabled for the device, we'll cache
 + * the register offset the descriptor ring is assigned to.
 + *
 + * Note, the order of the various feature calls is important.  It must start
 + * with the most features enabled at the same time, then trickle down to the
 + * least number of features turned on at once.
 + **/
 +static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 +{
 +      /* start with default case */
 +      adapter->rx_ring[0]->reg_idx = 0;
 +      adapter->tx_ring[0]->reg_idx = 0;
 +
 +      if (ixgbe_cache_ring_sriov(adapter))
 +              return;
 +
 +#ifdef CONFIG_IXGBE_DCB
 +      if (ixgbe_cache_ring_dcb(adapter))
 +              return;
 +#endif
 +
 +#ifdef IXGBE_FCOE
 +      if (ixgbe_cache_ring_fcoe(adapter))
 +              return;
 +#endif /* IXGBE_FCOE */
 +
 +      if (ixgbe_cache_ring_fdir(adapter))
 +              return;
 +
 +      if (ixgbe_cache_ring_rss(adapter))
 +              return;
 +}
 +
 +/**
 + * ixgbe_alloc_queues - Allocate memory for all rings
 + * @adapter: board private structure to initialize
 + *
 + * We allocate one ring per queue at run-time since we don't know the
 + * number of queues at compile-time.  The polling_netdev array is
 + * intended for Multiqueue, but should work fine with a single queue.
 + **/
 +static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 +{
 +      int rx = 0, tx = 0, nid = adapter->node;
 +
 +      if (nid < 0 || !node_online(nid))
 +              nid = first_online_node;
 +
 +      for (; tx < adapter->num_tx_queues; tx++) {
 +              struct ixgbe_ring *ring;
 +
 +              ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
 +              if (!ring)
 +                      ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 +              if (!ring)
 +                      goto err_allocation;
 +              ring->count = adapter->tx_ring_count;
 +              ring->queue_index = tx;
 +              ring->numa_node = nid;
 +              ring->dev = &adapter->pdev->dev;
 +              ring->netdev = adapter->netdev;
 +
 +              adapter->tx_ring[tx] = ring;
 +      }
 +
 +      for (; rx < adapter->num_rx_queues; rx++) {
 +              struct ixgbe_ring *ring;
 +
 +              ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
 +              if (!ring)
 +                      ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 +              if (!ring)
 +                      goto err_allocation;
 +              ring->count = adapter->rx_ring_count;
 +              ring->queue_index = rx;
 +              ring->numa_node = nid;
 +              ring->dev = &adapter->pdev->dev;
 +              ring->netdev = adapter->netdev;
 +
 +              adapter->rx_ring[rx] = ring;
 +      }
 +
 +      ixgbe_cache_ring_register(adapter);
 +
 +      return 0;
 +
 +err_allocation:
 +      while (tx)
 +              kfree(adapter->tx_ring[--tx]);
 +
 +      while (rx)
 +              kfree(adapter->rx_ring[--rx]);
 +      return -ENOMEM;
 +}
 +
 +/**
 + * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 + * @adapter: board private structure to initialize
 + *
 + * Attempt to configure the interrupts using the best available
 + * capabilities of the hardware and the kernel.
 + **/
 +static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int err = 0;
 +      int vector, v_budget;
 +
 +      /*
 +       * It's easy to be greedy for MSI-X vectors, but it really
 +       * doesn't do us much good if we have a lot more vectors
 +       * than CPUs.  So let's be conservative and only ask for
 +       * (roughly) the same number of vectors as there are CPUs.
 +       */
 +      v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
 +                     (int)num_online_cpus()) + NON_Q_VECTORS;
 +
 +      /*
 +       * At the same time, hardware can only support a maximum of
 +       * hw->mac.max_msix_vectors vectors.  With features
 +       * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
 +       * descriptor queues supported by our device.  Thus, we cap it off in
 +       * those rare cases where the CPU count also exceeds our vector limit.
 +       */
 +      v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
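 +      /* illustrative: 16 Rx + 16 Tx queues on an 8-CPU system with a single
 +       * non-queue vector requests min(32, 8) + 1 = 9 vectors, subject to
 +       * the hw->mac.max_msix_vectors cap above */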
 +
 +      /* A failure in MSI-X entry allocation isn't fatal, but it does
 +       * mean we disable MSI-X capabilities of the adapter. */
 +      adapter->msix_entries = kcalloc(v_budget,
 +                                      sizeof(struct msix_entry), GFP_KERNEL);
 +      if (adapter->msix_entries) {
 +              for (vector = 0; vector < v_budget; vector++)
 +                      adapter->msix_entries[vector].entry = vector;
 +
 +              ixgbe_acquire_msix_vectors(adapter, v_budget);
 +
 +              if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 +                      goto out;
 +      }
 +
 +      adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 +      adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 +      if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 +              e_err(probe,
 +                    "ATR is not supported while multiple "
 +                    "queues are disabled.  Disabling Flow Director\n");
 +      }
 +      adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 +      adapter->atr_sample_rate = 0;
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              ixgbe_disable_sriov(adapter);
 +
 +      err = ixgbe_set_num_queues(adapter);
 +      if (err)
 +              return err;
 +
 +      err = pci_enable_msi(adapter->pdev);
 +      if (!err) {
 +              adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
 +      } else {
 +              netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
 +                           "Unable to allocate MSI interrupt, "
 +                           "falling back to legacy.  Error: %d\n", err);
 +              /* reset err */
 +              err = 0;
 +      }
 +
 +out:
 +      return err;
 +}
 +
 +/**
 + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 + * @adapter: board private structure to initialize
 + *
 + * We allocate one q_vector per queue interrupt.  If allocation fails we
 + * return -ENOMEM.
 + **/
 +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 +{
 +      int v_idx, num_q_vectors;
 +      struct ixgbe_q_vector *q_vector;
 +
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 +              num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +      else
 +              num_q_vectors = 1;
 +
 +      for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
 +              q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
 +                                      GFP_KERNEL, adapter->node);
 +              if (!q_vector)
 +                      q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
 +                                         GFP_KERNEL);
 +              if (!q_vector)
 +                      goto err_out;
 +
 +              q_vector->adapter = adapter;
 +              q_vector->v_idx = v_idx;
 +
 +              /* Allocate the affinity_hint cpumask, configure the mask */
 +              if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
 +                      goto err_out;
 +              cpumask_set_cpu(v_idx, q_vector->affinity_mask);
 +
 +              if (q_vector->tx.count && !q_vector->rx.count)
 +                      q_vector->eitr = adapter->tx_eitr_param;
 +              else
 +                      q_vector->eitr = adapter->rx_eitr_param;
 +
 +              netif_napi_add(adapter->netdev, &q_vector->napi,
 +                             ixgbe_poll, 64);
 +              adapter->q_vector[v_idx] = q_vector;
 +      }
 +
 +      return 0;
 +
 +err_out:
 +      while (v_idx) {
 +              v_idx--;
 +              q_vector = adapter->q_vector[v_idx];
 +              netif_napi_del(&q_vector->napi);
 +              free_cpumask_var(q_vector->affinity_mask);
 +              kfree(q_vector);
 +              adapter->q_vector[v_idx] = NULL;
 +      }
 +      return -ENOMEM;
 +}
 +
 +/**
 + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 + * @adapter: board private structure to initialize
 + *
 + * This function frees the memory allocated to the q_vectors.  In addition if
 + * NAPI is enabled it will delete any references to the NAPI struct prior
 + * to freeing the q_vector.
 + **/
 +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
 +{
 +      int v_idx, num_q_vectors;
 +
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 +              num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +      else
 +              num_q_vectors = 1;
 +
 +      for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
 +              struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
 +              adapter->q_vector[v_idx] = NULL;
 +              netif_napi_del(&q_vector->napi);
 +              free_cpumask_var(q_vector->affinity_mask);
 +              kfree(q_vector);
 +      }
 +}
 +
 +static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
 +{
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 +              adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 +              pci_disable_msix(adapter->pdev);
 +              kfree(adapter->msix_entries);
 +              adapter->msix_entries = NULL;
 +      } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 +              adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
 +              pci_disable_msi(adapter->pdev);
 +      }
 +}
 +
 +/**
 + * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 + * @adapter: board private structure to initialize
 + *
 + * We determine which interrupt scheme to use based on...
 + * - Kernel support (MSI, MSI-X)
 + *   - which can be user-defined (via MODULE_PARAM)
 + * - Hardware queue count (num_*_queues)
 + *   - defined by miscellaneous hardware support/features (RSS, etc.)
 + **/
 +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 +{
 +      int err;
 +
 +      /* Number of supported queues */
 +      err = ixgbe_set_num_queues(adapter);
 +      if (err)
 +              return err;
 +
 +      err = ixgbe_set_interrupt_capability(adapter);
 +      if (err) {
 +              e_dev_err("Unable to setup interrupt capabilities\n");
 +              goto err_set_interrupt;
 +      }
 +
 +      err = ixgbe_alloc_q_vectors(adapter);
 +      if (err) {
 +              e_dev_err("Unable to allocate memory for queue vectors\n");
 +              goto err_alloc_q_vectors;
 +      }
 +
 +      err = ixgbe_alloc_queues(adapter);
 +      if (err) {
 +              e_dev_err("Unable to allocate memory for queues\n");
 +              goto err_alloc_queues;
 +      }
 +
 +      e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
 +                 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
 +                 adapter->num_rx_queues, adapter->num_tx_queues);
 +
 +      set_bit(__IXGBE_DOWN, &adapter->state);
 +
 +      return 0;
 +
 +err_alloc_queues:
 +      ixgbe_free_q_vectors(adapter);
 +err_alloc_q_vectors:
 +      ixgbe_reset_interrupt_capability(adapter);
 +err_set_interrupt:
 +      return err;
 +}
 +
 +/**
 + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 + * @adapter: board private structure to clear interrupt scheme on
 + *
 + * We go through and clear interrupt-specific resources and reset the structure
 + * to pre-load conditions.
 + **/
 +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
 +{
 +      int i;
 +
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              kfree(adapter->tx_ring[i]);
 +              adapter->tx_ring[i] = NULL;
 +      }
 +      for (i = 0; i < adapter->num_rx_queues; i++) {
 +              struct ixgbe_ring *ring = adapter->rx_ring[i];
 +
 +              /* ixgbe_get_stats64() might access this ring, we must wait
 +               * a grace period before freeing it.
 +               */
 +              kfree_rcu(ring, rcu);
 +              adapter->rx_ring[i] = NULL;
 +      }
 +
 +      adapter->num_tx_queues = 0;
 +      adapter->num_rx_queues = 0;
 +
 +      ixgbe_free_q_vectors(adapter);
 +      ixgbe_reset_interrupt_capability(adapter);
 +}
 +
 +/**
 + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 + * @adapter: board private structure to initialize
 + *
 + * ixgbe_sw_init initializes the Adapter private data structure.
 + * Fields are initialized based on PCI device information and
 + * OS network device settings (MTU size).
 + **/
 +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct pci_dev *pdev = adapter->pdev;
 +      struct net_device *dev = adapter->netdev;
 +      unsigned int rss;
 +#ifdef CONFIG_IXGBE_DCB
 +      int j;
 +      struct tc_configuration *tc;
 +#endif
 +      int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 +
 +      /* PCI config space info */
 +
 +      hw->vendor_id = pdev->vendor;
 +      hw->device_id = pdev->device;
 +      hw->revision_id = pdev->revision;
 +      hw->subsystem_vendor_id = pdev->subsystem_vendor;
 +      hw->subsystem_device_id = pdev->subsystem_device;
 +
 +      /* Set capability flags */
 +      rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
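 +      /* illustrative: a 4-CPU system requests 4 RSS queue pairs here,
 +       * subject to the IXGBE_MAX_RSS_INDICES cap */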
 +      adapter->ring_feature[RING_F_RSS].indices = rss;
 +      adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              if (hw->device_id == IXGBE_DEV_ID_82598AT)
 +                      adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
 +              adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
 +              adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
 +              adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 +              if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
 +                      adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 +              /* Flow Director hash filters enabled */
 +              adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 +              adapter->atr_sample_rate = 20;
 +              adapter->ring_feature[RING_F_FDIR].indices =
 +                                                       IXGBE_MAX_FDIR_INDICES;
 +              adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
 +#ifdef IXGBE_FCOE
 +              adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
 +              adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
 +              adapter->ring_feature[RING_F_FCOE].indices = 0;
 +#ifdef CONFIG_IXGBE_DCB
 +              /* Default traffic class to use for FCoE */
 +              adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 +#endif
 +#endif /* IXGBE_FCOE */
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      /* n-tuple support exists, always init our spinlock */
 +      spin_lock_init(&adapter->fdir_perfect_lock);
 +
 +#ifdef CONFIG_IXGBE_DCB
 +      /* Configure DCB traffic classes */
 +      for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
 +              tc = &adapter->dcb_cfg.tc_config[j];
 +              tc->path[DCB_TX_CONFIG].bwg_id = 0;
 +              tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
 +              tc->path[DCB_RX_CONFIG].bwg_id = 0;
 +              tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
 +              tc->dcb_pfc = pfc_disabled;
 +      }
 +      adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
 +      adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
 +      adapter->dcb_cfg.pfc_mode_enable = false;
 +      adapter->dcb_set_bitmap = 0x00;
 +      adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
 +      ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
 +                         MAX_TRAFFIC_CLASS);
 +
 +#endif
 +
 +      /* default flow control settings */
 +      hw->fc.requested_mode = ixgbe_fc_full;
 +      hw->fc.current_mode = ixgbe_fc_full;    /* init for ethtool output */
 +#ifdef CONFIG_DCB
 +      adapter->last_lfc_mode = hw->fc.current_mode;
 +#endif
 +      hw->fc.high_water = FC_HIGH_WATER(max_frame);
 +      hw->fc.low_water = FC_LOW_WATER(max_frame);
 +      hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
 +      hw->fc.send_xon = true;
 +      hw->fc.disable_fc_autoneg = false;
 +
 +      /* enable itr by default in dynamic mode */
 +      adapter->rx_itr_setting = 1;
 +      adapter->rx_eitr_param = 20000;
 +      adapter->tx_itr_setting = 1;
 +      adapter->tx_eitr_param = 10000;
 +
 +      /* set defaults for eitr in MegaBytes */
 +      adapter->eitr_low = 10;
 +      adapter->eitr_high = 20;
 +
 +      /* set default ring sizes */
 +      adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
 +      adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 +
 +      /* set default work limits */
 +      adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
 +
 +      /* initialize eeprom parameters */
 +      if (ixgbe_init_eeprom_params_generic(hw)) {
 +              e_dev_err("EEPROM initialization failed\n");
 +              return -EIO;
 +      }
 +
 +      /* enable rx csum by default */
 +      adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
 +
 +      /* get assigned NUMA node */
 +      adapter->node = dev_to_node(&pdev->dev);
 +
 +      set_bit(__IXGBE_DOWN, &adapter->state);
 +
 +      return 0;
 +}
 +
 +/**
 + * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 + * @tx_ring:    tx descriptor ring (for a specific queue) to setup
 + *
 + * Return 0 on success, negative on failure
 + **/
 +int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 +{
 +      struct device *dev = tx_ring->dev;
 +      int size;
 +
 +      size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 +      tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
 +      if (!tx_ring->tx_buffer_info)
 +              tx_ring->tx_buffer_info = vzalloc(size);
 +      if (!tx_ring->tx_buffer_info)
 +              goto err;
 +
 +      /* round up to nearest 4K */
 +      tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 +      tx_ring->size = ALIGN(tx_ring->size, 4096);
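 +      /* illustrative: 512 descriptors at 16 bytes each is 8192 bytes,
 +       * already a multiple of 4K */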
 +
 +      tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
 +                                         &tx_ring->dma, GFP_KERNEL);
 +      if (!tx_ring->desc)
 +              goto err;
 +
 +      tx_ring->next_to_use = 0;
 +      tx_ring->next_to_clean = 0;
 +      return 0;
 +
 +err:
 +      vfree(tx_ring->tx_buffer_info);
 +      tx_ring->tx_buffer_info = NULL;
 +      dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
 +      return -ENOMEM;
 +}
 +
 +/**
 + * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 + * @adapter: board private structure
 + *
 + * If this function returns with an error, then it's possible one or
 + * more of the rings is populated (while the rest are not).  It is the
 + * caller's duty to clean those orphaned rings.
 + *
 + * Return 0 on success, negative on failure
 + **/
 +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 +{
 +      int i, err = 0;
 +
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
 +              if (!err)
 +                      continue;
 +              e_err(probe, "Allocation for Tx Queue %u failed\n", i);
 +              break;
 +      }
 +
 +      return err;
 +}
 +
 +/**
 + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 + * @rx_ring:    rx descriptor ring (for a specific queue) to setup
 + *
 + * Returns 0 on success, negative on failure
 + **/
 +int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 +{
 +      struct device *dev = rx_ring->dev;
 +      int size;
 +
 +      size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 +      rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
 +      if (!rx_ring->rx_buffer_info)
 +              rx_ring->rx_buffer_info = vzalloc(size);
 +      if (!rx_ring->rx_buffer_info)
 +              goto err;
 +
 +      /* Round up to nearest 4K */
 +      rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 +      rx_ring->size = ALIGN(rx_ring->size, 4096);
 +
 +      rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
 +                                         &rx_ring->dma, GFP_KERNEL);
 +
 +      if (!rx_ring->desc)
 +              goto err;
 +
 +      rx_ring->next_to_clean = 0;
 +      rx_ring->next_to_use = 0;
 +
 +      return 0;
 +err:
 +      vfree(rx_ring->rx_buffer_info);
 +      rx_ring->rx_buffer_info = NULL;
 +      dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
 +      return -ENOMEM;
 +}
 +
 +/**
 + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 + * @adapter: board private structure
 + *
 + * If this function returns with an error, then it's possible one or
 + * more of the rings is populated (while the rest are not).  It is the
 + * caller's duty to clean those orphaned rings.
 + *
 + * Return 0 on success, negative on failure
 + **/
 +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 +{
 +      int i, err = 0;
 +
 +      for (i = 0; i < adapter->num_rx_queues; i++) {
 +              err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
 +              if (!err)
 +                      continue;
 +              e_err(probe, "Allocation for Rx Queue %u failed\n", i);
 +              break;
 +      }
 +
 +      return err;
 +}
 +
 +/**
 + * ixgbe_free_tx_resources - Free Tx Resources per Queue
 + * @tx_ring: Tx descriptor ring for a specific queue
 + *
 + * Free all transmit software resources
 + **/
 +void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
 +{
 +      ixgbe_clean_tx_ring(tx_ring);
 +
 +      vfree(tx_ring->tx_buffer_info);
 +      tx_ring->tx_buffer_info = NULL;
 +
 +      /* if not set, then don't free */
 +      if (!tx_ring->desc)
 +              return;
 +
 +      dma_free_coherent(tx_ring->dev, tx_ring->size,
 +                        tx_ring->desc, tx_ring->dma);
 +
 +      tx_ring->desc = NULL;
 +}
 +
 +/**
 + * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 + * @adapter: board private structure
 + *
 + * Free all transmit software resources
 + **/
 +static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
 +{
 +      int i;
 +
 +      for (i = 0; i < adapter->num_tx_queues; i++)
 +              if (adapter->tx_ring[i]->desc)
 +                      ixgbe_free_tx_resources(adapter->tx_ring[i]);
 +}
 +
 +/**
 + * ixgbe_free_rx_resources - Free Rx Resources
 + * @rx_ring: ring to clean the resources from
 + *
 + * Free all receive software resources
 + **/
 +void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
 +{
 +      ixgbe_clean_rx_ring(rx_ring);
 +
 +      vfree(rx_ring->rx_buffer_info);
 +      rx_ring->rx_buffer_info = NULL;
 +
 +      /* if not set, then don't free */
 +      if (!rx_ring->desc)
 +              return;
 +
 +      dma_free_coherent(rx_ring->dev, rx_ring->size,
 +                        rx_ring->desc, rx_ring->dma);
 +
 +      rx_ring->desc = NULL;
 +}
 +
 +/**
 + * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 + * @adapter: board private structure
 + *
 + * Free all receive software resources
 + **/
 +static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 +{
 +      int i;
 +
 +      for (i = 0; i < adapter->num_rx_queues; i++)
 +              if (adapter->rx_ring[i]->desc)
 +                      ixgbe_free_rx_resources(adapter->rx_ring[i]);
 +}
 +
 +/**
 + * ixgbe_change_mtu - Change the Maximum Transfer Unit
 + * @netdev: network interface device structure
 + * @new_mtu: new value for maximum frame size
 + *
 + * Returns 0 on success, negative on failure
 + **/
 +static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
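 +      /* illustrative: new_mtu = 9000 gives max_frame = 9000 + 14 + 4 = 9018,
 +       * well under IXGBE_MAX_JUMBO_FRAME_SIZE */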
 +
 +      /* MTU < 68 is an error and causes problems on some kernels */
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
 +          hw->mac.type != ixgbe_mac_X540) {
 +              if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
 +                      return -EINVAL;
 +      } else {
 +              if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
 +                      return -EINVAL;
 +      }
 +
 +      e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
 +      /* must set new MTU before calling down or up */
 +      netdev->mtu = new_mtu;
 +
 +      hw->fc.high_water = FC_HIGH_WATER(max_frame);
 +      hw->fc.low_water = FC_LOW_WATER(max_frame);
 +
 +      if (netif_running(netdev))
 +              ixgbe_reinit_locked(adapter);
 +
 +      return 0;
 +}
 +
 +/**
 + * ixgbe_open - Called when a network interface is made active
 + * @netdev: network interface device structure
 + *
 + * Returns 0 on success, negative value on failure
 + *
 + * The open entry point is called when a network interface is made
 + * active by the system (IFF_UP).  At this point all resources needed
 + * for transmit and receive operations are allocated, the interrupt
 + * handler is registered with the OS, the watchdog timer is started,
 + * and the stack is notified that the interface is ready.
 + **/
 +static int ixgbe_open(struct net_device *netdev)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      int err;
 +
 +      /* disallow open during test */
 +      if (test_bit(__IXGBE_TESTING, &adapter->state))
 +              return -EBUSY;
 +
 +      netif_carrier_off(netdev);
 +
 +      /* allocate transmit descriptors */
 +      err = ixgbe_setup_all_tx_resources(adapter);
 +      if (err)
 +              goto err_setup_tx;
 +
 +      /* allocate receive descriptors */
 +      err = ixgbe_setup_all_rx_resources(adapter);
 +      if (err)
 +              goto err_setup_rx;
 +
 +      ixgbe_configure(adapter);
 +
 +      err = ixgbe_request_irq(adapter);
 +      if (err)
 +              goto err_req_irq;
 +
 +      ixgbe_up_complete(adapter);
 +
 +      return 0;
 +
 +err_req_irq:
 +err_setup_rx:
 +      ixgbe_free_all_rx_resources(adapter);
 +err_setup_tx:
 +      ixgbe_free_all_tx_resources(adapter);
 +      ixgbe_reset(adapter);
 +
 +      return err;
 +}
 +
 +/**
 + * ixgbe_close - Disables a network interface
 + * @netdev: network interface device structure
 + *
 + * Returns 0, this is not allowed to fail
 + *
 + * The close entry point is called when an interface is de-activated
 + * by the OS.  The hardware is still under the drivers control, but
 + * needs to be disabled.  A global MAC reset is issued to stop the
 + * hardware, and all transmit and receive resources are freed.
 + **/
 +static int ixgbe_close(struct net_device *netdev)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +
 +      ixgbe_down(adapter);
 +      ixgbe_free_irq(adapter);
 +
 +      ixgbe_fdir_filter_exit(adapter);
 +
 +      ixgbe_free_all_tx_resources(adapter);
 +      ixgbe_free_all_rx_resources(adapter);
 +
 +      ixgbe_release_hw_control(adapter);
 +
 +      return 0;
 +}
 +
 +#ifdef CONFIG_PM
 +static int ixgbe_resume(struct pci_dev *pdev)
 +{
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
 +      u32 err;
 +
 +      pci_set_power_state(pdev, PCI_D0);
 +      pci_restore_state(pdev);
 +      /*
 +       * pci_restore_state clears dev->state_saved so call
 +       * pci_save_state to restore it.
 +       */
 +      pci_save_state(pdev);
 +
 +      err = pci_enable_device_mem(pdev);
 +      if (err) {
 +              e_dev_err("Cannot enable PCI device from suspend\n");
 +              return err;
 +      }
 +      pci_set_master(pdev);
 +
 +      pci_wake_from_d3(pdev, false);
 +
 +      err = ixgbe_init_interrupt_scheme(adapter);
 +      if (err) {
 +              e_dev_err("Cannot initialize interrupts for device\n");
 +              return err;
 +      }
 +
 +      ixgbe_reset(adapter);
 +
 +      IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 +
 +      if (netif_running(netdev)) {
 +              err = ixgbe_open(netdev);
 +              if (err)
 +                      return err;
 +      }
 +
 +      netif_device_attach(netdev);
 +
 +      return 0;
 +}
 +#endif /* CONFIG_PM */
 +
 +static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 +{
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 ctrl, fctrl;
 +      u32 wufc = adapter->wol;
 +#ifdef CONFIG_PM
 +      int retval = 0;
 +#endif
 +
 +      netif_device_detach(netdev);
 +
 +      if (netif_running(netdev)) {
 +              ixgbe_down(adapter);
 +              ixgbe_free_irq(adapter);
 +              ixgbe_free_all_tx_resources(adapter);
 +              ixgbe_free_all_rx_resources(adapter);
 +      }
 +
 +      ixgbe_clear_interrupt_scheme(adapter);
 +#ifdef CONFIG_DCB
 +      kfree(adapter->ixgbe_ieee_pfc);
 +      kfree(adapter->ixgbe_ieee_ets);
 +#endif
 +
 +#ifdef CONFIG_PM
 +      retval = pci_save_state(pdev);
 +      if (retval)
 +              return retval;
 +
 +#endif
 +      if (wufc) {
 +              ixgbe_set_rx_mode(netdev);
 +
 +              /* turn on all-multi mode if wake on multicast is enabled */
 +              if (wufc & IXGBE_WUFC_MC) {
 +                      fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 +                      fctrl |= IXGBE_FCTRL_MPE;
 +                      IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 +              }
 +
 +              ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
 +              ctrl |= IXGBE_CTRL_GIO_DIS;
 +              IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
 +
 +              IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
 +      } else {
 +              IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
 +              IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
 +      }
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              pci_wake_from_d3(pdev, false);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              pci_wake_from_d3(pdev, !!wufc);
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      *enable_wake = !!wufc;
 +
 +      ixgbe_release_hw_control(adapter);
 +
 +      pci_disable_device(pdev);
 +
 +      return 0;
 +}
 +
 +#ifdef CONFIG_PM
 +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
 +{
 +      int retval;
 +      bool wake;
 +
 +      retval = __ixgbe_shutdown(pdev, &wake);
 +      if (retval)
 +              return retval;
 +
 +      if (wake) {
 +              pci_prepare_to_sleep(pdev);
 +      } else {
 +              pci_wake_from_d3(pdev, false);
 +              pci_set_power_state(pdev, PCI_D3hot);
 +      }
 +
 +      return 0;
 +}
 +#endif /* CONFIG_PM */
 +
 +static void ixgbe_shutdown(struct pci_dev *pdev)
 +{
 +      bool wake;
 +
 +      __ixgbe_shutdown(pdev, &wake);
 +
 +      if (system_state == SYSTEM_POWER_OFF) {
 +              pci_wake_from_d3(pdev, wake);
 +              pci_set_power_state(pdev, PCI_D3hot);
 +      }
 +}
 +
 +/**
 + * ixgbe_update_stats - Update the board statistics counters.
 + * @adapter: board private structure
 + **/
 +void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct ixgbe_hw_stats *hwstats = &adapter->stats;
 +      u64 total_mpc = 0;
 +      u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 +      u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
 +      u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
 +      u64 bytes = 0, packets = 0;
 +
 +      if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 +          test_bit(__IXGBE_RESETTING, &adapter->state))
 +              return;
 +
 +      if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 +              u64 rsc_count = 0;
 +              u64 rsc_flush = 0;
 +              for (i = 0; i < 16; i++)
 +                      adapter->hw_rx_no_dma_resources +=
 +                              IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 +              for (i = 0; i < adapter->num_rx_queues; i++) {
 +                      rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
 +                      rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
 +              }
 +              adapter->rsc_total_count = rsc_count;
 +              adapter->rsc_total_flush = rsc_flush;
 +      }
 +
 +      for (i = 0; i < adapter->num_rx_queues; i++) {
 +              struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
 +              non_eop_descs += rx_ring->rx_stats.non_eop_descs;
 +              alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
 +              alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
 +              bytes += rx_ring->stats.bytes;
 +              packets += rx_ring->stats.packets;
 +      }
 +      adapter->non_eop_descs = non_eop_descs;
 +      adapter->alloc_rx_page_failed = alloc_rx_page_failed;
 +      adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
 +      netdev->stats.rx_bytes = bytes;
 +      netdev->stats.rx_packets = packets;
 +
 +      bytes = 0;
 +      packets = 0;
 +      /* gather some stats to the adapter struct that are per queue */
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
 +              restart_queue += tx_ring->tx_stats.restart_queue;
 +              tx_busy += tx_ring->tx_stats.tx_busy;
 +              bytes += tx_ring->stats.bytes;
 +              packets += tx_ring->stats.packets;
 +      }
 +      adapter->restart_queue = restart_queue;
 +      adapter->tx_busy = tx_busy;
 +      netdev->stats.tx_bytes = bytes;
 +      netdev->stats.tx_packets = packets;
 +
 +      hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 +
 +      /* 8 register reads */
 +      for (i = 0; i < 8; i++) {
 +              /* for packet buffers not used, the register should read 0 */
 +              mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
 +              missed_rx += mpc;
 +              hwstats->mpc[i] += mpc;
 +              total_mpc += hwstats->mpc[i];
 +              hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
 +              hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
 +              switch (hw->mac.type) {
 +              case ixgbe_mac_82598EB:
 +                      hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
 +                      hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
 +                      hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
 +                      hwstats->pxonrxc[i] +=
 +                              IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
 +                      break;
 +              case ixgbe_mac_82599EB:
 +              case ixgbe_mac_X540:
 +                      hwstats->pxonrxc[i] +=
 +                              IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
 +                      break;
 +              default:
 +                      break;
 +              }
 +      }
 +
 +      /* 16 register reads */
 +      for (i = 0; i < 16; i++) {
 +              hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
 +              hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
 +              if ((hw->mac.type == ixgbe_mac_82599EB) ||
 +                  (hw->mac.type == ixgbe_mac_X540)) {
 +                      hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
 +                      IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
 +                      hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
 +                      IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
 +              }
 +      }
 +
 +      hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
 +      /* work around hardware counting issue */
 +      hwstats->gprc -= missed_rx;
 +
 +      ixgbe_update_xoff_received(adapter);
 +
 +      /* 82598 hardware only has a 32 bit counter in the high register */
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
 +              hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
 +              hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
 +              hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
 +              break;
 +      case ixgbe_mac_X540:
 +              /* OS2BMC stats are X540 only */
 +              hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
 +              hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
 +              hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
 +              hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
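 +              /* fall through: remaining counters are common to X540 and 82599 */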
 +      case ixgbe_mac_82599EB:
 +              hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
 +              IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
 +              hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
 +              IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
 +              hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
 +              IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
 +              hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
 +              hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
 +              hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
 +#ifdef IXGBE_FCOE
 +              hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
 +              hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
 +              hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
 +              hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
 +              hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
 +              hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 +#endif /* IXGBE_FCOE */
 +              break;
 +      default:
 +              break;
 +      }
 +      bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
 +      hwstats->bprc += bprc;
 +      hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
 +      if (hw->mac.type == ixgbe_mac_82598EB)
 +              hwstats->mprc -= bprc;
 +      hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
 +      hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
 +      hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
 +      hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
 +      hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
 +      hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
 +      hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
 +      hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
 +      lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
 +      hwstats->lxontxc += lxon;
 +      lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
 +      hwstats->lxofftxc += lxoff;
 +      hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
 +      hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
 +      /*
 +       * 82598 errata - tx of flow control packets is included in tx counters
 +       */
 +      xon_off_tot = lxon + lxoff;
 +      hwstats->gptc -= xon_off_tot;
 +      hwstats->mptc -= xon_off_tot;
 +      hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
 +      hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
 +      hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
 +      hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
 +      hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
 +      hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
 +      hwstats->ptc64 -= xon_off_tot;
 +      hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
 +      hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
 +      hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
 +      hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
 +      hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
 +      hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
 +
 +      /* Fill out the OS statistics structure */
 +      netdev->stats.multicast = hwstats->mprc;
 +
 +      /* Rx Errors */
 +      netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
 +      netdev->stats.rx_dropped = 0;
 +      netdev->stats.rx_length_errors = hwstats->rlec;
 +      netdev->stats.rx_crc_errors = hwstats->crcerrs;
 +      netdev->stats.rx_missed_errors = total_mpc;
 +}
 +
 +/**
 + * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 + * @adapter - pointer to the device adapter structure
 + **/
 +static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int i;
 +
 +      if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
 +              return;
 +
 +      adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
 +
 +      /* if interface is down do nothing */
 +      if (test_bit(__IXGBE_DOWN, &adapter->state))
 +              return;
 +
 +      /* do nothing if we are not using signature filters */
 +      if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
 +              return;
 +
 +      adapter->fdir_overflow++;
 +
 +      if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
 +              for (i = 0; i < adapter->num_tx_queues; i++)
 +                      set_bit(__IXGBE_TX_FDIR_INIT_DONE,
 +                              &(adapter->tx_ring[i]->state));
 +              /* re-enable flow director interrupts */
 +              IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
 +      } else {
 +              e_err(probe, "failed to finish FDIR re-initialization, "
 +                    "ignored adding FDIR ATR filters\n");
 +      }
 +}
 +
 +/**
 + * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 + * @adapter - pointer to the device adapter structure
 + *
 + * This function serves two purposes.  First it strobes the interrupt lines
 + * in order to make certain interrupts are occurring.  Secondly it sets the
 + * bits needed to check for TX hangs.  As a result we should immediately
 + * determine if a hang has occurred.
 + */
 +static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u64 eics = 0;
 +      int i;
 +
 +      /* If we're down or resetting, just bail */
 +      if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 +          test_bit(__IXGBE_RESETTING, &adapter->state))
 +              return;
 +
 +      /* Force detection of hung controller */
 +      if (netif_carrier_ok(adapter->netdev)) {
 +              for (i = 0; i < adapter->num_tx_queues; i++)
 +                      set_check_for_tx_hang(adapter->tx_ring[i]);
 +      }
 +
 +      if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
 +              /*
 +               * for legacy and MSI interrupts don't set any bits
 +               * that are enabled for EIAM, because this operation
 +               * would set *both* EIMS and EICS for any bit in EIAM
 +               */
 +              IXGBE_WRITE_REG(hw, IXGBE_EICS,
 +                      (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
 +      } else {
 +              /* get one bit for every active tx/rx interrupt vector */
 +              for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
 +                      struct ixgbe_q_vector *qv = adapter->q_vector[i];
 +                      if (qv->rx.ring || qv->tx.ring)
 +                              eics |= ((u64)1 << i);
 +              }
 +      }
 +
 +      /* Cause software interrupt to ensure rings are cleaned */
 +      ixgbe_irq_rearm_queues(adapter, eics);
 +
 +}
 +
 +/**
 + * ixgbe_watchdog_update_link - update the link status
 + * @adapter - pointer to the device adapter structure
 + **/
 +static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 link_speed = adapter->link_speed;
 +      bool link_up = adapter->link_up;
 +      int i;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
 +              return;
 +
 +      if (hw->mac.ops.check_link) {
 +              hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 +      } else {
 +              /* always assume link is up, if no check link function */
 +              link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 +              link_up = true;
 +      }
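 +      /* with link up, enable flow control: per traffic class when DCB is
 +       * enabled, otherwise only on TC 0
 +       */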
 +      if (link_up) {
 +              if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 +                      for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
 +                              hw->mac.ops.fc_enable(hw, i);
 +              } else {
 +                      hw->mac.ops.fc_enable(hw, 0);
 +              }
 +      }
 +
 +      if (link_up ||
 +          time_after(jiffies, (adapter->link_check_timeout +
 +                               IXGBE_TRY_LINK_TIMEOUT))) {
 +              adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
 +              IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
 +              IXGBE_WRITE_FLUSH(hw);
 +      }
 +
 +      adapter->link_up = link_up;
 +      adapter->link_speed = link_speed;
 +}
 +
 +/**
 + * ixgbe_watchdog_link_is_up - update netif_carrier status and
 + *                             print link up message
 + * @adapter - pointer to the device adapter structure
 + **/
 +static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 link_speed = adapter->link_speed;
 +      bool flow_rx, flow_tx;
 +
 +      /* only continue if link was previously down */
 +      if (netif_carrier_ok(netdev))
 +              return;
 +
 +      adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB: {
 +              u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 +              u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
 +              flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
 +              flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
 +      }
 +              break;
 +      case ixgbe_mac_X540:
 +      case ixgbe_mac_82599EB: {
 +              u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
 +              u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
 +              flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
 +              flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
 +      }
 +              break;
 +      default:
 +              flow_tx = false;
 +              flow_rx = false;
 +              break;
 +      }
 +      e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
 +             (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
 +             "10 Gbps" :
 +             (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
 +             "1 Gbps" :
 +             (link_speed == IXGBE_LINK_SPEED_100_FULL ?
 +             "100 Mbps" :
 +             "unknown speed"))),
 +             ((flow_rx && flow_tx) ? "RX/TX" :
 +             (flow_rx ? "RX" :
 +             (flow_tx ? "TX" : "None"))));
 +
 +      netif_carrier_on(netdev);
 +      ixgbe_check_vf_rate_limit(adapter);
 +}
 +
 +/**
 + * ixgbe_watchdog_link_is_down - update netif_carrier status and
 + *                               print link down message
 + * @adapter - pointer to the adapter structure
 + **/
 +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      adapter->link_up = false;
 +      adapter->link_speed = 0;
 +
 +      /* only continue if link was up previously */
 +      if (!netif_carrier_ok(netdev))
 +              return;
 +
 +      /* poll for SFP+ cable when link is down */
 +      if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
 +              adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 +
 +      e_info(drv, "NIC Link is Down\n");
 +      netif_carrier_off(netdev);
 +}
 +
 +/**
 + * ixgbe_watchdog_flush_tx - flush queues on link down
 + * @adapter - pointer to the device adapter structure
 + **/
 +static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
 +{
 +      int i;
 +      int some_tx_pending = 0;
 +
 +      if (!netif_carrier_ok(adapter->netdev)) {
 +              for (i = 0; i < adapter->num_tx_queues; i++) {
 +                      struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
 +                      if (tx_ring->next_to_use != tx_ring->next_to_clean) {
 +                              some_tx_pending = 1;
 +                              break;
 +                      }
 +              }
 +
 +              if (some_tx_pending) {
 +                      /* We've lost link, so the controller stops DMA,
 +                       * but we've got queued Tx work that's never going
 +                       * to get done, so reset controller to flush Tx.
 +                       * (Do the reset outside of interrupt context).
 +                       */
 +                      adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
 +              }
 +      }
 +}
 +
 +static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
 +{
 +      u32 ssvpc;
 +
 +      /* Do not perform spoof check for 82598 */
 +      if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 +              return;
 +
 +      ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
 +
 +      /*
 +       * ssvpc register is cleared on read, if zero then no
 +       * spoofed packets in the last interval.
 +       */
 +      if (!ssvpc)
 +              return;
 +
 +      e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
 +}
 +
 +/**
 + * ixgbe_watchdog_subtask - check and bring link up
 + * @adapter - pointer to the device adapter structure
 + **/
 +static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
 +{
 +      /* if interface is down do nothing */
 +      if (test_bit(__IXGBE_DOWN, &adapter->state))
 +              return;
 +
 +      ixgbe_watchdog_update_link(adapter);
 +
 +      if (adapter->link_up)
 +              ixgbe_watchdog_link_is_up(adapter);
 +      else
 +              ixgbe_watchdog_link_is_down(adapter);
 +
 +      ixgbe_spoof_check(adapter);
 +      ixgbe_update_stats(adapter);
 +
 +      ixgbe_watchdog_flush_tx(adapter);
 +}
 +
 +/**
 + * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 + * @adapter - the ixgbe adapter structure
 + **/
 +static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      s32 err;
 +
 +      /* not searching for SFP so there is nothing to do here */
 +      if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
 +          !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
 +              return;
 +
 +      /* someone else is in init, wait until next service event */
 +      if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
 +              return;
 +
 +      err = hw->phy.ops.identify_sfp(hw);
 +      if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
 +              goto sfp_out;
 +
 +      if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
 +              /* If no cable is present, then we need to reset
 +               * the next time we find a good cable. */
 +              adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
 +      }
 +
 +      /* exit on error */
 +      if (err)
 +              goto sfp_out;
 +
 +      /* exit if reset not needed */
 +      if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
 +              goto sfp_out;
 +
 +      adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
 +
 +      /*
 +       * A module may be identified correctly, but the EEPROM may not have
 +       * support for that module.  setup_sfp() will fail in that case, so
 +       * we should not allow that module to load.
 +       */
 +      if (hw->mac.type == ixgbe_mac_82598EB)
 +              err = hw->phy.ops.reset(hw);
 +      else
 +              err = hw->mac.ops.setup_sfp(hw);
 +
 +      if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
 +              goto sfp_out;
 +
 +      adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
 +      e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
 +
 +sfp_out:
 +      clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 +
 +      if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
 +          (adapter->netdev->reg_state == NETREG_REGISTERED)) {
 +              e_dev_err("failed to initialize because an unsupported "
 +                        "SFP+ module type was detected.\n");
 +              e_dev_err("Reload the driver after installing a "
 +                        "supported module.\n");
 +              unregister_netdev(adapter->netdev);
 +      }
 +}
 +
 +/**
 + * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 + * @adapter - the ixgbe adapter structure
 + **/
 +static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 autoneg;
 +      bool negotiation;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
 +              return;
 +
 +      /* someone else is in init, wait until next service event */
 +      if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
 +              return;
 +
 +      adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
 +
 +      autoneg = hw->phy.autoneg_advertised;
 +      if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
 +              hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
 +      hw->mac.autotry_restart = false;
 +      if (hw->mac.ops.setup_link)
 +              hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
 +
 +      adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 +      adapter->link_check_timeout = jiffies;
 +      clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 +}
 +
 +/**
 + * ixgbe_service_timer - Timer Call-back
 + * @data: pointer to adapter cast into an unsigned long
 + **/
 +static void ixgbe_service_timer(unsigned long data)
 +{
 +      struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
 +      unsigned long next_event_offset;
 +
 +      /* poll faster when waiting for link */
 +      if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
 +              next_event_offset = HZ / 10;
 +      else
 +              next_event_offset = HZ * 2;
 +
 +      /* Reset the timer */
 +      mod_timer(&adapter->service_timer, next_event_offset + jiffies);
 +
 +      ixgbe_service_event_schedule(adapter);
 +}
 +
 +static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
 +{
 +      if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
 +              return;
 +
 +      adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
 +
 +      /* If we're already down or resetting, just bail */
 +      if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 +          test_bit(__IXGBE_RESETTING, &adapter->state))
 +              return;
 +
 +      ixgbe_dump(adapter);
 +      netdev_err(adapter->netdev, "Reset adapter\n");
 +      adapter->tx_timeout_count++;
 +
 +      ixgbe_reinit_locked(adapter);
 +}
 +
 +/**
 + * ixgbe_service_task - manages and runs subtasks
 + * @work: pointer to work_struct containing our data
 + **/
 +static void ixgbe_service_task(struct work_struct *work)
 +{
 +      struct ixgbe_adapter *adapter = container_of(work,
 +                                                   struct ixgbe_adapter,
 +                                                   service_task);
 +
 +      ixgbe_reset_subtask(adapter);
 +      ixgbe_sfp_detection_subtask(adapter);
 +      ixgbe_sfp_link_config_subtask(adapter);
 +      ixgbe_check_overtemp_subtask(adapter);
 +      ixgbe_watchdog_subtask(adapter);
 +      ixgbe_fdir_reinit_subtask(adapter);
 +      ixgbe_check_hang_subtask(adapter);
 +
 +      ixgbe_service_event_complete(adapter);
 +}
 +
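 +/**
 + * ixgbe_tx_ctxtdesc - write a Tx context descriptor to the ring
 + * @tx_ring: ring to place the descriptor on
 + * @vlan_macip_lens: VLAN tag plus MAC and IP header length fields
 + * @fcoe_sof_eof: FCoE SOF/EOF value written to the seqnum_seed field
 + * @type_tucmd: descriptor type and TUCMD offload bits
 + * @mss_l4len_idx: MSS, L4 header length and context index fields
 + **/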
 +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
 +                     u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
 +{
 +      struct ixgbe_adv_tx_context_desc *context_desc;
 +      u16 i = tx_ring->next_to_use;
 +
 +      context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
 +
 +      i++;
 +      tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 +
 +      /* set bits to identify this as an advanced context descriptor */
 +      type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 +
 +      context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
 +      context_desc->seqnum_seed       = cpu_to_le32(fcoe_sof_eof);
 +      context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
 +      context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
 +}
 +
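 +/*
 + * ixgbe_tso - set up a TSO context descriptor for the given skb
 + *
 + * Returns 0 if the skb is not GSO, 1 once the context descriptor has been
 + * written, or a negative errno if expanding a cloned header fails.
 + */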
 +static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 +                   u32 tx_flags, __be16 protocol, u8 *hdr_len)
 +{
 +      int err;
 +      u32 vlan_macip_lens, type_tucmd;
 +      u32 mss_l4len_idx, l4len;
 +
 +      if (!skb_is_gso(skb))
 +              return 0;
 +
 +      if (skb_header_cloned(skb)) {
 +              err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 +              if (err)
 +                      return err;
 +      }
 +
 +      /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 +      type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 +
 +      if (protocol == __constant_htons(ETH_P_IP)) {
 +              struct iphdr *iph = ip_hdr(skb);
 +              iph->tot_len = 0;
 +              iph->check = 0;
 +              tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
 +                                                       iph->daddr, 0,
 +                                                       IPPROTO_TCP,
 +                                                       0);
 +              type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 +      } else if (skb_is_gso_v6(skb)) {
 +              ipv6_hdr(skb)->payload_len = 0;
 +              tcp_hdr(skb)->check =
 +                  ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 +                                   &ipv6_hdr(skb)->daddr,
 +                                   0, IPPROTO_TCP, 0);
 +      }
 +
 +      l4len = tcp_hdrlen(skb);
 +      *hdr_len = skb_transport_offset(skb) + l4len;
 +
 +      /* mss_l4len_id: use 1 as index for TSO */
 +      mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
 +      mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 +      mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
 +
 +      /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
 +      vlan_macip_lens = skb_network_header_len(skb);
 +      vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 +      vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 +
 +      ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
 +                        mss_l4len_idx);
 +
 +      return 1;
 +}
 +
 +static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 +                        struct sk_buff *skb, u32 tx_flags,
 +                        __be16 protocol)
 +{
 +      u32 vlan_macip_lens = 0;
 +      u32 mss_l4len_idx = 0;
 +      u32 type_tucmd = 0;
 +
 +      if (skb->ip_summed != CHECKSUM_PARTIAL) {
 +              if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
 +                  !(tx_flags & IXGBE_TX_FLAGS_TXSW))
 +                      return false;
 +      } else {
 +              u8 l4_hdr = 0;
 +              switch (protocol) {
 +              case __constant_htons(ETH_P_IP):
 +                      vlan_macip_lens |= skb_network_header_len(skb);
 +                      type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 +                      l4_hdr = ip_hdr(skb)->protocol;
 +                      break;
 +              case __constant_htons(ETH_P_IPV6):
 +                      vlan_macip_lens |= skb_network_header_len(skb);
 +                      l4_hdr = ipv6_hdr(skb)->nexthdr;
 +                      break;
 +              default:
 +                      if (unlikely(net_ratelimit())) {
 +                              dev_warn(tx_ring->dev,
 +                               "partial checksum but proto=%x!\n",
 +                               skb->protocol);
 +                      }
 +                      break;
 +              }
 +
 +              switch (l4_hdr) {
 +              case IPPROTO_TCP:
 +                      type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 +                      mss_l4len_idx = tcp_hdrlen(skb) <<
 +                                      IXGBE_ADVTXD_L4LEN_SHIFT;
 +                      break;
 +              case IPPROTO_SCTP:
 +                      type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
 +                      mss_l4len_idx = sizeof(struct sctphdr) <<
 +                                      IXGBE_ADVTXD_L4LEN_SHIFT;
 +                      break;
 +              case IPPROTO_UDP:
 +                      mss_l4len_idx = sizeof(struct udphdr) <<
 +                                      IXGBE_ADVTXD_L4LEN_SHIFT;
 +                      break;
 +              default:
 +                      if (unlikely(net_ratelimit())) {
 +                              dev_warn(tx_ring->dev,
 +                               "partial checksum but l4 proto=%x!\n",
 +                               skb->protocol);
 +                      }
 +                      break;
 +              }
 +      }
 +
 +      vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 +      vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 +
 +      ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
 +                        type_tucmd, mss_l4len_idx);
 +
 +      return (skb->ip_summed == CHECKSUM_PARTIAL);
 +}
 +
 +static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 +{
 +      /* set type for advanced descriptor with frame checksum insertion */
 +      __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
 +                                    IXGBE_ADVTXD_DCMD_IFCS |
 +                                    IXGBE_ADVTXD_DCMD_DEXT);
 +
 +      /* set HW vlan bit if vlan is present */
 +      if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
 +              cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 +
 +      /* set segmentation enable bits for TSO/FSO */
 +#ifdef IXGBE_FCOE
 +      if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
 +#else
 +      if (tx_flags & IXGBE_TX_FLAGS_TSO)
 +#endif
 +              cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 +
 +      return cmd_type;
 +}
 +
 +static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
 +{
 +      __le32 olinfo_status =
 +              cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 +
 +      if (tx_flags & IXGBE_TX_FLAGS_TSO) {
 +              olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
 +                                          (1 << IXGBE_ADVTXD_IDX_SHIFT));
 +              /* enable IPv4 checksum for TSO */
 +              if (tx_flags & IXGBE_TX_FLAGS_IPV4)
 +                      olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
 +      }
 +
 +      /* enable L4 checksum for TSO and TX checksum offload */
 +      if (tx_flags & IXGBE_TX_FLAGS_CSUM)
 +              olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 +
 +#ifdef IXGBE_FCOE
 +      /* use index 1 context for FCOE/FSO */
 +      if (tx_flags & IXGBE_TX_FLAGS_FCOE)
 +              olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
 +                                          (1 << IXGBE_ADVTXD_IDX_SHIFT));
 +
 +#endif
 +      /*
 +       * Check Context must be set if Tx switch is enabled, which it
 +       * always is when virtual functions are running
 +       */
 +      if (tx_flags & IXGBE_TX_FLAGS_TXSW)
 +              olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
 +
 +      return olinfo_status;
 +}
 +
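 +/* command bits written on the final descriptor of a frame:
 + * end of packet (EOP) plus report status (RS)
 + */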
 +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
 +                     IXGBE_TXD_CMD_RS)
 +
 +static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 +                       struct sk_buff *skb,
 +                       struct ixgbe_tx_buffer *first,
 +                       u32 tx_flags,
 +                       const u8 hdr_len)
 +{
 +      struct device *dev = tx_ring->dev;
 +      struct ixgbe_tx_buffer *tx_buffer_info;
 +      union ixgbe_adv_tx_desc *tx_desc;
 +      dma_addr_t dma;
 +      __le32 cmd_type, olinfo_status;
 +      struct skb_frag_struct *frag;
 +      unsigned int f = 0;
 +      unsigned int data_len = skb->data_len;
 +      unsigned int size = skb_headlen(skb);
 +      u32 offset = 0;
 +      u32 paylen = skb->len - hdr_len;
 +      u16 i = tx_ring->next_to_use;
 +      u16 gso_segs;
 +
 +#ifdef IXGBE_FCOE
 +      if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 +              if (data_len >= sizeof(struct fcoe_crc_eof)) {
 +                      data_len -= sizeof(struct fcoe_crc_eof);
 +              } else {
 +                      size -= sizeof(struct fcoe_crc_eof) - data_len;
 +                      data_len = 0;
 +              }
 +      }
 +
 +#endif
 +      dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
 +      if (dma_mapping_error(dev, dma))
 +              goto dma_error;
 +
 +      cmd_type = ixgbe_tx_cmd_type(tx_flags);
 +      olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
 +
 +      tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 +
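 +      /* Build one data descriptor per buffer, splitting any buffer larger
 +       * than IXGBE_MAX_DATA_PER_TXD across several descriptors, then walk
 +       * the skb fragments until data_len is exhausted.
 +       */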
 +      for (;;) {
 +              while (size > IXGBE_MAX_DATA_PER_TXD) {
 +                      tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
 +                      tx_desc->read.cmd_type_len =
 +                              cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
 +                      tx_desc->read.olinfo_status = olinfo_status;
 +
 +                      offset += IXGBE_MAX_DATA_PER_TXD;
 +                      size -= IXGBE_MAX_DATA_PER_TXD;
 +
 +                      tx_desc++;
 +                      i++;
 +                      if (i == tx_ring->count) {
 +                              tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
 +                              i = 0;
 +                      }
 +              }
 +
 +              tx_buffer_info = &tx_ring->tx_buffer_info[i];
 +              tx_buffer_info->length = offset + size;
 +              tx_buffer_info->tx_flags = tx_flags;
 +              tx_buffer_info->dma = dma;
 +
 +              tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
 +              tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
 +              tx_desc->read.olinfo_status = olinfo_status;
 +
 +              if (!data_len)
 +                      break;
 +
 +              frag = &skb_shinfo(skb)->frags[f];
 +#ifdef IXGBE_FCOE
 +              size = min_t(unsigned int, data_len, frag->size);
 +#else
 +              size = frag->size;
 +#endif
 +              data_len -= size;
 +              f++;
 +
 +              offset = 0;
 +              tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
 +
 +              dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
 +              if (dma_mapping_error(dev, dma))
 +                      goto dma_error;
 +
 +              tx_desc++;
 +              i++;
 +              if (i == tx_ring->count) {
 +                      tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
 +                      i = 0;
 +              }
 +      }
 +
 +      tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
 +
 +      i++;
 +      if (i == tx_ring->count)
 +              i = 0;
 +
 +      tx_ring->next_to_use = i;
 +
 +      if (tx_flags & IXGBE_TX_FLAGS_TSO)
 +              gso_segs = skb_shinfo(skb)->gso_segs;
 +#ifdef IXGBE_FCOE
 +      /* adjust for FCoE Sequence Offload */
 +      else if (tx_flags & IXGBE_TX_FLAGS_FSO)
 +              gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
 +                                      skb_shinfo(skb)->gso_size);
 +#endif /* IXGBE_FCOE */
 +      else
 +              gso_segs = 1;
 +
 +      /* multiply data chunks by size of headers */
 +      tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
 +      tx_buffer_info->gso_segs = gso_segs;
 +      tx_buffer_info->skb = skb;
 +
 +      /* set the timestamp */
 +      first->time_stamp = jiffies;
 +
 +      /*
 +       * Force memory writes to complete before letting h/w
 +       * know there are new descriptors to fetch.  (Only
 +       * applicable for weak-ordered memory model archs,
 +       * such as IA-64).
 +       */
 +      wmb();
 +
 +      /* set next_to_watch value indicating a packet is present */
 +      first->next_to_watch = tx_desc;
 +
 +      /* notify HW of packet */
 +      writel(i, tx_ring->tail);
 +
 +      return;
 +dma_error:
 +      dev_err(dev, "TX DMA map failed\n");
 +
 +      /* clear dma mappings for failed tx_buffer_info map */
 +      for (;;) {
 +              tx_buffer_info = &tx_ring->tx_buffer_info[i];
 +              ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
 +              if (tx_buffer_info == first)
 +                      break;
 +              if (i == 0)
 +                      i = tx_ring->count;
 +              i--;
 +      }
 +
 +      dev_kfree_skb_any(skb);
 +
 +      tx_ring->next_to_use = i;
 +}
 +
 +static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 +                    u32 tx_flags, __be16 protocol)
 +{
 +      struct ixgbe_q_vector *q_vector = ring->q_vector;
 +      union ixgbe_atr_hash_dword input = { .dword = 0 };
 +      union ixgbe_atr_hash_dword common = { .dword = 0 };
 +      union {
 +              unsigned char *network;
 +              struct iphdr *ipv4;
 +              struct ipv6hdr *ipv6;
 +      } hdr;
 +      struct tcphdr *th;
 +      __be16 vlan_id;
 +
 +      /* if ring doesn't have an interrupt vector, cannot perform ATR */
 +      if (!q_vector)
 +              return;
 +
 +      /* do nothing if sampling is disabled */
 +      if (!ring->atr_sample_rate)
 +              return;
 +
 +      ring->atr_count++;
 +
 +      /* snag network header to get L4 type and address */
 +      hdr.network = skb_network_header(skb);
 +
 +      /* Currently only IPv4/IPv6 with TCP is supported */
 +      if ((protocol != __constant_htons(ETH_P_IPV6) ||
 +           hdr.ipv6->nexthdr != IPPROTO_TCP) &&
 +          (protocol != __constant_htons(ETH_P_IP) ||
 +           hdr.ipv4->protocol != IPPROTO_TCP))
 +              return;
 +
 +      th = tcp_hdr(skb);
 +
 +      /* skip this packet since it is invalid or the socket is closing */
 +      if (!th || th->fin)
 +              return;
 +
 +      /* sample on all syn packets or once every atr sample count */
 +      if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
 +              return;
 +
 +      /* reset sample count */
 +      ring->atr_count = 0;
 +
 +      vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
 +
 +      /*
 +       * src and dst are inverted, think how the receiver sees them
 +       *
 +       * The input is broken into two sections, a non-compressed section
 +       * containing vm_pool, vlan_id, and flow_type.  The rest of the data
 +       * is XORed together and stored in the compressed dword.
 +       */
 +      input.formatted.vlan_id = vlan_id;
 +
 +      /*
 +       * since src port and flex bytes occupy the same word XOR them together
 +       * and write the value to source port portion of compressed dword
 +       */
 +      if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
 +              common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
 +      else
 +              common.port.src ^= th->dest ^ protocol;
 +      common.port.dst ^= th->source;
 +
 +      if (protocol == __constant_htons(ETH_P_IP)) {
 +              input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 +              common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
 +      } else {
 +              input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
 +              common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
 +                           hdr.ipv6->saddr.s6_addr32[1] ^
 +                           hdr.ipv6->saddr.s6_addr32[2] ^
 +                           hdr.ipv6->saddr.s6_addr32[3] ^
 +                           hdr.ipv6->daddr.s6_addr32[0] ^
 +                           hdr.ipv6->daddr.s6_addr32[1] ^
 +                           hdr.ipv6->daddr.s6_addr32[2] ^
 +                           hdr.ipv6->daddr.s6_addr32[3];
 +      }
 +
 +      /* This assumes the Rx queue and Tx queue are bound to the same CPU */
 +      ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
 +                                            input, common, ring->queue_index);
 +}
 +
 +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 +{
 +      netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 +      /* Herbert's original patch had:
 +       *  smp_mb__after_netif_stop_queue();
 +       * but since that doesn't exist yet, just open code it. */
 +      smp_mb();
 +
 +      /* We need to check again in case another CPU has just
 +       * made room available. */
 +      if (likely(ixgbe_desc_unused(tx_ring) < size))
 +              return -EBUSY;
 +
 +      /* A reprieve! - use start_queue because it doesn't call schedule */
 +      netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
 +      ++tx_ring->tx_stats.restart_queue;
 +      return 0;
 +}
 +
 +static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 +{
 +      if (likely(ixgbe_desc_unused(tx_ring) >= size))
 +              return 0;
 +      return __ixgbe_maybe_stop_tx(tx_ring, size);
 +}
 +
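 +/*
 + * ixgbe_select_queue - pick a Tx queue: FCoE/FIP frames map into the FCoE
 + * ring range, Flow Director keeps a flow on the queue recorded at Rx time
 + * (or the current CPU), and everything else falls back to skb_tx_hash().
 + */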
 +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(dev);
 +      int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
 +                                             smp_processor_id();
 +#ifdef IXGBE_FCOE
 +      __be16 protocol = vlan_get_protocol(skb);
 +
 +      if (((protocol == htons(ETH_P_FCOE)) ||
 +          (protocol == htons(ETH_P_FIP))) &&
 +          (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
 +              txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
 +              txq += adapter->ring_feature[RING_F_FCOE].mask;
 +              return txq;
 +      }
 +#endif
 +
 +      if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 +              while (unlikely(txq >= dev->real_num_tx_queues))
 +                      txq -= dev->real_num_tx_queues;
 +              return txq;
 +      }
 +
 +      return skb_tx_hash(dev, skb);
 +}
 +
 +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 +                        struct ixgbe_adapter *adapter,
 +                        struct ixgbe_ring *tx_ring)
 +{
 +      struct ixgbe_tx_buffer *first;
 +      int tso;
 +      u32 tx_flags = 0;
 +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 +      unsigned short f;
 +#endif
 +      u16 count = TXD_USE_COUNT(skb_headlen(skb));
 +      __be16 protocol = skb->protocol;
 +      u8 hdr_len = 0;
 +
 +      /*
 +       * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
 +       *       + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
 +       *       + 2 desc gap to keep tail from touching head,
 +       *       + 1 desc for context descriptor,
 +       * otherwise try next time
 +       */
 +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 +      for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 +              count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 +#else
 +      count += skb_shinfo(skb)->nr_frags;
 +#endif
 +      if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
 +              tx_ring->tx_stats.tx_busy++;
 +              return NETDEV_TX_BUSY;
 +      }
 +
 +#ifdef CONFIG_PCI_IOV
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              tx_flags |= IXGBE_TX_FLAGS_TXSW;
 +
 +#endif
 +      /* if we have a HW VLAN tag being added default to the HW one */
 +      if (vlan_tx_tag_present(skb)) {
 +              tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
 +              tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
 +      /* else if it is a SW VLAN check the next protocol and store the tag */
 +      } else if (protocol == __constant_htons(ETH_P_8021Q)) {
 +              struct vlan_hdr *vhdr, _vhdr;
 +              vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
 +              if (!vhdr)
 +                      goto out_drop;
 +
 +              protocol = vhdr->h_vlan_encapsulated_proto;
 +              tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT;
 +              tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
 +      }
 +
 +      if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
 +          ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
 +           (skb->priority != TC_PRIO_CONTROL))) {
 +              tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
 +              tx_flags |= tx_ring->dcb_tc <<
 +                          IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
 +              if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
 +                      struct vlan_ethhdr *vhdr;
 +                      if (skb_header_cloned(skb) &&
 +                          pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 +                              goto out_drop;
 +                      vhdr = (struct vlan_ethhdr *)skb->data;
 +                      vhdr->h_vlan_TCI = htons(tx_flags >>
 +                                               IXGBE_TX_FLAGS_VLAN_SHIFT);
 +              } else {
 +                      tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
 +              }
 +      }
 +
 +      /* record the location of the first descriptor for this packet */
 +      first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 +
 +#ifdef IXGBE_FCOE
 +      /* setup tx offload for FCoE */
 +      if ((protocol == __constant_htons(ETH_P_FCOE)) &&
 +          (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
 +              tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
 +              if (tso < 0)
 +                      goto out_drop;
 +              else if (tso)
 +                      tx_flags |= IXGBE_TX_FLAGS_FSO |
 +                                  IXGBE_TX_FLAGS_FCOE;
 +              else
 +                      tx_flags |= IXGBE_TX_FLAGS_FCOE;
 +
 +              goto xmit_fcoe;
 +      }
 +
 +#endif /* IXGBE_FCOE */
 +      /* setup IPv4/IPv6 offloads */
 +      if (protocol == __constant_htons(ETH_P_IP))
 +              tx_flags |= IXGBE_TX_FLAGS_IPV4;
 +
 +      tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
 +      if (tso < 0)
 +              goto out_drop;
 +      else if (tso)
 +              tx_flags |= IXGBE_TX_FLAGS_TSO;
 +      else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
 +              tx_flags |= IXGBE_TX_FLAGS_CSUM;
 +
 +      /* add the ATR filter if ATR is on */
 +      if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
 +              ixgbe_atr(tx_ring, skb, tx_flags, protocol);
 +
 +#ifdef IXGBE_FCOE
 +xmit_fcoe:
 +#endif /* IXGBE_FCOE */
 +      ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
 +
 +      ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 +
 +      return NETDEV_TX_OK;
 +
 +out_drop:
 +      dev_kfree_skb_any(skb);
 +      return NETDEV_TX_OK;
 +}
 +
 +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_ring *tx_ring;
 +
 +      tx_ring = adapter->tx_ring[skb->queue_mapping];
 +      return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 +}
 +
 +/**
 + * ixgbe_set_mac - Change the Ethernet Address of the NIC
 + * @netdev: network interface device structure
 + * @p: pointer to an address structure
 + *
 + * Returns 0 on success, negative on failure
 + **/
 +static int ixgbe_set_mac(struct net_device *netdev, void *p)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct sockaddr *addr = p;
 +
 +      if (!is_valid_ether_addr(addr->sa_data))
 +              return -EADDRNOTAVAIL;
 +
 +      memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 +      memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 +
 +      hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
 +                          IXGBE_RAH_AV);
 +
 +      return 0;
 +}
 +
 +static int
 +ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u16 value;
 +      int rc;
 +
 +      if (prtad != hw->phy.mdio.prtad)
 +              return -EINVAL;
 +      rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
 +      if (!rc)
 +              rc = value;
 +      return rc;
 +}
 +
 +static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
 +                          u16 addr, u16 value)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      if (prtad != hw->phy.mdio.prtad)
 +              return -EINVAL;
 +      return hw->phy.ops.write_reg(hw, addr, devad, value);
 +}
 +
 +static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +
 +      return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
 +}
 +
 +/**
 + * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 + * netdev->dev_addrs
 + * @dev: network interface device structure
 + *
 + * Returns non-zero on failure
 + **/
 +static int ixgbe_add_sanmac_netdev(struct net_device *dev)
 +{
 +      int err = 0;
 +      struct ixgbe_adapter *adapter = netdev_priv(dev);
 +      struct ixgbe_mac_info *mac = &adapter->hw.mac;
 +
 +      if (is_valid_ether_addr(mac->san_addr)) {
 +              rtnl_lock();
 +              err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
 +              rtnl_unlock();
 +      }
 +      return err;
 +}
 +
 +/**
 + * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 + * netdev->dev_addrs
 + * @dev: network interface device structure
 + *
 + * Returns non-zero on failure
 + **/
 +static int ixgbe_del_sanmac_netdev(struct net_device *dev)
 +{
 +      int err = 0;
 +      struct ixgbe_adapter *adapter = netdev_priv(dev);
 +      struct ixgbe_mac_info *mac = &adapter->hw.mac;
 +
 +      if (is_valid_ether_addr(mac->san_addr)) {
 +              rtnl_lock();
 +              err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
 +              rtnl_unlock();
 +      }
 +      return err;
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +/*
 + * Polling 'interrupt' - used by things like netconsole to send skbs
 + * without having to re-enable interrupts. It's not called while
 + * the interrupt routine is executing.
 + */
 +static void ixgbe_netpoll(struct net_device *netdev)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      int i;
 +
 +      /* if interface is down do nothing */
 +      if (test_bit(__IXGBE_DOWN, &adapter->state))
 +              return;
 +
 +      adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 +              int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +              for (i = 0; i < num_q_vectors; i++) {
 +                      struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
 +                      ixgbe_msix_clean_rings(0, q_vector);
 +              }
 +      } else {
 +              ixgbe_intr(adapter->pdev->irq, netdev);
 +      }
 +      adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 +}
 +#endif
 +
 +static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 +                                                 struct rtnl_link_stats64 *stats)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      int i;
 +
 +      rcu_read_lock();
 +      for (i = 0; i < adapter->num_rx_queues; i++) {
 +              struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
 +              u64 bytes, packets;
 +              unsigned int start;
 +
 +              if (ring) {
 +                      do {
 +                              start = u64_stats_fetch_begin_bh(&ring->syncp);
 +                              packets = ring->stats.packets;
 +                              bytes   = ring->stats.bytes;
 +                      } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 +                      stats->rx_packets += packets;
 +                      stats->rx_bytes   += bytes;
 +              }
 +      }
 +
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
 +              u64 bytes, packets;
 +              unsigned int start;
 +
 +              if (ring) {
 +                      do {
 +                              start = u64_stats_fetch_begin_bh(&ring->syncp);
 +                              packets = ring->stats.packets;
 +                              bytes   = ring->stats.bytes;
 +                      } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 +                      stats->tx_packets += packets;
 +                      stats->tx_bytes   += bytes;
 +              }
 +      }
 +      rcu_read_unlock();
 +      /* following stats updated by ixgbe_update_stats() */
 +      stats->multicast        = netdev->stats.multicast;
 +      stats->rx_errors        = netdev->stats.rx_errors;
 +      stats->rx_length_errors = netdev->stats.rx_length_errors;
 +      stats->rx_crc_errors    = netdev->stats.rx_crc_errors;
 +      stats->rx_missed_errors = netdev->stats.rx_missed_errors;
 +      return stats;
 +}
 +
 +/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 + * @adapter: pointer to ixgbe_adapter
 + * @tc: number of traffic classes currently enabled
 + *
 + * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
 + * 802.1Q priority maps to a packet buffer that exists.
 + */
 +static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 reg, rsave;
 +      int i;
 +
 +      /* The 82598 has a static priority-to-TC mapping that cannot
 +       * be changed, so no validation is needed.
 +       */
 +      if (hw->mac.type == ixgbe_mac_82598EB)
 +              return;
 +
 +      reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
 +      rsave = reg;
 +
 +      for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 +              u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
 +
 +              /* if up2tc is out of bounds, clear this priority's field */
 +              if (up2tc > tc)
 +                      reg &= ~(0x7 << (i * IXGBE_RTRUP2TC_UP_SHIFT));
 +      }
 +
 +      if (reg != rsave)
 +              IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
 +
 +      return;
 +}
 +
 +
 +/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
 + * classes.
 + *
 + * @dev: net device to configure
 + * @tc: number of traffic classes to enable
 + */
 +int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(dev);
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      /* Multiple traffic classes requires multiple queues */
 +      if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
 +              e_err(drv, "Enable failed, needs MSI-X\n");
 +              return -EINVAL;
 +      }
 +
 +      /* Hardware supports up to 8 traffic classes */
 +      if (tc > MAX_TRAFFIC_CLASS ||
 +          (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
 +              return -EINVAL;
 +
 +      /* Hardware has to reinitialize queues and interrupts to
 +       * match packet buffer alignment. Unfortunately, the
 +       * hardware is not flexible enough to do this dynamically.
 +       */
 +      if (netif_running(dev))
 +              ixgbe_close(dev);
 +      ixgbe_clear_interrupt_scheme(adapter);
 +
 +      if (tc) {
 +              netdev_set_num_tc(dev, tc);
 +              adapter->last_lfc_mode = adapter->hw.fc.current_mode;
 +
 +              adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
 +              adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 +
 +              if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 +                      adapter->hw.fc.requested_mode = ixgbe_fc_none;
 +      } else {
 +              netdev_reset_tc(dev);
 +
 +              adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
 +
 +              adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 +              adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 +
 +              adapter->temp_dcb_cfg.pfc_mode_enable = false;
 +              adapter->dcb_cfg.pfc_mode_enable = false;
 +      }
 +
 +      ixgbe_init_interrupt_scheme(adapter);
 +      ixgbe_validate_rtr(adapter, tc);
 +      if (netif_running(dev))
 +              ixgbe_open(dev);
 +
 +      return 0;
 +}
 +
 +void ixgbe_do_reset(struct net_device *netdev)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +
 +      if (netif_running(netdev))
 +              ixgbe_reinit_locked(adapter);
 +      else
 +              ixgbe_reset(adapter);
 +}
 +
 +static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +
 +#ifdef CONFIG_DCB
 +      if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
 +              data &= ~NETIF_F_HW_VLAN_RX;
 +#endif
 +
 +      /* do not allow RXHASH to be enabled when RSS is not supported */
 +      if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
 +              data &= ~NETIF_F_RXHASH;
 +
 +      /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
 +      if (!(data & NETIF_F_RXCSUM))
 +              data &= ~NETIF_F_LRO;
 +
 +      /* Turn off LRO if not RSC capable or invalid ITR settings */
 +      if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
 +              data &= ~NETIF_F_LRO;
 +      } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
 +                 (adapter->rx_itr_setting != 1 &&
 +                  adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
 +              data &= ~NETIF_F_LRO;
 +              e_info(probe, "rx-usecs set too low, not enabling RSC\n");
 +      }
 +
 +      return data;
 +}
 +
 +static int ixgbe_set_features(struct net_device *netdev, u32 data)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      bool need_reset = false;
 +
 +      /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
 +      if (!(data & NETIF_F_RXCSUM))
 +              adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 +      else
 +              adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
 +
 +      /* Make sure RSC matches LRO, reset if change */
 +      if (!!(data & NETIF_F_LRO) !=
 +           !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
 +              adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
 +              switch (adapter->hw.mac.type) {
 +              case ixgbe_mac_X540:
 +              case ixgbe_mac_82599EB:
 +                      need_reset = true;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      }
 +
 +      /*
 +       * Check if Flow Director n-tuple support was enabled or disabled.  If
 +       * the state changed, we need to reset.
 +       */
 +      if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
 +              /* turn off ATR, enable perfect filters and reset */
 +              if (data & NETIF_F_NTUPLE) {
 +                      adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 +                      adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 +                      need_reset = true;
 +              }
 +      } else if (!(data & NETIF_F_NTUPLE)) {
 +              /* turn off Flow Director, set ATR and reset */
 +              adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 +              if ((adapter->flags &  IXGBE_FLAG_RSS_ENABLED) &&
 +                  !(adapter->flags &  IXGBE_FLAG_DCB_ENABLED))
 +                      adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 +              need_reset = true;
 +      }
 +
 +      if (need_reset)
 +              ixgbe_do_reset(netdev);
 +
 +      return 0;
 +
 +}
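
fix_features is invoked first so the driver can mask out combinations the hardware cannot honour; the core then hands the sanitised bits to set_features. A toy, self-contained illustration of that two-step contract (hypothetical feature names, not the netdev core code):

    #include <stdio.h>

    #define F_RXCSUM  (1u << 0)
    #define F_LRO     (1u << 1)

    /* Like ixgbe_fix_features: LRO is only allowed when RXCSUM is on. */
    static unsigned int fix_features(unsigned int wanted)
    {
            if (!(wanted & F_RXCSUM))
                    wanted &= ~F_LRO;
            return wanted;
    }

    int main(void)
    {
            unsigned int features = 0;
            unsigned int wanted = F_LRO;            /* LRO requested without RXCSUM */

            wanted = fix_features(wanted);          /* step 1: sanitise the request */
            if (wanted != features)
                    features = wanted;              /* step 2: set_features-style commit */

            printf("resulting features: %#x\n", features);      /* prints 0 */
            return 0;
    }
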
 +
 +static const struct net_device_ops ixgbe_netdev_ops = {
 +      .ndo_open               = ixgbe_open,
 +      .ndo_stop               = ixgbe_close,
 +      .ndo_start_xmit         = ixgbe_xmit_frame,
 +      .ndo_select_queue       = ixgbe_select_queue,
 +      .ndo_set_rx_mode        = ixgbe_set_rx_mode,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_set_mac_address    = ixgbe_set_mac,
 +      .ndo_change_mtu         = ixgbe_change_mtu,
 +      .ndo_tx_timeout         = ixgbe_tx_timeout,
 +      .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
 +      .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
 +      .ndo_do_ioctl           = ixgbe_ioctl,
 +      .ndo_set_vf_mac         = ixgbe_ndo_set_vf_mac,
 +      .ndo_set_vf_vlan        = ixgbe_ndo_set_vf_vlan,
 +      .ndo_set_vf_tx_rate     = ixgbe_ndo_set_vf_bw,
 +      .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
 +      .ndo_get_stats64        = ixgbe_get_stats64,
 +      .ndo_setup_tc           = ixgbe_setup_tc,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = ixgbe_netpoll,
 +#endif
 +#ifdef IXGBE_FCOE
 +      .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
 +      .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
 +      .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
 +      .ndo_fcoe_enable = ixgbe_fcoe_enable,
 +      .ndo_fcoe_disable = ixgbe_fcoe_disable,
 +      .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
 +#endif /* IXGBE_FCOE */
 +      .ndo_set_features = ixgbe_set_features,
 +      .ndo_fix_features = ixgbe_fix_features,
 +};
 +
 +static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
 +                         const struct ixgbe_info *ii)
 +{
 +#ifdef CONFIG_PCI_IOV
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int err;
 +      int num_vf_macvlans, i;
 +      struct vf_macvlans *mv_list;
 +
 +      if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
 +              return;
 +
 +      /* The 82599 supports up to 64 VFs per physical function,
 +       * but this implementation limits allocation to 63 so that
 +       * basic networking resources are still available to the
 +       * physical function.
 +       */
 +      adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
 +      adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
 +      err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
 +      if (err) {
 +              e_err(probe, "Failed to enable PCI sriov: %d\n", err);
 +              goto err_novfs;
 +      }
 +
 +      num_vf_macvlans = hw->mac.num_rar_entries -
 +              (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
 +
 +      adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
 +                                           sizeof(struct vf_macvlans),
 +                                           GFP_KERNEL);
 +      if (mv_list) {
 +              /* Initialize list of VF macvlans */
 +              INIT_LIST_HEAD(&adapter->vf_mvs.l);
 +              for (i = 0; i < num_vf_macvlans; i++) {
 +                      mv_list->vf = -1;
 +                      mv_list->free = true;
 +                      mv_list->rar_entry = hw->mac.num_rar_entries -
 +                              (i + adapter->num_vfs + 1);
 +                      list_add(&mv_list->l, &adapter->vf_mvs.l);
 +                      mv_list++;
 +              }
 +      }
 +
 +      /* If the call to enable VFs succeeded, allocate memory
 +       * for per-VF control structures.
 +       */
 +      adapter->vfinfo =
 +              kcalloc(adapter->num_vfs,
 +                      sizeof(struct vf_data_storage), GFP_KERNEL);
 +      if (adapter->vfinfo) {
 +              /* Now that we're sure SR-IOV is enabled
 +               * and memory allocated set up the mailbox parameters
 +               */
 +              ixgbe_init_mbx_params_pf(hw);
 +              memcpy(&hw->mbx.ops, ii->mbx_ops,
 +                     sizeof(hw->mbx.ops));
 +
 +              /* Disable RSC when in SR-IOV mode */
 +              adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
 +                                   IXGBE_FLAG2_RSC_ENABLED);
 +              return;
 +      }
 +
 +      /* Oh oh */
 +      e_err(probe, "Unable to allocate memory for VF Data Storage - "
 +            "SRIOV disabled\n");
 +      pci_disable_sriov(adapter->pdev);
 +
 +err_novfs:
 +      adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 +      adapter->num_vfs = 0;
 +#endif /* CONFIG_PCI_IOV */
 +}
 +
 +/**
 + * ixgbe_probe - Device Initialization Routine
 + * @pdev: PCI device information struct
 + * @ent: entry in ixgbe_pci_tbl
 + *
 + * Returns 0 on success, negative on failure
 + *
 + * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 + * The OS initialization, configuring of the adapter private structure,
 + * and a hardware reset occur.
 + **/
 +static int __devinit ixgbe_probe(struct pci_dev *pdev,
 +                               const struct pci_device_id *ent)
 +{
 +      struct net_device *netdev;
 +      struct ixgbe_adapter *adapter = NULL;
 +      struct ixgbe_hw *hw;
 +      const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
 +      static int cards_found;
 +      int i, err, pci_using_dac;
 +      u8 part_str[IXGBE_PBANUM_LENGTH];
 +      unsigned int indices = num_possible_cpus();
 +#ifdef IXGBE_FCOE
 +      u16 device_caps;
 +#endif
 +      u32 eec;
 +
 +      /* Catch broken hardware that put the wrong VF device ID in
 +       * the PCIe SR-IOV capability.
 +       */
 +      if (pdev->is_virtfn) {
 +              WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
 +                   pci_name(pdev), pdev->vendor, pdev->device);
 +              return -EINVAL;
 +      }
 +
 +      err = pci_enable_device_mem(pdev);
 +      if (err)
 +              return err;
 +
 +      if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 +          !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 +              pci_using_dac = 1;
 +      } else {
 +              err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 +              if (err) {
 +                      err = dma_set_coherent_mask(&pdev->dev,
 +                                                  DMA_BIT_MASK(32));
 +                      if (err) {
 +                              dev_err(&pdev->dev,
 +                                      "No usable DMA configuration, aborting\n");
 +                              goto err_dma;
 +                      }
 +              }
 +              pci_using_dac = 0;
 +      }
 +
 +      err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
 +                                         IORESOURCE_MEM), ixgbe_driver_name);
 +      if (err) {
 +              dev_err(&pdev->dev,
 +                      "pci_request_selected_regions failed 0x%x\n", err);
 +              goto err_pci_reg;
 +      }
 +
 +      pci_enable_pcie_error_reporting(pdev);
 +
 +      pci_set_master(pdev);
 +      pci_save_state(pdev);
 +
 +#ifdef CONFIG_IXGBE_DCB
 +      indices *= MAX_TRAFFIC_CLASS;
 +#endif
 +
 +      if (ii->mac == ixgbe_mac_82598EB)
 +              indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
 +      else
 +              indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
 +
 +#ifdef IXGBE_FCOE
 +      indices += min_t(unsigned int, num_possible_cpus(),
 +                       IXGBE_MAX_FCOE_INDICES);
 +#endif
 +      netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
 +      if (!netdev) {
 +              err = -ENOMEM;
 +              goto err_alloc_etherdev;
 +      }
 +
 +      SET_NETDEV_DEV(netdev, &pdev->dev);
 +
 +      adapter = netdev_priv(netdev);
 +      pci_set_drvdata(pdev, adapter);
 +
 +      adapter->netdev = netdev;
 +      adapter->pdev = pdev;
 +      hw = &adapter->hw;
 +      hw->back = adapter;
 +      adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 +
 +      hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
 +                            pci_resource_len(pdev, 0));
 +      if (!hw->hw_addr) {
 +              err = -EIO;
 +              goto err_ioremap;
 +      }
 +
 +      for (i = 1; i <= 5; i++) {
 +              if (pci_resource_len(pdev, i) == 0)
 +                      continue;
 +      }
 +
 +      netdev->netdev_ops = &ixgbe_netdev_ops;
 +      ixgbe_set_ethtool_ops(netdev);
 +      netdev->watchdog_timeo = 5 * HZ;
 +      strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 +
 +      adapter->bd_number = cards_found;
 +
 +      /* Setup hw api */
 +      memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
 +      hw->mac.type  = ii->mac;
 +
 +      /* EEPROM */
 +      memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
 +      eec = IXGBE_READ_REG(hw, IXGBE_EEC);
 +      /* If EEPROM is valid (bit 8 = 1), use default; otherwise use bit bang */
 +      if (!(eec & (1 << 8)))
 +              hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
 +
 +      /* PHY */
 +      memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
 +      hw->phy.sfp_type = ixgbe_sfp_type_unknown;
 +      /* ixgbe_identify_phy_generic will set prtad and mmds properly */
 +      hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
 +      hw->phy.mdio.mmds = 0;
 +      hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
 +      hw->phy.mdio.dev = netdev;
 +      hw->phy.mdio.mdio_read = ixgbe_mdio_read;
 +      hw->phy.mdio.mdio_write = ixgbe_mdio_write;
 +
 +      ii->get_invariants(hw);
 +
 +      /* setup the private structure */
 +      err = ixgbe_sw_init(adapter);
 +      if (err)
 +              goto err_sw_init;
 +
 +      /* Make it possible for the adapter to be woken up via WOL */
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      /*
 +       * If there is a fan on this device and it has failed, log the
 +       * failure.
 +       */
 +      if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
 +              u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
 +              if (esdp & IXGBE_ESDP_SDP1)
 +                      e_crit(probe, "Fan has stopped, replace the adapter\n");
 +      }
 +
 +      /* reset_hw fills in the perm_addr as well */
 +      hw->phy.reset_if_overtemp = true;
 +      err = hw->mac.ops.reset_hw(hw);
 +      hw->phy.reset_if_overtemp = false;
 +      if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
 +          hw->mac.type == ixgbe_mac_82598EB) {
 +              err = 0;
 +      } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 +              e_dev_err("failed to load because an unsupported SFP+ "
 +                        "module type was detected.\n");
 +              e_dev_err("Reload the driver after installing a supported "
 +                        "module.\n");
 +              goto err_sw_init;
 +      } else if (err) {
 +              e_dev_err("HW Init failed: %d\n", err);
 +              goto err_sw_init;
 +      }
 +
 +      ixgbe_probe_vf(adapter, ii);
 +
 +      netdev->features = NETIF_F_SG |
 +                         NETIF_F_IP_CSUM |
 +                         NETIF_F_IPV6_CSUM |
 +                         NETIF_F_HW_VLAN_TX |
 +                         NETIF_F_HW_VLAN_RX |
 +                         NETIF_F_HW_VLAN_FILTER |
 +                         NETIF_F_TSO |
 +                         NETIF_F_TSO6 |
 +                         NETIF_F_RXHASH |
 +                         NETIF_F_RXCSUM;
 +
 +      netdev->hw_features = netdev->features;
 +
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              netdev->features |= NETIF_F_SCTP_CSUM;
 +              netdev->hw_features |= NETIF_F_SCTP_CSUM |
 +                                     NETIF_F_NTUPLE;
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      netdev->vlan_features |= NETIF_F_TSO;
 +      netdev->vlan_features |= NETIF_F_TSO6;
 +      netdev->vlan_features |= NETIF_F_IP_CSUM;
 +      netdev->vlan_features |= NETIF_F_IPV6_CSUM;
 +      netdev->vlan_features |= NETIF_F_SG;
 +
 +      netdev->priv_flags |= IFF_UNICAST_FLT;
 +
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
 +                                  IXGBE_FLAG_DCB_ENABLED);
 +
 +#ifdef CONFIG_IXGBE_DCB
 +      netdev->dcbnl_ops = &dcbnl_ops;
 +#endif
 +
 +#ifdef IXGBE_FCOE
 +      if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
 +              if (hw->mac.ops.get_device_caps) {
 +                      hw->mac.ops.get_device_caps(hw, &device_caps);
 +                      if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
 +                              adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
 +              }
 +      }
 +      if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
 +              netdev->vlan_features |= NETIF_F_FCOE_CRC;
 +              netdev->vlan_features |= NETIF_F_FSO;
 +              netdev->vlan_features |= NETIF_F_FCOE_MTU;
 +      }
 +#endif /* IXGBE_FCOE */
 +      if (pci_using_dac) {
 +              netdev->features |= NETIF_F_HIGHDMA;
 +              netdev->vlan_features |= NETIF_F_HIGHDMA;
 +      }
 +
 +      if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
 +              netdev->hw_features |= NETIF_F_LRO;
 +      if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
 +              netdev->features |= NETIF_F_LRO;
 +
 +      /* make sure the EEPROM is good */
 +      if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
 +              e_dev_err("The EEPROM Checksum Is Not Valid\n");
 +              err = -EIO;
 +              goto err_eeprom;
 +      }
 +
 +      memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
 +      memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
 +
 +      if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
 +              e_dev_err("invalid MAC address\n");
 +              err = -EIO;
 +              goto err_eeprom;
 +      }
 +
 +      /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
 +      if (hw->mac.ops.disable_tx_laser &&
 +          ((hw->phy.multispeed_fiber) ||
 +           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
 +            (hw->mac.type == ixgbe_mac_82599EB))))
 +              hw->mac.ops.disable_tx_laser(hw);
 +
 +      setup_timer(&adapter->service_timer, &ixgbe_service_timer,
 +                  (unsigned long) adapter);
 +
 +      INIT_WORK(&adapter->service_task, ixgbe_service_task);
 +      clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
 +
 +      err = ixgbe_init_interrupt_scheme(adapter);
 +      if (err)
 +              goto err_sw_init;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
 +              netdev->hw_features &= ~NETIF_F_RXHASH;
 +              netdev->features &= ~NETIF_F_RXHASH;
 +      }
 +
 +      switch (pdev->device) {
 +      case IXGBE_DEV_ID_82599_SFP:
 +              /* Only this subdevice supports WOL */
 +              if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
 +                      adapter->wol = IXGBE_WUFC_MAG;
 +              break;
 +      case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
 +              /* All except this subdevice support WOL */
 +              if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
 +                      adapter->wol = IXGBE_WUFC_MAG;
 +              break;
 +      case IXGBE_DEV_ID_82599_KX4:
 +              adapter->wol = IXGBE_WUFC_MAG;
 +              break;
 +      default:
 +              adapter->wol = 0;
 +              break;
 +      }
 +      device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 +
 +      /* pick up the PCI bus settings for reporting later */
 +      hw->mac.ops.get_bus_info(hw);
 +
 +      /* print bus type/speed/width info */
 +      e_dev_info("(PCI Express:%s:%s) %pM\n",
 +                 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
 +                  hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
 +                  "Unknown"),
 +                 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
 +                  hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
 +                  hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
 +                  "Unknown"),
 +                 netdev->dev_addr);
 +
 +      err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
 +      if (err)
 +              strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
 +      if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
 +              e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
 +                         hw->mac.type, hw->phy.type, hw->phy.sfp_type,
 +                         part_str);
 +      else
 +              e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
 +                         hw->mac.type, hw->phy.type, part_str);
 +
 +      if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
 +              e_dev_warn("PCI-Express bandwidth available for this card is "
 +                         "not sufficient for optimal performance.\n");
 +              e_dev_warn("For optimal performance a x8 PCI-Express slot "
 +                         "is required.\n");
 +      }
 +
 +      /* save off EEPROM version number */
 +      hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
 +
 +      /* reset the hardware with the new settings */
 +      err = hw->mac.ops.start_hw(hw);
 +
 +      if (err == IXGBE_ERR_EEPROM_VERSION) {
 +              /* We are running on a pre-production device, log a warning */
 +              e_dev_warn("This device is a pre-production adapter/LOM. "
 +                         "Please be aware there may be issues associated "
 +                         "with your hardware.  If you are experiencing "
 +                         "problems please contact your Intel or hardware "
 +                         "representative who provided you with this "
 +                         "hardware.\n");
 +      }
 +      strcpy(netdev->name, "eth%d");
 +      err = register_netdev(netdev);
 +      if (err)
 +              goto err_register;
 +
 +      /* carrier off reporting is important to ethtool even BEFORE open */
 +      netif_carrier_off(netdev);
 +
 +#ifdef CONFIG_IXGBE_DCA
 +      if (dca_add_requester(&pdev->dev) == 0) {
 +              adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
 +              ixgbe_setup_dca(adapter);
 +      }
 +#endif
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 +              e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
 +              for (i = 0; i < adapter->num_vfs; i++)
 +                      ixgbe_vf_configuration(pdev, (i | 0x10000000));
 +      }
 +
 +      /* Inform firmware of driver version */
 +      if (hw->mac.ops.set_fw_drv_ver)
 +              hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD,
 +                                         FW_CEM_UNUSED_VER);
 +
 +      /* add san mac addr to netdev */
 +      ixgbe_add_sanmac_netdev(netdev);
 +
 +      e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
 +      cards_found++;
 +      return 0;
 +
 +err_register:
 +      ixgbe_release_hw_control(adapter);
 +      ixgbe_clear_interrupt_scheme(adapter);
 +err_sw_init:
 +err_eeprom:
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              ixgbe_disable_sriov(adapter);
 +      adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
 +      iounmap(hw->hw_addr);
 +err_ioremap:
 +      free_netdev(netdev);
 +err_alloc_etherdev:
 +      pci_release_selected_regions(pdev,
 +                                   pci_select_bars(pdev, IORESOURCE_MEM));
 +err_pci_reg:
 +err_dma:
 +      pci_disable_device(pdev);
 +      return err;
 +}
 +
 +/**
 + * ixgbe_remove - Device Removal Routine
 + * @pdev: PCI device information struct
 + *
 + * ixgbe_remove is called by the PCI subsystem to alert the driver
 + * that it should release a PCI device.  This could be caused by a
 + * Hot-Plug event, or because the driver is going to be removed from
 + * memory.
 + **/
 +static void __devexit ixgbe_remove(struct pci_dev *pdev)
 +{
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
 +
 +      set_bit(__IXGBE_DOWN, &adapter->state);
 +      cancel_work_sync(&adapter->service_task);
 +
 +#ifdef CONFIG_IXGBE_DCA
 +      if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 +              adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
 +              dca_remove_requester(&pdev->dev);
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
 +      }
 +
 +#endif
 +#ifdef IXGBE_FCOE
 +      if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 +              ixgbe_cleanup_fcoe(adapter);
 +
 +#endif /* IXGBE_FCOE */
 +
 +      /* remove the added san mac */
 +      ixgbe_del_sanmac_netdev(netdev);
 +
 +      if (netdev->reg_state == NETREG_REGISTERED)
 +              unregister_netdev(netdev);
 +
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              ixgbe_disable_sriov(adapter);
 +
 +      ixgbe_clear_interrupt_scheme(adapter);
 +
 +      ixgbe_release_hw_control(adapter);
 +
 +      iounmap(adapter->hw.hw_addr);
 +      pci_release_selected_regions(pdev, pci_select_bars(pdev,
 +                                   IORESOURCE_MEM));
 +
 +      e_dev_info("complete\n");
 +
 +      free_netdev(netdev);
 +
 +      pci_disable_pcie_error_reporting(pdev);
 +
 +      pci_disable_device(pdev);
 +}
 +
 +/**
 + * ixgbe_io_error_detected - called when PCI error is detected
 + * @pdev: Pointer to PCI device
 + * @state: The current pci connection state
 + *
 + * This function is called after a PCI bus error affecting
 + * this device has been detected.
 + */
 +static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 +                                              pci_channel_state_t state)
 +{
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
 +
 +      netif_device_detach(netdev);
 +
 +      if (state == pci_channel_io_perm_failure)
 +              return PCI_ERS_RESULT_DISCONNECT;
 +
 +      if (netif_running(netdev))
 +              ixgbe_down(adapter);
 +      pci_disable_device(pdev);
 +
 +      /* Request a slot reset. */
 +      return PCI_ERS_RESULT_NEED_RESET;
 +}
 +
 +/**
 + * ixgbe_io_slot_reset - called after the pci bus has been reset.
 + * @pdev: Pointer to PCI device
 + *
 + * Restart the card from scratch, as if from a cold boot.
 + */
 +static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 +{
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      pci_ers_result_t result;
 +      int err;
 +
 +      if (pci_enable_device_mem(pdev)) {
 +              e_err(probe, "Cannot re-enable PCI device after reset.\n");
 +              result = PCI_ERS_RESULT_DISCONNECT;
 +      } else {
 +              pci_set_master(pdev);
 +              pci_restore_state(pdev);
 +              pci_save_state(pdev);
 +
 +              pci_wake_from_d3(pdev, false);
 +
 +              ixgbe_reset(adapter);
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 +              result = PCI_ERS_RESULT_RECOVERED;
 +      }
 +
 +      err = pci_cleanup_aer_uncorrect_error_status(pdev);
 +      if (err) {
 +              e_dev_err("pci_cleanup_aer_uncorrect_error_status "
 +                        "failed 0x%0x\n", err);
 +              /* non-fatal, continue */
 +      }
 +
 +      return result;
 +}
 +
 +/**
 + * ixgbe_io_resume - called when traffic can start flowing again.
 + * @pdev: Pointer to PCI device
 + *
 + * This callback is called when the error recovery driver tells us that
 + * it's OK to resume normal operation.
 + */
 +static void ixgbe_io_resume(struct pci_dev *pdev)
 +{
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
 +
 +      if (netif_running(netdev))
 +              ixgbe_up(adapter);
 +
 +      netif_device_attach(netdev);
 +}
 +
 +static struct pci_error_handlers ixgbe_err_handler = {
 +      .error_detected = ixgbe_io_error_detected,
 +      .slot_reset = ixgbe_io_slot_reset,
 +      .resume = ixgbe_io_resume,
 +};
 +
 +static struct pci_driver ixgbe_driver = {
 +      .name     = ixgbe_driver_name,
 +      .id_table = ixgbe_pci_tbl,
 +      .probe    = ixgbe_probe,
 +      .remove   = __devexit_p(ixgbe_remove),
 +#ifdef CONFIG_PM
 +      .suspend  = ixgbe_suspend,
 +      .resume   = ixgbe_resume,
 +#endif
 +      .shutdown = ixgbe_shutdown,
 +      .err_handler = &ixgbe_err_handler
 +};
 +
 +/**
 + * ixgbe_init_module - Driver Registration Routine
 + *
 + * ixgbe_init_module is the first routine called when the driver is
 + * loaded. All it does is register with the PCI subsystem.
 + **/
 +static int __init ixgbe_init_module(void)
 +{
 +      int ret;
 +      pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
 +      pr_info("%s\n", ixgbe_copyright);
 +
 +#ifdef CONFIG_IXGBE_DCA
 +      dca_register_notify(&dca_notifier);
 +#endif
 +
 +      ret = pci_register_driver(&ixgbe_driver);
 +      return ret;
 +}
 +
 +module_init(ixgbe_init_module);
 +
 +/**
 + * ixgbe_exit_module - Driver Exit Cleanup Routine
 + *
 + * ixgbe_exit_module is called just before the driver is removed
 + * from memory.
 + **/
 +static void __exit ixgbe_exit_module(void)
 +{
 +#ifdef CONFIG_IXGBE_DCA
 +      dca_unregister_notify(&dca_notifier);
 +#endif
 +      pci_unregister_driver(&ixgbe_driver);
 +      rcu_barrier(); /* Wait for completion of call_rcu()'s */
 +}
 +
 +#ifdef CONFIG_IXGBE_DCA
 +static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 +                          void *p)
 +{
 +      int ret_val;
 +
 +      ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
 +                                       __ixgbe_notify_dca);
 +
 +      return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 +}
 +
 +#endif /* CONFIG_IXGBE_DCA */
 +
 +module_exit(ixgbe_exit_module);
 +
 +/* ixgbe_main.c */
index 7efa624,0000000..00bc4fc
mode 100644,000000..100644
--- /dev/null
@@@ -1,21 -1,0 +1,22 @@@
-       tristate "OKI SEMICONDUCTOR ML7223 IOH GbE (Intel EG20T PCH)"
 +#
 +# OKI Semiconductor device configuration
 +#
 +
 +config PCH_GBE
-         Output Hub), ML7223.
-         ML7223 IOH is for MP(Media Phone) use.
-         ML7223 is companion chip for Intel Atom E6xx series.
-         ML7223 is completely compatible for Intel EG20T PCH.
++      tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
 +      depends on PCI
 +      select NET_CORE
 +      select MII
 +      ---help---
 +        This is a gigabit ethernet driver for the EG20T PCH.
 +        The EG20T PCH is the platform controller hub used in Intel's
 +        general embedded platforms.  The EG20T PCH has a Gigabit Ethernet
 +        interface; using it, the system can access devices connected over
 +        Gigabit Ethernet.  This driver enables that Gigabit Ethernet function.
 +
 +        This driver can also be used for the OKI SEMICONDUCTOR IOH (Input/
++        Output Hub), ML7223/ML7831.
++        The ML7223 IOH is for MP (Media Phone) use; the ML7831 IOH is for
++        general purpose use.
++        ML7223/ML7831 is a companion chip for the Intel Atom E6xx series.
++        ML7223/ML7831 is fully compatible with the Intel EG20T PCH.
index 72276fe,0000000..35a7c21
mode 100644,000000..100644
--- /dev/null
@@@ -1,2523 -1,0 +1,2607 @@@
- #include <linux/prefetch.h>
 +/*
 + * Copyright (C) 1999 - 2010 Intel Corporation.
 + * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
 + *
 + * This code was derived from the Intel e1000e Linux driver.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation; version 2 of the License.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 + */
 +
 +#include "pch_gbe.h"
 +#include "pch_gbe_api.h"
-       if (netdev->features & NETIF_F_RXCSUM) {
-               tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
-               tcpip |= PCH_GBE_RX_TCPIPACC_EN;
-       } else {
-               tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
-               tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
-       }
 +
 +#define DRV_VERSION     "1.00"
 +const char pch_driver_version[] = DRV_VERSION;
 +
 +#define PCI_DEVICE_ID_INTEL_IOH1_GBE  0x8802          /* Pci device ID */
 +#define PCH_GBE_MAR_ENTRIES           16
 +#define PCH_GBE_SHORT_PKT             64
 +#define DSC_INIT16                    0xC000
 +#define PCH_GBE_DMA_ALIGN             0
 +#define PCH_GBE_DMA_PADDING           2
 +#define PCH_GBE_WATCHDOG_PERIOD               (1 * HZ)        /* watchdog time */
 +#define PCH_GBE_COPYBREAK_DEFAULT     256
 +#define PCH_GBE_PCI_BAR                       1
++#define PCH_GBE_RESERVE_MEMORY                0x200000        /* 2MB */
 +
 +/* Macros for ML7223 */
 +#define PCI_VENDOR_ID_ROHM                    0x10db
 +#define PCI_DEVICE_ID_ROHM_ML7223_GBE         0x8013
 +
++/* Macros for ML7831 */
++#define PCI_DEVICE_ID_ROHM_ML7831_GBE         0x8802
++
 +#define PCH_GBE_TX_WEIGHT         64
 +#define PCH_GBE_RX_WEIGHT         64
 +#define PCH_GBE_RX_BUFFER_WRITE   16
 +
 +/* Initialize the wake-on-LAN settings */
 +#define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
 +
 +#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
 +      PCH_GBE_CHIP_TYPE_INTERNAL | \
 +      PCH_GBE_RGMII_MODE_RGMII     \
 +      )
 +
 +/* Ethertype field values */
++#define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
 +#define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
 +#define PCH_GBE_FRAME_SIZE_2048         2048
 +#define PCH_GBE_FRAME_SIZE_4096         4096
 +#define PCH_GBE_FRAME_SIZE_8192         8192
 +
 +#define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
 +#define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
 +#define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
 +#define PCH_GBE_DESC_UNUSED(R) \
 +      ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 +      (R)->next_to_clean - (R)->next_to_use - 1)
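
PCH_GBE_DESC_UNUSED above counts the free slots in the circular descriptor ring while always keeping one slot empty, so a full ring can be distinguished from an empty one. An equivalent sketch with a hypothetical helper name:

    /* free = count - in_flight - 1, where in_flight = (use - clean) mod count */
    static inline unsigned int ring_unused(unsigned int count,
                                           unsigned int next_to_use,
                                           unsigned int next_to_clean)
    {
            unsigned int in_flight = (next_to_use >= next_to_clean)
                    ? next_to_use - next_to_clean
                    : count + next_to_use - next_to_clean;

            return count - in_flight - 1;
    }

For example, with count = 64, next_to_clean = 4 and next_to_use = 10 there are 6 descriptors in flight, so 57 remain unused, the same value the macro yields.
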
 +
 +/* Pause packet value */
 +#define       PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
 +#define       PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
 +#define       PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
 +#define       PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
 +
 +#define PCH_GBE_ETH_ALEN            6
 +
 +/* This defines the bits that are set in the Interrupt Mask
 + * Set/Read Register.  Each bit is documented below:
 + *   o RX_DMA_CMPLT = Receive DMA complete
 + *   o RX_DSC_EMP   = Receive descriptor empty
 + *   o RX_FIFO_ERR  = Receive FIFO error
 + *   o WOL_DET      = Wake-on-LAN event detected
 + *   o TX_CMPLT     = Transmit complete
 + */
 +#define PCH_GBE_INT_ENABLE_MASK ( \
 +      PCH_GBE_INT_RX_DMA_CMPLT |    \
 +      PCH_GBE_INT_RX_DSC_EMP   |    \
++      PCH_GBE_INT_RX_FIFO_ERR  |    \
 +      PCH_GBE_INT_WOL_DET      |    \
 +      PCH_GBE_INT_TX_CMPLT          \
 +      )
 +
++#define PCH_GBE_INT_DISABLE_ALL               0
 +
 +static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 +
 +static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 +static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 +                             int data);
 +
 +inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
 +{
 +      iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
 +}
 +
 +/**
 + * pch_gbe_mac_read_mac_addr - Read MAC address
 + * @hw:                   Pointer to the HW structure
 + * Returns
 + *    0:                      Successful.
 + */
 +s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
 +{
 +      u32  adr1a, adr1b;
 +
 +      adr1a = ioread32(&hw->reg->mac_adr[0].high);
 +      adr1b = ioread32(&hw->reg->mac_adr[0].low);
 +
 +      hw->mac.addr[0] = (u8)(adr1a & 0xFF);
 +      hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
 +      hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
 +      hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
 +      hw->mac.addr[4] = (u8)(adr1b & 0xFF);
 +      hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
 +
 +      pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_wait_clr_bit - Wait to clear a bit
 + * @reg:      Pointer to the register
 + * @bit:      Busy bit
 + */
 +static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 +{
 +      u32 tmp;
 +      /* wait busy */
 +      tmp = 1000;
 +      while ((ioread32(reg) & bit) && --tmp)
 +              cpu_relax();
 +      if (!tmp)
 +              pr_err("Error: busy bit is not cleared\n");
 +}
++
++/**
++ * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
++ * @reg:      Pointer to the register
++ * @bit:      Busy bit
++ */
++static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
++{
++      u32 tmp;
++      int ret = -1;
++      /* wait busy */
++      tmp = 20;
++      while ((ioread32(reg) & bit) && --tmp)
++              udelay(5);
++      if (!tmp)
++              pr_err("Error: busy bit is not cleared\n");
++      else
++              ret = 0;
++      return ret;
++}
++
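
Both wait helpers above are instances of the same bounded busy-wait: poll a status register until a bit clears or an attempt budget is exhausted, relaxing the CPU between reads (cpu_relax() in process context, udelay() in the IRQ-safe variant). A generic, self-contained sketch of the pattern (illustrative only):

    /* Poll read_reg() until (value & bit) == 0, at most 'attempts' times.
     * Returns 0 once the bit has cleared, -1 on timeout.
     */
    static int poll_bit_clear(unsigned int (*read_reg)(void), unsigned int bit,
                              unsigned int attempts)
    {
            while (attempts--) {
                    if (!(read_reg() & bit))
                            return 0;
                    /* back off here: cpu_relax() or udelay() in real code */
            }
            return -1;
    }
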
 +/**
 + * pch_gbe_mac_mar_set - Set MAC address register
 + * @hw:           Pointer to the HW structure
 + * @addr:   Pointer to the MAC address
 + * @index:  MAC address array register
 + */
 +static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
 +{
 +      u32 mar_low, mar_high, adrmask;
 +
 +      pr_debug("index : 0x%x\n", index);
 +
 +      /*
 +       * HW expects these in little endian so we reverse the byte order
 +       * from network order (big endian) to little endian
 +       */
 +      mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
 +                 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
 +      mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
 +      /* Stop the MAC address at this index. */
 +      adrmask = ioread32(&hw->reg->ADDR_MASK);
 +      iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
 +      /* wait busy */
 +      pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 +      /* Set the MAC address to the MAC address 1A/1B register */
 +      iowrite32(mar_high, &hw->reg->mac_adr[index].high);
 +      iowrite32(mar_low, &hw->reg->mac_adr[index].low);
 +      /* Start the MAC address at this index */
 +      iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
 +      /* wait busy */
 +      pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 +}
 +
 +/**
 + * pch_gbe_mac_reset_hw - Reset hardware
 + * @hw:       Pointer to the HW structure
 + */
 +static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 +{
 +      /* Read the MAC address and store it in the private data */
 +      pch_gbe_mac_read_mac_addr(hw);
 +      iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
 +#ifdef PCH_GBE_MAC_IFOP_RGMII
 +      iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
 +#endif
 +      pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
 +      /* Setup the receive address */
 +      pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 +      return;
 +}
 +
++static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
++{
++      /* Read the MAC address and store it in the private data */
++      pch_gbe_mac_read_mac_addr(hw);
++      iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
++      pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
++      /* Setup the MAC address */
++      pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
++      return;
++}
++
 +/**
 + * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 + * @hw:       Pointer to the HW structure
 + * @mar_count: Receive address registers
 + */
 +static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
 +{
 +      u32 i;
 +
 +      /* Setup the receive address */
 +      pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 +
 +      /* Zero out the other receive addresses */
 +      for (i = 1; i < mar_count; i++) {
 +              iowrite32(0, &hw->reg->mac_adr[i].high);
 +              iowrite32(0, &hw->reg->mac_adr[i].low);
 +      }
 +      iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
 +      /* wait busy */
 +      pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 +}
 +
 +
 +/**
 + * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 + * @hw:                   Pointer to the HW structure
 + * @mc_addr_list:   Array of multicast addresses to program
 + * @mc_addr_count:  Number of multicast addresses to program
 + * @mar_used_count: The first MAC Address register free to program
 + * @mar_total_num:  Total number of supported MAC Address Registers
 + */
 +static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
 +                                          u8 *mc_addr_list, u32 mc_addr_count,
 +                                          u32 mar_used_count, u32 mar_total_num)
 +{
 +      u32 i, adrmask;
 +
 +      /* Load the first set of multicast addresses into the exact
 +       * filters (RAR).  If there are not enough to fill the RAR
 +       * array, clear the filters.
 +       */
 +      for (i = mar_used_count; i < mar_total_num; i++) {
 +              if (mc_addr_count) {
 +                      pch_gbe_mac_mar_set(hw, mc_addr_list, i);
 +                      mc_addr_count--;
 +                      mc_addr_list += PCH_GBE_ETH_ALEN;
 +              } else {
 +                      /* Clear MAC address mask */
 +                      adrmask = ioread32(&hw->reg->ADDR_MASK);
 +                      iowrite32((adrmask | (0x0001 << i)),
 +                                      &hw->reg->ADDR_MASK);
 +                      /* wait busy */
 +                      pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 +                      /* Clear MAC address */
 +                      iowrite32(0, &hw->reg->mac_adr[i].high);
 +                      iowrite32(0, &hw->reg->mac_adr[i].low);
 +              }
 +      }
 +}
 +
 +/**
 + * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 + * @hw:                   Pointer to the HW structure
 + * Returns
 + *    0:                      Successful.
 + *    Negative value:         Failed.
 + */
 +s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
 +{
 +      struct pch_gbe_mac_info *mac = &hw->mac;
 +      u32 rx_fctrl;
 +
 +      pr_debug("mac->fc = %u\n", mac->fc);
 +
 +      rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
 +
 +      switch (mac->fc) {
 +      case PCH_GBE_FC_NONE:
 +              rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 +              mac->tx_fc_enable = false;
 +              break;
 +      case PCH_GBE_FC_RX_PAUSE:
 +              rx_fctrl |= PCH_GBE_FL_CTRL_EN;
 +              mac->tx_fc_enable = false;
 +              break;
 +      case PCH_GBE_FC_TX_PAUSE:
 +              rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 +              mac->tx_fc_enable = true;
 +              break;
 +      case PCH_GBE_FC_FULL:
 +              rx_fctrl |= PCH_GBE_FL_CTRL_EN;
 +              mac->tx_fc_enable = true;
 +              break;
 +      default:
 +              pr_err("Flow control param set incorrectly\n");
 +              return -EINVAL;
 +      }
 +      if (mac->link_duplex == DUPLEX_HALF)
 +              rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 +      iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
 +      pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
 +               ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 + * @hw:     Pointer to the HW structure
 + * @wu_evt: Wake up event
 + */
 +static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
 +{
 +      u32 addr_mask;
 +
 +      pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
 +               wu_evt, ioread32(&hw->reg->ADDR_MASK));
 +
 +      if (wu_evt) {
 +              /* Set Wake-On-Lan address mask */
 +              addr_mask = ioread32(&hw->reg->ADDR_MASK);
 +              iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
 +              /* wait busy */
 +              pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
 +              iowrite32(0, &hw->reg->WOL_ST);
 +              iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
 +              iowrite32(0x02, &hw->reg->TCPIP_ACC);
 +              iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
 +      } else {
 +              iowrite32(0, &hw->reg->WOL_CTRL);
 +              iowrite32(0, &hw->reg->WOL_ST);
 +      }
 +      return;
 +}
 +
 +/**
 + * pch_gbe_mac_ctrl_miim - Control MIIM interface
 + * @hw:   Pointer to the HW structure
 + * @addr: Address of PHY
 + * @dir:  Operation (Write or Read)
 + * @reg:  Access register of PHY
 + * @data: Write data.
 + *
 + * Returns: Read data.
 + */
 +u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
 +                      u16 data)
 +{
 +      u32 data_out = 0;
 +      unsigned int i;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&hw->miim_lock, flags);
 +
 +      for (i = 100; i; --i) {
 +              if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
 +                      break;
 +              udelay(20);
 +      }
 +      if (i == 0) {
 +              pr_err("pch-gbe.miim won't go Ready\n");
 +              spin_unlock_irqrestore(&hw->miim_lock, flags);
 +              return 0;       /* No way to indicate timeout error */
 +      }
 +      iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
 +                (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
 +                dir | data), &hw->reg->MIIM);
 +      for (i = 0; i < 100; i++) {
 +              udelay(20);
 +              data_out = ioread32(&hw->reg->MIIM);
 +              if ((data_out & PCH_GBE_MIIM_OPER_READY))
 +                      break;
 +      }
 +      spin_unlock_irqrestore(&hw->miim_lock, flags);
 +
 +      pr_debug("PHY %s: reg=%d, data=0x%04X\n",
 +               dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
 +               dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
 +      return (u16) data_out;
 +}
 +
 +/**
 + * pch_gbe_mac_set_pause_packet - Set pause packet
 + * @hw:   Pointer to the HW structure
 + */
 +static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
 +{
 +      unsigned long tmp2, tmp3;
 +
 +      /* Set Pause packet */
 +      tmp2 = hw->mac.addr[1];
 +      tmp2 = (tmp2 << 8) | hw->mac.addr[0];
 +      tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
 +
 +      tmp3 = hw->mac.addr[5];
 +      tmp3 = (tmp3 << 8) | hw->mac.addr[4];
 +      tmp3 = (tmp3 << 8) | hw->mac.addr[3];
 +      tmp3 = (tmp3 << 8) | hw->mac.addr[2];
 +
 +      iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
 +      iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
 +      iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
 +      iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
 +      iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
 +
 +      /* Transmit Pause Packet */
 +      iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
 +
 +      pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
 +               ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
 +               ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
 +               ioread32(&hw->reg->PAUSE_PKT5));
 +
 +      return;
 +}
 +
 +
 +/**
 + * pch_gbe_alloc_queues - Allocate memory for all rings
 + * @adapter:  Board private structure to initialize
 + * Returns
 + *    0:      Successfully
 + *    Negative value: Failed
 + */
 +static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 +{
 +      int size;
 +
 +      size = (int)sizeof(struct pch_gbe_tx_ring);
 +      adapter->tx_ring = kzalloc(size, GFP_KERNEL);
 +      if (!adapter->tx_ring)
 +              return -ENOMEM;
 +      size = (int)sizeof(struct pch_gbe_rx_ring);
 +      adapter->rx_ring = kzalloc(size, GFP_KERNEL);
 +      if (!adapter->rx_ring) {
 +              kfree(adapter->tx_ring);
 +              return -ENOMEM;
 +      }
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_init_stats - Initialize stats
 + * @adapter:  Board private structure to initialize
 + */
 +static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
 +{
 +      memset(&adapter->stats, 0, sizeof(adapter->stats));
 +      return;
 +}
 +
 +/**
 + * pch_gbe_init_phy - Initialize PHY
 + * @adapter:  Board private structure to initialize
 + * Returns
 + *    0:      Successfully
 + *    Negative value: Failed
 + */
 +static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      u32 addr;
 +      u16 bmcr, stat;
 +
 +      /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
 +      for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
 +              adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
 +              bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
 +              stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
 +              stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
 +              if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
 +                      break;
 +      }
 +      adapter->hw.phy.addr = adapter->mii.phy_id;
 +      pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
 +      if (addr == 32)
 +              return -EAGAIN;
 +      /* Select the phy and isolate the rest */
 +      for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
 +              if (addr != adapter->mii.phy_id) {
 +                      pch_gbe_mdio_write(netdev, addr, MII_BMCR,
 +                                         BMCR_ISOLATE);
 +              } else {
 +                      bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
 +                      pch_gbe_mdio_write(netdev, addr, MII_BMCR,
 +                                         bmcr & ~BMCR_ISOLATE);
 +              }
 +      }
 +
 +      /* MII setup */
 +      adapter->mii.phy_id_mask = 0x1F;
 +      adapter->mii.reg_num_mask = 0x1F;
 +      adapter->mii.dev = adapter->netdev;
 +      adapter->mii.mdio_read = pch_gbe_mdio_read;
 +      adapter->mii.mdio_write = pch_gbe_mdio_write;
 +      adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
 +      return 0;
 +}
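
The discovery loop above remaps its index so that PHY address 1 is probed before address 0 and the remaining addresses follow in order; a PHY is treated as absent when BMCR reads back as all ones or both BMCR and BMSR read as zero. The index-to-address mapping, as a tiny sketch:

    /* loop index -> PHY address probed: 0 -> 1, 1 -> 0, 2 -> 2, ..., 31 -> 31 */
    static inline unsigned int probe_addr(unsigned int i)
    {
            return (i == 0) ? 1 : (i == 1) ? 0 : i;
    }
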
 +
 +/**
 + * pch_gbe_mdio_read - The read function for mii
 + * @netdev: Network interface device structure
 + * @addr:   Phy ID
 + * @reg:    Access location
 + * Returns
 + *    Data read from the PHY register
 + */
 +static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +
 +      return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
 +                                   (u16) 0);
 +}
 +
 +/**
 + * pch_gbe_mdio_write - The write function for mii
 + * @netdev: Network interface device structure
 + * @addr:   Phy ID
 + * @reg:    Access location
 + * @data:   Write data
 + */
 +static void pch_gbe_mdio_write(struct net_device *netdev,
 +                             int addr, int reg, int data)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +
 +      pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
 +}
 +
 +/**
 + * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 + * @work:  Pointer to the work struct (within the board private structure)
 + */
 +static void pch_gbe_reset_task(struct work_struct *work)
 +{
 +      struct pch_gbe_adapter *adapter;
 +      adapter = container_of(work, struct pch_gbe_adapter, reset_task);
 +
 +      rtnl_lock();
 +      pch_gbe_reinit_locked(adapter);
 +      rtnl_unlock();
 +}
 +
 +/**
 + * pch_gbe_reinit_locked- Re-initialization
 + * @adapter:  Board private structure
 + */
 +void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 +{
 +      pch_gbe_down(adapter);
 +      pch_gbe_up(adapter);
 +}
 +
 +/**
 + * pch_gbe_reset - Reset GbE
 + * @adapter:  Board private structure
 + */
 +void pch_gbe_reset(struct pch_gbe_adapter *adapter)
 +{
 +      pch_gbe_mac_reset_hw(&adapter->hw);
 +      /* Setup the receive address. */
 +      pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
 +      if (pch_gbe_hal_init_hw(&adapter->hw))
 +              pr_err("Hardware Error\n");
 +}
 +
 +/**
 + * pch_gbe_free_irq - Free an interrupt
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +
 +      free_irq(adapter->pdev->irq, netdev);
 +      if (adapter->have_msi) {
 +              pci_disable_msi(adapter->pdev);
 +              pr_debug("call pci_disable_msi\n");
 +      }
 +}
 +
 +/**
 + * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +
 +      atomic_inc(&adapter->irq_sem);
 +      iowrite32(0, &hw->reg->INT_EN);
 +      ioread32(&hw->reg->INT_ST);
 +      synchronize_irq(adapter->pdev->irq);
 +
 +      pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
 +}
 +
 +/**
 + * pch_gbe_irq_enable - Enable default interrupt generation settings
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +
 +      if (likely(atomic_dec_and_test(&adapter->irq_sem)))
 +              iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
 +      ioread32(&hw->reg->INT_ST);
 +      pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
 +}
 +
 +
 +
 +/**
 + * pch_gbe_setup_tctl - configure the Transmit control registers
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      u32 tx_mode, tcpip;
 +
 +      tx_mode = PCH_GBE_TM_LONG_PKT |
 +              PCH_GBE_TM_ST_AND_FD |
 +              PCH_GBE_TM_SHORT_PKT |
 +              PCH_GBE_TM_TH_TX_STRT_8 |
 +              PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
 +
 +      iowrite32(tx_mode, &hw->reg->TX_MODE);
 +
 +      tcpip = ioread32(&hw->reg->TCPIP_ACC);
 +      tcpip |= PCH_GBE_TX_TCPIPACC_EN;
 +      iowrite32(tcpip, &hw->reg->TCPIP_ACC);
 +      return;
 +}
 +
 +/**
 + * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      u32 tdba, tdlen, dctrl;
 +
 +      pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
 +               (unsigned long long)adapter->tx_ring->dma,
 +               adapter->tx_ring->size);
 +
 +      /* Setup the HW Tx Head and Tail descriptor pointers */
 +      tdba = adapter->tx_ring->dma;
 +      tdlen = adapter->tx_ring->size - 0x10;
 +      iowrite32(tdba, &hw->reg->TX_DSC_BASE);
 +      iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
 +      iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
 +
 +      /* Enables Transmission DMA */
 +      dctrl = ioread32(&hw->reg->DMA_CTRL);
 +      dctrl |= PCH_GBE_TX_DMA_EN;
 +      iowrite32(dctrl, &hw->reg->DMA_CTRL);
 +}
 +
 +/**
 + * pch_gbe_setup_rctl - Configure the receive control registers
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      u32 rx_mode, tcpip;
 +
 +      rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
 +      PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
 +
 +      iowrite32(rx_mode, &hw->reg->RX_MODE);
 +
 +      tcpip = ioread32(&hw->reg->TCPIP_ACC);
 +
-       /* Enables Receive DMA */
-       rxdma = ioread32(&hw->reg->DMA_CTRL);
-       rxdma |= PCH_GBE_RX_DMA_EN;
-       iowrite32(rxdma, &hw->reg->DMA_CTRL);
-       /* Enables Receive */
-       iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
++      tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
++      tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
 +      iowrite32(tcpip, &hw->reg->TCPIP_ACC);
 +      return;
 +}
 +
 +/**
 + * pch_gbe_configure_rx - Configure Receive Unit after Reset
 + * @adapter:  Board private structure
 + */
 +static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      u32 rdba, rdlen, rctl, rxdma;
 +
 +      pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
 +               (unsigned long long)adapter->rx_ring->dma,
 +               adapter->rx_ring->size);
 +
 +      pch_gbe_mac_force_mac_fc(hw);
 +
 +      /* Disables Receive MAC */
 +      rctl = ioread32(&hw->reg->MAC_RX_EN);
 +      iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
 +
 +      /* Disables Receive DMA */
 +      rxdma = ioread32(&hw->reg->DMA_CTRL);
 +      rxdma &= ~PCH_GBE_RX_DMA_EN;
 +      iowrite32(rxdma, &hw->reg->DMA_CTRL);
 +
 +      pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
 +               ioread32(&hw->reg->MAC_RX_EN),
 +               ioread32(&hw->reg->DMA_CTRL));
 +
 +      /* Setup the HW Rx Head and Tail Descriptor Pointers and
 +       * the Base and Length of the Rx Descriptor Ring */
 +      rdba = adapter->rx_ring->dma;
 +      rdlen = adapter->rx_ring->size - 0x10;
 +      iowrite32(rdba, &hw->reg->RX_DSC_BASE);
 +      iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
 +      iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-               adapter->stats.intr_rx_fifo_err_count++;
 +}
 +
 +/**
 + * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 + * @adapter:     Board private structure
 + * @buffer_info: Buffer information structure
 + */
 +static void pch_gbe_unmap_and_free_tx_resource(
 +      struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
 +{
 +      if (buffer_info->mapped) {
 +              dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 +                               buffer_info->length, DMA_TO_DEVICE);
 +              buffer_info->mapped = false;
 +      }
 +      if (buffer_info->skb) {
 +              dev_kfree_skb_any(buffer_info->skb);
 +              buffer_info->skb = NULL;
 +      }
 +}
 +
 +/**
 + * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 + * @adapter:      Board private structure
 + * @buffer_info:  Buffer information structure
 + */
 +static void pch_gbe_unmap_and_free_rx_resource(
 +                                      struct pch_gbe_adapter *adapter,
 +                                      struct pch_gbe_buffer *buffer_info)
 +{
 +      if (buffer_info->mapped) {
 +              dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 +                               buffer_info->length, DMA_FROM_DEVICE);
 +              buffer_info->mapped = false;
 +      }
 +      if (buffer_info->skb) {
 +              dev_kfree_skb_any(buffer_info->skb);
 +              buffer_info->skb = NULL;
 +      }
 +}
 +
 +/**
 + * pch_gbe_clean_tx_ring - Free Tx Buffers
 + * @adapter:  Board private structure
 + * @tx_ring:  Ring to be cleaned
 + */
 +static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
 +                                 struct pch_gbe_tx_ring *tx_ring)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      struct pch_gbe_buffer *buffer_info;
 +      unsigned long size;
 +      unsigned int i;
 +
 +      /* Free all the Tx ring sk_buffs */
 +      for (i = 0; i < tx_ring->count; i++) {
 +              buffer_info = &tx_ring->buffer_info[i];
 +              pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
 +      }
 +      pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
 +
 +      size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
 +      memset(tx_ring->buffer_info, 0, size);
 +
 +      /* Zero out the descriptor ring */
 +      memset(tx_ring->desc, 0, tx_ring->size);
 +      tx_ring->next_to_use = 0;
 +      tx_ring->next_to_clean = 0;
 +      iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
 +      iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
 +}
 +
 +/**
 + * pch_gbe_clean_rx_ring - Free Rx Buffers
 + * @adapter:  Board private structure
 + * @rx_ring:  Ring to free buffers from
 + */
 +static void
 +pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
 +                    struct pch_gbe_rx_ring *rx_ring)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      struct pch_gbe_buffer *buffer_info;
 +      unsigned long size;
 +      unsigned int i;
 +
 +      /* Free all the Rx ring sk_buffs */
 +      for (i = 0; i < rx_ring->count; i++) {
 +              buffer_info = &rx_ring->buffer_info[i];
 +              pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
 +      }
 +      pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
 +      size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
 +      memset(rx_ring->buffer_info, 0, size);
 +
 +      /* Zero out the descriptor ring */
 +      memset(rx_ring->desc, 0, rx_ring->size);
 +      rx_ring->next_to_clean = 0;
 +      rx_ring->next_to_use = 0;
 +      iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
 +      iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
 +}
 +
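 +/**
 + * pch_gbe_set_rgmii_ctrl - Set the RGMII clock rate for the link speed
 + * @adapter:  Board private structure
 + * @speed:    Link speed
 + * @duplex:   Link duplex
 + */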
 +static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
 +                                  u16 duplex)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      unsigned long rgmii = 0;
 +
 +      /* Set the RGMII control. */
 +#ifdef PCH_GBE_MAC_IFOP_RGMII
 +      switch (speed) {
 +      case SPEED_10:
 +              rgmii = (PCH_GBE_RGMII_RATE_2_5M |
 +                       PCH_GBE_MAC_RGMII_CTRL_SETTING);
 +              break;
 +      case SPEED_100:
 +              rgmii = (PCH_GBE_RGMII_RATE_25M |
 +                       PCH_GBE_MAC_RGMII_CTRL_SETTING);
 +              break;
 +      case SPEED_1000:
 +              rgmii = (PCH_GBE_RGMII_RATE_125M |
 +                       PCH_GBE_MAC_RGMII_CTRL_SETTING);
 +              break;
 +      }
 +      iowrite32(rgmii, &hw->reg->RGMII_CTRL);
 +#else /* GMII */
 +      rgmii = 0;
 +      iowrite32(rgmii, &hw->reg->RGMII_CTRL);
 +#endif
 +}
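 +
 +/**
 + * pch_gbe_set_mode - Set the MAC communication mode (MII/GMII and duplex)
 + * @adapter:  Board private structure
 + * @speed:    Link speed
 + * @duplex:   Link duplex
 + */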
 +static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
 +                            u16 duplex)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      unsigned long mode = 0;
 +
 +      /* Set the communication mode */
 +      switch (speed) {
 +      case SPEED_10:
 +              mode = PCH_GBE_MODE_MII_ETHER;
 +              netdev->tx_queue_len = 10;
 +              break;
 +      case SPEED_100:
 +              mode = PCH_GBE_MODE_MII_ETHER;
 +              netdev->tx_queue_len = 100;
 +              break;
 +      case SPEED_1000:
 +              mode = PCH_GBE_MODE_GMII_ETHER;
 +              break;
 +      }
 +      if (duplex == DUPLEX_FULL)
 +              mode |= PCH_GBE_MODE_FULL_DUPLEX;
 +      else
 +              mode |= PCH_GBE_MODE_HALF_DUPLEX;
 +      iowrite32(mode, &hw->reg->MODE);
 +}
 +
 +/**
 + * pch_gbe_watchdog - Watchdog process
 + * @data:  Board private structure
 + */
 +static void pch_gbe_watchdog(unsigned long data)
 +{
 +      struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
 +      struct net_device *netdev = adapter->netdev;
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +
 +      pr_debug("right now = %ld\n", jiffies);
 +
 +      pch_gbe_update_stats(adapter);
 +      if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
 +              struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
 +              netdev->tx_queue_len = adapter->tx_queue_len;
 +              /* mii library handles link maintenance tasks */
 +              if (mii_ethtool_gset(&adapter->mii, &cmd)) {
 +                      pr_err("ethtool get setting Error\n");
 +                      mod_timer(&adapter->watchdog_timer,
 +                                round_jiffies(jiffies +
 +                                              PCH_GBE_WATCHDOG_PERIOD));
 +                      return;
 +              }
 +              hw->mac.link_speed = ethtool_cmd_speed(&cmd);
 +              hw->mac.link_duplex = cmd.duplex;
 +              /* Set the RGMII control. */
 +              pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
 +                                              hw->mac.link_duplex);
 +              /* Set the communication mode */
 +              pch_gbe_set_mode(adapter, hw->mac.link_speed,
 +                               hw->mac.link_duplex);
 +              netdev_dbg(netdev,
 +                         "Link is Up %d Mbps %s-Duplex\n",
 +                         hw->mac.link_speed,
 +                         cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
 +              netif_carrier_on(netdev);
 +              netif_wake_queue(netdev);
 +      } else if ((!mii_link_ok(&adapter->mii)) &&
 +                 (netif_carrier_ok(netdev))) {
 +              netdev_dbg(netdev, "NIC Link is Down\n");
 +              hw->mac.link_speed = SPEED_10;
 +              hw->mac.link_duplex = DUPLEX_HALF;
 +              netif_carrier_off(netdev);
 +              netif_stop_queue(netdev);
 +      }
 +      mod_timer(&adapter->watchdog_timer,
 +                round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
 +}
 +
 +/**
 + * pch_gbe_tx_queue - Carry out queuing of the transmission data
 + * @adapter:  Board private structure
 + * @tx_ring:  Tx descriptor ring structure
 + * @skb:      Socket buffer structure
 + */
 +static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
 +                            struct pch_gbe_tx_ring *tx_ring,
 +                            struct sk_buff *skb)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      struct pch_gbe_tx_desc *tx_desc;
 +      struct pch_gbe_buffer *buffer_info;
 +      struct sk_buff *tmp_skb;
 +      unsigned int frame_ctrl;
 +      unsigned int ring_num;
 +      unsigned long flags;
 +
 +      /*-- Set frame control --*/
 +      frame_ctrl = 0;
 +      if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
 +              frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
 +      if (skb->ip_summed == CHECKSUM_NONE)
 +              frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
 +
 +      /* Performs checksum processing */
 +      /*
 +       * The hardware accelerator cannot compute the checksum when the
 +       * data size is less than 64 bytes, so it is done in software here.
 +       */
 +      if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
 +              frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
 +                            PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
 +              if (skb->protocol == htons(ETH_P_IP)) {
 +                      struct iphdr *iph = ip_hdr(skb);
 +                      unsigned int offset;
 +                      iph->check = 0;
 +                      iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
 +                      offset = skb_transport_offset(skb);
 +                      if (iph->protocol == IPPROTO_TCP) {
 +                              skb->csum = 0;
 +                              tcp_hdr(skb)->check = 0;
 +                              skb->csum = skb_checksum(skb, offset,
 +                                                       skb->len - offset, 0);
 +                              tcp_hdr(skb)->check =
 +                                      csum_tcpudp_magic(iph->saddr,
 +                                                        iph->daddr,
 +                                                        skb->len - offset,
 +                                                        IPPROTO_TCP,
 +                                                        skb->csum);
 +                      } else if (iph->protocol == IPPROTO_UDP) {
 +                              skb->csum = 0;
 +                              udp_hdr(skb)->check = 0;
 +                              skb->csum =
 +                                      skb_checksum(skb, offset,
 +                                                   skb->len - offset, 0);
 +                              udp_hdr(skb)->check =
 +                                      csum_tcpudp_magic(iph->saddr,
 +                                                        iph->daddr,
 +                                                        skb->len - offset,
 +                                                        IPPROTO_UDP,
 +                                                        skb->csum);
 +                      }
 +              }
 +      }
 +      spin_lock_irqsave(&tx_ring->tx_lock, flags);
 +      ring_num = tx_ring->next_to_use;
 +      if (unlikely((ring_num + 1) == tx_ring->count))
 +              tx_ring->next_to_use = 0;
 +      else
 +              tx_ring->next_to_use = ring_num + 1;
 +
 +      spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 +      buffer_info = &tx_ring->buffer_info[ring_num];
 +      tmp_skb = buffer_info->skb;
 +
 +      /* [Header:14][payload] ---> [Header:14][padding:2][payload]    */
 +      memcpy(tmp_skb->data, skb->data, ETH_HLEN);
 +      tmp_skb->data[ETH_HLEN] = 0x00;
 +      tmp_skb->data[ETH_HLEN + 1] = 0x00;
 +      tmp_skb->len = skb->len;
 +      memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
 +             (skb->len - ETH_HLEN));
 +      /*-- Set Buffer information --*/
 +      buffer_info->length = tmp_skb->len;
 +      buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
 +                                        buffer_info->length,
 +                                        DMA_TO_DEVICE);
 +      if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
 +              pr_err("TX DMA map failed\n");
 +              buffer_info->dma = 0;
 +              buffer_info->time_stamp = 0;
 +              tx_ring->next_to_use = ring_num;
 +              return;
 +      }
 +      buffer_info->mapped = true;
 +      buffer_info->time_stamp = jiffies;
 +
 +      /*-- Set Tx descriptor --*/
 +      tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
 +      tx_desc->buffer_addr = (buffer_info->dma);
 +      tx_desc->length = (tmp_skb->len);
 +      tx_desc->tx_words_eob = ((tmp_skb->len + 3));
 +      tx_desc->tx_frame_ctrl = (frame_ctrl);
 +      tx_desc->gbec_status = (DSC_INIT16);
 +
 +      if (unlikely(++ring_num == tx_ring->count))
 +              ring_num = 0;
 +
 +      /* Update software pointer of TX descriptor */
 +      iowrite32(tx_ring->dma +
 +                (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
 +                &hw->reg->TX_DSC_SW_P);
 +      dev_kfree_skb_any(skb);
 +}
 +
 +/**
 + * pch_gbe_update_stats - Update the board statistics counters
 + * @adapter:  Board private structure
 + */
 +void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct pci_dev *pdev = adapter->pdev;
 +      struct pch_gbe_hw_stats *stats = &adapter->stats;
 +      unsigned long flags;
 +
 +      /*
 +       * Prevent stats update while adapter is being reset, or if the pci
 +       * connection is down.
 +       */
 +      if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
 +              return;
 +
 +      spin_lock_irqsave(&adapter->stats_lock, flags);
 +
 +      /* Update device status "adapter->stats" */
 +      stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
 +      stats->tx_errors = stats->tx_length_errors +
 +          stats->tx_aborted_errors +
 +          stats->tx_carrier_errors + stats->tx_timeout_count;
 +
 +      /* Update network device status "adapter->net_stats" */
 +      netdev->stats.rx_packets = stats->rx_packets;
 +      netdev->stats.rx_bytes = stats->rx_bytes;
 +      netdev->stats.rx_dropped = stats->rx_dropped;
 +      netdev->stats.tx_packets = stats->tx_packets;
 +      netdev->stats.tx_bytes = stats->tx_bytes;
 +      netdev->stats.tx_dropped = stats->tx_dropped;
 +      /* Fill out the OS statistics structure */
 +      netdev->stats.multicast = stats->multicast;
 +      netdev->stats.collisions = stats->collisions;
 +      /* Rx Errors */
 +      netdev->stats.rx_errors = stats->rx_errors;
 +      netdev->stats.rx_crc_errors = stats->rx_crc_errors;
 +      netdev->stats.rx_frame_errors = stats->rx_frame_errors;
 +      /* Tx Errors */
 +      netdev->stats.tx_errors = stats->tx_errors;
 +      netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
 +      netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
 +
 +      spin_unlock_irqrestore(&adapter->stats_lock, flags);
 +}
 +
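++/**
++ * pch_gbe_stop_receive - Stop the receive unit
++ * @adapter:  Board private structure
++ *
++ * Disables Rx DMA and waits for the Rx DMA bus to become idle; if it does
++ * not, bus mastering is disabled around the Rx MAC reset and then restored.
++ */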
++static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
++{
++      struct pch_gbe_hw *hw = &adapter->hw;
++      u32 rxdma;
++      u16 value;
++      int ret;
++
++      /* Disable Receive DMA */
++      rxdma = ioread32(&hw->reg->DMA_CTRL);
++      rxdma &= ~PCH_GBE_RX_DMA_EN;
++      iowrite32(rxdma, &hw->reg->DMA_CTRL);
++      /* Wait Rx DMA BUS is IDLE */
++      ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
++      if (ret) {
++              /* Disable Bus master */
++              pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
++              value &= ~PCI_COMMAND_MASTER;
++              pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
++              /* Stop Receive */
++              pch_gbe_mac_reset_rx(hw);
++              /* Enable Bus master */
++              value |= PCI_COMMAND_MASTER;
++              pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
++      } else {
++              /* Stop Receive */
++              pch_gbe_mac_reset_rx(hw);
++      }
++}
++
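++/**
++ * pch_gbe_start_receive - Enable Rx DMA and the receive MAC
++ * @hw:  Pointer to the HW structure
++ */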
++static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
++{
++      u32 rxdma;
++
++      /* Enables Receive DMA */
++      rxdma = ioread32(&hw->reg->DMA_CTRL);
++      rxdma |= PCH_GBE_RX_DMA_EN;
++      iowrite32(rxdma, &hw->reg->DMA_CTRL);
++      /* Enables Receive */
++      iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
++      return;
++}
++
 +/**
 + * pch_gbe_intr - Interrupt Handler
 + * @irq:   Interrupt number
 + * @data:  Pointer to a network interface device structure
 + * Returns
 + *    - IRQ_HANDLED:  Our interrupt
 + *    - IRQ_NONE:     Not our interrupt
 + */
 +static irqreturn_t pch_gbe_intr(int irq, void *data)
 +{
 +      struct net_device *netdev = data;
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      u32 int_st;
 +      u32 int_en;
 +
 +      /* Check request status */
 +      int_st = ioread32(&hw->reg->INT_ST);
 +      int_st = int_st & ioread32(&hw->reg->INT_EN);
 +      /* When none of the enabled interrupt sources is pending */
 +      if (unlikely(!int_st))
 +              return IRQ_NONE;        /* Not our interrupt. End processing. */
 +      pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
 +      if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
 +              adapter->stats.intr_rx_frame_err_count++;
 +      if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
-               pr_err("Rx descriptor is empty\n");
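++              /* Rx FIFO overrun: count it once, mask the FIFO error
++               * interrupt and stop the receive unit. */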
++              if (!adapter->rx_stop_flag) {
++                      adapter->stats.intr_rx_fifo_err_count++;
++                      pr_debug("Rx fifo over run\n");
++                      adapter->rx_stop_flag = true;
++                      int_en = ioread32(&hw->reg->INT_EN);
++                      iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
++                                &hw->reg->INT_EN);
++                      pch_gbe_stop_receive(adapter);
++              }
 +      if (int_st & PCH_GBE_INT_RX_DMA_ERR)
 +              adapter->stats.intr_rx_dma_err_count++;
 +      if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
 +              adapter->stats.intr_tx_fifo_err_count++;
 +      if (int_st & PCH_GBE_INT_TX_DMA_ERR)
 +              adapter->stats.intr_tx_dma_err_count++;
 +      if (int_st & PCH_GBE_INT_TCPIP_ERR)
 +              adapter->stats.intr_tcpip_err_count++;
 +      /* When Rx descriptor is empty  */
 +      if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
 +              adapter->stats.intr_rx_dsc_empty_count++;
-       bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
++              pr_debug("Rx descriptor is empty\n");
 +              int_en = ioread32(&hw->reg->INT_EN);
 +              iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
 +              if (hw->mac.tx_fc_enable) {
 +                      /* Set Pause packet */
 +                      pch_gbe_mac_set_pause_packet(hw);
 +              }
 +              if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
 +                  == 0) {
 +                      return IRQ_HANDLED;
 +              }
 +      }
 +
 +      /* When an Rx DMA or Tx completion interrupt is pending */
 +      if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
 +              if (likely(napi_schedule_prep(&adapter->napi))) {
 +                      /* Mask Rx DMA and Tx completion interrupts while polling */
 +                      atomic_inc(&adapter->irq_sem);
 +                      int_en = ioread32(&hw->reg->INT_EN);
 +                      int_en &=
 +                          ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
 +                      iowrite32(int_en, &hw->reg->INT_EN);
 +                      /* Start polling for NAPI */
 +                      __napi_schedule(&adapter->napi);
 +              }
 +      }
 +      pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
 +               IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
 +      return IRQ_HANDLED;
 +}
 +
 +/**
 + * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 + * @adapter:       Board private structure
 + * @rx_ring:       Rx descriptor ring
 + * @cleaned_count: Cleaned count
 + */
 +static void
 +pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
 +                       struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct pci_dev *pdev = adapter->pdev;
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      struct pch_gbe_rx_desc *rx_desc;
 +      struct pch_gbe_buffer *buffer_info;
 +      struct sk_buff *skb;
 +      unsigned int i;
 +      unsigned int bufsz;
 +
-               skb = buffer_info->skb;
-               if (skb) {
-                       skb_trim(skb, 0);
-               } else {
-                       skb = netdev_alloc_skb(netdev, bufsz);
-                       if (unlikely(!skb)) {
-                               /* Better luck next round */
-                               adapter->stats.rx_alloc_buff_failed++;
-                               break;
-                       }
-                       /* 64byte align */
-                       skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-                       buffer_info->skb = skb;
-                       buffer_info->length = adapter->rx_buffer_len;
++      bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
 +      i = rx_ring->next_to_use;
 +
 +      while ((cleaned_count--)) {
 +              buffer_info = &rx_ring->buffer_info[i];
-                                                 skb->data,
++              skb = netdev_alloc_skb(netdev, bufsz);
++              if (unlikely(!skb)) {
++                      /* Better luck next round */
++                      adapter->stats.rx_alloc_buff_failed++;
++                      break;
 +              }
++              /* Reserve NET_IP_ALIGN bytes so the IP header is aligned */
++              skb_reserve(skb, NET_IP_ALIGN);
++              buffer_info->skb = skb;
++
 +              buffer_info->dma = dma_map_single(&pdev->dev,
-       struct sk_buff *skb, *new_skb;
++                                                buffer_info->rx_buffer,
 +                                                buffer_info->length,
 +                                                DMA_FROM_DEVICE);
 +              if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
 +                      dev_kfree_skb(skb);
 +                      buffer_info->skb = NULL;
 +                      buffer_info->dma = 0;
 +                      adapter->stats.rx_alloc_buff_failed++;
 +                      break; /* while !buffer_info->skb */
 +              }
 +              buffer_info->mapped = true;
 +              rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
 +              rx_desc->buffer_addr = (buffer_info->dma);
 +              rx_desc->gbec_status = DSC_INIT16;
 +
 +              pr_debug("i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
 +                       i, (unsigned long long)buffer_info->dma,
 +                       buffer_info->length);
 +
 +              if (unlikely(++i == rx_ring->count))
 +                      i = 0;
 +      }
 +      if (likely(rx_ring->next_to_use != i)) {
 +              rx_ring->next_to_use = i;
 +              if (unlikely(i-- == 0))
 +                      i = (rx_ring->count - 1);
 +              iowrite32(rx_ring->dma +
 +                        (int)sizeof(struct pch_gbe_rx_desc) * i,
 +                        &hw->reg->RX_DSC_SW_P);
 +      }
 +      return;
 +}
 +
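++/**
++ * pch_gbe_alloc_rx_buffers_pool - Allocate one coherent DMA pool for the Rx buffers
++ * @adapter:       Board private structure
++ * @rx_ring:       Rx descriptor ring
++ * @cleaned_count: Cleaned count
++ * Returns
++ *    0:              Successfully
++ *    Negative value: Failed
++ *
++ * Each buffer_info->rx_buffer points at its slice of the pool; received
++ * frames are copied from there into a freshly allocated skb by
++ * pch_gbe_clean_rx().
++ */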
++static int
++pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
++                       struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
++{
++      struct pci_dev *pdev = adapter->pdev;
++      struct pch_gbe_buffer *buffer_info;
++      unsigned int i;
++      unsigned int bufsz;
++      unsigned int size;
++
++      bufsz = adapter->rx_buffer_len;
++
++      size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
++      rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
++                                              &rx_ring->rx_buff_pool_logic,
++                                              GFP_KERNEL);
++      if (!rx_ring->rx_buff_pool) {
++              pr_err("Unable to allocate memory for the receive buffer pool\n");
++              return -ENOMEM;
++      }
++      memset(rx_ring->rx_buff_pool, 0, size);
++      rx_ring->rx_buff_pool_size = size;
++      for (i = 0; i < rx_ring->count; i++) {
++              buffer_info = &rx_ring->buffer_info[i];
++              buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
++              buffer_info->length = bufsz;
++      }
++      return 0;
++}
++
 +/**
 + * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 + * @adapter:   Board private structure
 + * @tx_ring:   Tx descriptor ring
 + */
 +static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
 +                                      struct pch_gbe_tx_ring *tx_ring)
 +{
 +      struct pch_gbe_buffer *buffer_info;
 +      struct sk_buff *skb;
 +      unsigned int i;
 +      unsigned int bufsz;
 +      struct pch_gbe_tx_desc *tx_desc;
 +
 +      bufsz =
 +          adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
 +
 +      for (i = 0; i < tx_ring->count; i++) {
 +              buffer_info = &tx_ring->buffer_info[i];
 +              skb = netdev_alloc_skb(adapter->netdev, bufsz);
 +              skb_reserve(skb, PCH_GBE_DMA_ALIGN);
 +              buffer_info->skb = skb;
 +              tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 +              tx_desc->gbec_status = (DSC_INIT16);
 +      }
 +      return;
 +}
 +
 +/**
 + * pch_gbe_clean_tx - Reclaim resources after transmit completes
 + * @adapter:   Board private structure
 + * @tx_ring:   Tx descriptor ring
 + * Returns
 + *    true:  Cleaned the descriptor
 + *    false: Not cleaned the descriptor
 + */
 +static bool
 +pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 +               struct pch_gbe_tx_ring *tx_ring)
 +{
 +      struct pch_gbe_tx_desc *tx_desc;
 +      struct pch_gbe_buffer *buffer_info;
 +      struct sk_buff *skb;
 +      unsigned int i;
 +      unsigned int cleaned_count = 0;
 +      bool cleaned = false;
 +
 +      pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
 +
 +      i = tx_ring->next_to_clean;
 +      tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 +      pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
 +               tx_desc->gbec_status, tx_desc->dma_status);
 +
 +      while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
 +              pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
 +              cleaned = true;
 +              buffer_info = &tx_ring->buffer_info[i];
 +              skb = buffer_info->skb;
 +
 +              if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
 +                      adapter->stats.tx_aborted_errors++;
 +                      pr_err("Transfer Abort Error\n");
 +              } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
 +                        ) {
 +                      adapter->stats.tx_carrier_errors++;
 +                      pr_err("Transfer Carrier Sense Error\n");
 +              } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
 +                        ) {
 +                      adapter->stats.tx_aborted_errors++;
 +                      pr_err("Transfer Collision Abort Error\n");
 +              } else if ((tx_desc->gbec_status &
 +                          (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
 +                           PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
 +                      adapter->stats.collisions++;
 +                      adapter->stats.tx_packets++;
 +                      adapter->stats.tx_bytes += skb->len;
 +                      pr_debug("Transfer Collision\n");
 +              } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
 +                        ) {
 +                      adapter->stats.tx_packets++;
 +                      adapter->stats.tx_bytes += skb->len;
 +              }
 +              if (buffer_info->mapped) {
 +                      pr_debug("unmap buffer_info->dma : %d\n", i);
 +                      dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 +                                       buffer_info->length, DMA_TO_DEVICE);
 +                      buffer_info->mapped = false;
 +              }
 +              if (buffer_info->skb) {
 +                      pr_debug("trim buffer_info->skb : %d\n", i);
 +                      skb_trim(buffer_info->skb, 0);
 +              }
 +              tx_desc->gbec_status = DSC_INIT16;
 +              if (unlikely(++i == tx_ring->count))
 +                      i = 0;
 +              tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 +
 +              /* weight of a sort for tx, to avoid endless transmit cleanup */
 +              if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
 +                      break;
 +      }
 +      pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
 +               cleaned_count);
 +      /* Recover from running out of Tx resources in xmit_frame */
 +      if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
 +              netif_wake_queue(adapter->netdev);
 +              adapter->stats.tx_restart_count++;
 +              pr_debug("Tx wake queue\n");
 +      }
 +      spin_lock(&adapter->tx_queue_lock);
 +      tx_ring->next_to_clean = i;
 +      spin_unlock(&adapter->tx_queue_lock);
 +      pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
 +      return cleaned;
 +}
 +
 +/**
 + * pch_gbe_clean_rx - Send received data up the network stack; legacy
 + * @adapter:     Board private structure
 + * @rx_ring:     Rx descriptor ring
 + * @work_done:   Completed count
 + * @work_to_do:  Request count
 + * Returns
 + *    true:  Cleaned the descriptor
 + *    false: Not cleaned the descriptor
 + */
 +static bool
 +pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 +               struct pch_gbe_rx_ring *rx_ring,
 +               int *work_done, int work_to_do)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct pci_dev *pdev = adapter->pdev;
 +      struct pch_gbe_buffer *buffer_info;
 +      struct pch_gbe_rx_desc *rx_desc;
 +      u32 length;
 +      unsigned int i;
 +      unsigned int cleaned_count = 0;
 +      bool cleaned = false;
-               /* Prefetch the packet */
-               prefetch(skb->data);
++      struct sk_buff *skb;
 +      u8 dma_status;
 +      u16 gbec_status;
 +      u32 tcp_ip_status;
 +
 +      i = rx_ring->next_to_clean;
 +
 +      while (*work_done < work_to_do) {
 +              /* Check Rx descriptor status */
 +              rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
 +              if (rx_desc->gbec_status == DSC_INIT16)
 +                      break;
 +              cleaned = true;
 +              cleaned_count++;
 +
 +              dma_status = rx_desc->dma_status;
 +              gbec_status = rx_desc->gbec_status;
 +              tcp_ip_status = rx_desc->tcp_ip_status;
 +              rx_desc->gbec_status = DSC_INIT16;
 +              buffer_info = &rx_ring->buffer_info[i];
 +              skb = buffer_info->skb;
++              buffer_info->skb = NULL;
 +
 +              /* unmap dma */
 +              dma_unmap_single(&pdev->dev, buffer_info->dma,
 +                                 buffer_info->length, DMA_FROM_DEVICE);
 +              buffer_info->mapped = false;
-                       /* length convert[-3] */
-                       length = (rx_desc->rx_words_eob) - 3;
-                       /* Decide the data conversion method */
-                       if (!(netdev->features & NETIF_F_RXCSUM)) {
-                               /* [Header:14][payload] */
-                               if (NET_IP_ALIGN) {
-                                       /* Because alignment differs,
-                                        * the new_skb is newly allocated,
-                                        * and data is copied to new_skb.*/
-                                       new_skb = netdev_alloc_skb(netdev,
-                                                        length + NET_IP_ALIGN);
-                                       if (!new_skb) {
-                                               /* dorrop error */
-                                               pr_err("New skb allocation "
-                                                       "Error\n");
-                                               goto dorrop;
-                                       }
-                                       skb_reserve(new_skb, NET_IP_ALIGN);
-                                       memcpy(new_skb->data, skb->data,
-                                              length);
-                                       skb = new_skb;
-                               } else {
-                                       /* DMA buffer is used as SKB as it is.*/
-                                       buffer_info->skb = NULL;
-                               }
-                       } else {
-                               /* [Header:14][padding:2][payload] */
-                               /* The length includes padding length */
-                               length = length - PCH_GBE_DMA_PADDING;
-                               if ((length < copybreak) ||
-                                   (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
-                                       /* Because alignment differs,
-                                        * the new_skb is newly allocated,
-                                        * and data is copied to new_skb.
-                                        * Padding data is deleted
-                                        * at the time of a copy.*/
-                                       new_skb = netdev_alloc_skb(netdev,
-                                                        length + NET_IP_ALIGN);
-                                       if (!new_skb) {
-                                               /* dorrop error */
-                                               pr_err("New skb allocation "
-                                                       "Error\n");
-                                               goto dorrop;
-                                       }
-                                       skb_reserve(new_skb, NET_IP_ALIGN);
-                                       memcpy(new_skb->data, skb->data,
-                                              ETH_HLEN);
-                                       memcpy(&new_skb->data[ETH_HLEN],
-                                              &skb->data[ETH_HLEN +
-                                              PCH_GBE_DMA_PADDING],
-                                              length - ETH_HLEN);
-                                       skb = new_skb;
-                               } else {
-                                       /* Padding data is deleted
-                                        * by moving header data.*/
-                                       memmove(&skb->data[PCH_GBE_DMA_PADDING],
-                                               &skb->data[0], ETH_HLEN);
-                                       skb_reserve(skb, NET_IP_ALIGN);
-                                       buffer_info->skb = NULL;
-                               }
-                       }
-                       /* The length includes FCS length */
-                       length = length - ETH_FCS_LEN;
 +
 +              pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
 +                       "TCP:0x%08x]  BufInf = 0x%p\n",
 +                       i, dma_status, gbec_status, tcp_ip_status,
 +                       buffer_info);
 +              /* Error check */
 +              if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
 +                      adapter->stats.rx_frame_errors++;
 +                      pr_err("Receive Not Octal Error\n");
 +              } else if (unlikely(gbec_status &
 +                              PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
 +                      adapter->stats.rx_frame_errors++;
 +                      pr_err("Receive Nibble Error\n");
 +              } else if (unlikely(gbec_status &
 +                              PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
 +                      adapter->stats.rx_crc_errors++;
 +                      pr_err("Receive CRC Error\n");
 +              } else {
 +                      /* get receive length */
- dorrop:
++                      /* length conversion: subtract 3 and the FCS length from rx_words_eob */
++                      length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
++                      if (rx_desc->rx_words_eob & 0x02)
++                              length = length - 4;
++                      /*
++                       * buffer_info->rx_buffer: [Header:14][payload]
++                       * skb->data: [Reserve:2][Header:14][payload]
++                       */
++                      memcpy(skb->data, buffer_info->rx_buffer, length);
++
 +                      /* update status of driver */
 +                      adapter->stats.rx_bytes += length;
 +                      adapter->stats.rx_packets++;
 +                      if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
 +                              adapter->stats.multicast++;
 +                      /* Write metadata of skb */
 +                      skb_put(skb, length);
 +                      skb->protocol = eth_type_trans(skb, netdev);
 +                      if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
 +                              skb->ip_summed = CHECKSUM_NONE;
 +                      else
 +                              skb->ip_summed = CHECKSUM_UNNECESSARY;
 +
 +                      napi_gro_receive(&adapter->napi, skb);
 +                      (*work_done)++;
 +                      pr_debug("Receive skb->ip_summed: %d length: %d\n",
 +                               skb->ip_summed, length);
 +              }
-               adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
-       netdev->mtu = new_mtu;
-       adapter->hw.mac.max_frame_size = max_frame;
 +              /* return some buffers to hardware, one at a time is too slow */
 +              if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
 +                      pch_gbe_alloc_rx_buffers(adapter, rx_ring,
 +                                               cleaned_count);
 +                      cleaned_count = 0;
 +              }
 +              if (++i == rx_ring->count)
 +                      i = 0;
 +      }
 +      rx_ring->next_to_clean = i;
 +      if (cleaned_count)
 +              pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 +      return cleaned;
 +}
 +
 +/**
 + * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 + * @adapter:  Board private structure
 + * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
 + * Returns
 + *    0:              Successfully
 + *    Negative value: Failed
 + */
 +int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 +                              struct pch_gbe_tx_ring *tx_ring)
 +{
 +      struct pci_dev *pdev = adapter->pdev;
 +      struct pch_gbe_tx_desc *tx_desc;
 +      int size;
 +      int desNo;
 +
 +      size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
 +      tx_ring->buffer_info = vzalloc(size);
 +      if (!tx_ring->buffer_info) {
 +              pr_err("Unable to allocate memory for the buffer information\n");
 +              return -ENOMEM;
 +      }
 +
 +      tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
 +
 +      tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
 +                                         &tx_ring->dma, GFP_KERNEL);
 +      if (!tx_ring->desc) {
 +              vfree(tx_ring->buffer_info);
 +              pr_err("Unable to allocate memory for the transmit descriptor ring\n");
 +              return -ENOMEM;
 +      }
 +      memset(tx_ring->desc, 0, tx_ring->size);
 +
 +      tx_ring->next_to_use = 0;
 +      tx_ring->next_to_clean = 0;
 +      spin_lock_init(&tx_ring->tx_lock);
 +
 +      for (desNo = 0; desNo < tx_ring->count; desNo++) {
 +              tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
 +              tx_desc->gbec_status = DSC_INIT16;
 +      }
 +      pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
 +               "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
 +               tx_ring->desc, (unsigned long long)tx_ring->dma,
 +               tx_ring->next_to_clean, tx_ring->next_to_use);
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 + * @adapter:  Board private structure
 + * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
 + * Returns
 + *    0:              Successfully
 + *    Negative value: Failed
 + */
 +int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
 +                              struct pch_gbe_rx_ring *rx_ring)
 +{
 +      struct pci_dev *pdev = adapter->pdev;
 +      struct pch_gbe_rx_desc *rx_desc;
 +      int size;
 +      int desNo;
 +
 +      size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
 +      rx_ring->buffer_info = vzalloc(size);
 +      if (!rx_ring->buffer_info) {
 +              pr_err("Unable to allocate memory for the buffer information\n");
 +              return -ENOMEM;
 +      }
 +      rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
 +      rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
 +                                         &rx_ring->dma, GFP_KERNEL);
 +
 +      if (!rx_ring->desc) {
 +              pr_err("Unable to allocate memory for the receive descriptor ring\n");
 +              vfree(rx_ring->buffer_info);
 +              return -ENOMEM;
 +      }
 +      memset(rx_ring->desc, 0, rx_ring->size);
 +      rx_ring->next_to_clean = 0;
 +      rx_ring->next_to_use = 0;
 +      for (desNo = 0; desNo < rx_ring->count; desNo++) {
 +              rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
 +              rx_desc->gbec_status = DSC_INIT16;
 +      }
 +      pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
 +               "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
 +               rx_ring->desc, (unsigned long long)rx_ring->dma,
 +               rx_ring->next_to_clean, rx_ring->next_to_use);
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_free_tx_resources - Free Tx Resources
 + * @adapter:  Board private structure
 + * @tx_ring:  Tx descriptor ring for a specific queue
 + */
 +void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
 +                              struct pch_gbe_tx_ring *tx_ring)
 +{
 +      struct pci_dev *pdev = adapter->pdev;
 +
 +      pch_gbe_clean_tx_ring(adapter, tx_ring);
 +      vfree(tx_ring->buffer_info);
 +      tx_ring->buffer_info = NULL;
 +      pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
 +      tx_ring->desc = NULL;
 +}
 +
 +/**
 + * pch_gbe_free_rx_resources - Free Rx Resources
 + * @adapter:  Board private structure
 + * @rx_ring:  Ring to clean the resources from
 + */
 +void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
 +                              struct pch_gbe_rx_ring *rx_ring)
 +{
 +      struct pci_dev *pdev = adapter->pdev;
 +
 +      pch_gbe_clean_rx_ring(adapter, rx_ring);
 +      vfree(rx_ring->buffer_info);
 +      rx_ring->buffer_info = NULL;
 +      pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
 +      rx_ring->desc = NULL;
 +}
 +
 +/**
 + * pch_gbe_request_irq - Allocate an interrupt line
 + * @adapter:  Board private structure
 + * Returns
 + *    0:              Successfully
 + *    Negative value: Failed
 + */
 +static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      int err;
 +      int flags;
 +
 +      flags = IRQF_SHARED;
 +      adapter->have_msi = false;
 +      err = pci_enable_msi(adapter->pdev);
 +      pr_debug("call pci_enable_msi\n");
 +      if (err) {
 +              pr_debug("call pci_enable_msi - Error: %d\n", err);
 +      } else {
 +              flags = 0;
 +              adapter->have_msi = true;
 +      }
 +      err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
 +                        flags, netdev->name, netdev);
 +      if (err)
 +              pr_err("Unable to allocate interrupt Error: %d\n", err);
 +      pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
 +               adapter->have_msi, flags, err);
 +      return err;
 +}
 +
 +
 +static void pch_gbe_set_multi(struct net_device *netdev);
 +/**
 + * pch_gbe_up - Up GbE network device
 + * @adapter:  Board private structure
 + * Returns
 + *    0:              Successfully
 + *    Negative value: Failed
 + */
 +int pch_gbe_up(struct pch_gbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 +      struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 +      int err;
 +
 +      /* hardware has been reset, we need to reload some things */
 +      pch_gbe_set_multi(netdev);
 +
 +      pch_gbe_setup_tctl(adapter);
 +      pch_gbe_configure_tx(adapter);
 +      pch_gbe_setup_rctl(adapter);
 +      pch_gbe_configure_rx(adapter);
 +
 +      err = pch_gbe_request_irq(adapter);
 +      if (err) {
 +              pr_err("Error: can't bring device up\n");
 +              return err;
 +      }
++      err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
++      if (err) {
++              pr_err("Error: can't bring device up\n");
++              return err;
++      }
 +      pch_gbe_alloc_tx_buffers(adapter, tx_ring);
 +      pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
 +      adapter->tx_queue_len = netdev->tx_queue_len;
++      pch_gbe_start_receive(&adapter->hw);
 +
 +      mod_timer(&adapter->watchdog_timer, jiffies);
 +
 +      napi_enable(&adapter->napi);
 +      pch_gbe_irq_enable(adapter);
 +      netif_start_queue(adapter->netdev);
 +
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_down - Down GbE network device
 + * @adapter:  Board private structure
 + */
 +void pch_gbe_down(struct pch_gbe_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
++      struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 +
 +      /* signal that we're down so the interrupt handler does not
 +       * reschedule our watchdog timer */
 +      napi_disable(&adapter->napi);
 +      atomic_set(&adapter->irq_sem, 0);
 +
 +      pch_gbe_irq_disable(adapter);
 +      pch_gbe_free_irq(adapter);
 +
 +      del_timer_sync(&adapter->watchdog_timer);
 +
 +      netdev->tx_queue_len = adapter->tx_queue_len;
 +      netif_carrier_off(netdev);
 +      netif_stop_queue(netdev);
 +
 +      pch_gbe_reset(adapter);
 +      pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
 +      pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
++
++      pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
++                          rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
++      rx_ring->rx_buff_pool_logic = 0;
++      rx_ring->rx_buff_pool_size = 0;
++      rx_ring->rx_buff_pool = NULL;
 +}
 +
 +/**
 + * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
 + * @adapter:  Board private structure to initialize
 + * Returns
 + *    0:              Successfully
 + *    Negative value: Failed
 + */
 +static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
 +{
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      struct net_device *netdev = adapter->netdev;
 +
 +      adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
 +      hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 +      hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 +
 +      /* Initialize the hardware-specific values */
 +      if (pch_gbe_hal_setup_init_funcs(hw)) {
 +              pr_err("Hardware Initialization Failure\n");
 +              return -EIO;
 +      }
 +      if (pch_gbe_alloc_queues(adapter)) {
 +              pr_err("Unable to allocate memory for queues\n");
 +              return -ENOMEM;
 +      }
 +      spin_lock_init(&adapter->hw.miim_lock);
 +      spin_lock_init(&adapter->tx_queue_lock);
 +      spin_lock_init(&adapter->stats_lock);
 +      spin_lock_init(&adapter->ethtool_lock);
 +      atomic_set(&adapter->irq_sem, 0);
 +      pch_gbe_irq_disable(adapter);
 +
 +      pch_gbe_init_stats(adapter);
 +
 +      pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
 +               (u32) adapter->rx_buffer_len,
 +               hw->mac.min_frame_size, hw->mac.max_frame_size);
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_open - Called when a network interface is made active
 + * @netdev:   Network interface device structure
 + * Returns
 + *    0:              Successfully
 + *    Negative value: Failed
 + */
 +static int pch_gbe_open(struct net_device *netdev)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      int err;
 +
 +      /* allocate transmit descriptors */
 +      err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
 +      if (err)
 +              goto err_setup_tx;
 +      /* allocate receive descriptors */
 +      err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
 +      if (err)
 +              goto err_setup_rx;
 +      pch_gbe_hal_power_up_phy(hw);
 +      err = pch_gbe_up(adapter);
 +      if (err)
 +              goto err_up;
 +      pr_debug("Success End\n");
 +      return 0;
 +
 +err_up:
 +      if (!adapter->wake_up_evt)
 +              pch_gbe_hal_power_down_phy(hw);
 +      pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
 +err_setup_rx:
 +      pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
 +err_setup_tx:
 +      pch_gbe_reset(adapter);
 +      pr_err("Error End\n");
 +      return err;
 +}
 +
 +/**
 + * pch_gbe_stop - Disables a network interface
 + * @netdev:  Network interface device structure
 + * Returns
 + *    0: Successfully
 + */
 +static int pch_gbe_stop(struct net_device *netdev)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +
 +      pch_gbe_down(adapter);
 +      if (!adapter->wake_up_evt)
 +              pch_gbe_hal_power_down_phy(hw);
 +      pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
 +      pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_xmit_frame - Start transmission of a packet
 + * @skb:     Socket buffer structure
 + * @netdev:  Network interface device structure
 + * Returns
 + *    - NETDEV_TX_OK:   Normal end
 + *    - NETDEV_TX_BUSY: Error end
 + */
 +static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 +      unsigned long flags;
 +
 +      if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
 +              pr_err("Transfer length Error: skb len: %d > max: %d\n",
 +                     skb->len, adapter->hw.mac.max_frame_size);
 +              dev_kfree_skb_any(skb);
 +              adapter->stats.tx_length_errors++;
 +              return NETDEV_TX_OK;
 +      }
 +      if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
 +              /* Collision - tell upper layer to requeue */
 +              return NETDEV_TX_LOCKED;
 +      }
 +      if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 +              netif_stop_queue(netdev);
 +              spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 +              pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
 +                       tx_ring->next_to_use, tx_ring->next_to_clean);
 +              return NETDEV_TX_BUSY;
 +      }
 +      spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 +
 +      /* CRC and ITAG are not supported */
 +      pch_gbe_tx_queue(adapter, tx_ring, skb);
 +      return NETDEV_TX_OK;
 +}
 +
 +/**
 + * pch_gbe_get_stats - Get System Network Statistics
 + * @netdev:  Network interface device structure
 + * Returns:  The current stats
 + */
 +static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
 +{
 +      /* only return the current stats */
 +      return &netdev->stats;
 +}
 +
 +/**
 + * pch_gbe_set_multi - Multicast and Promiscuous mode set
 + * @netdev:   Network interface device structure
 + */
 +static void pch_gbe_set_multi(struct net_device *netdev)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      struct netdev_hw_addr *ha;
 +      u8 *mta_list;
 +      u32 rctl;
 +      int i;
 +      int mc_count;
 +
 +      pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
 +
 +      /* Check for Promiscuous and All Multicast modes */
 +      rctl = ioread32(&hw->reg->RX_MODE);
 +      mc_count = netdev_mc_count(netdev);
 +      if ((netdev->flags & IFF_PROMISC)) {
 +              rctl &= ~PCH_GBE_ADD_FIL_EN;
 +              rctl &= ~PCH_GBE_MLT_FIL_EN;
 +      } else if ((netdev->flags & IFF_ALLMULTI)) {
 +              /* permit reception of all multicast frames */
 +              rctl |= PCH_GBE_ADD_FIL_EN;
 +              rctl &= ~PCH_GBE_MLT_FIL_EN;
 +      } else {
 +              if (mc_count >= PCH_GBE_MAR_ENTRIES) {
 +                      /* permit reception of all multicast frames */
 +                      rctl |= PCH_GBE_ADD_FIL_EN;
 +                      rctl &= ~PCH_GBE_MLT_FIL_EN;
 +              } else {
 +                      rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
 +              }
 +      }
 +      iowrite32(rctl, &hw->reg->RX_MODE);
 +
 +      if (mc_count >= PCH_GBE_MAR_ENTRIES)
 +              return;
 +      mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
 +      if (!mta_list)
 +              return;
 +
 +      /* The shared function expects a packed array of only addresses. */
 +      i = 0;
 +      netdev_for_each_mc_addr(ha, netdev) {
 +              if (i == mc_count)
 +                      break;
 +              memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
 +      }
 +      pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
 +                                      PCH_GBE_MAR_ENTRIES);
 +      kfree(mta_list);
 +
 +      pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
 +               ioread32(&hw->reg->RX_MODE), mc_count);
 +}
 +
 +/**
 + * pch_gbe_set_mac - Change the Ethernet Address of the NIC
 + * @netdev: Network interface device structure
 + * @addr:   Pointer to an address structure
 + * Returns
 + *    0:              Success
 + *    -EADDRNOTAVAIL: The supplied address is not a valid Ethernet address
 + */
 +static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct sockaddr *skaddr = addr;
 +      int ret_val;
 +
 +      if (!is_valid_ether_addr(skaddr->sa_data)) {
 +              ret_val = -EADDRNOTAVAIL;
 +      } else {
 +              memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
 +              memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
 +              pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
 +              ret_val = 0;
 +      }
 +      pr_debug("ret_val : 0x%08x\n", ret_val);
 +      pr_debug("dev_addr : %pM\n", netdev->dev_addr);
 +      pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
 +      pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
 +               ioread32(&adapter->hw.reg->mac_adr[0].high),
 +               ioread32(&adapter->hw.reg->mac_adr[0].low));
 +      return ret_val;
 +}
 +
 +/**
 + * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 + * @netdev:   Network interface device structure
 + * @new_mtu:  New value for maximum frame size
 + * Returns
 + *    0:              Success
 + *    -EINVAL:        The requested MTU is out of range
 + *    -ENOMEM:        The interface could not be brought back up at the new size
 + */
 +static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      int max_frame;
++      unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
++      int err;
 +
 +      max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 +      if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
 +              (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
 +              pr_err("Invalid MTU setting\n");
 +              return -EINVAL;
 +      }
 +      if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
 +              adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
 +      else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
 +              adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
 +      else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
 +              adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
 +      else
-       if (netif_running(netdev))
-               pch_gbe_reinit_locked(adapter);
-       else
++              adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
 +
-               cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
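++      /* Restart the interface so the Rx buffers are reallocated at the new
++       * size; if bringing it back up fails, fall back to the old length. */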
++      if (netif_running(netdev)) {
++              pch_gbe_down(adapter);
++              err = pch_gbe_up(adapter);
++              if (err) {
++                      adapter->rx_buffer_len = old_rx_buffer_len;
++                      pch_gbe_up(adapter);
++                      return -ENOMEM;
++              } else {
++                      netdev->mtu = new_mtu;
++                      adapter->hw.mac.max_frame_size = max_frame;
++              }
++      } else {
 +              pch_gbe_reset(adapter);
++              netdev->mtu = new_mtu;
++              adapter->hw.mac.max_frame_size = max_frame;
++      }
 +
 +      pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
 +               max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
 +               adapter->hw.mac.max_frame_size);
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_set_features - Reset device after features changed
 + * @netdev:   Network interface device structure
 + * @features:  New features
 + * Returns
 + *    0:              HW state updated successfully
 + */
 +static int pch_gbe_set_features(struct net_device *netdev, u32 features)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      u32 changed = features ^ netdev->features;
 +
 +      if (!(changed & NETIF_F_RXCSUM))
 +              return 0;
 +
 +      if (netif_running(netdev))
 +              pch_gbe_reinit_locked(adapter);
 +      else
 +              pch_gbe_reset(adapter);
 +
 +      return 0;
 +}
 +
 +/**
 + * pch_gbe_ioctl - Controls register through a MII interface
 + * @netdev:   Network interface device structure
 + * @ifr:      Pointer to ifr structure
 + * @cmd:      Control command
 + * Returns
 + *    0:      Success
 + *    Negative value: Failure
 + */
 +static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +      pr_debug("cmd : 0x%04x\n", cmd);
 +
 +      return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
 +}
 +
 +/**
 + * pch_gbe_tx_timeout - Respond to a Tx Hang
 + * @netdev:   Network interface device structure
 + */
 +static void pch_gbe_tx_timeout(struct net_device *netdev)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +      /* Do the reset outside of interrupt context */
 +      adapter->stats.tx_timeout_count++;
 +      schedule_work(&adapter->reset_task);
 +}
 +
 +/**
 + * pch_gbe_napi_poll - NAPI receive and transmit polling callback
 + * @napi:    Pointer to the polling device structure
 + * @budget:  Maximum number of Rx packets to process
 + * Returns
 + *    The number of packets processed.  A value below @budget means
 + *    polling is finished and interrupts are re-enabled.
 + */
 +static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 +{
 +      struct pch_gbe_adapter *adapter =
 +          container_of(napi, struct pch_gbe_adapter, napi);
 +      struct net_device *netdev = adapter->netdev;
 +      int work_done = 0;
 +      bool poll_end_flag = false;
 +      bool cleaned = false;
++      u32 int_en;
 +
 +      pr_debug("budget : %d\n", budget);
 +
 +      /* Keep link state information with original netdev */
 +      if (!netif_carrier_ok(netdev)) {
 +              poll_end_flag = true;
 +      } else {
 +              pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
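++              /* Rx was stopped on a FIFO error; restart it and re-enable
++               * the Rx FIFO error interrupt. */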
++              if (adapter->rx_stop_flag) {
++                      adapter->rx_stop_flag = false;
++                      pch_gbe_start_receive(&adapter->hw);
++                      int_en = ioread32(&adapter->hw.reg->INT_EN);
++                      iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
++                                      &adapter->hw.reg->INT_EN);
++              }
++              cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
 +
 +              if (cleaned)
 +                      work_done = budget;
 +              /* If no Tx and not enough Rx work done,
 +               * exit the polling mode
 +               */
 +              if ((work_done < budget) || !netif_running(netdev))
 +                      poll_end_flag = true;
 +      }
 +
 +      if (poll_end_flag) {
 +              napi_complete(napi);
 +              pch_gbe_irq_enable(adapter);
 +      }
 +
 +      pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
 +               poll_end_flag, work_done, budget);
 +
 +      return work_done;
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +/**
 + * pch_gbe_netpoll - Used by things like netconsole to send skbs
 + * @netdev:  Network interface device structure
 + */
 +static void pch_gbe_netpoll(struct net_device *netdev)
 +{
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +      disable_irq(adapter->pdev->irq);
 +      pch_gbe_intr(adapter->pdev->irq, netdev);
 +      enable_irq(adapter->pdev->irq);
 +}
 +#endif
 +
 +static const struct net_device_ops pch_gbe_netdev_ops = {
 +      .ndo_open = pch_gbe_open,
 +      .ndo_stop = pch_gbe_stop,
 +      .ndo_start_xmit = pch_gbe_xmit_frame,
 +      .ndo_get_stats = pch_gbe_get_stats,
 +      .ndo_set_mac_address = pch_gbe_set_mac,
 +      .ndo_tx_timeout = pch_gbe_tx_timeout,
 +      .ndo_change_mtu = pch_gbe_change_mtu,
 +      .ndo_set_features = pch_gbe_set_features,
 +      .ndo_do_ioctl = pch_gbe_ioctl,
 +      .ndo_set_rx_mode = pch_gbe_set_multi,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller = pch_gbe_netpoll,
 +#endif
 +};
 +
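 +/* PCI AER callbacks: detach and disable the device when an error is
 + * detected, re-enable and reset it on slot reset, and bring the
 + * interface back up once I/O resumes. */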
 +static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
 +                                              pci_channel_state_t state)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +      netif_device_detach(netdev);
 +      if (netif_running(netdev))
 +              pch_gbe_down(adapter);
 +      pci_disable_device(pdev);
 +      /* Request a slot reset. */
 +      return PCI_ERS_RESULT_NEED_RESET;
 +}
 +
 +static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +
 +      if (pci_enable_device(pdev)) {
 +              pr_err("Cannot re-enable PCI device after reset\n");
 +              return PCI_ERS_RESULT_DISCONNECT;
 +      }
 +      pci_set_master(pdev);
 +      pci_enable_wake(pdev, PCI_D0, 0);
 +      pch_gbe_hal_power_up_phy(hw);
 +      pch_gbe_reset(adapter);
 +      /* Clear wake up status */
 +      pch_gbe_mac_set_wol_event(hw, 0);
 +
 +      return PCI_ERS_RESULT_RECOVERED;
 +}
 +
 +static void pch_gbe_io_resume(struct pci_dev *pdev)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +      if (netif_running(netdev)) {
 +              if (pch_gbe_up(adapter)) {
 +                      pr_debug("can't bring device back up after reset\n");
 +                      return;
 +              }
 +      }
 +      netif_device_attach(netdev);
 +}
 +
 +static int __pch_gbe_suspend(struct pci_dev *pdev)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      u32 wufc = adapter->wake_up_evt;
 +      int retval = 0;
 +
 +      netif_device_detach(netdev);
 +      if (netif_running(netdev))
 +              pch_gbe_down(adapter);
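 +      /* If wake-up events are configured, keep the receiver set up so
 +       * wake-on-LAN frames can be detected; otherwise power down the PHY. */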
 +      if (wufc) {
 +              pch_gbe_set_multi(netdev);
 +              pch_gbe_setup_rctl(adapter);
 +              pch_gbe_configure_rx(adapter);
 +              pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
 +                                      hw->mac.link_duplex);
 +              pch_gbe_set_mode(adapter, hw->mac.link_speed,
 +                                      hw->mac.link_duplex);
 +              pch_gbe_mac_set_wol_event(hw, wufc);
 +              pci_disable_device(pdev);
 +      } else {
 +              pch_gbe_hal_power_down_phy(hw);
 +              pch_gbe_mac_set_wol_event(hw, wufc);
 +              pci_disable_device(pdev);
 +      }
 +      return retval;
 +}
 +
 +#ifdef CONFIG_PM
 +static int pch_gbe_suspend(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +
 +      return __pch_gbe_suspend(pdev);
 +}
 +
 +static int pch_gbe_resume(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +      struct pch_gbe_hw *hw = &adapter->hw;
 +      u32 err;
 +
 +      err = pci_enable_device(pdev);
 +      if (err) {
 +              pr_err("Cannot enable PCI device from suspend\n");
 +              return err;
 +      }
 +      pci_set_master(pdev);
 +      pch_gbe_hal_power_up_phy(hw);
 +      pch_gbe_reset(adapter);
 +      /* Clear wake on lan control and status */
 +      pch_gbe_mac_set_wol_event(hw, 0);
 +
 +      if (netif_running(netdev))
 +              pch_gbe_up(adapter);
 +      netif_device_attach(netdev);
 +
 +      return 0;
 +}
 +#endif /* CONFIG_PM */
 +
 +static void pch_gbe_shutdown(struct pci_dev *pdev)
 +{
 +      __pch_gbe_suspend(pdev);
 +      if (system_state == SYSTEM_POWER_OFF) {
 +              pci_wake_from_d3(pdev, true);
 +              pci_set_power_state(pdev, PCI_D3hot);
 +      }
 +}
 +
 +static void pch_gbe_remove(struct pci_dev *pdev)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 +
 +      cancel_work_sync(&adapter->reset_task);
 +      unregister_netdev(netdev);
 +
 +      pch_gbe_hal_phy_hw_reset(&adapter->hw);
 +
 +      kfree(adapter->tx_ring);
 +      kfree(adapter->rx_ring);
 +
 +      iounmap(adapter->hw.reg);
 +      pci_release_regions(pdev);
 +      free_netdev(netdev);
 +      pci_disable_device(pdev);
 +}
 +
 +static int pch_gbe_probe(struct pci_dev *pdev,
 +                        const struct pci_device_id *pci_id)
 +{
 +      struct net_device *netdev;
 +      struct pch_gbe_adapter *adapter;
 +      int ret;
 +
 +      ret = pci_enable_device(pdev);
 +      if (ret)
 +              return ret;
 +
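 +      /* Prefer 64-bit DMA, falling back to 32-bit masks if that fails. */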
 +      if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
 +              || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 +              ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +              if (ret) {
 +                      ret = pci_set_consistent_dma_mask(pdev,
 +                                                        DMA_BIT_MASK(32));
 +                      if (ret) {
 +                              dev_err(&pdev->dev, "ERR: No usable DMA "
 +                                      "configuration, aborting\n");
 +                              goto err_disable_device;
 +                      }
 +              }
 +      }
 +
 +      ret = pci_request_regions(pdev, KBUILD_MODNAME);
 +      if (ret) {
 +              dev_err(&pdev->dev,
 +                      "ERR: Can't reserve PCI I/O and memory resources\n");
 +              goto err_disable_device;
 +      }
 +      pci_set_master(pdev);
 +
 +      netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
 +      if (!netdev) {
 +              ret = -ENOMEM;
 +              dev_err(&pdev->dev,
 +                      "ERR: Can't allocate and set up an Ethernet device\n");
 +              goto err_release_pci;
 +      }
 +      SET_NETDEV_DEV(netdev, &pdev->dev);
 +
 +      pci_set_drvdata(pdev, netdev);
 +      adapter = netdev_priv(netdev);
 +      adapter->netdev = netdev;
 +      adapter->pdev = pdev;
 +      adapter->hw.back = adapter;
 +      adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
 +      if (!adapter->hw.reg) {
 +              ret = -EIO;
 +              dev_err(&pdev->dev, "Can't ioremap\n");
 +              goto err_free_netdev;
 +      }
 +
 +      netdev->netdev_ops = &pch_gbe_netdev_ops;
 +      netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
 +      netif_napi_add(netdev, &adapter->napi,
 +                     pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
 +      netdev->hw_features = NETIF_F_RXCSUM |
 +              NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 +      netdev->features = netdev->hw_features;
 +      pch_gbe_set_ethtool_ops(netdev);
 +
 +      pch_gbe_mac_load_mac_addr(&adapter->hw);
 +      pch_gbe_mac_reset_hw(&adapter->hw);
 +
 +      /* setup the private structure */
 +      ret = pch_gbe_sw_init(adapter);
 +      if (ret)
 +              goto err_iounmap;
 +
 +      /* Initialize PHY */
 +      ret = pch_gbe_init_phy(adapter);
 +      if (ret) {
 +              dev_err(&pdev->dev, "PHY initialize error\n");
 +              goto err_free_adapter;
 +      }
 +      pch_gbe_hal_get_bus_info(&adapter->hw);
 +
 +      /* Read the MAC address and store it in the private data */
 +      ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
 +      if (ret) {
 +              dev_err(&pdev->dev, "MAC address Read Error\n");
 +              goto err_free_adapter;
 +      }
 +
 +      memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
 +      if (!is_valid_ether_addr(netdev->dev_addr)) {
 +              dev_err(&pdev->dev, "Invalid MAC Address\n");
 +              ret = -EIO;
 +              goto err_free_adapter;
 +      }
 +      setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
 +                  (unsigned long)adapter);
 +
 +      INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
 +
 +      pch_gbe_check_options(adapter);
 +
 +      /* initialize the wol settings based on the eeprom settings */
 +      adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
 +      dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
 +
 +      /* reset the hardware with the new settings */
 +      pch_gbe_reset(adapter);
 +
 +      ret = register_netdev(netdev);
 +      if (ret)
 +              goto err_free_adapter;
 +      /* tell the stack to leave us alone until pch_gbe_open() is called */
 +      netif_carrier_off(netdev);
 +      netif_stop_queue(netdev);
 +
 +      dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
 +
 +      device_set_wakeup_enable(&pdev->dev, 1);
 +      return 0;
 +
 +err_free_adapter:
 +      pch_gbe_hal_phy_hw_reset(&adapter->hw);
 +      kfree(adapter->tx_ring);
 +      kfree(adapter->rx_ring);
 +err_iounmap:
 +      iounmap(adapter->hw.reg);
 +err_free_netdev:
 +      free_netdev(netdev);
 +err_release_pci:
 +      pci_release_regions(pdev);
 +err_disable_device:
 +      pci_disable_device(pdev);
 +      return ret;
 +}
 +
 +static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
 +      {.vendor = PCI_VENDOR_ID_INTEL,
 +       .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
 +       .subvendor = PCI_ANY_ID,
 +       .subdevice = PCI_ANY_ID,
 +       .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
 +       .class_mask = (0xFFFF00)
 +       },
 +      {.vendor = PCI_VENDOR_ID_ROHM,
 +       .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
 +       .subvendor = PCI_ANY_ID,
 +       .subdevice = PCI_ANY_ID,
 +       .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
 +       .class_mask = (0xFFFF00)
 +       },
++      {.vendor = PCI_VENDOR_ID_ROHM,
++       .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
++       .subvendor = PCI_ANY_ID,
++       .subdevice = PCI_ANY_ID,
++       .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
++       .class_mask = (0xFFFF00)
++       },
 +      /* required last entry */
 +      {0}
 +};
 +
 +#ifdef CONFIG_PM
 +static const struct dev_pm_ops pch_gbe_pm_ops = {
 +      .suspend = pch_gbe_suspend,
 +      .resume = pch_gbe_resume,
 +      .freeze = pch_gbe_suspend,
 +      .thaw = pch_gbe_resume,
 +      .poweroff = pch_gbe_suspend,
 +      .restore = pch_gbe_resume,
 +};
 +#endif
 +
 +static struct pci_error_handlers pch_gbe_err_handler = {
 +      .error_detected = pch_gbe_io_error_detected,
 +      .slot_reset = pch_gbe_io_slot_reset,
 +      .resume = pch_gbe_io_resume
 +};
 +
 +static struct pci_driver pch_gbe_driver = {
 +      .name = KBUILD_MODNAME,
 +      .id_table = pch_gbe_pcidev_id,
 +      .probe = pch_gbe_probe,
 +      .remove = pch_gbe_remove,
 +#ifdef CONFIG_PM
 +      .driver.pm = &pch_gbe_pm_ops,
 +#endif
 +      .shutdown = pch_gbe_shutdown,
 +      .err_handler = &pch_gbe_err_handler
 +};
 +
 +
 +static int __init pch_gbe_init_module(void)
 +{
 +      int ret;
 +
 +      ret = pci_register_driver(&pch_gbe_driver);
 +      if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
 +              if (copybreak == 0) {
 +                      pr_info("copybreak disabled\n");
 +              } else {
 +                      pr_info("copybreak enabled for packets <= %u bytes\n",
 +                              copybreak);
 +              }
 +      }
 +      return ret;
 +}
 +
 +static void __exit pch_gbe_exit_module(void)
 +{
 +      pci_unregister_driver(&pch_gbe_driver);
 +}
 +
 +module_init(pch_gbe_init_module);
 +module_exit(pch_gbe_exit_module);
 +
 +MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
 +MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(DRV_VERSION);
 +MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
 +
 +module_param(copybreak, uint, 0644);
 +MODULE_PARM_DESC(copybreak,
 +      "Maximum size of packet that is copied to a new buffer on receive");
 +
 +/* pch_gbe_main.c */
index 835bbb5,0000000..6eb9f4e
mode 100644,000000..100644
--- /dev/null
@@@ -1,5824 -1,0 +1,5846 @@@
-       if (RTL_R8(PHYstatus) & TBI_Enable)
 +/*
 + * r8169.c: RealTek 8169/8168/8101 ethernet driver.
 + *
 + * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
 + * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
 + * Copyright (c) a lot of people too. Please respect their work.
 + *
 + * See MAINTAINERS file for support contact information.
 + */
 +
 +#include <linux/module.h>
 +#include <linux/moduleparam.h>
 +#include <linux/pci.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/delay.h>
 +#include <linux/ethtool.h>
 +#include <linux/mii.h>
 +#include <linux/if_vlan.h>
 +#include <linux/crc32.h>
 +#include <linux/in.h>
 +#include <linux/ip.h>
 +#include <linux/tcp.h>
 +#include <linux/init.h>
 +#include <linux/interrupt.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/pm_runtime.h>
 +#include <linux/firmware.h>
 +#include <linux/pci-aspm.h>
 +#include <linux/prefetch.h>
 +
 +#include <asm/system.h>
 +#include <asm/io.h>
 +#include <asm/irq.h>
 +
 +#define RTL8169_VERSION "2.3LK-NAPI"
 +#define MODULENAME "r8169"
 +#define PFX MODULENAME ": "
 +
 +#define FIRMWARE_8168D_1      "rtl_nic/rtl8168d-1.fw"
 +#define FIRMWARE_8168D_2      "rtl_nic/rtl8168d-2.fw"
 +#define FIRMWARE_8168E_1      "rtl_nic/rtl8168e-1.fw"
 +#define FIRMWARE_8168E_2      "rtl_nic/rtl8168e-2.fw"
 +#define FIRMWARE_8168E_3      "rtl_nic/rtl8168e-3.fw"
 +#define FIRMWARE_8105E_1      "rtl_nic/rtl8105e-1.fw"
 +
 +#ifdef RTL8169_DEBUG
 +#define assert(expr) \
 +      if (!(expr)) {                                  \
 +              printk( "Assertion failed! %s,%s,%s,line=%d\n", \
 +              #expr,__FILE__,__func__,__LINE__);              \
 +      }
 +#define dprintk(fmt, args...) \
 +      do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
 +#else
 +#define assert(expr) do {} while (0)
 +#define dprintk(fmt, args...) do {} while (0)
 +#endif /* RTL8169_DEBUG */
 +
 +#define R8169_MSG_DEFAULT \
 +      (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 +
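 +/* Free Tx descriptors; one slot is kept unused to distinguish a full
 +   ring from an empty one. */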
 +#define TX_BUFFS_AVAIL(tp) \
 +      (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
 +
 +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 +   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 +static const int multicast_filter_limit = 32;
 +
 +/* MAC address length */
 +#define MAC_ADDR_LEN  6
 +
 +#define MAX_READ_REQUEST_SHIFT        12
 +#define TX_DMA_BURST  6       /* Maximum PCI burst, '6' is 1024 */
 +#define SafeMtu               0x1c20  /* ... actually life sucks beyond ~7k */
 +#define InterFrameGap 0x03    /* 3 means InterFrameGap = the shortest one */
 +
 +#define R8169_REGS_SIZE               256
 +#define R8169_NAPI_WEIGHT     64
 +#define NUM_TX_DESC   64      /* Number of Tx descriptor registers */
 +#define NUM_RX_DESC   256     /* Number of Rx descriptor registers */
 +#define RX_BUF_SIZE   1536    /* Rx Buffer size */
 +#define R8169_TX_RING_BYTES   (NUM_TX_DESC * sizeof(struct TxDesc))
 +#define R8169_RX_RING_BYTES   (NUM_RX_DESC * sizeof(struct RxDesc))
 +
 +#define RTL8169_TX_TIMEOUT    (6*HZ)
 +#define RTL8169_PHY_TIMEOUT   (10*HZ)
 +
 +#define RTL_EEPROM_SIG                cpu_to_le32(0x8129)
 +#define RTL_EEPROM_SIG_MASK   cpu_to_le32(0xffff)
 +#define RTL_EEPROM_SIG_ADDR   0x0000
 +
 +/* write/read MMIO register */
 +#define RTL_W8(reg, val8)     writeb ((val8), ioaddr + (reg))
 +#define RTL_W16(reg, val16)   writew ((val16), ioaddr + (reg))
 +#define RTL_W32(reg, val32)   writel ((val32), ioaddr + (reg))
 +#define RTL_R8(reg)           readb (ioaddr + (reg))
 +#define RTL_R16(reg)          readw (ioaddr + (reg))
 +#define RTL_R32(reg)          readl (ioaddr + (reg))
 +
 +enum mac_version {
 +      RTL_GIGA_MAC_VER_01 = 0,
 +      RTL_GIGA_MAC_VER_02,
 +      RTL_GIGA_MAC_VER_03,
 +      RTL_GIGA_MAC_VER_04,
 +      RTL_GIGA_MAC_VER_05,
 +      RTL_GIGA_MAC_VER_06,
 +      RTL_GIGA_MAC_VER_07,
 +      RTL_GIGA_MAC_VER_08,
 +      RTL_GIGA_MAC_VER_09,
 +      RTL_GIGA_MAC_VER_10,
 +      RTL_GIGA_MAC_VER_11,
 +      RTL_GIGA_MAC_VER_12,
 +      RTL_GIGA_MAC_VER_13,
 +      RTL_GIGA_MAC_VER_14,
 +      RTL_GIGA_MAC_VER_15,
 +      RTL_GIGA_MAC_VER_16,
 +      RTL_GIGA_MAC_VER_17,
 +      RTL_GIGA_MAC_VER_18,
 +      RTL_GIGA_MAC_VER_19,
 +      RTL_GIGA_MAC_VER_20,
 +      RTL_GIGA_MAC_VER_21,
 +      RTL_GIGA_MAC_VER_22,
 +      RTL_GIGA_MAC_VER_23,
 +      RTL_GIGA_MAC_VER_24,
 +      RTL_GIGA_MAC_VER_25,
 +      RTL_GIGA_MAC_VER_26,
 +      RTL_GIGA_MAC_VER_27,
 +      RTL_GIGA_MAC_VER_28,
 +      RTL_GIGA_MAC_VER_29,
 +      RTL_GIGA_MAC_VER_30,
 +      RTL_GIGA_MAC_VER_31,
 +      RTL_GIGA_MAC_VER_32,
 +      RTL_GIGA_MAC_VER_33,
 +      RTL_GIGA_MAC_VER_34,
 +      RTL_GIGA_MAC_NONE   = 0xff,
 +};
 +
 +enum rtl_tx_desc_version {
 +      RTL_TD_0        = 0,
 +      RTL_TD_1        = 1,
 +};
 +
 +#define _R(NAME,TD,FW) \
 +      { .name = NAME, .txd_version = TD, .fw_name = FW }
 +
 +static const struct {
 +      const char *name;
 +      enum rtl_tx_desc_version txd_version;
 +      const char *fw_name;
 +} rtl_chip_infos[] = {
 +      /* PCI devices. */
 +      [RTL_GIGA_MAC_VER_01] =
 +              _R("RTL8169",           RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_02] =
 +              _R("RTL8169s",          RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_03] =
 +              _R("RTL8110s",          RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_04] =
 +              _R("RTL8169sb/8110sb",  RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_05] =
 +              _R("RTL8169sc/8110sc",  RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_06] =
 +              _R("RTL8169sc/8110sc",  RTL_TD_0, NULL),
 +      /* PCI-E devices. */
 +      [RTL_GIGA_MAC_VER_07] =
 +              _R("RTL8102e",          RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_08] =
 +              _R("RTL8102e",          RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_09] =
 +              _R("RTL8102e",          RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_10] =
 +              _R("RTL8101e",          RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_11] =
 +              _R("RTL8168b/8111b",    RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_12] =
 +              _R("RTL8168b/8111b",    RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_13] =
 +              _R("RTL8101e",          RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_14] =
 +              _R("RTL8100e",          RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_15] =
 +              _R("RTL8100e",          RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_16] =
 +              _R("RTL8101e",          RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_17] =
 +              _R("RTL8168b/8111b",    RTL_TD_0, NULL),
 +      [RTL_GIGA_MAC_VER_18] =
 +              _R("RTL8168cp/8111cp",  RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_19] =
 +              _R("RTL8168c/8111c",    RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_20] =
 +              _R("RTL8168c/8111c",    RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_21] =
 +              _R("RTL8168c/8111c",    RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_22] =
 +              _R("RTL8168c/8111c",    RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_23] =
 +              _R("RTL8168cp/8111cp",  RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_24] =
 +              _R("RTL8168cp/8111cp",  RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_25] =
 +              _R("RTL8168d/8111d",    RTL_TD_1, FIRMWARE_8168D_1),
 +      [RTL_GIGA_MAC_VER_26] =
 +              _R("RTL8168d/8111d",    RTL_TD_1, FIRMWARE_8168D_2),
 +      [RTL_GIGA_MAC_VER_27] =
 +              _R("RTL8168dp/8111dp",  RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_28] =
 +              _R("RTL8168dp/8111dp",  RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_29] =
 +              _R("RTL8105e",          RTL_TD_1, FIRMWARE_8105E_1),
 +      [RTL_GIGA_MAC_VER_30] =
 +              _R("RTL8105e",          RTL_TD_1, FIRMWARE_8105E_1),
 +      [RTL_GIGA_MAC_VER_31] =
 +              _R("RTL8168dp/8111dp",  RTL_TD_1, NULL),
 +      [RTL_GIGA_MAC_VER_32] =
 +              _R("RTL8168e/8111e",    RTL_TD_1, FIRMWARE_8168E_1),
 +      [RTL_GIGA_MAC_VER_33] =
 +              _R("RTL8168e/8111e",    RTL_TD_1, FIRMWARE_8168E_2),
 +      [RTL_GIGA_MAC_VER_34] =
 +              _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3)
 +};
 +#undef _R
 +
 +enum cfg_version {
 +      RTL_CFG_0 = 0x00,
 +      RTL_CFG_1,
 +      RTL_CFG_2
 +};
 +
 +static void rtl_hw_start_8169(struct net_device *);
 +static void rtl_hw_start_8168(struct net_device *);
 +static void rtl_hw_start_8101(struct net_device *);
 +
 +static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
 +      { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8129), 0, 0, RTL_CFG_0 },
 +      { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8136), 0, 0, RTL_CFG_2 },
 +      { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8167), 0, 0, RTL_CFG_0 },
 +      { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8168), 0, 0, RTL_CFG_1 },
 +      { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8169), 0, 0, RTL_CFG_0 },
 +      { PCI_DEVICE(PCI_VENDOR_ID_DLINK,       0x4300), 0, 0, RTL_CFG_0 },
 +      { PCI_DEVICE(PCI_VENDOR_ID_DLINK,       0x4302), 0, 0, RTL_CFG_0 },
 +      { PCI_DEVICE(PCI_VENDOR_ID_AT,          0xc107), 0, 0, RTL_CFG_0 },
 +      { PCI_DEVICE(0x16ec,                    0x0116), 0, 0, RTL_CFG_0 },
 +      { PCI_VENDOR_ID_LINKSYS,                0x1032,
 +              PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
 +      { 0x0001,                               0x8168,
 +              PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
 +      {0,},
 +};
 +
 +MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 +
 +static int rx_buf_sz = 16383;
 +static int use_dac;
 +static struct {
 +      u32 msg_enable;
 +} debug = { -1 };
 +
 +enum rtl_registers {
 +      MAC0            = 0,    /* Ethernet hardware address. */
 +      MAC4            = 4,
 +      MAR0            = 8,    /* Multicast filter. */
 +      CounterAddrLow          = 0x10,
 +      CounterAddrHigh         = 0x14,
 +      TxDescStartAddrLow      = 0x20,
 +      TxDescStartAddrHigh     = 0x24,
 +      TxHDescStartAddrLow     = 0x28,
 +      TxHDescStartAddrHigh    = 0x2c,
 +      FLASH           = 0x30,
 +      ERSR            = 0x36,
 +      ChipCmd         = 0x37,
 +      TxPoll          = 0x38,
 +      IntrMask        = 0x3c,
 +      IntrStatus      = 0x3e,
 +
 +      TxConfig        = 0x40,
 +#define       TXCFG_AUTO_FIFO                 (1 << 7)        /* 8111e-vl */
 +#define       TXCFG_EMPTY                     (1 << 11)       /* 8111e-vl */
 +
 +      RxConfig        = 0x44,
 +#define       RX128_INT_EN                    (1 << 15)       /* 8111c and later */
 +#define       RX_MULTI_EN                     (1 << 14)       /* 8111c only */
 +#define       RXCFG_FIFO_SHIFT                13
 +                                      /* No threshold before first PCI xfer */
 +#define       RX_FIFO_THRESH                  (7 << RXCFG_FIFO_SHIFT)
 +#define       RXCFG_DMA_SHIFT                 8
 +                                      /* Unlimited maximum PCI burst. */
 +#define       RX_DMA_BURST                    (7 << RXCFG_DMA_SHIFT)
 +
 +      RxMissed        = 0x4c,
 +      Cfg9346         = 0x50,
 +      Config0         = 0x51,
 +      Config1         = 0x52,
 +      Config2         = 0x53,
 +      Config3         = 0x54,
 +      Config4         = 0x55,
 +      Config5         = 0x56,
 +      MultiIntr       = 0x5c,
 +      PHYAR           = 0x60,
 +      PHYstatus       = 0x6c,
 +      RxMaxSize       = 0xda,
 +      CPlusCmd        = 0xe0,
 +      IntrMitigate    = 0xe2,
 +      RxDescAddrLow   = 0xe4,
 +      RxDescAddrHigh  = 0xe8,
 +      EarlyTxThres    = 0xec, /* 8169. Unit of 32 bytes. */
 +
 +#define NoEarlyTx     0x3f    /* Max value : no early transmit. */
 +
 +      MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
 +
 +#define TxPacketMax   (8064 >> 7)
 +
 +      FuncEvent       = 0xf0,
 +      FuncEventMask   = 0xf4,
 +      FuncPresetState = 0xf8,
 +      FuncForceEvent  = 0xfc,
 +};
 +
 +enum rtl8110_registers {
 +      TBICSR                  = 0x64,
 +      TBI_ANAR                = 0x68,
 +      TBI_LPAR                = 0x6a,
 +};
 +
 +enum rtl8168_8101_registers {
 +      CSIDR                   = 0x64,
 +      CSIAR                   = 0x68,
 +#define       CSIAR_FLAG                      0x80000000
 +#define       CSIAR_WRITE_CMD                 0x80000000
 +#define       CSIAR_BYTE_ENABLE               0x0f
 +#define       CSIAR_BYTE_ENABLE_SHIFT         12
 +#define       CSIAR_ADDR_MASK                 0x0fff
 +      PMCH                    = 0x6f,
 +      EPHYAR                  = 0x80,
 +#define       EPHYAR_FLAG                     0x80000000
 +#define       EPHYAR_WRITE_CMD                0x80000000
 +#define       EPHYAR_REG_MASK                 0x1f
 +#define       EPHYAR_REG_SHIFT                16
 +#define       EPHYAR_DATA_MASK                0xffff
 +      DLLPR                   = 0xd0,
 +#define       PFM_EN                          (1 << 6)
 +      DBG_REG                 = 0xd1,
 +#define       FIX_NAK_1                       (1 << 4)
 +#define       FIX_NAK_2                       (1 << 3)
 +      TWSI                    = 0xd2,
 +      MCU                     = 0xd3,
 +#define       NOW_IS_OOB                      (1 << 7)
 +#define       EN_NDP                          (1 << 3)
 +#define       EN_OOB_RESET                    (1 << 2)
 +      EFUSEAR                 = 0xdc,
 +#define       EFUSEAR_FLAG                    0x80000000
 +#define       EFUSEAR_WRITE_CMD               0x80000000
 +#define       EFUSEAR_READ_CMD                0x00000000
 +#define       EFUSEAR_REG_MASK                0x03ff
 +#define       EFUSEAR_REG_SHIFT               8
 +#define       EFUSEAR_DATA_MASK               0xff
 +};
 +
 +enum rtl8168_registers {
 +      LED_FREQ                = 0x1a,
 +      EEE_LED                 = 0x1b,
 +      ERIDR                   = 0x70,
 +      ERIAR                   = 0x74,
 +#define ERIAR_FLAG                    0x80000000
 +#define ERIAR_WRITE_CMD                       0x80000000
 +#define ERIAR_READ_CMD                        0x00000000
 +#define ERIAR_ADDR_BYTE_ALIGN         4
 +#define ERIAR_TYPE_SHIFT              16
 +#define ERIAR_EXGMAC                  (0x00 << ERIAR_TYPE_SHIFT)
 +#define ERIAR_MSIX                    (0x01 << ERIAR_TYPE_SHIFT)
 +#define ERIAR_ASF                     (0x02 << ERIAR_TYPE_SHIFT)
 +#define ERIAR_MASK_SHIFT              12
 +#define ERIAR_MASK_0001                       (0x1 << ERIAR_MASK_SHIFT)
 +#define ERIAR_MASK_0011                       (0x3 << ERIAR_MASK_SHIFT)
 +#define ERIAR_MASK_1111                       (0xf << ERIAR_MASK_SHIFT)
 +      EPHY_RXER_NUM           = 0x7c,
 +      OCPDR                   = 0xb0, /* OCP GPHY access */
 +#define OCPDR_WRITE_CMD                       0x80000000
 +#define OCPDR_READ_CMD                        0x00000000
 +#define OCPDR_REG_MASK                        0x7f
 +#define OCPDR_GPHY_REG_SHIFT          16
 +#define OCPDR_DATA_MASK                       0xffff
 +      OCPAR                   = 0xb4,
 +#define OCPAR_FLAG                    0x80000000
 +#define OCPAR_GPHY_WRITE_CMD          0x8000f060
 +#define OCPAR_GPHY_READ_CMD           0x0000f060
 +      RDSAR1                  = 0xd0, /* 8168c only. Undocumented on 8168dp */
 +      MISC                    = 0xf0, /* 8168e only. */
 +#define TXPLA_RST                     (1 << 29)
 +#define PWM_EN                                (1 << 22)
 +};
 +
 +enum rtl_register_content {
 +      /* InterruptStatusBits */
 +      SYSErr          = 0x8000,
 +      PCSTimeout      = 0x4000,
 +      SWInt           = 0x0100,
 +      TxDescUnavail   = 0x0080,
 +      RxFIFOOver      = 0x0040,
 +      LinkChg         = 0x0020,
 +      RxOverflow      = 0x0010,
 +      TxErr           = 0x0008,
 +      TxOK            = 0x0004,
 +      RxErr           = 0x0002,
 +      RxOK            = 0x0001,
 +
 +      /* RxStatusDesc */
++      RxBOVF  = (1 << 24),
 +      RxFOVF  = (1 << 23),
 +      RxRWT   = (1 << 22),
 +      RxRES   = (1 << 21),
 +      RxRUNT  = (1 << 20),
 +      RxCRC   = (1 << 19),
 +
 +      /* ChipCmdBits */
 +      StopReq         = 0x80,
 +      CmdReset        = 0x10,
 +      CmdRxEnb        = 0x08,
 +      CmdTxEnb        = 0x04,
 +      RxBufEmpty      = 0x01,
 +
 +      /* TXPoll register p.5 */
 +      HPQ             = 0x80,         /* Poll cmd on the high prio queue */
 +      NPQ             = 0x40,         /* Poll cmd on the low prio queue */
 +      FSWInt          = 0x01,         /* Forced software interrupt */
 +
 +      /* Cfg9346Bits */
 +      Cfg9346_Lock    = 0x00,
 +      Cfg9346_Unlock  = 0xc0,
 +
 +      /* rx_mode_bits */
 +      AcceptErr       = 0x20,
 +      AcceptRunt      = 0x10,
 +      AcceptBroadcast = 0x08,
 +      AcceptMulticast = 0x04,
 +      AcceptMyPhys    = 0x02,
 +      AcceptAllPhys   = 0x01,
 +#define RX_CONFIG_ACCEPT_MASK         0x3f
 +
 +      /* TxConfigBits */
 +      TxInterFrameGapShift = 24,
 +      TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
 +
 +      /* Config1 register p.24 */
 +      LEDS1           = (1 << 7),
 +      LEDS0           = (1 << 6),
 +      MSIEnable       = (1 << 5),     /* Enable Message Signaled Interrupt */
 +      Speed_down      = (1 << 4),
 +      MEMMAP          = (1 << 3),
 +      IOMAP           = (1 << 2),
 +      VPD             = (1 << 1),
 +      PMEnable        = (1 << 0),     /* Power Management Enable */
 +
 +      /* Config2 register p. 25 */
 +      PCI_Clock_66MHz = 0x01,
 +      PCI_Clock_33MHz = 0x00,
 +
 +      /* Config3 register p.25 */
 +      MagicPacket     = (1 << 5),     /* Wake up when receives a Magic Packet */
 +      LinkUp          = (1 << 4),     /* Wake up when the cable connection is re-established */
 +      Beacon_en       = (1 << 0),     /* 8168 only. Reserved in the 8168b */
 +
 +      /* Config5 register p.27 */
 +      BWF             = (1 << 6),     /* Accept Broadcast wakeup frame */
 +      MWF             = (1 << 5),     /* Accept Multicast wakeup frame */
 +      UWF             = (1 << 4),     /* Accept Unicast wakeup frame */
 +      Spi_en          = (1 << 3),
 +      LanWake         = (1 << 1),     /* LanWake enable/disable */
 +      PMEStatus       = (1 << 0),     /* PME status can be reset by PCI RST# */
 +
 +      /* TBICSR p.28 */
 +      TBIReset        = 0x80000000,
 +      TBILoopback     = 0x40000000,
 +      TBINwEnable     = 0x20000000,
 +      TBINwRestart    = 0x10000000,
 +      TBILinkOk       = 0x02000000,
 +      TBINwComplete   = 0x01000000,
 +
 +      /* CPlusCmd p.31 */
 +      EnableBist      = (1 << 15),    // 8168 8101
 +      Mac_dbgo_oe     = (1 << 14),    // 8168 8101
 +      Normal_mode     = (1 << 13),    // unused
 +      Force_half_dup  = (1 << 12),    // 8168 8101
 +      Force_rxflow_en = (1 << 11),    // 8168 8101
 +      Force_txflow_en = (1 << 10),    // 8168 8101
 +      Cxpl_dbg_sel    = (1 << 9),     // 8168 8101
 +      ASF             = (1 << 8),     // 8168 8101
 +      PktCntrDisable  = (1 << 7),     // 8168 8101
 +      Mac_dbgo_sel    = 0x001c,       // 8168
 +      RxVlan          = (1 << 6),
 +      RxChkSum        = (1 << 5),
 +      PCIDAC          = (1 << 4),
 +      PCIMulRW        = (1 << 3),
 +      INTT_0          = 0x0000,       // 8168
 +      INTT_1          = 0x0001,       // 8168
 +      INTT_2          = 0x0002,       // 8168
 +      INTT_3          = 0x0003,       // 8168
 +
 +      /* rtl8169_PHYstatus */
 +      TBI_Enable      = 0x80,
 +      TxFlowCtrl      = 0x40,
 +      RxFlowCtrl      = 0x20,
 +      _1000bpsF       = 0x10,
 +      _100bps         = 0x08,
 +      _10bps          = 0x04,
 +      LinkStatus      = 0x02,
 +      FullDup         = 0x01,
 +
 +      /* _TBICSRBit */
 +      TBILinkOK       = 0x02000000,
 +
 +      /* DumpCounterCommand */
 +      CounterDump     = 0x8,
 +};
 +
 +enum rtl_desc_bit {
 +      /* First doubleword. */
 +      DescOwn         = (1 << 31), /* Descriptor is owned by NIC */
 +      RingEnd         = (1 << 30), /* End of descriptor ring */
 +      FirstFrag       = (1 << 29), /* First segment of a packet */
 +      LastFrag        = (1 << 28), /* Final segment of a packet */
 +};
 +
 +/* Generic case. */
 +enum rtl_tx_desc_bit {
 +      /* First doubleword. */
 +      TD_LSO          = (1 << 27),            /* Large Send Offload */
 +#define TD_MSS_MAX                    0x07ffu /* MSS value */
 +
 +      /* Second doubleword. */
 +      TxVlanTag       = (1 << 17),            /* Add VLAN tag */
 +};
 +
 +/* 8169, 8168b and 810x except 8102e. */
 +enum rtl_tx_desc_bit_0 {
 +      /* First doubleword. */
 +#define TD0_MSS_SHIFT                 16      /* MSS position (11 bits) */
 +      TD0_TCP_CS      = (1 << 16),            /* Calculate TCP/IP checksum */
 +      TD0_UDP_CS      = (1 << 17),            /* Calculate UDP/IP checksum */
 +      TD0_IP_CS       = (1 << 18),            /* Calculate IP checksum */
 +};
 +
 +/* 8102e, 8168c and beyond. */
 +enum rtl_tx_desc_bit_1 {
 +      /* Second doubleword. */
 +#define TD1_MSS_SHIFT                 18      /* MSS position (11 bits) */
 +      TD1_IP_CS       = (1 << 29),            /* Calculate IP checksum */
 +      TD1_TCP_CS      = (1 << 30),            /* Calculate TCP/IP checksum */
 +      TD1_UDP_CS      = (1 << 31),            /* Calculate UDP/IP checksum */
 +};
 +
 +static const struct rtl_tx_desc_info {
 +      struct {
 +              u32 udp;
 +              u32 tcp;
 +      } checksum;
 +      u16 mss_shift;
 +      u16 opts_offset;
 +} tx_desc_info [] = {
 +      [RTL_TD_0] = {
 +              .checksum = {
 +                      .udp    = TD0_IP_CS | TD0_UDP_CS,
 +                      .tcp    = TD0_IP_CS | TD0_TCP_CS
 +              },
 +              .mss_shift      = TD0_MSS_SHIFT,
 +              .opts_offset    = 0
 +      },
 +      [RTL_TD_1] = {
 +              .checksum = {
 +                      .udp    = TD1_IP_CS | TD1_UDP_CS,
 +                      .tcp    = TD1_IP_CS | TD1_TCP_CS
 +              },
 +              .mss_shift      = TD1_MSS_SHIFT,
 +              .opts_offset    = 1
 +      }
 +};
 +
 +enum rtl_rx_desc_bit {
 +      /* Rx private */
 +      PID1            = (1 << 18), /* Protocol ID bit 1/2 */
 +      PID0            = (1 << 17), /* Protocol ID bit 2/2 */
 +
 +#define RxProtoUDP    (PID1)
 +#define RxProtoTCP    (PID0)
 +#define RxProtoIP     (PID1 | PID0)
 +#define RxProtoMask   RxProtoIP
 +
 +      IPFail          = (1 << 16), /* IP checksum failed */
 +      UDPFail         = (1 << 15), /* UDP/IP checksum failed */
 +      TCPFail         = (1 << 14), /* TCP/IP checksum failed */
 +      RxVlanTag       = (1 << 16), /* VLAN tag available */
 +};
 +
 +#define RsvdMask      0x3fffc000
 +
 +struct TxDesc {
 +      __le32 opts1;
 +      __le32 opts2;
 +      __le64 addr;
 +};
 +
 +struct RxDesc {
 +      __le32 opts1;
 +      __le32 opts2;
 +      __le64 addr;
 +};
 +
 +struct ring_info {
 +      struct sk_buff  *skb;
 +      u32             len;
 +      u8              __pad[sizeof(void *) - sizeof(u32)];
 +};
 +
 +enum features {
 +      RTL_FEATURE_WOL         = (1 << 0),
 +      RTL_FEATURE_MSI         = (1 << 1),
 +      RTL_FEATURE_GMII        = (1 << 2),
 +};
 +
 +struct rtl8169_counters {
 +      __le64  tx_packets;
 +      __le64  rx_packets;
 +      __le64  tx_errors;
 +      __le32  rx_errors;
 +      __le16  rx_missed;
 +      __le16  align_errors;
 +      __le32  tx_one_collision;
 +      __le32  tx_multi_collision;
 +      __le64  rx_unicast;
 +      __le64  rx_broadcast;
 +      __le32  rx_multicast;
 +      __le16  tx_aborted;
 +      __le16  tx_underun;
 +};
 +
 +struct rtl8169_private {
 +      void __iomem *mmio_addr;        /* memory map physical address */
 +      struct pci_dev *pci_dev;
 +      struct net_device *dev;
 +      struct napi_struct napi;
 +      spinlock_t lock;
 +      u32 msg_enable;
 +      u16 txd_version;
 +      u16 mac_version;
 +      u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
 +      u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
 +      u32 dirty_rx;
 +      u32 dirty_tx;
 +      struct TxDesc *TxDescArray;     /* 256-aligned Tx descriptor ring */
 +      struct RxDesc *RxDescArray;     /* 256-aligned Rx descriptor ring */
 +      dma_addr_t TxPhyAddr;
 +      dma_addr_t RxPhyAddr;
 +      void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
 +      struct ring_info tx_skb[NUM_TX_DESC];   /* Tx data buffers */
 +      struct timer_list timer;
 +      u16 cp_cmd;
 +      u16 intr_event;
 +      u16 napi_event;
 +      u16 intr_mask;
 +
 +      struct mdio_ops {
 +              void (*write)(void __iomem *, int, int);
 +              int (*read)(void __iomem *, int);
 +      } mdio_ops;
 +
 +      struct pll_power_ops {
 +              void (*down)(struct rtl8169_private *);
 +              void (*up)(struct rtl8169_private *);
 +      } pll_power_ops;
 +
 +      int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
 +      int (*get_settings)(struct net_device *, struct ethtool_cmd *);
 +      void (*phy_reset_enable)(struct rtl8169_private *tp);
 +      void (*hw_start)(struct net_device *);
 +      unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
 +      unsigned int (*link_ok)(void __iomem *);
 +      int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
 +      struct delayed_work task;
 +      unsigned features;
 +
 +      struct mii_if_info mii;
 +      struct rtl8169_counters counters;
 +      u32 saved_wolopts;
++      u32 opts1_mask;
 +
 +      struct rtl_fw {
 +              const struct firmware *fw;
 +
 +#define RTL_VER_SIZE          32
 +
 +              char version[RTL_VER_SIZE];
 +
 +              struct rtl_fw_phy_action {
 +                      __le32 *code;
 +                      size_t size;
 +              } phy_action;
 +      } *rtl_fw;
 +#define RTL_FIRMWARE_UNKNOWN  ERR_PTR(-EAGAIN)
 +};
 +
 +MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
 +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
 +module_param(use_dac, int, 0);
 +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 +module_param_named(debug, debug.msg_enable, int, 0);
 +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(RTL8169_VERSION);
 +MODULE_FIRMWARE(FIRMWARE_8168D_1);
 +MODULE_FIRMWARE(FIRMWARE_8168D_2);
 +MODULE_FIRMWARE(FIRMWARE_8168E_1);
 +MODULE_FIRMWARE(FIRMWARE_8168E_2);
++MODULE_FIRMWARE(FIRMWARE_8168E_3);
 +MODULE_FIRMWARE(FIRMWARE_8105E_1);
 +
 +static int rtl8169_open(struct net_device *dev);
 +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 +                                    struct net_device *dev);
 +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
 +static int rtl8169_init_ring(struct net_device *dev);
 +static void rtl_hw_start(struct net_device *dev);
 +static int rtl8169_close(struct net_device *dev);
 +static void rtl_set_rx_mode(struct net_device *dev);
 +static void rtl8169_tx_timeout(struct net_device *dev);
 +static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
 +static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
 +                              void __iomem *, u32 budget);
 +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
 +static void rtl8169_down(struct net_device *dev);
 +static void rtl8169_rx_clear(struct rtl8169_private *tp);
 +static int rtl8169_poll(struct napi_struct *napi, int budget);
 +
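 +/* Indirect OCP register access: write the command/address to OCPAR and
 + * poll its FLAG bit (up to ~2 ms) until the hardware completes the access. */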
 +static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      int i;
 +
 +      RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
 +      for (i = 0; i < 20; i++) {
 +              udelay(100);
 +              if (RTL_R32(OCPAR) & OCPAR_FLAG)
 +                      break;
 +      }
 +      return RTL_R32(OCPDR);
 +}
 +
 +static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      int i;
 +
 +      RTL_W32(OCPDR, data);
 +      RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
 +      for (i = 0; i < 20; i++) {
 +              udelay(100);
 +              if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
 +                      break;
 +      }
 +}
 +
 +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      int i;
 +
 +      RTL_W8(ERIDR, cmd);
 +      RTL_W32(ERIAR, 0x800010e8);
 +      msleep(2);
 +      for (i = 0; i < 5; i++) {
 +              udelay(100);
 +              if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
 +                      break;
 +      }
 +
 +      ocp_write(tp, 0x1, 0x30, 0x00000001);
 +}
 +
 +#define OOB_CMD_RESET         0x00
 +#define OOB_CMD_DRIVER_START  0x05
 +#define OOB_CMD_DRIVER_STOP   0x06
 +
 +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
 +{
 +      return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
 +}
 +
 +static void rtl8168_driver_start(struct rtl8169_private *tp)
 +{
 +      u16 reg;
 +      int i;
 +
 +      rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
 +
 +      reg = rtl8168_get_ocp_reg(tp);
 +
 +      for (i = 0; i < 10; i++) {
 +              msleep(10);
 +              if (ocp_read(tp, 0x0f, reg) & 0x00000800)
 +                      break;
 +      }
 +}
 +
 +static void rtl8168_driver_stop(struct rtl8169_private *tp)
 +{
 +      u16 reg;
 +      int i;
 +
 +      rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
 +
 +      reg = rtl8168_get_ocp_reg(tp);
 +
 +      for (i = 0; i < 10; i++) {
 +              msleep(10);
 +              if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
 +                      break;
 +      }
 +}
 +
 +static int r8168dp_check_dash(struct rtl8169_private *tp)
 +{
 +      u16 reg = rtl8168_get_ocp_reg(tp);
 +
 +      return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
 +}
 +
 +static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
 +{
 +      int i;
 +
 +      RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
 +
 +      for (i = 20; i > 0; i--) {
 +              /*
 +               * Check if the RTL8169 has completed writing to the specified
 +               * MII register.
 +               */
 +              if (!(RTL_R32(PHYAR) & 0x80000000))
 +                      break;
 +              udelay(25);
 +      }
 +      /*
 +       * According to hardware specs a 20us delay is required after write
 +       * complete indication, but before sending next command.
 +       */
 +      udelay(20);
 +}
 +
 +static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
 +{
 +      int i, value = -1;
 +
 +      RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
 +
 +      for (i = 20; i > 0; i--) {
 +              /*
 +               * Check if the RTL8169 has completed retrieving data from
 +               * the specified MII register.
 +               */
 +              if (RTL_R32(PHYAR) & 0x80000000) {
 +                      value = RTL_R32(PHYAR) & 0xffff;
 +                      break;
 +              }
 +              udelay(25);
 +      }
 +      /*
 +       * According to hardware specs a 20us delay is required after read
 +       * complete indication, but before sending next command.
 +       */
 +      udelay(20);
 +
 +      return value;
 +}
 +
 +static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
 +{
 +      int i;
 +
 +      RTL_W32(OCPDR, data |
 +              ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
 +      RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
 +      RTL_W32(EPHY_RXER_NUM, 0);
 +
 +      for (i = 0; i < 100; i++) {
 +              mdelay(1);
 +              if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
 +                      break;
 +      }
 +}
 +
 +static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
 +{
 +      r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
 +              (value & OCPDR_DATA_MASK));
 +}
 +
 +static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
 +{
 +      int i;
 +
 +      r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
 +
 +      mdelay(1);
 +      RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
 +      RTL_W32(EPHY_RXER_NUM, 0);
 +
 +      for (i = 0; i < 100; i++) {
 +              mdelay(1);
 +              if (RTL_R32(OCPAR) & OCPAR_FLAG)
 +                      break;
 +      }
 +
 +      return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
 +}
 +
 +#define R8168DP_1_MDIO_ACCESS_BIT     0x00020000
 +
 +static void r8168dp_2_mdio_start(void __iomem *ioaddr)
 +{
 +      RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
 +}
 +
 +static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
 +{
 +      RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
 +}
 +
 +static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
 +{
 +      r8168dp_2_mdio_start(ioaddr);
 +
 +      r8169_mdio_write(ioaddr, reg_addr, value);
 +
 +      r8168dp_2_mdio_stop(ioaddr);
 +}
 +
 +static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
 +{
 +      int value;
 +
 +      r8168dp_2_mdio_start(ioaddr);
 +
 +      value = r8169_mdio_read(ioaddr, reg_addr);
 +
 +      r8168dp_2_mdio_stop(ioaddr);
 +
 +      return value;
 +}
 +
 +static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
 +{
 +      tp->mdio_ops.write(tp->mmio_addr, location, val);
 +}
 +
 +static int rtl_readphy(struct rtl8169_private *tp, int location)
 +{
 +      return tp->mdio_ops.read(tp->mmio_addr, location);
 +}
 +
 +static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
 +{
 +      rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
 +}
 +
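 +/* Read-modify-write a PHY register: set the bits in @p, then clear the
 + * bits in @m. */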
 +static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
 +{
 +      int val;
 +
 +      val = rtl_readphy(tp, reg_addr);
 +      rtl_writephy(tp, reg_addr, (val | p) & ~m);
 +}
 +
 +static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
 +                         int val)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      rtl_writephy(tp, location, val);
 +}
 +
 +static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      return rtl_readphy(tp, location);
 +}
 +
 +static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
 +{
 +      unsigned int i;
 +
 +      RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
 +              (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
 +                      break;
 +              udelay(10);
 +      }
 +}
 +
 +static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
 +{
 +      u16 value = 0xffff;
 +      unsigned int i;
 +
 +      RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
 +                      value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
 +                      break;
 +              }
 +              udelay(10);
 +      }
 +
 +      return value;
 +}
 +
 +static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
 +{
 +      unsigned int i;
 +
 +      RTL_W32(CSIDR, value);
 +      RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
 +              CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
 +                      break;
 +              udelay(10);
 +      }
 +}
 +
 +static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
 +{
 +      u32 value = ~0x00;
 +      unsigned int i;
 +
 +      RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
 +              CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (RTL_R32(CSIAR) & CSIAR_FLAG) {
 +                      value = RTL_R32(CSIDR);
 +                      break;
 +              }
 +              udelay(10);
 +      }
 +
 +      return value;
 +}
 +
 +static
 +void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
 +{
 +      unsigned int i;
 +
 +      BUG_ON((addr & 3) || (mask == 0));
 +      RTL_W32(ERIDR, val);
 +      RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
 +                      break;
 +              udelay(100);
 +      }
 +}
 +
 +static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
 +{
 +      u32 value = ~0x00;
 +      unsigned int i;
 +
 +      RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (RTL_R32(ERIAR) & ERIAR_FLAG) {
 +                      value = RTL_R32(ERIDR);
 +                      break;
 +              }
 +              udelay(100);
 +      }
 +
 +      return value;
 +}
 +
 +static void
 +rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
 +{
 +      u32 val;
 +
 +      val = rtl_eri_read(ioaddr, addr, type);
 +      rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
 +}
 +
 +struct exgmac_reg {
 +      u16 addr;
 +      u16 mask;
 +      u32 val;
 +};
 +
 +static void rtl_write_exgmac_batch(void __iomem *ioaddr,
 +                                 const struct exgmac_reg *r, int len)
 +{
 +      while (len-- > 0) {
 +              rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
 +              r++;
 +      }
 +}
 +
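 +/*
 + * Descriptive note (added): read one efuse byte on the 8168d; returns 0xff
 + * if the read does not complete within the polling window.
 + */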
 +static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
 +{
 +      u8 value = 0xff;
 +      unsigned int i;
 +
 +      RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
 +
 +      for (i = 0; i < 300; i++) {
 +              if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
 +                      value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
 +                      break;
 +              }
 +              udelay(100);
 +      }
 +
 +      return value;
 +}
 +
 +static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
 +{
 +      RTL_W16(IntrMask, 0x0000);
 +
 +      RTL_W16(IntrStatus, 0xffff);
 +}
 +
 +static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      return RTL_R32(TBICSR) & TBIReset;
 +}
 +
 +static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
 +{
 +      return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
 +}
 +
 +static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
 +{
 +      return RTL_R32(TBICSR) & TBILinkOk;
 +}
 +
 +static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
 +{
 +      return RTL_R8(PHYstatus) & LinkStatus;
 +}
 +
 +static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
 +}
 +
 +static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
 +{
 +      unsigned int val;
 +
 +      val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
 +      rtl_writephy(tp, MII_BMCR, val & 0xffff);
 +}
 +
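 +/*
 + * Descriptive note (added): on RTL_GIGA_MAC_VER_34 this retunes the EXGMAC
 + * 0x1bc/0x1dc registers according to the negotiated link speed and pulses
 + * bit 0 of register 0xdc to reset the packet filter.
 + */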
 +static void rtl_link_chg_patch(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct net_device *dev = tp->dev;
 +
 +      if (!netif_running(dev))
 +              return;
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
 +              if (RTL_R8(PHYstatus) & _1000bpsF) {
 +                      rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
 +                                    0x00000011, ERIAR_EXGMAC);
 +                      rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
 +                                    0x00000005, ERIAR_EXGMAC);
 +              } else if (RTL_R8(PHYstatus) & _100bps) {
 +                      rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
 +                                    0x0000001f, ERIAR_EXGMAC);
 +                      rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
 +                                    0x00000005, ERIAR_EXGMAC);
 +              } else {
 +                      rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
 +                                    0x0000001f, ERIAR_EXGMAC);
 +                      rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
 +                                    0x0000003f, ERIAR_EXGMAC);
 +              }
 +              /* Reset packet filter */
 +              rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
 +                           ERIAR_EXGMAC);
 +              rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
 +                           ERIAR_EXGMAC);
 +      }
 +}
 +
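 +/*
 + * Descriptive note (added): updates the carrier state under tp->lock. When
 + * called from the runtime PM path ('pm' set), a link-up cancels a pending
 + * scheduled suspend and a link-down schedules one.
 + */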
 +static void __rtl8169_check_link_status(struct net_device *dev,
 +                                      struct rtl8169_private *tp,
 +                                      void __iomem *ioaddr, bool pm)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&tp->lock, flags);
 +      if (tp->link_ok(ioaddr)) {
 +              rtl_link_chg_patch(tp);
 +              /* This is to cancel a scheduled suspend if there's one. */
 +              if (pm)
 +                      pm_request_resume(&tp->pci_dev->dev);
 +              netif_carrier_on(dev);
 +              if (net_ratelimit())
 +                      netif_info(tp, ifup, dev, "link up\n");
 +      } else {
 +              netif_carrier_off(dev);
 +              netif_info(tp, ifdown, dev, "link down\n");
 +              if (pm)
 +                      pm_schedule_suspend(&tp->pci_dev->dev, 100);
 +      }
 +      spin_unlock_irqrestore(&tp->lock, flags);
 +}
 +
 +static void rtl8169_check_link_status(struct net_device *dev,
 +                                    struct rtl8169_private *tp,
 +                                    void __iomem *ioaddr)
 +{
 +      __rtl8169_check_link_status(dev, tp, ioaddr, false);
 +}
 +
 +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
 +
 +static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      u8 options;
 +      u32 wolopts = 0;
 +
 +      options = RTL_R8(Config1);
 +      if (!(options & PMEnable))
 +              return 0;
 +
 +      options = RTL_R8(Config3);
 +      if (options & LinkUp)
 +              wolopts |= WAKE_PHY;
 +      if (options & MagicPacket)
 +              wolopts |= WAKE_MAGIC;
 +
 +      options = RTL_R8(Config5);
 +      if (options & UWF)
 +              wolopts |= WAKE_UCAST;
 +      if (options & BWF)
 +              wolopts |= WAKE_BCAST;
 +      if (options & MWF)
 +              wolopts |= WAKE_MCAST;
 +
 +      return wolopts;
 +}
 +
 +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      spin_lock_irq(&tp->lock);
 +
 +      wol->supported = WAKE_ANY;
 +      wol->wolopts = __rtl8169_get_wol(tp);
 +
 +      spin_unlock_irq(&tp->lock);
 +}
 +
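 +/*
 + * Descriptive note (added): table driven Wake-on-LAN setup - each entry maps
 + * a WAKE_* option to its bit in Config1/Config3/Config5, written with the
 + * configuration registers unlocked (Cfg9346_Unlock/Cfg9346_Lock).
 + */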
 +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      unsigned int i;
 +      static const struct {
 +              u32 opt;
 +              u16 reg;
 +              u8  mask;
 +      } cfg[] = {
 +              { WAKE_ANY,   Config1, PMEnable },
 +              { WAKE_PHY,   Config3, LinkUp },
 +              { WAKE_MAGIC, Config3, MagicPacket },
 +              { WAKE_UCAST, Config5, UWF },
 +              { WAKE_BCAST, Config5, BWF },
 +              { WAKE_MCAST, Config5, MWF },
 +              { WAKE_ANY,   Config5, LanWake }
 +      };
 +
 +      RTL_W8(Cfg9346, Cfg9346_Unlock);
 +
 +      for (i = 0; i < ARRAY_SIZE(cfg); i++) {
 +              u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
 +              if (wolopts & cfg[i].opt)
 +                      options |= cfg[i].mask;
 +              RTL_W8(cfg[i].reg, options);
 +      }
 +
 +      RTL_W8(Cfg9346, Cfg9346_Lock);
 +}
 +
 +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      spin_lock_irq(&tp->lock);
 +
 +      if (wol->wolopts)
 +              tp->features |= RTL_FEATURE_WOL;
 +      else
 +              tp->features &= ~RTL_FEATURE_WOL;
 +      __rtl8169_set_wol(tp, wol->wolopts);
 +      spin_unlock_irq(&tp->lock);
 +
 +      device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
 +
 +      return 0;
 +}
 +
 +static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
 +{
 +      return rtl_chip_infos[tp->mac_version].fw_name;
 +}
 +
 +static void rtl8169_get_drvinfo(struct net_device *dev,
 +                              struct ethtool_drvinfo *info)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      struct rtl_fw *rtl_fw = tp->rtl_fw;
 +
 +      strcpy(info->driver, MODULENAME);
 +      strcpy(info->version, RTL8169_VERSION);
 +      strcpy(info->bus_info, pci_name(tp->pci_dev));
 +      BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
 +      strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
 +             rtl_fw->version);
 +}
 +
 +static int rtl8169_get_regs_len(struct net_device *dev)
 +{
 +      return R8169_REGS_SIZE;
 +}
 +
 +static int rtl8169_set_speed_tbi(struct net_device *dev,
 +                               u8 autoneg, u16 speed, u8 duplex, u32 ignored)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      int ret = 0;
 +      u32 reg;
 +
 +      reg = RTL_R32(TBICSR);
 +      if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
 +          (duplex == DUPLEX_FULL)) {
 +              RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
 +      } else if (autoneg == AUTONEG_ENABLE)
 +              RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
 +      else {
 +              netif_warn(tp, link, dev,
 +                         "incorrect speed setting refused in TBI mode\n");
 +              ret = -EOPNOTSUPP;
 +      }
 +
 +      return ret;
 +}
 +
 +static int rtl8169_set_speed_xmii(struct net_device *dev,
 +                                u8 autoneg, u16 speed, u8 duplex, u32 adv)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      int giga_ctrl, bmcr;
 +      int rc = -EINVAL;
 +
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +
 +      if (autoneg == AUTONEG_ENABLE) {
 +              int auto_nego;
 +
 +              auto_nego = rtl_readphy(tp, MII_ADVERTISE);
 +              auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
 +                              ADVERTISE_100HALF | ADVERTISE_100FULL);
 +
 +              if (adv & ADVERTISED_10baseT_Half)
 +                      auto_nego |= ADVERTISE_10HALF;
 +              if (adv & ADVERTISED_10baseT_Full)
 +                      auto_nego |= ADVERTISE_10FULL;
 +              if (adv & ADVERTISED_100baseT_Half)
 +                      auto_nego |= ADVERTISE_100HALF;
 +              if (adv & ADVERTISED_100baseT_Full)
 +                      auto_nego |= ADVERTISE_100FULL;
 +
 +              auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 +
 +              giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
 +              giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
 +
 +              /* The 8100e/8101e/8102e do Fast Ethernet only. */
 +              if (tp->mii.supports_gmii) {
 +                      if (adv & ADVERTISED_1000baseT_Half)
 +                              giga_ctrl |= ADVERTISE_1000HALF;
 +                      if (adv & ADVERTISED_1000baseT_Full)
 +                              giga_ctrl |= ADVERTISE_1000FULL;
 +              } else if (adv & (ADVERTISED_1000baseT_Half |
 +                                ADVERTISED_1000baseT_Full)) {
 +                      netif_info(tp, link, dev,
 +                                 "PHY does not support 1000Mbps\n");
 +                      goto out;
 +              }
 +
 +              bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
 +
 +              rtl_writephy(tp, MII_ADVERTISE, auto_nego);
 +              rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
 +      } else {
 +              giga_ctrl = 0;
 +
 +              if (speed == SPEED_10)
 +                      bmcr = 0;
 +              else if (speed == SPEED_100)
 +                      bmcr = BMCR_SPEED100;
 +              else
 +                      goto out;
 +
 +              if (duplex == DUPLEX_FULL)
 +                      bmcr |= BMCR_FULLDPLX;
 +      }
 +
 +      rtl_writephy(tp, MII_BMCR, bmcr);
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_03) {
 +              if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
 +                      rtl_writephy(tp, 0x17, 0x2138);
 +                      rtl_writephy(tp, 0x0e, 0x0260);
 +              } else {
 +                      rtl_writephy(tp, 0x17, 0x2108);
 +                      rtl_writephy(tp, 0x0e, 0x0000);
 +              }
 +      }
 +
 +      rc = 0;
 +out:
 +      return rc;
 +}
 +
 +static int rtl8169_set_speed(struct net_device *dev,
 +                           u8 autoneg, u16 speed, u8 duplex, u32 advertising)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      int ret;
 +
 +      ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
 +      if (ret < 0)
 +              goto out;
 +
 +      if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
 +          (advertising & ADVERTISED_1000baseT_Full)) {
 +              mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
 +      }
 +out:
 +      return ret;
 +}
 +
 +static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      unsigned long flags;
 +      int ret;
 +
 +      del_timer_sync(&tp->timer);
 +
 +      spin_lock_irqsave(&tp->lock, flags);
 +      ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
 +                              cmd->duplex, cmd->advertising);
 +      spin_unlock_irqrestore(&tp->lock, flags);
 +
 +      return ret;
 +}
 +
 +static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
 +{
 +      if (dev->mtu > TD_MSS_MAX)
 +              features &= ~NETIF_F_ALL_TSO;
 +
 +      return features;
 +}
 +
 +static int rtl8169_set_features(struct net_device *dev, u32 features)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&tp->lock, flags);
 +
 +      if (features & NETIF_F_RXCSUM)
 +              tp->cp_cmd |= RxChkSum;
 +      else
 +              tp->cp_cmd &= ~RxChkSum;
 +
 +      if (dev->features & NETIF_F_HW_VLAN_RX)
 +              tp->cp_cmd |= RxVlan;
 +      else
 +              tp->cp_cmd &= ~RxVlan;
 +
 +      RTL_W16(CPlusCmd, tp->cp_cmd);
 +      RTL_R16(CPlusCmd);
 +
 +      spin_unlock_irqrestore(&tp->lock, flags);
 +
 +      return 0;
 +}
 +
 +static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
 +                                    struct sk_buff *skb)
 +{
 +      return (vlan_tx_tag_present(skb)) ?
 +              TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 +}
 +
 +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
 +{
 +      u32 opts2 = le32_to_cpu(desc->opts2);
 +
 +      if (opts2 & RxVlanTag)
 +              __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
 +
 +      desc->opts2 = 0;
 +}
 +
 +static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      u32 status;
 +
 +      cmd->supported =
 +              SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
 +      cmd->port = PORT_FIBRE;
 +      cmd->transceiver = XCVR_INTERNAL;
 +
 +      status = RTL_R32(TBICSR);
 +      cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
 +      cmd->autoneg = !!(status & TBINwEnable);
 +
 +      ethtool_cmd_speed_set(cmd, SPEED_1000);
 +      cmd->duplex = DUPLEX_FULL; /* Always set */
 +
 +      return 0;
 +}
 +
 +static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      return mii_ethtool_gset(&tp->mii, cmd);
 +}
 +
 +static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      unsigned long flags;
 +      int rc;
 +
 +      spin_lock_irqsave(&tp->lock, flags);
 +
 +      rc = tp->get_settings(dev, cmd);
 +
 +      spin_unlock_irqrestore(&tp->lock, flags);
 +      return rc;
 +}
 +
 +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 +                           void *p)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      unsigned long flags;
 +
 +      if (regs->len > R8169_REGS_SIZE)
 +              regs->len = R8169_REGS_SIZE;
 +
 +      spin_lock_irqsave(&tp->lock, flags);
 +      memcpy_fromio(p, tp->mmio_addr, regs->len);
 +      spin_unlock_irqrestore(&tp->lock, flags);
 +}
 +
 +static u32 rtl8169_get_msglevel(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      return tp->msg_enable;
 +}
 +
 +static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      tp->msg_enable = value;
 +}
 +
 +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
 +      "tx_packets",
 +      "rx_packets",
 +      "tx_errors",
 +      "rx_errors",
 +      "rx_missed",
 +      "align_errors",
 +      "tx_single_collisions",
 +      "tx_multi_collisions",
 +      "unicast",
 +      "broadcast",
 +      "multicast",
 +      "tx_aborted",
 +      "tx_underrun",
 +};
 +
 +static int rtl8169_get_sset_count(struct net_device *dev, int sset)
 +{
 +      switch (sset) {
 +      case ETH_SS_STATS:
 +              return ARRAY_SIZE(rtl8169_gstrings);
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
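 +/*
 + * Descriptive note (added): dump the hardware tally counters into a DMA
 + * coherent buffer - program the buffer address via CounterAddrHigh/Low, set
 + * CounterDump, poll until the hardware clears it, then copy the result into
 + * tp->counters.
 + */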
 +static void rtl8169_update_counters(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct device *d = &tp->pci_dev->dev;
 +      struct rtl8169_counters *counters;
 +      dma_addr_t paddr;
 +      u32 cmd;
 +      int wait = 1000;
 +
 +      /*
 +       * Some chips are unable to dump tally counters when the receiver
 +       * is disabled.
 +       */
 +      if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
 +              return;
 +
 +      counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
 +      if (!counters)
 +              return;
 +
 +      RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
 +      cmd = (u64)paddr & DMA_BIT_MASK(32);
 +      RTL_W32(CounterAddrLow, cmd);
 +      RTL_W32(CounterAddrLow, cmd | CounterDump);
 +
 +      while (wait--) {
 +              if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
 +                      memcpy(&tp->counters, counters, sizeof(*counters));
 +                      break;
 +              }
 +              udelay(10);
 +      }
 +
 +      RTL_W32(CounterAddrLow, 0);
 +      RTL_W32(CounterAddrHigh, 0);
 +
 +      dma_free_coherent(d, sizeof(*counters), counters, paddr);
 +}
 +
 +static void rtl8169_get_ethtool_stats(struct net_device *dev,
 +                                    struct ethtool_stats *stats, u64 *data)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      ASSERT_RTNL();
 +
 +      rtl8169_update_counters(dev);
 +
 +      data[0] = le64_to_cpu(tp->counters.tx_packets);
 +      data[1] = le64_to_cpu(tp->counters.rx_packets);
 +      data[2] = le64_to_cpu(tp->counters.tx_errors);
 +      data[3] = le32_to_cpu(tp->counters.rx_errors);
 +      data[4] = le16_to_cpu(tp->counters.rx_missed);
 +      data[5] = le16_to_cpu(tp->counters.align_errors);
 +      data[6] = le32_to_cpu(tp->counters.tx_one_collision);
 +      data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
 +      data[8] = le64_to_cpu(tp->counters.rx_unicast);
 +      data[9] = le64_to_cpu(tp->counters.rx_broadcast);
 +      data[10] = le32_to_cpu(tp->counters.rx_multicast);
 +      data[11] = le16_to_cpu(tp->counters.tx_aborted);
 +      data[12] = le16_to_cpu(tp->counters.tx_underun);
 +}
 +
 +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 +{
 +      switch (stringset) {
 +      case ETH_SS_STATS:
 +              memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
 +              break;
 +      }
 +}
 +
 +static const struct ethtool_ops rtl8169_ethtool_ops = {
 +      .get_drvinfo            = rtl8169_get_drvinfo,
 +      .get_regs_len           = rtl8169_get_regs_len,
 +      .get_link               = ethtool_op_get_link,
 +      .get_settings           = rtl8169_get_settings,
 +      .set_settings           = rtl8169_set_settings,
 +      .get_msglevel           = rtl8169_get_msglevel,
 +      .set_msglevel           = rtl8169_set_msglevel,
 +      .get_regs               = rtl8169_get_regs,
 +      .get_wol                = rtl8169_get_wol,
 +      .set_wol                = rtl8169_set_wol,
 +      .get_strings            = rtl8169_get_strings,
 +      .get_sset_count         = rtl8169_get_sset_count,
 +      .get_ethtool_stats      = rtl8169_get_ethtool_stats,
 +};
 +
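 +/*
 + * Descriptive note (added): identify the chip from the TxConfig register.
 + * The mac_info[] table below is scanned first-match on
 + * (TxConfig & mask) == val, so the more specific masks must precede the
 + * family-wide catch-alls. Unknown chips fall back to 'default_version'.
 + */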
 +static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 +                                  struct net_device *dev, u8 default_version)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      /*
 +       * The driver currently handles the 8168Bf and the 8168Be identically
 +       * but they can be identified more specifically through the test below
 +       * if needed:
 +       *
 +       * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
 +       *
 +       * Same thing for the 8101Eb and the 8101Ec:
 +       *
 +       * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
 +       */
 +      static const struct rtl_mac_info {
 +              u32 mask;
 +              u32 val;
 +              int mac_version;
 +      } mac_info[] = {
 +              /* 8168E family. */
 +              { 0x7c800000, 0x2c800000,       RTL_GIGA_MAC_VER_34 },
 +              { 0x7cf00000, 0x2c200000,       RTL_GIGA_MAC_VER_33 },
 +              { 0x7cf00000, 0x2c100000,       RTL_GIGA_MAC_VER_32 },
 +              { 0x7c800000, 0x2c000000,       RTL_GIGA_MAC_VER_33 },
 +
 +              /* 8168D family. */
 +              { 0x7cf00000, 0x28300000,       RTL_GIGA_MAC_VER_26 },
 +              { 0x7cf00000, 0x28100000,       RTL_GIGA_MAC_VER_25 },
 +              { 0x7c800000, 0x28000000,       RTL_GIGA_MAC_VER_26 },
 +
 +              /* 8168DP family. */
 +              { 0x7cf00000, 0x28800000,       RTL_GIGA_MAC_VER_27 },
 +              { 0x7cf00000, 0x28a00000,       RTL_GIGA_MAC_VER_28 },
 +              { 0x7cf00000, 0x28b00000,       RTL_GIGA_MAC_VER_31 },
 +
 +              /* 8168C family. */
 +              { 0x7cf00000, 0x3cb00000,       RTL_GIGA_MAC_VER_24 },
 +              { 0x7cf00000, 0x3c900000,       RTL_GIGA_MAC_VER_23 },
 +              { 0x7cf00000, 0x3c800000,       RTL_GIGA_MAC_VER_18 },
 +              { 0x7c800000, 0x3c800000,       RTL_GIGA_MAC_VER_24 },
 +              { 0x7cf00000, 0x3c000000,       RTL_GIGA_MAC_VER_19 },
 +              { 0x7cf00000, 0x3c200000,       RTL_GIGA_MAC_VER_20 },
 +              { 0x7cf00000, 0x3c300000,       RTL_GIGA_MAC_VER_21 },
 +              { 0x7cf00000, 0x3c400000,       RTL_GIGA_MAC_VER_22 },
 +              { 0x7c800000, 0x3c000000,       RTL_GIGA_MAC_VER_22 },
 +
 +              /* 8168B family. */
 +              { 0x7cf00000, 0x38000000,       RTL_GIGA_MAC_VER_12 },
 +              { 0x7cf00000, 0x38500000,       RTL_GIGA_MAC_VER_17 },
 +              { 0x7c800000, 0x38000000,       RTL_GIGA_MAC_VER_17 },
 +              { 0x7c800000, 0x30000000,       RTL_GIGA_MAC_VER_11 },
 +
 +              /* 8101 family. */
 +              { 0x7cf00000, 0x40b00000,       RTL_GIGA_MAC_VER_30 },
 +              { 0x7cf00000, 0x40a00000,       RTL_GIGA_MAC_VER_30 },
 +              { 0x7cf00000, 0x40900000,       RTL_GIGA_MAC_VER_29 },
 +              { 0x7c800000, 0x40800000,       RTL_GIGA_MAC_VER_30 },
 +              { 0x7cf00000, 0x34a00000,       RTL_GIGA_MAC_VER_09 },
 +              { 0x7cf00000, 0x24a00000,       RTL_GIGA_MAC_VER_09 },
 +              { 0x7cf00000, 0x34900000,       RTL_GIGA_MAC_VER_08 },
 +              { 0x7cf00000, 0x24900000,       RTL_GIGA_MAC_VER_08 },
 +              { 0x7cf00000, 0x34800000,       RTL_GIGA_MAC_VER_07 },
 +              { 0x7cf00000, 0x24800000,       RTL_GIGA_MAC_VER_07 },
 +              { 0x7cf00000, 0x34000000,       RTL_GIGA_MAC_VER_13 },
 +              { 0x7cf00000, 0x34300000,       RTL_GIGA_MAC_VER_10 },
 +              { 0x7cf00000, 0x34200000,       RTL_GIGA_MAC_VER_16 },
 +              { 0x7c800000, 0x34800000,       RTL_GIGA_MAC_VER_09 },
 +              { 0x7c800000, 0x24800000,       RTL_GIGA_MAC_VER_09 },
 +              { 0x7c800000, 0x34000000,       RTL_GIGA_MAC_VER_16 },
 +              /* FIXME: where did these entries come from ? -- FR */
 +              { 0xfc800000, 0x38800000,       RTL_GIGA_MAC_VER_15 },
 +              { 0xfc800000, 0x30800000,       RTL_GIGA_MAC_VER_14 },
 +
 +              /* 8110 family. */
 +              { 0xfc800000, 0x98000000,       RTL_GIGA_MAC_VER_06 },
 +              { 0xfc800000, 0x18000000,       RTL_GIGA_MAC_VER_05 },
 +              { 0xfc800000, 0x10000000,       RTL_GIGA_MAC_VER_04 },
 +              { 0xfc800000, 0x04000000,       RTL_GIGA_MAC_VER_03 },
 +              { 0xfc800000, 0x00800000,       RTL_GIGA_MAC_VER_02 },
 +              { 0xfc800000, 0x00000000,       RTL_GIGA_MAC_VER_01 },
 +
 +              /* Catch-all */
 +              { 0x00000000, 0x00000000,       RTL_GIGA_MAC_NONE   }
 +      };
 +      const struct rtl_mac_info *p = mac_info;
 +      u32 reg;
 +
 +      reg = RTL_R32(TxConfig);
 +      while ((reg & p->mask) != p->val)
 +              p++;
 +      tp->mac_version = p->mac_version;
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_NONE) {
 +              netif_notice(tp, probe, dev,
 +                           "unknown MAC, using family default\n");
 +              tp->mac_version = default_version;
 +      }
 +}
 +
 +static void rtl8169_print_mac_version(struct rtl8169_private *tp)
 +{
 +      dprintk("mac_version = 0x%02x\n", tp->mac_version);
 +}
 +
 +struct phy_reg {
 +      u16 reg;
 +      u16 val;
 +};
 +
 +static void rtl_writephy_batch(struct rtl8169_private *tp,
 +                             const struct phy_reg *regs, int len)
 +{
 +      while (len-- > 0) {
 +              rtl_writephy(tp, regs->reg, regs->val);
 +              regs++;
 +      }
 +}
 +
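 +/*
 + * Descriptive note (added): PHY firmware action words - the top nibble
 + * encodes one of the opcodes below, bits 27-16 hold a register number (or a
 + * jump/skip offset) and the low 16 bits the immediate data.
 + */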
 +#define PHY_READ              0x00000000
 +#define PHY_DATA_OR           0x10000000
 +#define PHY_DATA_AND          0x20000000
 +#define PHY_BJMPN             0x30000000
 +#define PHY_READ_EFUSE                0x40000000
 +#define PHY_READ_MAC_BYTE     0x50000000
 +#define PHY_WRITE_MAC_BYTE    0x60000000
 +#define PHY_CLEAR_READCOUNT   0x70000000
 +#define PHY_WRITE             0x80000000
 +#define PHY_READCOUNT_EQ_SKIP 0x90000000
 +#define PHY_COMP_EQ_SKIPN     0xa0000000
 +#define PHY_COMP_NEQ_SKIPN    0xb0000000
 +#define PHY_WRITE_PREVIOUS    0xc0000000
 +#define PHY_SKIPN             0xd0000000
 +#define PHY_DELAY_MS          0xe0000000
 +#define PHY_WRITE_ERI_WORD    0xf0000000
 +
 +struct fw_info {
 +      u32     magic;
 +      char    version[RTL_VER_SIZE];
 +      __le32  fw_start;
 +      __le32  fw_len;
 +      u8      chksum;
 +} __packed;
 +
 +#define FW_OPCODE_SIZE        sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
 +
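 +/*
 + * Descriptive note (added): two firmware layouts are accepted. A blob whose
 + * leading fw_info.magic is zero carries a header (checksummed over the whole
 + * file, with an embedded version string and the start/length of the opcode
 + * stream); otherwise the whole file is treated as a raw stream of action
 + * words.
 + */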
 +static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
 +{
 +      const struct firmware *fw = rtl_fw->fw;
 +      struct fw_info *fw_info = (struct fw_info *)fw->data;
 +      struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
 +      char *version = rtl_fw->version;
 +      bool rc = false;
 +
 +      if (fw->size < FW_OPCODE_SIZE)
 +              goto out;
 +
 +      if (!fw_info->magic) {
 +              size_t i, size, start;
 +              u8 checksum = 0;
 +
 +              if (fw->size < sizeof(*fw_info))
 +                      goto out;
 +
 +              for (i = 0; i < fw->size; i++)
 +                      checksum += fw->data[i];
 +              if (checksum != 0)
 +                      goto out;
 +
 +              start = le32_to_cpu(fw_info->fw_start);
 +              if (start > fw->size)
 +                      goto out;
 +
 +              size = le32_to_cpu(fw_info->fw_len);
 +              if (size > (fw->size - start) / FW_OPCODE_SIZE)
 +                      goto out;
 +
 +              memcpy(version, fw_info->version, RTL_VER_SIZE);
 +
 +              pa->code = (__le32 *)(fw->data + start);
 +              pa->size = size;
 +      } else {
 +              if (fw->size % FW_OPCODE_SIZE)
 +                      goto out;
 +
 +              strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
 +
 +              pa->code = (__le32 *)fw->data;
 +              pa->size = fw->size / FW_OPCODE_SIZE;
 +      }
 +      version[RTL_VER_SIZE - 1] = 0;
 +
 +      rc = true;
 +out:
 +      return rc;
 +}
 +
 +static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
 +                         struct rtl_fw_phy_action *pa)
 +{
 +      bool rc = false;
 +      size_t index;
 +
 +      for (index = 0; index < pa->size; index++) {
 +              u32 action = le32_to_cpu(pa->code[index]);
 +              u32 regno = (action & 0x0fff0000) >> 16;
 +
 +              switch (action & 0xf0000000) {
 +              case PHY_READ:
 +              case PHY_DATA_OR:
 +              case PHY_DATA_AND:
 +              case PHY_READ_EFUSE:
 +              case PHY_CLEAR_READCOUNT:
 +              case PHY_WRITE:
 +              case PHY_WRITE_PREVIOUS:
 +              case PHY_DELAY_MS:
 +                      break;
 +
 +              case PHY_BJMPN:
 +                      if (regno > index) {
 +                              netif_err(tp, ifup, tp->dev,
 +                                        "Out of range of firmware\n");
 +                              goto out;
 +                      }
 +                      break;
 +              case PHY_READCOUNT_EQ_SKIP:
 +                      if (index + 2 >= pa->size) {
 +                              netif_err(tp, ifup, tp->dev,
 +                                        "Out of range of firmware\n");
 +                              goto out;
 +                      }
 +                      break;
 +              case PHY_COMP_EQ_SKIPN:
 +              case PHY_COMP_NEQ_SKIPN:
 +              case PHY_SKIPN:
 +                      if (index + 1 + regno >= pa->size) {
 +                              netif_err(tp, ifup, tp->dev,
 +                                        "Out of range of firmware\n");
 +                              goto out;
 +                      }
 +                      break;
 +
 +              case PHY_READ_MAC_BYTE:
 +              case PHY_WRITE_MAC_BYTE:
 +              case PHY_WRITE_ERI_WORD:
 +              default:
 +                      netif_err(tp, ifup, tp->dev,
 +                                "Invalid action 0x%08x\n", action);
 +                      goto out;
 +              }
 +      }
 +      rc = true;
 +out:
 +      return rc;
 +}
 +
 +static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
 +{
 +      struct net_device *dev = tp->dev;
 +      int rc = -EINVAL;
 +
 +      if (!rtl_fw_format_ok(tp, rtl_fw)) {
 +              netif_err(tp, ifup, dev, "invalid firmware\n");
 +              goto out;
 +      }
 +
 +      if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
 +              rc = 0;
 +out:
 +      return rc;
 +}
 +
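 +/*
 + * Descriptive note (added): interpreter for the firmware action stream.
 + * 'predata' carries the value of the last PHY/efuse read for the OR/AND and
 + * compare opcodes, 'count' the number of PHY_READs since the last
 + * PHY_CLEAR_READCOUNT, and a zero action word terminates the program early.
 + */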
 +static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
 +{
 +      struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
 +      u32 predata, count;
 +      size_t index;
 +
 +      predata = count = 0;
 +
 +      for (index = 0; index < pa->size; ) {
 +              u32 action = le32_to_cpu(pa->code[index]);
 +              u32 data = action & 0x0000ffff;
 +              u32 regno = (action & 0x0fff0000) >> 16;
 +
 +              if (!action)
 +                      break;
 +
 +              switch (action & 0xf0000000) {
 +              case PHY_READ:
 +                      predata = rtl_readphy(tp, regno);
 +                      count++;
 +                      index++;
 +                      break;
 +              case PHY_DATA_OR:
 +                      predata |= data;
 +                      index++;
 +                      break;
 +              case PHY_DATA_AND:
 +                      predata &= data;
 +                      index++;
 +                      break;
 +              case PHY_BJMPN:
 +                      index -= regno;
 +                      break;
 +              case PHY_READ_EFUSE:
 +                      predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
 +                      index++;
 +                      break;
 +              case PHY_CLEAR_READCOUNT:
 +                      count = 0;
 +                      index++;
 +                      break;
 +              case PHY_WRITE:
 +                      rtl_writephy(tp, regno, data);
 +                      index++;
 +                      break;
 +              case PHY_READCOUNT_EQ_SKIP:
 +                      index += (count == data) ? 2 : 1;
 +                      break;
 +              case PHY_COMP_EQ_SKIPN:
 +                      if (predata == data)
 +                              index += regno;
 +                      index++;
 +                      break;
 +              case PHY_COMP_NEQ_SKIPN:
 +                      if (predata != data)
 +                              index += regno;
 +                      index++;
 +                      break;
 +              case PHY_WRITE_PREVIOUS:
 +                      rtl_writephy(tp, regno, predata);
 +                      index++;
 +                      break;
 +              case PHY_SKIPN:
 +                      index += regno + 1;
 +                      break;
 +              case PHY_DELAY_MS:
 +                      mdelay(data);
 +                      index++;
 +                      break;
 +
 +              case PHY_READ_MAC_BYTE:
 +              case PHY_WRITE_MAC_BYTE:
 +              case PHY_WRITE_ERI_WORD:
 +              default:
 +                      BUG();
 +              }
 +      }
 +}
 +
 +static void rtl_release_firmware(struct rtl8169_private *tp)
 +{
 +      if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
 +              release_firmware(tp->rtl_fw->fw);
 +              kfree(tp->rtl_fw);
 +      }
 +      tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
 +}
 +
 +static void rtl_apply_firmware(struct rtl8169_private *tp)
 +{
 +      struct rtl_fw *rtl_fw = tp->rtl_fw;
 +
 +      /* TODO: release firmware once rtl_phy_write_fw signals failures. */
 +      if (!IS_ERR_OR_NULL(rtl_fw))
 +              rtl_phy_write_fw(tp, rtl_fw);
 +}
 +
 +static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
 +{
 +      if (rtl_readphy(tp, reg) != val)
 +              netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
 +      else
 +              rtl_apply_firmware(tp);
 +}
 +
 +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0001 },
 +              { 0x06, 0x006e },
 +              { 0x08, 0x0708 },
 +              { 0x15, 0x4000 },
 +              { 0x18, 0x65c7 },
 +
 +              { 0x1f, 0x0001 },
 +              { 0x03, 0x00a1 },
 +              { 0x02, 0x0008 },
 +              { 0x01, 0x0120 },
 +              { 0x00, 0x1000 },
 +              { 0x04, 0x0800 },
 +              { 0x04, 0x0000 },
 +
 +              { 0x03, 0xff41 },
 +              { 0x02, 0xdf60 },
 +              { 0x01, 0x0140 },
 +              { 0x00, 0x0077 },
 +              { 0x04, 0x7800 },
 +              { 0x04, 0x7000 },
 +
 +              { 0x03, 0x802f },
 +              { 0x02, 0x4f02 },
 +              { 0x01, 0x0409 },
 +              { 0x00, 0xf0f9 },
 +              { 0x04, 0x9800 },
 +              { 0x04, 0x9000 },
 +
 +              { 0x03, 0xdf01 },
 +              { 0x02, 0xdf20 },
 +              { 0x01, 0xff95 },
 +              { 0x00, 0xba00 },
 +              { 0x04, 0xa800 },
 +              { 0x04, 0xa000 },
 +
 +              { 0x03, 0xff41 },
 +              { 0x02, 0xdf20 },
 +              { 0x01, 0x0140 },
 +              { 0x00, 0x00bb },
 +              { 0x04, 0xb800 },
 +              { 0x04, 0xb000 },
 +
 +              { 0x03, 0xdf41 },
 +              { 0x02, 0xdc60 },
 +              { 0x01, 0x6340 },
 +              { 0x00, 0x007d },
 +              { 0x04, 0xd800 },
 +              { 0x04, 0xd000 },
 +
 +              { 0x03, 0xdf01 },
 +              { 0x02, 0xdf20 },
 +              { 0x01, 0x100a },
 +              { 0x00, 0xa0ff },
 +              { 0x04, 0xf800 },
 +              { 0x04, 0xf000 },
 +
 +              { 0x1f, 0x0000 },
 +              { 0x0b, 0x0000 },
 +              { 0x00, 0x9200 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
 +
 +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0002 },
 +              { 0x01, 0x90d0 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
 +
 +static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
 +{
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
 +          (pdev->subsystem_device != 0xe000))
 +              return;
 +
 +      rtl_writephy(tp, 0x1f, 0x0001);
 +      rtl_writephy(tp, 0x10, 0xf01b);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
 +
 +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0001 },
 +              { 0x04, 0x0000 },
 +              { 0x03, 0x00a1 },
 +              { 0x02, 0x0008 },
 +              { 0x01, 0x0120 },
 +              { 0x00, 0x1000 },
 +              { 0x04, 0x0800 },
 +              { 0x04, 0x9000 },
 +              { 0x03, 0x802f },
 +              { 0x02, 0x4f02 },
 +              { 0x01, 0x0409 },
 +              { 0x00, 0xf099 },
 +              { 0x04, 0x9800 },
 +              { 0x04, 0xa000 },
 +              { 0x03, 0xdf01 },
 +              { 0x02, 0xdf20 },
 +              { 0x01, 0xff95 },
 +              { 0x00, 0xba00 },
 +              { 0x04, 0xa800 },
 +              { 0x04, 0xf000 },
 +              { 0x03, 0xdf01 },
 +              { 0x02, 0xdf20 },
 +              { 0x01, 0x101a },
 +              { 0x00, 0xa0ff },
 +              { 0x04, 0xf800 },
 +              { 0x04, 0x0000 },
 +              { 0x1f, 0x0000 },
 +
 +              { 0x1f, 0x0001 },
 +              { 0x10, 0xf41b },
 +              { 0x14, 0xfb54 },
 +              { 0x18, 0xf5c7 },
 +              { 0x1f, 0x0000 },
 +
 +              { 0x1f, 0x0001 },
 +              { 0x17, 0x0cc0 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +
 +      rtl8169scd_hw_phy_config_quirk(tp);
 +}
 +
 +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0001 },
 +              { 0x04, 0x0000 },
 +              { 0x03, 0x00a1 },
 +              { 0x02, 0x0008 },
 +              { 0x01, 0x0120 },
 +              { 0x00, 0x1000 },
 +              { 0x04, 0x0800 },
 +              { 0x04, 0x9000 },
 +              { 0x03, 0x802f },
 +              { 0x02, 0x4f02 },
 +              { 0x01, 0x0409 },
 +              { 0x00, 0xf099 },
 +              { 0x04, 0x9800 },
 +              { 0x04, 0xa000 },
 +              { 0x03, 0xdf01 },
 +              { 0x02, 0xdf20 },
 +              { 0x01, 0xff95 },
 +              { 0x00, 0xba00 },
 +              { 0x04, 0xa800 },
 +              { 0x04, 0xf000 },
 +              { 0x03, 0xdf01 },
 +              { 0x02, 0xdf20 },
 +              { 0x01, 0x101a },
 +              { 0x00, 0xa0ff },
 +              { 0x04, 0xf800 },
 +              { 0x04, 0x0000 },
 +              { 0x1f, 0x0000 },
 +
 +              { 0x1f, 0x0001 },
 +              { 0x0b, 0x8480 },
 +              { 0x1f, 0x0000 },
 +
 +              { 0x1f, 0x0001 },
 +              { 0x18, 0x67c7 },
 +              { 0x04, 0x2000 },
 +              { 0x03, 0x002f },
 +              { 0x02, 0x4360 },
 +              { 0x01, 0x0109 },
 +              { 0x00, 0x3022 },
 +              { 0x04, 0x2800 },
 +              { 0x1f, 0x0000 },
 +
 +              { 0x1f, 0x0001 },
 +              { 0x17, 0x0cc0 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
 +
 +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x10, 0xf41b },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy(tp, 0x1f, 0x0001);
 +      rtl_patchphy(tp, 0x16, 1 << 0);
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
 +
 +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0001 },
 +              { 0x10, 0xf41b },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
 +
 +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0000 },
 +              { 0x1d, 0x0f00 },
 +              { 0x1f, 0x0002 },
 +              { 0x0c, 0x1ec8 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
 +
 +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0001 },
 +              { 0x1d, 0x3d98 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_patchphy(tp, 0x14, 1 << 5);
 +      rtl_patchphy(tp, 0x0d, 1 << 5);
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
 +
 +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0001 },
 +              { 0x12, 0x2300 },
 +              { 0x1f, 0x0002 },
 +              { 0x00, 0x88d4 },
 +              { 0x01, 0x82b1 },
 +              { 0x03, 0x7002 },
 +              { 0x08, 0x9e30 },
 +              { 0x09, 0x01f0 },
 +              { 0x0a, 0x5500 },
 +              { 0x0c, 0x00c8 },
 +              { 0x1f, 0x0003 },
 +              { 0x12, 0xc096 },
 +              { 0x16, 0x000a },
 +              { 0x1f, 0x0000 },
 +              { 0x1f, 0x0000 },
 +              { 0x09, 0x2000 },
 +              { 0x09, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +
 +      rtl_patchphy(tp, 0x14, 1 << 5);
 +      rtl_patchphy(tp, 0x0d, 1 << 5);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
 +
 +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0001 },
 +              { 0x12, 0x2300 },
 +              { 0x03, 0x802f },
 +              { 0x02, 0x4f02 },
 +              { 0x01, 0x0409 },
 +              { 0x00, 0xf099 },
 +              { 0x04, 0x9800 },
 +              { 0x04, 0x9000 },
 +              { 0x1d, 0x3d98 },
 +              { 0x1f, 0x0002 },
 +              { 0x0c, 0x7eb8 },
 +              { 0x06, 0x0761 },
 +              { 0x1f, 0x0003 },
 +              { 0x16, 0x0f0a },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +
 +      rtl_patchphy(tp, 0x16, 1 << 0);
 +      rtl_patchphy(tp, 0x14, 1 << 5);
 +      rtl_patchphy(tp, 0x0d, 1 << 5);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
 +
 +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0001 },
 +              { 0x12, 0x2300 },
 +              { 0x1d, 0x3d98 },
 +              { 0x1f, 0x0002 },
 +              { 0x0c, 0x7eb8 },
 +              { 0x06, 0x5461 },
 +              { 0x1f, 0x0003 },
 +              { 0x16, 0x0f0a },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +
 +      rtl_patchphy(tp, 0x16, 1 << 0);
 +      rtl_patchphy(tp, 0x14, 1 << 5);
 +      rtl_patchphy(tp, 0x0d, 1 << 5);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
 +
 +static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      rtl8168c_3_hw_phy_config(tp);
 +}
 +
 +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init_0[] = {
 +              /* Channel Estimation */
 +              { 0x1f, 0x0001 },
 +              { 0x06, 0x4064 },
 +              { 0x07, 0x2863 },
 +              { 0x08, 0x059c },
 +              { 0x09, 0x26b4 },
 +              { 0x0a, 0x6a19 },
 +              { 0x0b, 0xdcc8 },
 +              { 0x10, 0xf06d },
 +              { 0x14, 0x7f68 },
 +              { 0x18, 0x7fd9 },
 +              { 0x1c, 0xf0ff },
 +              { 0x1d, 0x3d9c },
 +              { 0x1f, 0x0003 },
 +              { 0x12, 0xf49f },
 +              { 0x13, 0x070b },
 +              { 0x1a, 0x05ad },
 +              { 0x14, 0x94c0 },
 +
 +              /*
 +               * Tx Error Issue
 +               * Enhance line driver power
 +               */
 +              { 0x1f, 0x0002 },
 +              { 0x06, 0x5561 },
 +              { 0x1f, 0x0005 },
 +              { 0x05, 0x8332 },
 +              { 0x06, 0x5561 },
 +
 +              /*
 +               * Cannot link at 1Gbps with a bad cable
 +               * Decrease SNR threshold from 21.07dB to 19.04dB
 +               */
 +              { 0x1f, 0x0001 },
 +              { 0x17, 0x0cc0 },
 +
 +              { 0x1f, 0x0000 },
 +              { 0x0d, 0xf880 }
 +      };
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 +
 +      /*
 +       * Rx Error Issue
 +       * Fine Tune Switching regulator parameter
 +       */
 +      rtl_writephy(tp, 0x1f, 0x0002);
 +      rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
 +      rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
 +
 +      if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
 +              static const struct phy_reg phy_reg_init[] = {
 +                      { 0x1f, 0x0002 },
 +                      { 0x05, 0x669a },
 +                      { 0x1f, 0x0005 },
 +                      { 0x05, 0x8330 },
 +                      { 0x06, 0x669a },
 +                      { 0x1f, 0x0002 }
 +              };
 +              int val;
 +
 +              rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +
 +              val = rtl_readphy(tp, 0x0d);
 +
 +              if ((val & 0x00ff) != 0x006c) {
 +                      static const u32 set[] = {
 +                              0x0065, 0x0066, 0x0067, 0x0068,
 +                              0x0069, 0x006a, 0x006b, 0x006c
 +                      };
 +                      int i;
 +
 +                      rtl_writephy(tp, 0x1f, 0x0002);
 +
 +                      val &= 0xff00;
 +                      for (i = 0; i < ARRAY_SIZE(set); i++)
 +                              rtl_writephy(tp, 0x0d, val | set[i]);
 +              }
 +      } else {
 +              static const struct phy_reg phy_reg_init[] = {
 +                      { 0x1f, 0x0002 },
 +                      { 0x05, 0x6662 },
 +                      { 0x1f, 0x0005 },
 +                      { 0x05, 0x8330 },
 +                      { 0x06, 0x6662 }
 +              };
 +
 +              rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +      }
 +
 +      /* RSET couple improvement */
 +      rtl_writephy(tp, 0x1f, 0x0002);
 +      rtl_patchphy(tp, 0x0d, 0x0300);
 +      rtl_patchphy(tp, 0x0f, 0x0010);
 +
 +      /* Fine tune PLL performance */
 +      rtl_writephy(tp, 0x1f, 0x0002);
 +      rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
 +      rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
 +
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x001b);
 +
 +      rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
 +
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
 +
 +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init_0[] = {
 +              /* Channel Estimation */
 +              { 0x1f, 0x0001 },
 +              { 0x06, 0x4064 },
 +              { 0x07, 0x2863 },
 +              { 0x08, 0x059c },
 +              { 0x09, 0x26b4 },
 +              { 0x0a, 0x6a19 },
 +              { 0x0b, 0xdcc8 },
 +              { 0x10, 0xf06d },
 +              { 0x14, 0x7f68 },
 +              { 0x18, 0x7fd9 },
 +              { 0x1c, 0xf0ff },
 +              { 0x1d, 0x3d9c },
 +              { 0x1f, 0x0003 },
 +              { 0x12, 0xf49f },
 +              { 0x13, 0x070b },
 +              { 0x1a, 0x05ad },
 +              { 0x14, 0x94c0 },
 +
 +              /*
 +               * Tx Error Issue
 +               * Enhance line driver power
 +               */
 +              { 0x1f, 0x0002 },
 +              { 0x06, 0x5561 },
 +              { 0x1f, 0x0005 },
 +              { 0x05, 0x8332 },
 +              { 0x06, 0x5561 },
 +
 +              /*
 +               * Cannot link at 1Gbps with a bad cable
 +               * Decrease SNR threshold from 21.07dB to 19.04dB
 +               */
 +              { 0x1f, 0x0001 },
 +              { 0x17, 0x0cc0 },
 +
 +              { 0x1f, 0x0000 },
 +              { 0x0d, 0xf880 }
 +      };
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 +
 +      if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
 +              static const struct phy_reg phy_reg_init[] = {
 +                      { 0x1f, 0x0002 },
 +                      { 0x05, 0x669a },
 +                      { 0x1f, 0x0005 },
 +                      { 0x05, 0x8330 },
 +                      { 0x06, 0x669a },
 +
 +                      { 0x1f, 0x0002 }
 +              };
 +              int val;
 +
 +              rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +
 +              val = rtl_readphy(tp, 0x0d);
 +              if ((val & 0x00ff) != 0x006c) {
 +                      static const u32 set[] = {
 +                              0x0065, 0x0066, 0x0067, 0x0068,
 +                              0x0069, 0x006a, 0x006b, 0x006c
 +                      };
 +                      int i;
 +
 +                      rtl_writephy(tp, 0x1f, 0x0002);
 +
 +                      val &= 0xff00;
 +                      for (i = 0; i < ARRAY_SIZE(set); i++)
 +                              rtl_writephy(tp, 0x0d, val | set[i]);
 +              }
 +      } else {
 +              static const struct phy_reg phy_reg_init[] = {
 +                      { 0x1f, 0x0002 },
 +                      { 0x05, 0x2642 },
 +                      { 0x1f, 0x0005 },
 +                      { 0x05, 0x8330 },
 +                      { 0x06, 0x2642 }
 +              };
 +
 +              rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +      }
 +
 +      /* Fine tune PLL performance */
 +      rtl_writephy(tp, 0x1f, 0x0002);
 +      rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
 +      rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
 +
 +      /* Switching regulator Slew rate */
 +      rtl_writephy(tp, 0x1f, 0x0002);
 +      rtl_patchphy(tp, 0x0f, 0x0017);
 +
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x001b);
 +
 +      rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
 +
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
 +
 +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0002 },
 +              { 0x10, 0x0008 },
 +              { 0x0d, 0x006c },
 +
 +              { 0x1f, 0x0000 },
 +              { 0x0d, 0xf880 },
 +
 +              { 0x1f, 0x0001 },
 +              { 0x17, 0x0cc0 },
 +
 +              { 0x1f, 0x0001 },
 +              { 0x0b, 0xa4d8 },
 +              { 0x09, 0x281c },
 +              { 0x07, 0x2883 },
 +              { 0x0a, 0x6b35 },
 +              { 0x1d, 0x3da4 },
 +              { 0x1c, 0xeffd },
 +              { 0x14, 0x7f52 },
 +              { 0x18, 0x7fc6 },
 +              { 0x08, 0x0601 },
 +              { 0x06, 0x4063 },
 +              { 0x10, 0xf074 },
 +              { 0x1f, 0x0003 },
 +              { 0x13, 0x0789 },
 +              { 0x12, 0xf4bd },
 +              { 0x1a, 0x04fd },
 +              { 0x14, 0x84b0 },
 +              { 0x1f, 0x0000 },
 +              { 0x00, 0x9200 },
 +
 +              { 0x1f, 0x0005 },
 +              { 0x01, 0x0340 },
 +              { 0x1f, 0x0001 },
 +              { 0x04, 0x4000 },
 +              { 0x03, 0x1d21 },
 +              { 0x02, 0x0c32 },
 +              { 0x01, 0x0200 },
 +              { 0x00, 0x5554 },
 +              { 0x04, 0x4800 },
 +              { 0x04, 0x4000 },
 +              { 0x04, 0xf000 },
 +              { 0x03, 0xdf01 },
 +              { 0x02, 0xdf20 },
 +              { 0x01, 0x101a },
 +              { 0x00, 0xa0ff },
 +              { 0x04, 0xf800 },
 +              { 0x04, 0xf000 },
 +              { 0x1f, 0x0000 },
 +
 +              { 0x1f, 0x0007 },
 +              { 0x1e, 0x0023 },
 +              { 0x16, 0x0000 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
 +
 +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0001 },
 +              { 0x17, 0x0cc0 },
 +
 +              { 0x1f, 0x0007 },
 +              { 0x1e, 0x002d },
 +              { 0x18, 0x0040 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +      rtl_patchphy(tp, 0x0d, 1 << 5);
 +}
 +
 +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              /* Enable Delay cap */
 +              { 0x1f, 0x0005 },
 +              { 0x05, 0x8b80 },
 +              { 0x06, 0xc896 },
 +              { 0x1f, 0x0000 },
 +
 +              /* Channel estimation fine tune */
 +              { 0x1f, 0x0001 },
 +              { 0x0b, 0x6c20 },
 +              { 0x07, 0x2872 },
 +              { 0x1c, 0xefff },
 +              { 0x1f, 0x0003 },
 +              { 0x14, 0x6420 },
 +              { 0x1f, 0x0000 },
 +
 +              /* Update PFM & 10M TX idle timer */
 +              { 0x1f, 0x0007 },
 +              { 0x1e, 0x002f },
 +              { 0x15, 0x1919 },
 +              { 0x1f, 0x0000 },
 +
 +              { 0x1f, 0x0007 },
 +              { 0x1e, 0x00ac },
 +              { 0x18, 0x0006 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_apply_firmware(tp);
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +
 +      /* DCO enable for 10M IDLE Power */
 +      rtl_writephy(tp, 0x1f, 0x0007);
 +      rtl_writephy(tp, 0x1e, 0x0023);
 +      rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +
 +      /* For impedance matching */
 +      rtl_writephy(tp, 0x1f, 0x0002);
 +      rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +
 +      /* PHY auto speed down */
 +      rtl_writephy(tp, 0x1f, 0x0007);
 +      rtl_writephy(tp, 0x1e, 0x002d);
 +      rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
 +
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b86);
 +      rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b85);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
 +      rtl_writephy(tp, 0x1f, 0x0007);
 +      rtl_writephy(tp, 0x1e, 0x0020);
 +      rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
 +      rtl_writephy(tp, 0x1f, 0x0006);
 +      rtl_writephy(tp, 0x00, 0x5a00);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_writephy(tp, 0x0d, 0x0007);
 +      rtl_writephy(tp, 0x0e, 0x003c);
 +      rtl_writephy(tp, 0x0d, 0x4007);
 +      rtl_writephy(tp, 0x0e, 0x0000);
 +      rtl_writephy(tp, 0x0d, 0x0000);
 +}
 +
 +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              /* Enable Delay cap */
 +              { 0x1f, 0x0004 },
 +              { 0x1f, 0x0007 },
 +              { 0x1e, 0x00ac },
 +              { 0x18, 0x0006 },
 +              { 0x1f, 0x0002 },
 +              { 0x1f, 0x0000 },
 +              { 0x1f, 0x0000 },
 +
 +              /* Channel estimation fine tune */
 +              { 0x1f, 0x0003 },
 +              { 0x09, 0xa20f },
 +              { 0x1f, 0x0000 },
 +              { 0x1f, 0x0000 },
 +
 +              /* Green Setting */
 +              { 0x1f, 0x0005 },
 +              { 0x05, 0x8b5b },
 +              { 0x06, 0x9222 },
 +              { 0x05, 0x8b6d },
 +              { 0x06, 0x8000 },
 +              { 0x05, 0x8b76 },
 +              { 0x06, 0x8000 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_apply_firmware(tp);
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +
 +      /* 4-corner performance improvement */
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b80);
 +      rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +
 +      /* PHY auto speed down */
 +      rtl_writephy(tp, 0x1f, 0x0004);
 +      rtl_writephy(tp, 0x1f, 0x0007);
 +      rtl_writephy(tp, 0x1e, 0x002d);
 +      rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0002);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
 +
 +      /* improve 10M EEE waveform */
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b86);
 +      rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +
 +      /* Improve 2-pair detection performance */
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b85);
 +      rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +
 +      /* EEE setting */
 +      rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
 +                   ERIAR_EXGMAC);
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b85);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
 +      rtl_writephy(tp, 0x1f, 0x0004);
 +      rtl_writephy(tp, 0x1f, 0x0007);
 +      rtl_writephy(tp, 0x1e, 0x0020);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
 +      rtl_writephy(tp, 0x1f, 0x0002);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_writephy(tp, 0x0d, 0x0007);
 +      rtl_writephy(tp, 0x0e, 0x003c);
 +      rtl_writephy(tp, 0x0d, 0x4007);
 +      rtl_writephy(tp, 0x0e, 0x0000);
 +      rtl_writephy(tp, 0x0d, 0x0000);
 +
 +      /* Green feature */
 +      rtl_writephy(tp, 0x1f, 0x0003);
 +      rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
 +      rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
 +
 +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0003 },
 +              { 0x08, 0x441d },
 +              { 0x01, 0x9100 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_patchphy(tp, 0x11, 1 << 12);
 +      rtl_patchphy(tp, 0x19, 1 << 13);
 +      rtl_patchphy(tp, 0x10, 1 << 15);
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
 +
 +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      static const struct phy_reg phy_reg_init[] = {
 +              { 0x1f, 0x0005 },
 +              { 0x1a, 0x0000 },
 +              { 0x1f, 0x0000 },
 +
 +              { 0x1f, 0x0004 },
 +              { 0x1c, 0x0000 },
 +              { 0x1f, 0x0000 },
 +
 +              { 0x1f, 0x0001 },
 +              { 0x15, 0x7701 },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +      /* Disable ALDPS before loading the RAM code. */
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_writephy(tp, 0x18, 0x0310);
 +      msleep(100);
 +
 +      rtl_apply_firmware(tp);
 +
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +}
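 +
 +/*
 + * Note on the phy_reg tables above: rtl_writephy_batch() (defined earlier in
 + * this file) simply replays each { reg, val } pair through rtl_writephy() in
 + * order.  Entries writing reg 0x1f select the PHY page first, so a pair such
 + * as { 0x1f, 0x0005 }, { 0x05, 0x8b80 } is roughly equivalent to:
 + *
 + *      rtl_writephy(tp, 0x1f, 0x0005);         select page 5
 + *      rtl_writephy(tp, 0x05, 0x8b80);         write reg 0x05 on that page
 + *
 + * (illustrative sketch only, assuming the { reg, val } layout of struct
 + * phy_reg used by this driver).
 + */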
 +
 +static void rtl_hw_phy_config(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      rtl8169_print_mac_version(tp);
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_01:
 +              break;
 +      case RTL_GIGA_MAC_VER_02:
 +      case RTL_GIGA_MAC_VER_03:
 +              rtl8169s_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_04:
 +              rtl8169sb_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_05:
 +              rtl8169scd_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_06:
 +              rtl8169sce_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_07:
 +      case RTL_GIGA_MAC_VER_08:
 +      case RTL_GIGA_MAC_VER_09:
 +              rtl8102e_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_11:
 +              rtl8168bb_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_12:
 +              rtl8168bef_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_17:
 +              rtl8168bef_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_18:
 +              rtl8168cp_1_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_19:
 +              rtl8168c_1_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_20:
 +              rtl8168c_2_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_21:
 +              rtl8168c_3_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_22:
 +              rtl8168c_4_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_23:
 +      case RTL_GIGA_MAC_VER_24:
 +              rtl8168cp_2_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_25:
 +              rtl8168d_1_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_26:
 +              rtl8168d_2_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_27:
 +              rtl8168d_3_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_28:
 +              rtl8168d_4_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_29:
 +      case RTL_GIGA_MAC_VER_30:
 +              rtl8105e_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_31:
 +              /* None. */
 +              break;
 +      case RTL_GIGA_MAC_VER_32:
 +      case RTL_GIGA_MAC_VER_33:
 +              rtl8168e_1_hw_phy_config(tp);
 +              break;
 +      case RTL_GIGA_MAC_VER_34:
 +              rtl8168e_2_hw_phy_config(tp);
 +              break;
 +
 +      default:
 +              break;
 +      }
 +}
 +
 +static void rtl8169_phy_timer(unsigned long __opaque)
 +{
 +      struct net_device *dev = (struct net_device *)__opaque;
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      struct timer_list *timer = &tp->timer;
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      unsigned long timeout = RTL8169_PHY_TIMEOUT;
 +
 +      assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
 +
 +      spin_lock_irq(&tp->lock);
 +
 +      if (tp->phy_reset_pending(tp)) {
 +              /*
 +               * A busy loop could burn quite a few cycles on today's CPUs.
 +               * Let's delay the execution of the timer for a few ticks.
 +               */
 +              timeout = HZ/10;
 +              goto out_mod_timer;
 +      }
 +
 +      if (tp->link_ok(ioaddr))
 +              goto out_unlock;
 +
 +      netif_warn(tp, link, dev, "PHY reset until link up\n");
 +
 +      tp->phy_reset_enable(tp);
 +
 +out_mod_timer:
 +      mod_timer(timer, jiffies + timeout);
 +out_unlock:
 +      spin_unlock_irq(&tp->lock);
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +/*
 + * Polling 'interrupt' - used by things like netconsole to send skbs
 + * without having to re-enable interrupts. It's not called while
 + * the interrupt routine is executing.
 + */
 +static void rtl8169_netpoll(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      disable_irq(pdev->irq);
 +      rtl8169_interrupt(pdev->irq, dev);
 +      enable_irq(pdev->irq);
 +}
 +#endif
 +
 +static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
 +                                void __iomem *ioaddr)
 +{
 +      iounmap(ioaddr);
 +      pci_release_regions(pdev);
 +      pci_clear_mwi(pdev);
 +      pci_disable_device(pdev);
 +      free_netdev(dev);
 +}
 +
 +static void rtl8169_phy_reset(struct net_device *dev,
 +                            struct rtl8169_private *tp)
 +{
 +      unsigned int i;
 +
 +      tp->phy_reset_enable(tp);
 +      for (i = 0; i < 100; i++) {
 +              if (!tp->phy_reset_pending(tp))
 +                      return;
 +              msleep(1);
 +      }
 +      netif_err(tp, link, dev, "PHY reset failed\n");
 +}
 +
++static bool rtl_tbi_enabled(struct rtl8169_private *tp)
++{
++      void __iomem *ioaddr = tp->mmio_addr;
++
++      return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
++          (RTL_R8(PHYstatus) & TBI_Enable);
++}
++
 +static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      rtl_hw_phy_config(dev);
 +
 +      if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
 +              dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
 +              RTL_W8(0x82, 0x01);
 +      }
 +
 +      pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
 +
 +      if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
 +              pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
 +              dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
 +              RTL_W8(0x82, 0x01);
 +              dprintk("Set PHY Reg 0x0bh = 0x00h\n");
 +              rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
 +      }
 +
 +      rtl8169_phy_reset(dev, tp);
 +
 +      rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
 +                        ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
 +                        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
 +                        (tp->mii.supports_gmii ?
 +                         ADVERTISED_1000baseT_Half |
 +                         ADVERTISED_1000baseT_Full : 0));
 +
-                   tp->mac_version == RTL_GIGA_MAC_VER_33)
++      if (rtl_tbi_enabled(tp))
 +              netif_info(tp, link, dev, "TBI auto-negotiating\n");
 +}
 +
 +static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      u32 high;
 +      u32 low;
 +
 +      low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
 +      high = addr[4] | (addr[5] << 8);
 +
 +      spin_lock_irq(&tp->lock);
 +
 +      RTL_W8(Cfg9346, Cfg9346_Unlock);
 +
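 +      /* The read-backs after the MAC register writes below presumably just
 +       * flush the posted MMIO writes before the EXGMAC update and re-lock. */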
 +      RTL_W32(MAC4, high);
 +      RTL_R32(MAC4);
 +
 +      RTL_W32(MAC0, low);
 +      RTL_R32(MAC0);
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
 +              const struct exgmac_reg e[] = {
 +                      { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
 +                      { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
 +                      { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
 +                      { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
 +                                                              low  >> 16 },
 +              };
 +
 +              rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
 +      }
 +
 +      RTL_W8(Cfg9346, Cfg9346_Lock);
 +
 +      spin_unlock_irq(&tp->lock);
 +}
 +
 +static int rtl_set_mac_address(struct net_device *dev, void *p)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      struct sockaddr *addr = p;
 +
 +      if (!is_valid_ether_addr(addr->sa_data))
 +              return -EADDRNOTAVAIL;
 +
 +      memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 +
 +      rtl_rar_set(tp, dev->dev_addr);
 +
 +      return 0;
 +}
 +
 +static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      struct mii_ioctl_data *data = if_mii(ifr);
 +
 +      return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
 +}
 +
 +static int rtl_xmii_ioctl(struct rtl8169_private *tp,
 +                        struct mii_ioctl_data *data, int cmd)
 +{
 +      switch (cmd) {
 +      case SIOCGMIIPHY:
 +              data->phy_id = 32; /* Internal PHY */
 +              return 0;
 +
 +      case SIOCGMIIREG:
 +              data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
 +              return 0;
 +
 +      case SIOCSMIIREG:
 +              rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
 +              return 0;
 +      }
 +      return -EOPNOTSUPP;
 +}
 +
 +static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
 +{
 +      return -EOPNOTSUPP;
 +}
 +
 +static const struct rtl_cfg_info {
 +      void (*hw_start)(struct net_device *);
 +      unsigned int region;
 +      unsigned int align;
 +      u16 intr_event;
 +      u16 napi_event;
 +      unsigned features;
 +      u8 default_ver;
 +} rtl_cfg_infos [] = {
 +      [RTL_CFG_0] = {
 +              .hw_start       = rtl_hw_start_8169,
 +              .region         = 1,
 +              .align          = 0,
 +              .intr_event     = SYSErr | LinkChg | RxOverflow |
 +                                RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
 +              .napi_event     = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
 +              .features       = RTL_FEATURE_GMII,
 +              .default_ver    = RTL_GIGA_MAC_VER_01,
 +      },
 +      [RTL_CFG_1] = {
 +              .hw_start       = rtl_hw_start_8168,
 +              .region         = 2,
 +              .align          = 8,
 +              .intr_event     = SYSErr | LinkChg | RxOverflow |
 +                                TxErr | TxOK | RxOK | RxErr,
 +              .napi_event     = TxErr | TxOK | RxOK | RxOverflow,
 +              .features       = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
 +              .default_ver    = RTL_GIGA_MAC_VER_11,
 +      },
 +      [RTL_CFG_2] = {
 +              .hw_start       = rtl_hw_start_8101,
 +              .region         = 2,
 +              .align          = 8,
 +              .intr_event     = SYSErr | LinkChg | RxOverflow | PCSTimeout |
 +                                RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
 +              .napi_event     = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
 +              .features       = RTL_FEATURE_MSI,
 +              .default_ver    = RTL_GIGA_MAC_VER_13,
 +      }
 +};
 +
 +/* Cfg9346_Unlock assumed. */
 +static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
 +                          const struct rtl_cfg_info *cfg)
 +{
 +      unsigned msi = 0;
 +      u8 cfg2;
 +
 +      cfg2 = RTL_R8(Config2) & ~MSIEnable;
 +      if (cfg->features & RTL_FEATURE_MSI) {
 +              if (pci_enable_msi(pdev)) {
 +                      dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
 +              } else {
 +                      cfg2 |= MSIEnable;
 +                      msi = RTL_FEATURE_MSI;
 +              }
 +      }
 +      RTL_W8(Config2, cfg2);
 +      return msi;
 +}
 +
 +static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
 +{
 +      if (tp->features & RTL_FEATURE_MSI) {
 +              pci_disable_msi(pdev);
 +              tp->features &= ~RTL_FEATURE_MSI;
 +      }
 +}
 +
 +static const struct net_device_ops rtl8169_netdev_ops = {
 +      .ndo_open               = rtl8169_open,
 +      .ndo_stop               = rtl8169_close,
 +      .ndo_get_stats          = rtl8169_get_stats,
 +      .ndo_start_xmit         = rtl8169_start_xmit,
 +      .ndo_tx_timeout         = rtl8169_tx_timeout,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_change_mtu         = rtl8169_change_mtu,
 +      .ndo_fix_features       = rtl8169_fix_features,
 +      .ndo_set_features       = rtl8169_set_features,
 +      .ndo_set_mac_address    = rtl_set_mac_address,
 +      .ndo_do_ioctl           = rtl8169_ioctl,
 +      .ndo_set_rx_mode        = rtl_set_rx_mode,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = rtl8169_netpoll,
 +#endif
 +
 +};
 +
 +static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
 +{
 +      struct mdio_ops *ops = &tp->mdio_ops;
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_27:
 +              ops->write      = r8168dp_1_mdio_write;
 +              ops->read       = r8168dp_1_mdio_read;
 +              break;
 +      case RTL_GIGA_MAC_VER_28:
 +      case RTL_GIGA_MAC_VER_31:
 +              ops->write      = r8168dp_2_mdio_write;
 +              ops->read       = r8168dp_2_mdio_read;
 +              break;
 +      default:
 +              ops->write      = r8169_mdio_write;
 +              ops->read       = r8169_mdio_read;
 +              break;
 +      }
 +}
 +
 +static void r810x_phy_power_down(struct rtl8169_private *tp)
 +{
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
 +}
 +
 +static void r810x_phy_power_up(struct rtl8169_private *tp)
 +{
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
 +}
 +
 +static void r810x_pll_power_down(struct rtl8169_private *tp)
 +{
++      void __iomem *ioaddr = tp->mmio_addr;
++
 +      if (__rtl8169_get_wol(tp) & WAKE_ANY) {
 +              rtl_writephy(tp, 0x1f, 0x0000);
 +              rtl_writephy(tp, MII_BMCR, 0x0000);
++
++              if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
++                  tp->mac_version == RTL_GIGA_MAC_VER_30)
++                      RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
++                              AcceptMulticast | AcceptMyPhys);
 +              return;
 +      }
 +
 +      r810x_phy_power_down(tp);
 +}
 +
 +static void r810x_pll_power_up(struct rtl8169_private *tp)
 +{
 +      r810x_phy_power_up(tp);
 +}
 +
 +static void r8168_phy_power_up(struct rtl8169_private *tp)
 +{
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_11:
 +      case RTL_GIGA_MAC_VER_12:
 +      case RTL_GIGA_MAC_VER_17:
 +      case RTL_GIGA_MAC_VER_18:
 +      case RTL_GIGA_MAC_VER_19:
 +      case RTL_GIGA_MAC_VER_20:
 +      case RTL_GIGA_MAC_VER_21:
 +      case RTL_GIGA_MAC_VER_22:
 +      case RTL_GIGA_MAC_VER_23:
 +      case RTL_GIGA_MAC_VER_24:
 +      case RTL_GIGA_MAC_VER_25:
 +      case RTL_GIGA_MAC_VER_26:
 +      case RTL_GIGA_MAC_VER_27:
 +      case RTL_GIGA_MAC_VER_28:
 +      case RTL_GIGA_MAC_VER_31:
 +              rtl_writephy(tp, 0x0e, 0x0000);
 +              break;
 +      default:
 +              break;
 +      }
 +      rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
 +}
 +
 +static void r8168_phy_power_down(struct rtl8169_private *tp)
 +{
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_32:
 +      case RTL_GIGA_MAC_VER_33:
 +              rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_11:
 +      case RTL_GIGA_MAC_VER_12:
 +      case RTL_GIGA_MAC_VER_17:
 +      case RTL_GIGA_MAC_VER_18:
 +      case RTL_GIGA_MAC_VER_19:
 +      case RTL_GIGA_MAC_VER_20:
 +      case RTL_GIGA_MAC_VER_21:
 +      case RTL_GIGA_MAC_VER_22:
 +      case RTL_GIGA_MAC_VER_23:
 +      case RTL_GIGA_MAC_VER_24:
 +      case RTL_GIGA_MAC_VER_25:
 +      case RTL_GIGA_MAC_VER_26:
 +      case RTL_GIGA_MAC_VER_27:
 +      case RTL_GIGA_MAC_VER_28:
 +      case RTL_GIGA_MAC_VER_31:
 +              rtl_writephy(tp, 0x0e, 0x0200);
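 +              /* fall through */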
 +      default:
 +              rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
 +              break;
 +      }
 +}
 +
 +static void r8168_pll_power_down(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
 +           tp->mac_version == RTL_GIGA_MAC_VER_28 ||
 +           tp->mac_version == RTL_GIGA_MAC_VER_31) &&
 +          r8168dp_check_dash(tp)) {
 +              return;
 +      }
 +
 +      if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
 +           tp->mac_version == RTL_GIGA_MAC_VER_24) &&
 +          (RTL_R16(CPlusCmd) & ASF)) {
 +              return;
 +      }
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_33)
 +              rtl_ephy_write(ioaddr, 0x19, 0xff64);
 +
 +      if (__rtl8169_get_wol(tp) & WAKE_ANY) {
 +              rtl_writephy(tp, 0x1f, 0x0000);
 +              rtl_writephy(tp, MII_BMCR, 0x0000);
 +
 +              if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
-       if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
-           (RTL_R8(PHYstatus) & TBI_Enable)) {
++                  tp->mac_version == RTL_GIGA_MAC_VER_33 ||
++                  tp->mac_version == RTL_GIGA_MAC_VER_34)
 +                      RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
 +                              AcceptMulticast | AcceptMyPhys);
 +              return;
 +      }
 +
 +      r8168_phy_power_down(tp);
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_25:
 +      case RTL_GIGA_MAC_VER_26:
 +      case RTL_GIGA_MAC_VER_27:
 +      case RTL_GIGA_MAC_VER_28:
 +      case RTL_GIGA_MAC_VER_31:
 +      case RTL_GIGA_MAC_VER_32:
 +      case RTL_GIGA_MAC_VER_33:
 +              RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
 +              break;
 +      }
 +}
 +
 +static void r8168_pll_power_up(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
 +           tp->mac_version == RTL_GIGA_MAC_VER_28 ||
 +           tp->mac_version == RTL_GIGA_MAC_VER_31) &&
 +          r8168dp_check_dash(tp)) {
 +              return;
 +      }
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_25:
 +      case RTL_GIGA_MAC_VER_26:
 +      case RTL_GIGA_MAC_VER_27:
 +      case RTL_GIGA_MAC_VER_28:
 +      case RTL_GIGA_MAC_VER_31:
 +      case RTL_GIGA_MAC_VER_32:
 +      case RTL_GIGA_MAC_VER_33:
 +              RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
 +              break;
 +      }
 +
 +      r8168_phy_power_up(tp);
 +}
 +
 +static void rtl_pll_power_op(struct rtl8169_private *tp,
 +                           void (*op)(struct rtl8169_private *))
 +{
 +      if (op)
 +              op(tp);
 +}
 +
 +static void rtl_pll_power_down(struct rtl8169_private *tp)
 +{
 +      rtl_pll_power_op(tp, tp->pll_power_ops.down);
 +}
 +
 +static void rtl_pll_power_up(struct rtl8169_private *tp)
 +{
 +      rtl_pll_power_op(tp, tp->pll_power_ops.up);
 +}
 +
 +static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
 +{
 +      struct pll_power_ops *ops = &tp->pll_power_ops;
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_07:
 +      case RTL_GIGA_MAC_VER_08:
 +      case RTL_GIGA_MAC_VER_09:
 +      case RTL_GIGA_MAC_VER_10:
 +      case RTL_GIGA_MAC_VER_16:
 +      case RTL_GIGA_MAC_VER_29:
 +      case RTL_GIGA_MAC_VER_30:
 +              ops->down       = r810x_pll_power_down;
 +              ops->up         = r810x_pll_power_up;
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_11:
 +      case RTL_GIGA_MAC_VER_12:
 +      case RTL_GIGA_MAC_VER_17:
 +      case RTL_GIGA_MAC_VER_18:
 +      case RTL_GIGA_MAC_VER_19:
 +      case RTL_GIGA_MAC_VER_20:
 +      case RTL_GIGA_MAC_VER_21:
 +      case RTL_GIGA_MAC_VER_22:
 +      case RTL_GIGA_MAC_VER_23:
 +      case RTL_GIGA_MAC_VER_24:
 +      case RTL_GIGA_MAC_VER_25:
 +      case RTL_GIGA_MAC_VER_26:
 +      case RTL_GIGA_MAC_VER_27:
 +      case RTL_GIGA_MAC_VER_28:
 +      case RTL_GIGA_MAC_VER_31:
 +      case RTL_GIGA_MAC_VER_32:
 +      case RTL_GIGA_MAC_VER_33:
 +      case RTL_GIGA_MAC_VER_34:
 +              ops->down       = r8168_pll_power_down;
 +              ops->up         = r8168_pll_power_up;
 +              break;
 +
 +      default:
 +              ops->down       = NULL;
 +              ops->up         = NULL;
 +              break;
 +      }
 +}
 +
 +static void rtl_init_rxcfg(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_01:
 +      case RTL_GIGA_MAC_VER_02:
 +      case RTL_GIGA_MAC_VER_03:
 +      case RTL_GIGA_MAC_VER_04:
 +      case RTL_GIGA_MAC_VER_05:
 +      case RTL_GIGA_MAC_VER_06:
 +      case RTL_GIGA_MAC_VER_10:
 +      case RTL_GIGA_MAC_VER_11:
 +      case RTL_GIGA_MAC_VER_12:
 +      case RTL_GIGA_MAC_VER_13:
 +      case RTL_GIGA_MAC_VER_14:
 +      case RTL_GIGA_MAC_VER_15:
 +      case RTL_GIGA_MAC_VER_16:
 +      case RTL_GIGA_MAC_VER_17:
 +              RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
 +              break;
 +      case RTL_GIGA_MAC_VER_18:
 +      case RTL_GIGA_MAC_VER_19:
 +      case RTL_GIGA_MAC_VER_20:
 +      case RTL_GIGA_MAC_VER_21:
 +      case RTL_GIGA_MAC_VER_22:
 +      case RTL_GIGA_MAC_VER_23:
 +      case RTL_GIGA_MAC_VER_24:
 +              RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
 +              break;
 +      default:
 +              RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
 +              break;
 +      }
 +}
 +
 +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
 +{
 +      tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
 +}
 +
 +static void rtl_hw_reset(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      int i;
 +
 +      /* Soft reset the chip. */
 +      RTL_W8(ChipCmd, CmdReset);
 +
 +      /* Check that the chip has finished the reset. */
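 +      /* (Up to roughly 100 * 100 us = 10 ms before we stop waiting.) */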
 +      for (i = 0; i < 100; i++) {
 +              if ((RTL_R8(ChipCmd) & CmdReset) == 0)
 +                      break;
 +              udelay(100);
 +      }
 +
 +      rtl8169_init_ring_indexes(tp);
 +}
 +
 +static int __devinit
 +rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 +{
 +      const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
 +      const unsigned int region = cfg->region;
 +      struct rtl8169_private *tp;
 +      struct mii_if_info *mii;
 +      struct net_device *dev;
 +      void __iomem *ioaddr;
 +      int chipset, i;
 +      int rc;
 +
 +      if (netif_msg_drv(&debug)) {
 +              printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
 +                     MODULENAME, RTL8169_VERSION);
 +      }
 +
 +      dev = alloc_etherdev(sizeof (*tp));
 +      if (!dev) {
 +              if (netif_msg_drv(&debug))
 +                      dev_err(&pdev->dev, "unable to alloc new ethernet\n");
 +              rc = -ENOMEM;
 +              goto out;
 +      }
 +
 +      SET_NETDEV_DEV(dev, &pdev->dev);
 +      dev->netdev_ops = &rtl8169_netdev_ops;
 +      tp = netdev_priv(dev);
 +      tp->dev = dev;
 +      tp->pci_dev = pdev;
 +      tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
 +
 +      mii = &tp->mii;
 +      mii->dev = dev;
 +      mii->mdio_read = rtl_mdio_read;
 +      mii->mdio_write = rtl_mdio_write;
 +      mii->phy_id_mask = 0x1f;
 +      mii->reg_num_mask = 0x1f;
 +      mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
 +
 +      /* Disable ASPM completely, as it causes random devices to stop working
 +       * as well as full system hangs for some PCIe device users. */
 +      pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
 +                                   PCIE_LINK_STATE_CLKPM);
 +
 +      /* enable device (incl. PCI PM wakeup and hotplug setup) */
 +      rc = pci_enable_device(pdev);
 +      if (rc < 0) {
 +              netif_err(tp, probe, dev, "enable failure\n");
 +              goto err_out_free_dev_1;
 +      }
 +
 +      if (pci_set_mwi(pdev) < 0)
 +              netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
 +
 +      /* make sure PCI base addr 1 is MMIO */
 +      if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
 +              netif_err(tp, probe, dev,
 +                        "region #%d not an MMIO resource, aborting\n",
 +                        region);
 +              rc = -ENODEV;
 +              goto err_out_mwi_2;
 +      }
 +
 +      /* check for weird/broken PCI region reporting */
 +      if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
 +              netif_err(tp, probe, dev,
 +                        "Invalid PCI region size(s), aborting\n");
 +              rc = -ENODEV;
 +              goto err_out_mwi_2;
 +      }
 +
 +      rc = pci_request_regions(pdev, MODULENAME);
 +      if (rc < 0) {
 +              netif_err(tp, probe, dev, "could not request regions\n");
 +              goto err_out_mwi_2;
 +      }
 +
 +      tp->cp_cmd = RxChkSum;
 +
 +      if ((sizeof(dma_addr_t) > 4) &&
 +          !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
 +              tp->cp_cmd |= PCIDAC;
 +              dev->features |= NETIF_F_HIGHDMA;
 +      } else {
 +              rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +              if (rc < 0) {
 +                      netif_err(tp, probe, dev, "DMA configuration failed\n");
 +                      goto err_out_free_res_3;
 +              }
 +      }
 +
 +      /* ioremap MMIO region */
 +      ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
 +      if (!ioaddr) {
 +              netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
 +              rc = -EIO;
 +              goto err_out_free_res_3;
 +      }
 +      tp->mmio_addr = ioaddr;
 +
 +      if (!pci_is_pcie(pdev))
 +              netif_info(tp, probe, dev, "not PCI Express\n");
 +
 +      /* Identify chip attached to board */
 +      rtl8169_get_mac_version(tp, dev, cfg->default_ver);
 +
 +      rtl_init_rxcfg(tp);
 +
 +      RTL_W16(IntrMask, 0x0000);
 +
 +      rtl_hw_reset(tp);
 +
 +      RTL_W16(IntrStatus, 0xffff);
 +
 +      pci_set_master(pdev);
 +
 +      /*
 +       * Pretend we are using VLANs; this bypasses a nasty bug where
 +       * interrupts stop flowing under high load on 8110SCd controllers.
 +       */
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_05)
 +              tp->cp_cmd |= RxVlan;
 +
 +      rtl_init_mdio_ops(tp);
 +      rtl_init_pll_power_ops(tp);
 +
 +      rtl8169_print_mac_version(tp);
 +
 +      chipset = tp->mac_version;
 +      tp->txd_version = rtl_chip_infos[chipset].txd_version;
 +
 +      RTL_W8(Cfg9346, Cfg9346_Unlock);
 +      RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
 +      RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
 +      if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
 +              tp->features |= RTL_FEATURE_WOL;
 +      if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
 +              tp->features |= RTL_FEATURE_WOL;
 +      tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
 +      RTL_W8(Cfg9346, Cfg9346_Lock);
 +
-               status = le32_to_cpu(desc->opts1);
++      if (rtl_tbi_enabled(tp)) {
 +              tp->set_speed = rtl8169_set_speed_tbi;
 +              tp->get_settings = rtl8169_gset_tbi;
 +              tp->phy_reset_enable = rtl8169_tbi_reset_enable;
 +              tp->phy_reset_pending = rtl8169_tbi_reset_pending;
 +              tp->link_ok = rtl8169_tbi_link_ok;
 +              tp->do_ioctl = rtl_tbi_ioctl;
 +      } else {
 +              tp->set_speed = rtl8169_set_speed_xmii;
 +              tp->get_settings = rtl8169_gset_xmii;
 +              tp->phy_reset_enable = rtl8169_xmii_reset_enable;
 +              tp->phy_reset_pending = rtl8169_xmii_reset_pending;
 +              tp->link_ok = rtl8169_xmii_link_ok;
 +              tp->do_ioctl = rtl_xmii_ioctl;
 +      }
 +
 +      spin_lock_init(&tp->lock);
 +
 +      /* Get MAC address */
 +      for (i = 0; i < MAC_ADDR_LEN; i++)
 +              dev->dev_addr[i] = RTL_R8(MAC0 + i);
 +      memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 +
 +      SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
 +      dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
 +      dev->irq = pdev->irq;
 +      dev->base_addr = (unsigned long) ioaddr;
 +
 +      netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
 +
 +      /* Don't enable SG, IP_CSUM and TSO by default - they might not work
 +       * properly on all devices. */
 +      dev->features |= NETIF_F_RXCSUM |
 +              NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 +
 +      dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 +              NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 +      dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 +              NETIF_F_HIGHDMA;
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_05)
 +              /* 8110SCd requires hardware Rx VLAN - disallow toggling */
 +              dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
 +
 +      tp->intr_mask = 0xffff;
 +      tp->hw_start = cfg->hw_start;
 +      tp->intr_event = cfg->intr_event;
 +      tp->napi_event = cfg->napi_event;
 +
++      tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
++              ~(RxBOVF | RxFOVF) : ~0;
++
 +      init_timer(&tp->timer);
 +      tp->timer.data = (unsigned long) dev;
 +      tp->timer.function = rtl8169_phy_timer;
 +
 +      tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
 +
 +      rc = register_netdev(dev);
 +      if (rc < 0)
 +              goto err_out_msi_4;
 +
 +      pci_set_drvdata(pdev, dev);
 +
 +      netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
 +                 rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
 +                 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_28 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_31) {
 +              rtl8168_driver_start(tp);
 +      }
 +
 +      device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
 +
 +      if (pci_dev_run_wake(pdev))
 +              pm_runtime_put_noidle(&pdev->dev);
 +
 +      netif_carrier_off(dev);
 +
 +out:
 +      return rc;
 +
 +err_out_msi_4:
 +      rtl_disable_msi(pdev, tp);
 +      iounmap(ioaddr);
 +err_out_free_res_3:
 +      pci_release_regions(pdev);
 +err_out_mwi_2:
 +      pci_clear_mwi(pdev);
 +      pci_disable_device(pdev);
 +err_out_free_dev_1:
 +      free_netdev(dev);
 +      goto out;
 +}
 +
 +static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_28 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_31) {
 +              rtl8168_driver_stop(tp);
 +      }
 +
 +      cancel_delayed_work_sync(&tp->task);
 +
 +      unregister_netdev(dev);
 +
 +      rtl_release_firmware(tp);
 +
 +      if (pci_dev_run_wake(pdev))
 +              pm_runtime_get_noresume(&pdev->dev);
 +
 +      /* restore original MAC address */
 +      rtl_rar_set(tp, dev->perm_addr);
 +
 +      rtl_disable_msi(pdev, tp);
 +      rtl8169_release_board(pdev, dev, tp->mmio_addr);
 +      pci_set_drvdata(pdev, NULL);
 +}
 +
 +static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
 +{
 +      struct rtl_fw *rtl_fw;
 +      const char *name;
 +      int rc = -ENOMEM;
 +
 +      name = rtl_lookup_firmware_name(tp);
 +      if (!name)
 +              goto out_no_firmware;
 +
 +      rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
 +      if (!rtl_fw)
 +              goto err_warn;
 +
 +      rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
 +      if (rc < 0)
 +              goto err_free;
 +
 +      rc = rtl_check_firmware(tp, rtl_fw);
 +      if (rc < 0)
 +              goto err_release_firmware;
 +
 +      tp->rtl_fw = rtl_fw;
 +out:
 +      return;
 +
 +err_release_firmware:
 +      release_firmware(rtl_fw->fw);
 +err_free:
 +      kfree(rtl_fw);
 +err_warn:
 +      netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
 +                 name, rc);
 +out_no_firmware:
 +      tp->rtl_fw = NULL;
 +      goto out;
 +}
 +
 +static void rtl_request_firmware(struct rtl8169_private *tp)
 +{
 +      if (IS_ERR(tp->rtl_fw))
 +              rtl_request_uncached_firmware(tp);
 +}
 +
 +static int rtl8169_open(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +      int retval = -ENOMEM;
 +
 +      pm_runtime_get_sync(&pdev->dev);
 +
 +      /*
 +       * Rx and Tx descriptors need 256-byte alignment.
 +       * dma_alloc_coherent provides more.
 +       */
 +      tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
 +                                           &tp->TxPhyAddr, GFP_KERNEL);
 +      if (!tp->TxDescArray)
 +              goto err_pm_runtime_put;
 +
 +      tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
 +                                           &tp->RxPhyAddr, GFP_KERNEL);
 +      if (!tp->RxDescArray)
 +              goto err_free_tx_0;
 +
 +      retval = rtl8169_init_ring(dev);
 +      if (retval < 0)
 +              goto err_free_rx_1;
 +
 +      INIT_DELAYED_WORK(&tp->task, NULL);
 +
 +      smp_mb();
 +
 +      rtl_request_firmware(tp);
 +
 +      retval = request_irq(dev->irq, rtl8169_interrupt,
 +                           (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
 +                           dev->name, dev);
 +      if (retval < 0)
 +              goto err_release_fw_2;
 +
 +      napi_enable(&tp->napi);
 +
 +      rtl8169_init_phy(dev, tp);
 +
 +      rtl8169_set_features(dev, dev->features);
 +
 +      rtl_pll_power_up(tp);
 +
 +      rtl_hw_start(dev);
 +
 +      tp->saved_wolopts = 0;
 +      pm_runtime_put_noidle(&pdev->dev);
 +
 +      rtl8169_check_link_status(dev, tp, ioaddr);
 +out:
 +      return retval;
 +
 +err_release_fw_2:
 +      rtl_release_firmware(tp);
 +      rtl8169_rx_clear(tp);
 +err_free_rx_1:
 +      dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
 +                        tp->RxPhyAddr);
 +      tp->RxDescArray = NULL;
 +err_free_tx_0:
 +      dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
 +                        tp->TxPhyAddr);
 +      tp->TxDescArray = NULL;
 +err_pm_runtime_put:
 +      pm_runtime_put_noidle(&pdev->dev);
 +      goto out;
 +}
 +
 +static void rtl_rx_close(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
 +}
 +
 +static void rtl8169_hw_reset(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      /* Disable interrupts */
 +      rtl8169_irq_mask_and_ack(ioaddr);
 +
 +      rtl_rx_close(tp);
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_28 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_31) {
 +              while (RTL_R8(TxPoll) & NPQ)
 +                      udelay(20);
 +      } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
++              RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
 +              while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
 +                      udelay(100);
 +      } else {
 +              RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
 +              udelay(100);
 +      }
 +
 +      rtl_hw_reset(tp);
 +}
 +
 +static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      /* Set DMA burst size and Interframe Gap Time */
 +      RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
 +              (InterFrameGap << TxInterFrameGapShift));
 +}
 +
 +static void rtl_hw_start(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      tp->hw_start(dev);
 +
 +      netif_start_queue(dev);
 +}
 +
 +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
 +                                       void __iomem *ioaddr)
 +{
 +      /*
 +       * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
 +       * register to be written before TxDescAddrLow to work.
 +       * Switching from MMIO to I/O access fixes the issue as well.
 +       */
 +      RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
 +      RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
 +      RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
 +      RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
 +}
 +
 +static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
 +{
 +      u16 cmd;
 +
 +      cmd = RTL_R16(CPlusCmd);
 +      RTL_W16(CPlusCmd, cmd);
 +      return cmd;
 +}
 +
 +static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
 +{
 +      /* Setting this low hurts performance, so effectively disable the size filtering. */
 +      RTL_W16(RxMaxSize, rx_buf_sz + 1);
 +}
 +
 +static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
 +{
 +      static const struct rtl_cfg2_info {
 +              u32 mac_version;
 +              u32 clk;
 +              u32 val;
 +      } cfg2_info [] = {
 +              { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
 +              { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
 +              { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
 +              { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
 +      };
 +      const struct rtl_cfg2_info *p = cfg2_info;
 +      unsigned int i;
 +      u32 clk;
 +
 +      clk = RTL_R8(Config2) & PCI_Clock_66MHz;
 +      for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
 +              if ((p->mac_version == mac_version) && (p->clk == clk)) {
 +                      RTL_W32(0x7c, p->val);
 +                      break;
 +              }
 +      }
 +}
 +
 +static void rtl_hw_start_8169(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
 +              RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
 +              pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
 +      }
 +
 +      RTL_W8(Cfg9346, Cfg9346_Unlock);
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_02 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_03 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_04)
 +              RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
 +
 +      rtl_init_rxcfg(tp);
 +
 +      RTL_W8(EarlyTxThres, NoEarlyTx);
 +
 +      rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_02 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_03 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_04)
 +              rtl_set_rx_tx_config_registers(tp);
 +
 +      tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_03) {
 +              dprintk("Set MAC Reg C+CR Offset 0xE0. "
 +                      "Bit-3 and bit-14 MUST be 1\n");
 +              tp->cp_cmd |= (1 << 14);
 +      }
 +
 +      RTL_W16(CPlusCmd, tp->cp_cmd);
 +
 +      rtl8169_set_magic_reg(ioaddr, tp->mac_version);
 +
 +      /*
 +       * Undocumented corner. Supposedly:
 +       * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
 +       */
 +      RTL_W16(IntrMitigate, 0x0000);
 +
 +      rtl_set_rx_tx_desc_registers(tp, ioaddr);
 +
 +      if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
 +          tp->mac_version != RTL_GIGA_MAC_VER_02 &&
 +          tp->mac_version != RTL_GIGA_MAC_VER_03 &&
 +          tp->mac_version != RTL_GIGA_MAC_VER_04) {
 +              RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
 +              rtl_set_rx_tx_config_registers(tp);
 +      }
 +
 +      RTL_W8(Cfg9346, Cfg9346_Lock);
 +
 +      /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
 +      RTL_R8(IntrMask);
 +
 +      RTL_W32(RxMissed, 0);
 +
 +      rtl_set_rx_mode(dev);
 +
 +      /* no early-rx interrupts */
 +      RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
 +
 +      /* Enable all known interrupts by setting the interrupt mask. */
 +      RTL_W16(IntrMask, tp->intr_event);
 +}
 +
 +static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
 +{
 +      int cap = pci_pcie_cap(pdev);
 +
 +      if (cap) {
 +              u16 ctl;
 +
 +              pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
 +              ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
 +              pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
 +      }
 +}
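 +
 +/*
 + * Note: assuming MAX_READ_REQUEST_SHIFT is the bit offset of the PCIe READRQ
 + * field and the standard encoding (field value n selects 128 << n bytes), the
 + * (0x5 << MAX_READ_REQUEST_SHIFT) value passed by the callers requests a
 + * 4096 byte Max_Read_Request_Size.
 + */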
 +
 +static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
 +{
 +      u32 csi;
 +
 +      csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
 +      rtl_csi_write(ioaddr, 0x070c, csi | bits);
 +}
 +
 +static void rtl_csi_access_enable_1(void __iomem *ioaddr)
 +{
 +      rtl_csi_access_enable(ioaddr, 0x17000000);
 +}
 +
 +static void rtl_csi_access_enable_2(void __iomem *ioaddr)
 +{
 +      rtl_csi_access_enable(ioaddr, 0x27000000);
 +}
 +
 +struct ephy_info {
 +      unsigned int offset;
 +      u16 mask;
 +      u16 bits;
 +};
 +
 +static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
 +{
 +      u16 w;
 +
 +      while (len-- > 0) {
 +              w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
 +              rtl_ephy_write(ioaddr, e->offset, w);
 +              e++;
 +      }
 +}
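 +
 +/*
 + * Worked example: an ephy_info entry of { 0x02, 0x0800, 0x1000 } makes
 + * rtl_ephy_init() read EPHY register 0x02, clear bit 11 (the mask), set
 + * bit 12 (the bits) and write the result back - a plain read-modify-write
 + * per table entry.
 + */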
 +
 +static void rtl_disable_clock_request(struct pci_dev *pdev)
 +{
 +      int cap = pci_pcie_cap(pdev);
 +
 +      if (cap) {
 +              u16 ctl;
 +
 +              pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
 +              ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
 +              pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
 +      }
 +}
 +
 +static void rtl_enable_clock_request(struct pci_dev *pdev)
 +{
 +      int cap = pci_pcie_cap(pdev);
 +
 +      if (cap) {
 +              u16 ctl;
 +
 +              pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
 +              ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
 +              pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
 +      }
 +}
 +
 +#define R8168_CPCMD_QUIRK_MASK (\
 +      EnableBist | \
 +      Mac_dbgo_oe | \
 +      Force_half_dup | \
 +      Force_rxflow_en | \
 +      Force_txflow_en | \
 +      Cxpl_dbg_sel | \
 +      ASF | \
 +      PktCntrDisable | \
 +      Mac_dbgo_sel)
 +
 +static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 +
 +      RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 +
 +      rtl_tx_performance_tweak(pdev,
 +              (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
 +}
 +
 +static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_hw_start_8168bb(ioaddr, pdev);
 +
 +      RTL_W8(MaxTxPacketSize, TxPacketMax);
 +
 +      RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
 +}
 +
 +static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
 +
 +      RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      rtl_disable_clock_request(pdev);
 +
 +      RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 +}
 +
 +static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      static const struct ephy_info e_info_8168cp[] = {
 +              { 0x01, 0,      0x0001 },
 +              { 0x02, 0x0800, 0x1000 },
 +              { 0x03, 0,      0x0042 },
 +              { 0x06, 0x0080, 0x0000 },
 +              { 0x07, 0,      0x2000 }
 +      };
 +
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
 +
 +      __rtl_hw_start_8168cp(ioaddr, pdev);
 +}
 +
 +static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 +}
 +
 +static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 +
 +      /* Magic. */
 +      RTL_W8(DBG_REG, 0x20);
 +
 +      RTL_W8(MaxTxPacketSize, TxPacketMax);
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 +}
 +
 +static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      static const struct ephy_info e_info_8168c_1[] = {
 +              { 0x02, 0x0800, 0x1000 },
 +              { 0x03, 0,      0x0002 },
 +              { 0x06, 0x0080, 0x0000 }
 +      };
 +
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
 +
 +      rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
 +
 +      __rtl_hw_start_8168cp(ioaddr, pdev);
 +}
 +
 +static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      static const struct ephy_info e_info_8168c_2[] = {
 +              { 0x01, 0,      0x0001 },
 +              { 0x03, 0x0400, 0x0220 }
 +      };
 +
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
 +
 +      __rtl_hw_start_8168cp(ioaddr, pdev);
 +}
 +
 +static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_hw_start_8168c_2(ioaddr, pdev);
 +}
 +
 +static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      __rtl_hw_start_8168cp(ioaddr, pdev);
 +}
 +
 +static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      rtl_disable_clock_request(pdev);
 +
 +      RTL_W8(MaxTxPacketSize, TxPacketMax);
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 +}
 +
 +static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_csi_access_enable_1(ioaddr);
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      RTL_W8(MaxTxPacketSize, TxPacketMax);
 +
 +      rtl_disable_clock_request(pdev);
 +}
 +
 +static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      static const struct ephy_info e_info_8168d_4[] = {
 +              { 0x0b, ~0,     0x48 },
 +              { 0x19, 0x20,   0x50 },
 +              { 0x0c, ~0,     0x20 }
 +      };
 +      int i;
 +
 +      rtl_csi_access_enable_1(ioaddr);
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      RTL_W8(MaxTxPacketSize, TxPacketMax);
 +
 +      for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
 +              const struct ephy_info *e = e_info_8168d_4 + i;
 +              u16 w;
 +
 +              w = rtl_ephy_read(ioaddr, e->offset);
 +              rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
 +      }
 +
 +      rtl_enable_clock_request(pdev);
 +}
 +
 +static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      static const struct ephy_info e_info_8168e_1[] = {
 +              { 0x00, 0x0200, 0x0100 },
 +              { 0x00, 0x0000, 0x0004 },
 +              { 0x06, 0x0002, 0x0001 },
 +              { 0x06, 0x0000, 0x0030 },
 +              { 0x07, 0x0000, 0x2000 },
 +              { 0x00, 0x0000, 0x0020 },
 +              { 0x03, 0x5800, 0x2000 },
 +              { 0x03, 0x0000, 0x0001 },
 +              { 0x01, 0x0800, 0x1000 },
 +              { 0x07, 0x0000, 0x4000 },
 +              { 0x1e, 0x0000, 0x2000 },
 +              { 0x19, 0xffff, 0xfe6c },
 +              { 0x0a, 0x0000, 0x0040 }
 +      };
 +
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      RTL_W8(MaxTxPacketSize, TxPacketMax);
 +
 +      rtl_disable_clock_request(pdev);
 +
 +      /* Reset tx FIFO pointer */
 +      RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
 +      RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
 +
 +      RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 +}
 +
 +static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      static const struct ephy_info e_info_8168e_2[] = {
 +              { 0x09, 0x0000, 0x0080 },
 +              { 0x19, 0x0000, 0x0224 }
 +      };
 +
 +      rtl_csi_access_enable_1(ioaddr);
 +
 +      rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 +      rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 +      rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
 +      rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
 +      rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
 +      rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
 +      rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
 +      rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
 +                   ERIAR_EXGMAC);
 +
 +      RTL_W8(MaxTxPacketSize, 0x27);
 +
 +      rtl_disable_clock_request(pdev);
 +
 +      RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
 +      RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
 +
 +      /* Adjust EEE LED frequency */
 +      RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
 +
 +      RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 +      RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
 +      RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 +}
 +
 +static void rtl_hw_start_8168(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      RTL_W8(Cfg9346, Cfg9346_Unlock);
 +
 +      RTL_W8(MaxTxPacketSize, TxPacketMax);
 +
 +      rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 +
 +      tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
 +
 +      RTL_W16(CPlusCmd, tp->cp_cmd);
 +
 +      RTL_W16(IntrMitigate, 0x5151);
 +
 +      /* Workaround for RxFIFO overflow. */
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_22) {
 +              tp->intr_event |= RxFIFOOver | PCSTimeout;
 +              tp->intr_event &= ~RxOverflow;
 +      }
 +
 +      rtl_set_rx_tx_desc_registers(tp, ioaddr);
 +
 +      rtl_set_rx_mode(dev);
 +
 +      RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
 +              (InterFrameGap << TxInterFrameGapShift));
 +
 +      RTL_R8(IntrMask);
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_11:
 +              rtl_hw_start_8168bb(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_12:
 +      case RTL_GIGA_MAC_VER_17:
 +              rtl_hw_start_8168bef(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_18:
 +              rtl_hw_start_8168cp_1(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_19:
 +              rtl_hw_start_8168c_1(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_20:
 +              rtl_hw_start_8168c_2(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_21:
 +              rtl_hw_start_8168c_3(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_22:
 +              rtl_hw_start_8168c_4(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_23:
 +              rtl_hw_start_8168cp_2(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_24:
 +              rtl_hw_start_8168cp_3(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_25:
 +      case RTL_GIGA_MAC_VER_26:
 +      case RTL_GIGA_MAC_VER_27:
 +              rtl_hw_start_8168d(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_28:
 +              rtl_hw_start_8168d_4(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_31:
 +              rtl_hw_start_8168dp(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_32:
 +      case RTL_GIGA_MAC_VER_33:
 +              rtl_hw_start_8168e_1(ioaddr, pdev);
 +              break;
 +      case RTL_GIGA_MAC_VER_34:
 +              rtl_hw_start_8168e_2(ioaddr, pdev);
 +              break;
 +
 +      default:
 +              printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
 +                      dev->name, tp->mac_version);
 +              break;
 +      }
 +
 +      RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
 +
 +      RTL_W8(Cfg9346, Cfg9346_Lock);
 +
 +      RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
 +
 +      RTL_W16(IntrMask, tp->intr_event);
 +}
 +
 +#define R810X_CPCMD_QUIRK_MASK (\
 +      EnableBist | \
 +      Mac_dbgo_oe | \
 +      Force_half_dup | \
 +      Force_rxflow_en | \
 +      Force_txflow_en | \
 +      Cxpl_dbg_sel | \
 +      ASF | \
 +      PktCntrDisable | \
 +      Mac_dbgo_sel)
 +
 +static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      static const struct ephy_info e_info_8102e_1[] = {
 +              { 0x01, 0, 0x6e65 },
 +              { 0x02, 0, 0x091f },
 +              { 0x03, 0, 0xc2f9 },
 +              { 0x06, 0, 0xafb5 },
 +              { 0x07, 0, 0x0e00 },
 +              { 0x19, 0, 0xec80 },
 +              { 0x01, 0, 0x2e65 },
 +              { 0x01, 0, 0x6e65 }
 +      };
 +      u8 cfg1;
 +
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      RTL_W8(DBG_REG, FIX_NAK_1);
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      RTL_W8(Config1,
 +             LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
 +      RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 +
 +      cfg1 = RTL_R8(Config1);
 +      if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
 +              RTL_W8(Config1, cfg1 & ~LEDS0);
 +
 +      rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
 +}
 +
 +static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_csi_access_enable_2(ioaddr);
 +
 +      rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
 +      RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 +}
 +
 +static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_hw_start_8102e_2(ioaddr, pdev);
 +
 +      rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
 +}
 +
 +static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      static const struct ephy_info e_info_8105e_1[] = {
 +              { 0x07, 0, 0x4000 },
 +              { 0x19, 0, 0x0200 },
 +              { 0x19, 0, 0x0020 },
 +              { 0x1e, 0, 0x2000 },
 +              { 0x03, 0, 0x0001 },
 +              { 0x19, 0, 0x0100 },
 +              { 0x19, 0, 0x0004 },
 +              { 0x0a, 0, 0x0020 }
 +      };
 +
 +      /* Force LAN exit from ASPM if Rx/Tx are not idle */
 +      RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
 +
 +      /* Disable Early Tally Counter */
 +      RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
 +
 +      RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
 +      RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 +
 +      rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
 +}
 +
 +static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +{
 +      rtl_hw_start_8105e_1(ioaddr, pdev);
 +      rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
 +}
 +
 +static void rtl_hw_start_8101(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_16) {
 +              int cap = pci_pcie_cap(pdev);
 +
 +              if (cap) {
 +                      pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
 +                                            PCI_EXP_DEVCTL_NOSNOOP_EN);
 +              }
 +      }
 +
 +      RTL_W8(Cfg9346, Cfg9346_Unlock);
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_07:
 +              rtl_hw_start_8102e_1(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_08:
 +              rtl_hw_start_8102e_3(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_09:
 +              rtl_hw_start_8102e_2(ioaddr, pdev);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_29:
 +              rtl_hw_start_8105e_1(ioaddr, pdev);
 +              break;
 +      case RTL_GIGA_MAC_VER_30:
 +              rtl_hw_start_8105e_2(ioaddr, pdev);
 +              break;
 +      }
 +
 +      RTL_W8(Cfg9346, Cfg9346_Lock);
 +
 +      RTL_W8(MaxTxPacketSize, TxPacketMax);
 +
 +      rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 +
 +      tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
 +      RTL_W16(CPlusCmd, tp->cp_cmd);
 +
 +      RTL_W16(IntrMitigate, 0x0000);
 +
 +      rtl_set_rx_tx_desc_registers(tp, ioaddr);
 +
 +      RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
 +      rtl_set_rx_tx_config_registers(tp);
 +
 +      RTL_R8(IntrMask);
 +
 +      rtl_set_rx_mode(dev);
 +
 +      RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
 +
 +      RTL_W16(IntrMask, tp->intr_event);
 +}
 +
 +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +      if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
 +              return -EINVAL;
 +
 +      dev->mtu = new_mtu;
 +      netdev_update_features(dev);
 +
 +      return 0;
 +}
 +
 +static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
 +{
 +      desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
 +      desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
 +}
 +
 +static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
 +                                   void **data_buff, struct RxDesc *desc)
 +{
 +      dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
 +                       DMA_FROM_DEVICE);
 +
 +      kfree(*data_buff);
 +      *data_buff = NULL;
 +      rtl8169_make_unusable_by_asic(desc);
 +}
 +
 +static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
 +{
 +      u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
 +
 +      desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
 +}
 +
 +static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
 +                                     u32 rx_buf_sz)
 +{
 +      desc->addr = cpu_to_le64(mapping);
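 +      /* Make sure the buffer address is visible before DescOwn is set below. */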
 +      wmb();
 +      rtl8169_mark_to_asic(desc, rx_buf_sz);
 +}
 +
 +static inline void *rtl8169_align(void *data)
 +{
 +      return (void *)ALIGN((long)data, 16);
 +}
 +
 +static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
 +                                           struct RxDesc *desc)
 +{
 +      void *data;
 +      dma_addr_t mapping;
 +      struct device *d = &tp->pci_dev->dev;
 +      struct net_device *dev = tp->dev;
 +      int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
 +
 +      data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
 +      if (!data)
 +              return NULL;
 +
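 +      /* Buffers are used 16-byte aligned (rtl8169_align). If kmalloc did
 +       * not return an aligned chunk, over-allocate and align by hand.
 +       */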
 +      if (rtl8169_align(data) != data) {
 +              kfree(data);
 +              data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
 +              if (!data)
 +                      return NULL;
 +      }
 +
 +      mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
 +                               DMA_FROM_DEVICE);
 +      if (unlikely(dma_mapping_error(d, mapping))) {
 +              if (net_ratelimit())
 +                      netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
 +              goto err_out;
 +      }
 +
 +      rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
 +      return data;
 +
 +err_out:
 +      kfree(data);
 +      return NULL;
 +}
 +
 +static void rtl8169_rx_clear(struct rtl8169_private *tp)
 +{
 +      unsigned int i;
 +
 +      for (i = 0; i < NUM_RX_DESC; i++) {
 +              if (tp->Rx_databuff[i]) {
 +                      rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
 +                                          tp->RxDescArray + i);
 +              }
 +      }
 +}
 +
 +static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
 +{
 +      desc->opts1 |= cpu_to_le32(RingEnd);
 +}
 +
 +static int rtl8169_rx_fill(struct rtl8169_private *tp)
 +{
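 +      /* Allocate a data buffer for every empty Rx slot and close the ring
 +       * by setting RingEnd on the last descriptor.
 +       */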
 +      unsigned int i;
 +
 +      for (i = 0; i < NUM_RX_DESC; i++) {
 +              void *data;
 +
 +              if (tp->Rx_databuff[i])
 +                      continue;
 +
 +              data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
 +              if (!data) {
 +                      rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
 +                      goto err_out;
 +              }
 +              tp->Rx_databuff[i] = data;
 +      }
 +
 +      rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
 +      return 0;
 +
 +err_out:
 +      rtl8169_rx_clear(tp);
 +      return -ENOMEM;
 +}
 +
 +static int rtl8169_init_ring(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      rtl8169_init_ring_indexes(tp);
 +
 +      memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
 +      memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
 +
 +      return rtl8169_rx_fill(tp);
 +}
 +
 +static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
 +                               struct TxDesc *desc)
 +{
 +      unsigned int len = tx_skb->len;
 +
 +      dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
 +
 +      desc->opts1 = 0x00;
 +      desc->opts2 = 0x00;
 +      desc->addr = 0x00;
 +      tx_skb->len = 0;
 +}
 +
 +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
 +                                 unsigned int n)
 +{
 +      unsigned int i;
 +
 +      for (i = 0; i < n; i++) {
 +              unsigned int entry = (start + i) % NUM_TX_DESC;
 +              struct ring_info *tx_skb = tp->tx_skb + entry;
 +              unsigned int len = tx_skb->len;
 +
 +              if (len) {
 +                      struct sk_buff *skb = tx_skb->skb;
 +
 +                      rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
 +                                           tp->TxDescArray + entry);
 +                      if (skb) {
 +                              tp->dev->stats.tx_dropped++;
 +                              dev_kfree_skb(skb);
 +                              tx_skb->skb = NULL;
 +                      }
 +              }
 +      }
 +}
 +
 +static void rtl8169_tx_clear(struct rtl8169_private *tp)
 +{
 +      rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
 +      tp->cur_tx = tp->dirty_tx = 0;
 +}
 +
 +static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      PREPARE_DELAYED_WORK(&tp->task, task);
 +      schedule_delayed_work(&tp->task, 4);
 +}
 +
 +static void rtl8169_wait_for_quiescence(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      synchronize_irq(dev->irq);
 +
 +      /* Wait for any pending NAPI task to complete */
 +      napi_disable(&tp->napi);
 +
 +      rtl8169_irq_mask_and_ack(ioaddr);
 +
 +      tp->intr_mask = 0xffff;
 +      RTL_W16(IntrMask, tp->intr_event);
 +      napi_enable(&tp->napi);
 +}
 +
 +static void rtl8169_reinit_task(struct work_struct *work)
 +{
 +      struct rtl8169_private *tp =
 +              container_of(work, struct rtl8169_private, task.work);
 +      struct net_device *dev = tp->dev;
 +      int ret;
 +
 +      rtnl_lock();
 +
 +      if (!netif_running(dev))
 +              goto out_unlock;
 +
 +      rtl8169_wait_for_quiescence(dev);
 +      rtl8169_close(dev);
 +
 +      ret = rtl8169_open(dev);
 +      if (unlikely(ret < 0)) {
 +              if (net_ratelimit())
 +                      netif_err(tp, drv, dev,
 +                                "reinit failure (status = %d). Rescheduling\n",
 +                                ret);
 +              rtl8169_schedule_work(dev, rtl8169_reinit_task);
 +      }
 +
 +out_unlock:
 +      rtnl_unlock();
 +}
 +
 +static void rtl8169_reset_task(struct work_struct *work)
 +{
 +      struct rtl8169_private *tp =
 +              container_of(work, struct rtl8169_private, task.work);
 +      struct net_device *dev = tp->dev;
 +      int i;
 +
 +      rtnl_lock();
 +
 +      if (!netif_running(dev))
 +              goto out_unlock;
 +
 +      rtl8169_wait_for_quiescence(dev);
 +
 +      for (i = 0; i < NUM_RX_DESC; i++)
 +              rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
 +
 +      rtl8169_tx_clear(tp);
 +
 +      rtl8169_hw_reset(tp);
 +      rtl_hw_start(dev);
 +      netif_wake_queue(dev);
 +      rtl8169_check_link_status(dev, tp, tp->mmio_addr);
 +
 +out_unlock:
 +      rtnl_unlock();
 +}
 +
 +static void rtl8169_tx_timeout(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      rtl8169_hw_reset(tp);
 +
 +      /* Let's wait a bit so that any pending (async) irq can land */
 +      rtl8169_schedule_work(dev, rtl8169_reset_task);
 +}
 +
 +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 +                            u32 *opts)
 +{
 +      struct skb_shared_info *info = skb_shinfo(skb);
 +      unsigned int cur_frag, entry;
 +      struct TxDesc * uninitialized_var(txd);
 +      struct device *d = &tp->pci_dev->dev;
 +
 +      entry = tp->cur_tx;
 +      for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
 +              skb_frag_t *frag = info->frags + cur_frag;
 +              dma_addr_t mapping;
 +              u32 status, len;
 +              void *addr;
 +
 +              entry = (entry + 1) % NUM_TX_DESC;
 +
 +              txd = tp->TxDescArray + entry;
 +              len = frag->size;
 +              addr = skb_frag_address(frag);
 +              mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
 +              if (unlikely(dma_mapping_error(d, mapping))) {
 +                      if (net_ratelimit())
 +                              netif_err(tp, drv, tp->dev,
 +                                        "Failed to map TX fragments DMA!\n");
 +                      goto err_out;
 +              }
 +
 +              /* Anti gcc 2.95.3 bugware (sic) */
 +              status = opts[0] | len |
 +                      (RingEnd * !((entry + 1) % NUM_TX_DESC));
 +
 +              txd->opts1 = cpu_to_le32(status);
 +              txd->opts2 = cpu_to_le32(opts[1]);
 +              txd->addr = cpu_to_le64(mapping);
 +
 +              tp->tx_skb[entry].len = len;
 +      }
 +
 +      if (cur_frag) {
 +              tp->tx_skb[entry].skb = skb;
 +              txd->opts1 |= cpu_to_le32(LastFrag);
 +      }
 +
 +      return cur_frag;
 +
 +err_out:
 +      rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
 +      return -EIO;
 +}
 +
 +static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
 +                                  struct sk_buff *skb, u32 *opts)
 +{
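 +      /* Fill the offload bits: TD_LSO plus the MSS for GSO packets,
 +       * otherwise the TCP/UDP checksum bits for CHECKSUM_PARTIAL skbs.
 +       */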
 +      const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
 +      u32 mss = skb_shinfo(skb)->gso_size;
 +      int offset = info->opts_offset;
 +
 +      if (mss) {
 +              opts[0] |= TD_LSO;
 +              opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
 +      } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 +              const struct iphdr *ip = ip_hdr(skb);
 +
 +              if (ip->protocol == IPPROTO_TCP)
 +                      opts[offset] |= info->checksum.tcp;
 +              else if (ip->protocol == IPPROTO_UDP)
 +                      opts[offset] |= info->checksum.udp;
 +              else
 +                      WARN_ON_ONCE(1);
 +      }
 +}
 +
 +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 +                                    struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      unsigned int entry = tp->cur_tx % NUM_TX_DESC;
 +      struct TxDesc *txd = tp->TxDescArray + entry;
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct device *d = &tp->pci_dev->dev;
 +      dma_addr_t mapping;
 +      u32 status, len;
 +      u32 opts[2];
 +      int frags;
 +
 +      if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
 +              netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 +              goto err_stop_0;
 +      }
 +
 +      if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
 +              goto err_stop_0;
 +
 +      len = skb_headlen(skb);
 +      mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
 +      if (unlikely(dma_mapping_error(d, mapping))) {
 +              if (net_ratelimit())
 +                      netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
 +              goto err_dma_0;
 +      }
 +
 +      tp->tx_skb[entry].len = len;
 +      txd->addr = cpu_to_le64(mapping);
 +
 +      opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
 +      opts[0] = DescOwn;
 +
 +      rtl8169_tso_csum(tp, skb, opts);
 +
 +      frags = rtl8169_xmit_frags(tp, skb, opts);
 +      if (frags < 0)
 +              goto err_dma_1;
 +      else if (frags)
 +              opts[0] |= FirstFrag;
 +      else {
 +              opts[0] |= FirstFrag | LastFrag;
 +              tp->tx_skb[entry].skb = skb;
 +      }
 +
 +      txd->opts2 = cpu_to_le32(opts[1]);
 +
 +      wmb();
 +
 +      /* Anti gcc 2.95.3 bugware (sic) */
 +      status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
 +      txd->opts1 = cpu_to_le32(status);
 +
 +      tp->cur_tx += frags + 1;
 +
 +      wmb();
 +
 +      RTL_W8(TxPoll, NPQ);
 +
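 +      /* Stop the queue when a maximally fragmented skb may no longer fit;
 +       * re-check after stopping so a racing Tx completion is not missed.
 +       */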
 +      if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
 +              netif_stop_queue(dev);
 +              smp_rmb();
 +              if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
 +                      netif_wake_queue(dev);
 +      }
 +
 +      return NETDEV_TX_OK;
 +
 +err_dma_1:
 +      rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
 +err_dma_0:
 +      dev_kfree_skb(skb);
 +      dev->stats.tx_dropped++;
 +      return NETDEV_TX_OK;
 +
 +err_stop_0:
 +      netif_stop_queue(dev);
 +      dev->stats.tx_dropped++;
 +      return NETDEV_TX_BUSY;
 +}
 +
 +static void rtl8169_pcierr_interrupt(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      struct pci_dev *pdev = tp->pci_dev;
 +      u16 pci_status, pci_cmd;
 +
 +      pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
 +      pci_read_config_word(pdev, PCI_STATUS, &pci_status);
 +
 +      netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
 +                pci_cmd, pci_status);
 +
 +      /*
 +       * The recovery sequence below admits a very elaborate explanation:
 +       * - it seems to work;
 +       * - I did not see what else could be done;
 +       * - it makes iop3xx happy.
 +       *
 +       * Feel free to adjust to your needs.
 +       */
 +      if (pdev->broken_parity_status)
 +              pci_cmd &= ~PCI_COMMAND_PARITY;
 +      else
 +              pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
 +
 +      pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
 +
 +      pci_write_config_word(pdev, PCI_STATUS,
 +              pci_status & (PCI_STATUS_DETECTED_PARITY |
 +              PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
 +              PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
 +
 +      /* The infamous DAC f*ckup only happens at boot time */
 +      if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
 +              void __iomem *ioaddr = tp->mmio_addr;
 +
 +              netif_info(tp, intr, dev, "disabling PCI DAC\n");
 +              tp->cp_cmd &= ~PCIDAC;
 +              RTL_W16(CPlusCmd, tp->cp_cmd);
 +              dev->features &= ~NETIF_F_HIGHDMA;
 +      }
 +
 +      rtl8169_hw_reset(tp);
 +
 +      rtl8169_schedule_work(dev, rtl8169_reinit_task);
 +}
 +
 +static void rtl8169_tx_interrupt(struct net_device *dev,
 +                               struct rtl8169_private *tp,
 +                               void __iomem *ioaddr)
 +{
 +      unsigned int dirty_tx, tx_left;
 +
 +      dirty_tx = tp->dirty_tx;
 +      smp_rmb();
 +      tx_left = tp->cur_tx - dirty_tx;
 +
 +      while (tx_left > 0) {
 +              unsigned int entry = dirty_tx % NUM_TX_DESC;
 +              struct ring_info *tx_skb = tp->tx_skb + entry;
 +              u32 status;
 +
 +              rmb();
 +              status = le32_to_cpu(tp->TxDescArray[entry].opts1);
 +              if (status & DescOwn)
 +                      break;
 +
 +              rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
 +                                   tp->TxDescArray + entry);
 +              if (status & LastFrag) {
 +                      dev->stats.tx_packets++;
 +                      dev->stats.tx_bytes += tx_skb->skb->len;
 +                      dev_kfree_skb(tx_skb->skb);
 +                      tx_skb->skb = NULL;
 +              }
 +              dirty_tx++;
 +              tx_left--;
 +      }
 +
 +      if (tp->dirty_tx != dirty_tx) {
 +              tp->dirty_tx = dirty_tx;
 +              smp_wmb();
 +              if (netif_queue_stopped(dev) &&
 +                  (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
 +                      netif_wake_queue(dev);
 +              }
 +              /*
 +               * 8168 hack: TxPoll requests are lost when the Tx packets are
 +               * too close. Let's kick an extra TxPoll request when a burst
 +               * of start_xmit activity is detected (if it is not detected,
 +               * it is slow enough). -- FR
 +               */
 +              smp_rmb();
 +              if (tp->cur_tx != dirty_tx)
 +                      RTL_W8(TxPoll, NPQ);
 +      }
 +}
 +
 +static inline int rtl8169_fragmented_frame(u32 status)
 +{
 +      return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
 +}
 +
 +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
 +{
 +      u32 status = opts1 & RxProtoMask;
 +
 +      if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
 +          ((status == RxProtoUDP) && !(opts1 & UDPFail)))
 +              skb->ip_summed = CHECKSUM_UNNECESSARY;
 +      else
 +              skb_checksum_none_assert(skb);
 +}
 +
 +static struct sk_buff *rtl8169_try_rx_copy(void *data,
 +                                         struct rtl8169_private *tp,
 +                                         int pkt_size,
 +                                         dma_addr_t addr)
 +{
 +      struct sk_buff *skb;
 +      struct device *d = &tp->pci_dev->dev;
 +
 +      data = rtl8169_align(data);
 +      dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
 +      prefetch(data);
 +      skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 +      if (skb)
 +              memcpy(skb->data, data, pkt_size);
 +      dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
 +
 +      return skb;
 +}
 +
 +static int rtl8169_rx_interrupt(struct net_device *dev,
 +                              struct rtl8169_private *tp,
 +                              void __iomem *ioaddr, u32 budget)
 +{
 +      unsigned int cur_rx, rx_left;
 +      unsigned int count;
 +
 +      cur_rx = tp->cur_rx;
 +      rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
 +      rx_left = min(rx_left, budget);
 +
 +      for (; rx_left > 0; rx_left--, cur_rx++) {
 +              unsigned int entry = cur_rx % NUM_RX_DESC;
 +              struct RxDesc *desc = tp->RxDescArray + entry;
 +              u32 status;
 +
 +              rmb();
++              status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
 +
 +              if (status & DescOwn)
 +                      break;
 +              if (unlikely(status & RxRES)) {
 +                      netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
 +                                 status);
 +                      dev->stats.rx_errors++;
 +                      if (status & (RxRWT | RxRUNT))
 +                              dev->stats.rx_length_errors++;
 +                      if (status & RxCRC)
 +                              dev->stats.rx_crc_errors++;
 +                      if (status & RxFOVF) {
 +                              rtl8169_schedule_work(dev, rtl8169_reset_task);
 +                              dev->stats.rx_fifo_errors++;
 +                      }
 +                      rtl8169_mark_to_asic(desc, rx_buf_sz);
 +              } else {
 +                      struct sk_buff *skb;
 +                      dma_addr_t addr = le64_to_cpu(desc->addr);
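 +                      /* The length reported by the chip includes the 4-byte CRC. */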
 +                      int pkt_size = (status & 0x00001FFF) - 4;
 +
 +                      /*
 +                       * The driver does not support incoming fragmented
 +                       * frames. They are seen as a symptom of over-mtu
 +                       * sized frames.
 +                       */
 +                      if (unlikely(rtl8169_fragmented_frame(status))) {
 +                              dev->stats.rx_dropped++;
 +                              dev->stats.rx_length_errors++;
 +                              rtl8169_mark_to_asic(desc, rx_buf_sz);
 +                              continue;
 +                      }
 +
 +                      skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
 +                                                tp, pkt_size, addr);
 +                      rtl8169_mark_to_asic(desc, rx_buf_sz);
 +                      if (!skb) {
 +                              dev->stats.rx_dropped++;
 +                              continue;
 +                      }
 +
 +                      rtl8169_rx_csum(skb, status);
 +                      skb_put(skb, pkt_size);
 +                      skb->protocol = eth_type_trans(skb, dev);
 +
 +                      rtl8169_rx_vlan_tag(desc, skb);
 +
 +                      napi_gro_receive(&tp->napi, skb);
 +
 +                      dev->stats.rx_bytes += pkt_size;
 +                      dev->stats.rx_packets++;
 +              }
 +
 +              /* Workaround for AMD platform. */
 +              if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
 +                  (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
 +                      desc->opts2 = 0;
 +                      cur_rx++;
 +              }
 +      }
 +
 +      count = cur_rx - tp->cur_rx;
 +      tp->cur_rx = cur_rx;
 +
 +      tp->dirty_rx += count;
 +
 +      return count;
 +}
 +
 +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 +{
 +      struct net_device *dev = dev_instance;
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      int handled = 0;
 +      int status;
 +
 +      /* Loop handling interrupts until we have no new ones or
 +       * we hit an invalid/hotplug case.
 +       */
 +      status = RTL_R16(IntrStatus);
 +      while (status && status != 0xffff) {
 +              handled = 1;
 +
 +              /* Handle all of the error cases first. These will reset
 +               * the chip, so just exit the loop.
 +               */
 +              if (unlikely(!netif_running(dev))) {
 +                      rtl8169_hw_reset(tp);
 +                      break;
 +              }
 +
 +              if (unlikely(status & RxFIFOOver)) {
 +                      switch (tp->mac_version) {
 +                      /* Workaround for Rx FIFO overflow */
 +                      case RTL_GIGA_MAC_VER_11:
 +                      case RTL_GIGA_MAC_VER_22:
 +                      case RTL_GIGA_MAC_VER_26:
 +                              netif_stop_queue(dev);
 +                              rtl8169_tx_timeout(dev);
 +                              goto done;
 +                      /* Testers needed. */
 +                      case RTL_GIGA_MAC_VER_17:
 +                      case RTL_GIGA_MAC_VER_19:
 +                      case RTL_GIGA_MAC_VER_20:
 +                      case RTL_GIGA_MAC_VER_21:
 +                      case RTL_GIGA_MAC_VER_23:
 +                      case RTL_GIGA_MAC_VER_24:
 +                      case RTL_GIGA_MAC_VER_27:
 +                      case RTL_GIGA_MAC_VER_28:
 +                      case RTL_GIGA_MAC_VER_31:
 +                      /* Experimental science. Pktgen proof. */
 +                      case RTL_GIGA_MAC_VER_12:
 +                      case RTL_GIGA_MAC_VER_25:
 +                              if (status == RxFIFOOver)
 +                                      goto done;
 +                              break;
 +                      default:
 +                              break;
 +                      }
 +              }
 +
 +              if (unlikely(status & SYSErr)) {
 +                      rtl8169_pcierr_interrupt(dev);
 +                      break;
 +              }
 +
 +              if (status & LinkChg)
 +                      __rtl8169_check_link_status(dev, tp, ioaddr, true);
 +
 +              /* We need to see the latest version of tp->intr_mask to
 +               * avoid ignoring an MSI interrupt and having to wait for
 +               * another event which may never come.
 +               */
 +              smp_rmb();
 +              if (status & tp->intr_mask & tp->napi_event) {
 +                      RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
 +                      tp->intr_mask = ~tp->napi_event;
 +
 +                      if (likely(napi_schedule_prep(&tp->napi)))
 +                              __napi_schedule(&tp->napi);
 +                      else
 +                              netif_info(tp, intr, dev,
 +                                         "interrupt %04x in poll\n", status);
 +              }
 +
 +              /* We only get a new MSI interrupt when all active irq
 +               * sources on the chip have been acknowledged. So, ack
 +               * everything we've seen and check if new sources have become
 +               * active to avoid blocking all interrupts from the chip.
 +               */
 +              RTL_W16(IntrStatus,
 +                      (status & RxFIFOOver) ? (status | RxOverflow) : status);
 +              status = RTL_R16(IntrStatus);
 +      }
 +done:
 +      return IRQ_RETVAL(handled);
 +}
 +
 +static int rtl8169_poll(struct napi_struct *napi, int budget)
 +{
 +      struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
 +      struct net_device *dev = tp->dev;
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      int work_done;
 +
 +      work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
 +      rtl8169_tx_interrupt(dev, tp, ioaddr);
 +
 +      if (work_done < budget) {
 +              napi_complete(napi);
 +
 +              /* We need to force the visibility of tp->intr_mask
 +               * for other CPUs, as we can lose an MSI interrupt
 +               * and potentially wait for a retransmit timeout if we don't.
 +               * The posted write to IntrMask is safe, as it will
 +               * eventually make it to the chip and we won't lose anything
 +               * until it does.
 +               */
 +              tp->intr_mask = 0xffff;
 +              wmb();
 +              RTL_W16(IntrMask, tp->intr_event);
 +      }
 +
 +      return work_done;
 +}
 +
 +static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      if (tp->mac_version > RTL_GIGA_MAC_VER_06)
 +              return;
 +
 +      dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
 +      RTL_W32(RxMissed, 0);
 +}
 +
 +static void rtl8169_down(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      del_timer_sync(&tp->timer);
 +
 +      netif_stop_queue(dev);
 +
 +      napi_disable(&tp->napi);
 +
 +      spin_lock_irq(&tp->lock);
 +
 +      rtl8169_hw_reset(tp);
 +      /*
 +       * At this point device interrupts can not be enabled in any function,
 +       * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
 +       * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
 +       */
 +      rtl8169_rx_missed(dev, ioaddr);
 +
 +      spin_unlock_irq(&tp->lock);
 +
 +      synchronize_irq(dev->irq);
 +
 +      /* Give a racing hard_start_xmit a few cycles to complete. */
 +      synchronize_sched();  /* FIXME: should this be synchronize_irq()? */
 +
 +      rtl8169_tx_clear(tp);
 +
 +      rtl8169_rx_clear(tp);
 +
 +      rtl_pll_power_down(tp);
 +}
 +
 +static int rtl8169_close(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      pm_runtime_get_sync(&pdev->dev);
 +
 +      /* Update counters before going down */
 +      rtl8169_update_counters(dev);
 +
 +      rtl8169_down(dev);
 +
 +      free_irq(dev->irq, dev);
 +
 +      dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
 +                        tp->RxPhyAddr);
 +      dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
 +                        tp->TxPhyAddr);
 +      tp->TxDescArray = NULL;
 +      tp->RxDescArray = NULL;
 +
 +      pm_runtime_put_sync(&pdev->dev);
 +
 +      return 0;
 +}
 +
 +static void rtl_set_rx_mode(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      unsigned long flags;
 +      u32 mc_filter[2];       /* Multicast hash filter */
 +      int rx_mode;
 +      u32 tmp = 0;
 +
 +      if (dev->flags & IFF_PROMISC) {
 +              /* Unconditionally log net taps. */
 +              netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
 +              rx_mode =
 +                  AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 +                  AcceptAllPhys;
 +              mc_filter[1] = mc_filter[0] = 0xffffffff;
 +      } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
 +                 (dev->flags & IFF_ALLMULTI)) {
 +              /* Too many to filter perfectly -- accept all multicasts. */
 +              rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 +              mc_filter[1] = mc_filter[0] = 0xffffffff;
 +      } else {
 +              struct netdev_hw_addr *ha;
 +
 +              rx_mode = AcceptBroadcast | AcceptMyPhys;
 +              mc_filter[1] = mc_filter[0] = 0;
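 +              /* Hash each address into the 64-bit multicast filter: the
 +               * bit index is the top 6 bits of the Ethernet CRC.
 +               */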
 +              netdev_for_each_mc_addr(ha, dev) {
 +                      int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 +                      mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 +                      rx_mode |= AcceptMulticast;
 +              }
 +      }
 +
 +      spin_lock_irqsave(&tp->lock, flags);
 +
 +      tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
 +
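 +      /* mac_version > 06 chips take the two filter dwords swapped and
 +       * byte-reversed.
 +       */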
 +      if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
 +              u32 data = mc_filter[0];
 +
 +              mc_filter[0] = swab32(mc_filter[1]);
 +              mc_filter[1] = swab32(data);
 +      }
 +
 +      RTL_W32(MAR0 + 4, mc_filter[1]);
 +      RTL_W32(MAR0 + 0, mc_filter[0]);
 +
 +      RTL_W32(RxConfig, tmp);
 +
 +      spin_unlock_irqrestore(&tp->lock, flags);
 +}
 +
 +/**
 + *  rtl8169_get_stats - Get rtl8169 read/write statistics
 + *  @dev: The Ethernet Device to get statistics for
 + *
 + *  Get TX/RX statistics for rtl8169
 + */
 +static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      unsigned long flags;
 +
 +      if (netif_running(dev)) {
 +              spin_lock_irqsave(&tp->lock, flags);
 +              rtl8169_rx_missed(dev, ioaddr);
 +              spin_unlock_irqrestore(&tp->lock, flags);
 +      }
 +
 +      return &dev->stats;
 +}
 +
 +static void rtl8169_net_suspend(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      if (!netif_running(dev))
 +              return;
 +
 +      rtl_pll_power_down(tp);
 +
 +      netif_device_detach(dev);
 +      netif_stop_queue(dev);
 +}
 +
 +#ifdef CONFIG_PM
 +
 +static int rtl8169_suspend(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +
 +      rtl8169_net_suspend(dev);
 +
 +      return 0;
 +}
 +
 +static void __rtl8169_resume(struct net_device *dev)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      netif_device_attach(dev);
 +
 +      rtl_pll_power_up(tp);
 +
 +      rtl8169_schedule_work(dev, rtl8169_reset_task);
 +}
 +
 +static int rtl8169_resume(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      rtl8169_init_phy(dev, tp);
 +
 +      if (netif_running(dev))
 +              __rtl8169_resume(dev);
 +
 +      return 0;
 +}
 +
 +static int rtl8169_runtime_suspend(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      if (!tp->TxDescArray)
 +              return 0;
 +
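 +      /* Save the current WoL configuration and arm wake-on-anything for
 +       * the duration of the runtime suspend.
 +       */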
 +      spin_lock_irq(&tp->lock);
 +      tp->saved_wolopts = __rtl8169_get_wol(tp);
 +      __rtl8169_set_wol(tp, WAKE_ANY);
 +      spin_unlock_irq(&tp->lock);
 +
 +      rtl8169_net_suspend(dev);
 +
 +      return 0;
 +}
 +
 +static int rtl8169_runtime_resume(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      if (!tp->TxDescArray)
 +              return 0;
 +
 +      spin_lock_irq(&tp->lock);
 +      __rtl8169_set_wol(tp, tp->saved_wolopts);
 +      tp->saved_wolopts = 0;
 +      spin_unlock_irq(&tp->lock);
 +
 +      rtl8169_init_phy(dev, tp);
 +
 +      __rtl8169_resume(dev);
 +
 +      return 0;
 +}
 +
 +static int rtl8169_runtime_idle(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      return tp->TxDescArray ? -EBUSY : 0;
 +}
 +
 +static const struct dev_pm_ops rtl8169_pm_ops = {
 +      .suspend                = rtl8169_suspend,
 +      .resume                 = rtl8169_resume,
 +      .freeze                 = rtl8169_suspend,
 +      .thaw                   = rtl8169_resume,
 +      .poweroff               = rtl8169_suspend,
 +      .restore                = rtl8169_resume,
 +      .runtime_suspend        = rtl8169_runtime_suspend,
 +      .runtime_resume         = rtl8169_runtime_resume,
 +      .runtime_idle           = rtl8169_runtime_idle,
 +};
 +
 +#define RTL8169_PM_OPS        (&rtl8169_pm_ops)
 +
 +#else /* !CONFIG_PM */
 +
 +#define RTL8169_PM_OPS        NULL
 +
 +#endif /* !CONFIG_PM */
 +
 +static void rtl_shutdown(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      rtl8169_net_suspend(dev);
 +
 +      /* Restore original MAC address */
 +      rtl_rar_set(tp, dev->perm_addr);
 +
 +      spin_lock_irq(&tp->lock);
 +
 +      rtl8169_hw_reset(tp);
 +
 +      spin_unlock_irq(&tp->lock);
 +
 +      if (system_state == SYSTEM_POWER_OFF) {
 +              /* WoL fails with 8168b when the receiver is disabled. */
 +              if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
 +                   tp->mac_version == RTL_GIGA_MAC_VER_12 ||
 +                   tp->mac_version == RTL_GIGA_MAC_VER_17) &&
 +                  (tp->features & RTL_FEATURE_WOL)) {
 +                      pci_clear_master(pdev);
 +
 +                      RTL_W8(ChipCmd, CmdRxEnb);
 +                      /* PCI commit */
 +                      RTL_R8(ChipCmd);
 +              }
 +
 +              pci_wake_from_d3(pdev, true);
 +              pci_set_power_state(pdev, PCI_D3hot);
 +      }
 +}
 +
 +static struct pci_driver rtl8169_pci_driver = {
 +      .name           = MODULENAME,
 +      .id_table       = rtl8169_pci_tbl,
 +      .probe          = rtl8169_init_one,
 +      .remove         = __devexit_p(rtl8169_remove_one),
 +      .shutdown       = rtl_shutdown,
 +      .driver.pm      = RTL8169_PM_OPS,
 +};
 +
 +static int __init rtl8169_init_module(void)
 +{
 +      return pci_register_driver(&rtl8169_pci_driver);
 +}
 +
 +static void __exit rtl8169_cleanup_module(void)
 +{
 +      pci_unregister_driver(&rtl8169_pci_driver);
 +}
 +
 +module_init(rtl8169_init_module);
 +module_exit(rtl8169_cleanup_module);
index bf2404a,0000000..4479a45
mode 100644,000000..100644
--- /dev/null
@@@ -1,1960 -1,0 +1,1961 @@@
 +/*
 + *  SuperH Ethernet device driver
 + *
 + *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 + *  Copyright (C) 2008-2009 Renesas Solutions Corp.
 + *
 + *  This program is free software; you can redistribute it and/or modify it
 + *  under the terms and conditions of the GNU General Public License,
 + *  version 2, as published by the Free Software Foundation.
 + *
 + *  This program is distributed in the hope it will be useful, but WITHOUT
 + *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 + *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 + *  more details.
 + *  You should have received a copy of the GNU General Public License along with
 + *  this program; if not, write to the Free Software Foundation, Inc.,
 + *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 + *
 + *  The full GNU General Public License is included in this distribution in
 + *  the file called "COPYING".
 + */
 +
 +#include <linux/init.h>
 +#include <linux/interrupt.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/etherdevice.h>
 +#include <linux/delay.h>
 +#include <linux/platform_device.h>
 +#include <linux/mdio-bitbang.h>
 +#include <linux/netdevice.h>
 +#include <linux/phy.h>
 +#include <linux/cache.h>
 +#include <linux/io.h>
++#include <linux/interrupt.h>
 +#include <linux/pm_runtime.h>
 +#include <linux/slab.h>
 +#include <linux/ethtool.h>
 +
 +#include "sh_eth.h"
 +
 +#define SH_ETH_DEF_MSG_ENABLE \
 +              (NETIF_MSG_LINK | \
 +              NETIF_MSG_TIMER | \
 +              NETIF_MSG_RX_ERR | \
 +              NETIF_MSG_TX_ERR)
 +
 +/* There is CPU dependent code */
 +#if defined(CONFIG_CPU_SUBTYPE_SH7724)
 +#define SH_ETH_RESET_DEFAULT  1
 +static void sh_eth_set_duplex(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      if (mdp->duplex) /* Full */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
 +      else            /* Half */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
 +}
 +
 +static void sh_eth_set_rate(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      switch (mdp->speed) {
 +      case 10: /* 10BASE */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
 +              break;
 +      case 100:/* 100BASE */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +/* SH7724 */
 +static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 +      .set_duplex     = sh_eth_set_duplex,
 +      .set_rate       = sh_eth_set_rate,
 +
 +      .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
 +      .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
 +      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
 +
 +      .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
 +      .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
 +                        EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
 +      .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 +
 +      .apr            = 1,
 +      .mpr            = 1,
 +      .tpauser        = 1,
 +      .hw_swap        = 1,
 +      .rpadir         = 1,
 +      .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
 +};
 +#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
 +#define SH_ETH_HAS_BOTH_MODULES       1
 +#define SH_ETH_HAS_TSU        1
 +static void sh_eth_set_duplex(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      if (mdp->duplex) /* Full */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
 +      else            /* Half */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
 +}
 +
 +static void sh_eth_set_rate(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      switch (mdp->speed) {
 +      case 10: /* 10BASE */
 +              sh_eth_write(ndev, 0, RTRATE);
 +              break;
 +      case 100:/* 100BASE */
 +              sh_eth_write(ndev, 1, RTRATE);
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +/* SH7757 */
 +static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 +      .set_duplex             = sh_eth_set_duplex,
 +      .set_rate               = sh_eth_set_rate,
 +
 +      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 +      .rmcr_value     = 0x00000001,
 +
 +      .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
 +      .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
 +                        EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
 +      .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 +
 +      .apr            = 1,
 +      .mpr            = 1,
 +      .tpauser        = 1,
 +      .hw_swap        = 1,
 +      .no_ade         = 1,
 +      .rpadir         = 1,
 +      .rpadir_value   = 2 << 16,
 +};
 +
 +#define SH_GIGA_ETH_BASE      0xfee00000
 +#define GIGA_MALR(port)               (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
 +#define GIGA_MAHR(port)               (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
 +static void sh_eth_chip_reset_giga(struct net_device *ndev)
 +{
 +      int i;
 +      unsigned long mahr[2], malr[2];
 +
 +      /* save MAHR and MALR */
 +      for (i = 0; i < 2; i++) {
 +              malr[i] = readl(GIGA_MALR(i));
 +              mahr[i] = readl(GIGA_MAHR(i));
 +      }
 +
 +      /* reset device */
 +      writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800);
 +      mdelay(1);
 +
 +      /* restore MAHR and MALR */
 +      for (i = 0; i < 2; i++) {
 +              writel(malr[i], GIGA_MALR(i));
 +              writel(mahr[i], GIGA_MAHR(i));
 +      }
 +}
 +
 +static int sh_eth_is_gether(struct sh_eth_private *mdp);
 +static void sh_eth_reset(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      int cnt = 100;
 +
 +      if (sh_eth_is_gether(mdp)) {
 +              sh_eth_write(ndev, 0x03, EDSR);
 +              sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
 +                              EDMR);
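 +              /* Wait up to ~100 ms for the EDMR reset bits to self-clear. */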
 +              while (cnt > 0) {
 +                      if (!(sh_eth_read(ndev, EDMR) & 0x3))
 +                              break;
 +                      mdelay(1);
 +                      cnt--;
 +              }
 +              if (cnt == 0)
 +                      printk(KERN_ERR "Device reset failed\n");
 +
 +              /* Table Init */
 +              sh_eth_write(ndev, 0x0, TDLAR);
 +              sh_eth_write(ndev, 0x0, TDFAR);
 +              sh_eth_write(ndev, 0x0, TDFXR);
 +              sh_eth_write(ndev, 0x0, TDFFR);
 +              sh_eth_write(ndev, 0x0, RDLAR);
 +              sh_eth_write(ndev, 0x0, RDFAR);
 +              sh_eth_write(ndev, 0x0, RDFXR);
 +              sh_eth_write(ndev, 0x0, RDFFR);
 +      } else {
 +              sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
 +                              EDMR);
 +              mdelay(3);
 +              sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
 +                              EDMR);
 +      }
 +}
 +
 +static void sh_eth_set_duplex_giga(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      if (mdp->duplex) /* Full */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
 +      else            /* Half */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
 +}
 +
 +static void sh_eth_set_rate_giga(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      switch (mdp->speed) {
 +      case 10: /* 10BASE */
 +              sh_eth_write(ndev, 0x00000000, GECMR);
 +              break;
 +      case 100:/* 100BASE */
 +              sh_eth_write(ndev, 0x00000010, GECMR);
 +              break;
 +      case 1000: /* 1000BASE */
 +              sh_eth_write(ndev, 0x00000020, GECMR);
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +/* SH7757(GETHERC) */
 +static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
 +      .chip_reset     = sh_eth_chip_reset_giga,
 +      .set_duplex     = sh_eth_set_duplex_giga,
 +      .set_rate       = sh_eth_set_rate_giga,
 +
 +      .ecsr_value     = ECSR_ICD | ECSR_MPD,
 +      .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 +      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 +
 +      .tx_check       = EESR_TC1 | EESR_FTC,
 +      .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
 +                        EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
 +                        EESR_ECI,
 +      .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
 +                        EESR_TFE,
 +      .fdr_value      = 0x0000072f,
 +      .rmcr_value     = 0x00000001,
 +
 +      .apr            = 1,
 +      .mpr            = 1,
 +      .tpauser        = 1,
 +      .bculr          = 1,
 +      .hw_swap        = 1,
 +      .rpadir         = 1,
 +      .rpadir_value   = 2 << 16,
 +      .no_trimd       = 1,
 +      .no_ade         = 1,
 +};
 +
 +static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
 +{
 +      if (sh_eth_is_gether(mdp))
 +              return &sh_eth_my_cpu_data_giga;
 +      else
 +              return &sh_eth_my_cpu_data;
 +}
 +
 +#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
 +#define SH_ETH_HAS_TSU        1
 +static void sh_eth_chip_reset(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      /* reset device */
 +      sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
 +      mdelay(1);
 +}
 +
 +static void sh_eth_reset(struct net_device *ndev)
 +{
 +      int cnt = 100;
 +
 +      sh_eth_write(ndev, EDSR_ENALL, EDSR);
 +      sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
 +      while (cnt > 0) {
 +              if (!(sh_eth_read(ndev, EDMR) & 0x3))
 +                      break;
 +              mdelay(1);
 +              cnt--;
 +      }
 +      if (cnt == 0)
 +              printk(KERN_ERR "Device reset failed\n");
 +
 +      /* Table Init */
 +      sh_eth_write(ndev, 0x0, TDLAR);
 +      sh_eth_write(ndev, 0x0, TDFAR);
 +      sh_eth_write(ndev, 0x0, TDFXR);
 +      sh_eth_write(ndev, 0x0, TDFFR);
 +      sh_eth_write(ndev, 0x0, RDLAR);
 +      sh_eth_write(ndev, 0x0, RDFAR);
 +      sh_eth_write(ndev, 0x0, RDFXR);
 +      sh_eth_write(ndev, 0x0, RDFFR);
 +}
 +
 +static void sh_eth_set_duplex(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      if (mdp->duplex) /* Full */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
 +      else            /* Half */
 +              sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
 +}
 +
 +static void sh_eth_set_rate(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      switch (mdp->speed) {
 +      case 10: /* 10BASE */
 +              sh_eth_write(ndev, GECMR_10, GECMR);
 +              break;
 +      case 100:/* 100BASE */
 +              sh_eth_write(ndev, GECMR_100, GECMR);
 +              break;
 +      case 1000: /* 1000BASE */
 +              sh_eth_write(ndev, GECMR_1000, GECMR);
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +/* sh7763 */
 +static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 +      .chip_reset     = sh_eth_chip_reset,
 +      .set_duplex     = sh_eth_set_duplex,
 +      .set_rate       = sh_eth_set_rate,
 +
 +      .ecsr_value     = ECSR_ICD | ECSR_MPD,
 +      .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 +      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 +
 +      .tx_check       = EESR_TC1 | EESR_FTC,
 +      .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
 +                        EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
 +                        EESR_ECI,
 +      .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
 +                        EESR_TFE,
 +
 +      .apr            = 1,
 +      .mpr            = 1,
 +      .tpauser        = 1,
 +      .bculr          = 1,
 +      .hw_swap        = 1,
 +      .no_trimd       = 1,
 +      .no_ade         = 1,
 +      .tsu            = 1,
 +};
 +
 +#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
 +#define SH_ETH_RESET_DEFAULT  1
 +static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 +      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 +
 +      .apr            = 1,
 +      .mpr            = 1,
 +      .tpauser        = 1,
 +      .hw_swap        = 1,
 +};
 +#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
 +#define SH_ETH_RESET_DEFAULT  1
 +#define SH_ETH_HAS_TSU        1
 +static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 +      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 +      .tsu            = 1,
 +};
 +#endif
 +
 +static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 +{
 +      if (!cd->ecsr_value)
 +              cd->ecsr_value = DEFAULT_ECSR_INIT;
 +
 +      if (!cd->ecsipr_value)
 +              cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
 +
 +      if (!cd->fcftr_value)
 +              cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
 +                                DEFAULT_FIFO_F_D_RFD;
 +
 +      if (!cd->fdr_value)
 +              cd->fdr_value = DEFAULT_FDR_INIT;
 +
 +      if (!cd->rmcr_value)
 +              cd->rmcr_value = DEFAULT_RMCR_VALUE;
 +
 +      if (!cd->tx_check)
 +              cd->tx_check = DEFAULT_TX_CHECK;
 +
 +      if (!cd->eesr_err_check)
 +              cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
 +
 +      if (!cd->tx_error_check)
 +              cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
 +}
 +
 +#if defined(SH_ETH_RESET_DEFAULT)
 +/* Chip Reset */
 +static void sh_eth_reset(struct net_device *ndev)
 +{
 +      sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
 +      mdelay(3);
 +      sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
 +}
 +#endif
 +
 +#if defined(CONFIG_CPU_SH4)
 +static void sh_eth_set_receive_align(struct sk_buff *skb)
 +{
 +      int reserve;
 +
 +      reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
 +      if (reserve)
 +              skb_reserve(skb, reserve);
 +}
 +#else
 +static void sh_eth_set_receive_align(struct sk_buff *skb)
 +{
 +      skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
 +}
 +#endif
 +
 +
 +/* CPU <-> EDMAC endian convert */
 +static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
 +{
 +      switch (mdp->edmac_endian) {
 +      case EDMAC_LITTLE_ENDIAN:
 +              return cpu_to_le32(x);
 +      case EDMAC_BIG_ENDIAN:
 +              return cpu_to_be32(x);
 +      }
 +      return x;
 +}
 +
 +static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
 +{
 +      switch (mdp->edmac_endian) {
 +      case EDMAC_LITTLE_ENDIAN:
 +              return le32_to_cpu(x);
 +      case EDMAC_BIG_ENDIAN:
 +              return be32_to_cpu(x);
 +      }
 +      return x;
 +}
 +
 +/*
 + * Program the hardware MAC address from dev->dev_addr.
 + */
 +static void update_mac_address(struct net_device *ndev)
 +{
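 +      /* MAHR holds the first four bytes of the MAC address, MALR the last two */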
 +      sh_eth_write(ndev,
 +              (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 +              (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
 +      sh_eth_write(ndev,
 +              (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
 +}
 +
 +/*
 + * Get MAC address from SuperH MAC address register
 + *
 + * The SuperH Ethernet controller has no ROM for the MAC address.
 + * This driver reads back the MAC address programmed by the bootloader
 + * (U-Boot or sh-ipl+g); the bootloader must set a MAC address before
 + * this device is used.
 + *
 + */
 +static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 +{
 +      if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
 +              memcpy(ndev->dev_addr, mac, 6);
 +      } else {
 +              ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
 +              ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
 +              ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
 +              ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
 +              ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
 +              ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
 +      }
 +}
 +
 +static int sh_eth_is_gether(struct sh_eth_private *mdp)
 +{
 +      if (mdp->reg_offset == sh_eth_offset_gigabit)
 +              return 1;
 +      else
 +              return 0;
 +}
 +
 +static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
 +{
 +      if (sh_eth_is_gether(mdp))
 +              return EDTRR_TRNS_GETHER;
 +      else
 +              return EDTRR_TRNS_ETHER;
 +}
 +
 +struct bb_info {
 +      void (*set_gate)(unsigned long addr);
 +      struct mdiobb_ctrl ctrl;
 +      u32 addr;
 +      u32 mmd_msk;/* MMD */
 +      u32 mdo_msk;
 +      u32 mdi_msk;
 +      u32 mdc_msk;
 +};
 +
 +/* PHY bit set */
 +static void bb_set(u32 addr, u32 msk)
 +{
 +      writel(readl(addr) | msk, addr);
 +}
 +
 +/* PHY bit clear */
 +static void bb_clr(u32 addr, u32 msk)
 +{
 +      writel((readl(addr) & ~msk), addr);
 +}
 +
 +/* PHY bit read */
 +static int bb_read(u32 addr, u32 msk)
 +{
 +      return (readl(addr) & msk) != 0;
 +}
 +
 +/* Data I/O pin control */
 +static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
 +{
 +      struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
 +
 +      if (bitbang->set_gate)
 +              bitbang->set_gate(bitbang->addr);
 +
 +      if (bit)
 +              bb_set(bitbang->addr, bitbang->mmd_msk);
 +      else
 +              bb_clr(bitbang->addr, bitbang->mmd_msk);
 +}
 +
 +/* Set bit data */
 +static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
 +{
 +      struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
 +
 +      if (bitbang->set_gate)
 +              bitbang->set_gate(bitbang->addr);
 +
 +      if (bit)
 +              bb_set(bitbang->addr, bitbang->mdo_msk);
 +      else
 +              bb_clr(bitbang->addr, bitbang->mdo_msk);
 +}
 +
 +/* Get bit data */
 +static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
 +{
 +      struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
 +
 +      if (bitbang->set_gate)
 +              bitbang->set_gate(bitbang->addr);
 +
 +      return bb_read(bitbang->addr, bitbang->mdi_msk);
 +}
 +
 +/* MDC pin control */
 +static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
 +{
 +      struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
 +
 +      if (bitbang->set_gate)
 +              bitbang->set_gate(bitbang->addr);
 +
 +      if (bit)
 +              bb_set(bitbang->addr, bitbang->mdc_msk);
 +      else
 +              bb_clr(bitbang->addr, bitbang->mdc_msk);
 +}
 +
 +/* mdio bus control struct */
 +static struct mdiobb_ops bb_ops = {
 +      .owner = THIS_MODULE,
 +      .set_mdc = sh_mdc_ctrl,
 +      .set_mdio_dir = sh_mmd_ctrl,
 +      .set_mdio_data = sh_set_mdio,
 +      .get_mdio_data = sh_get_mdio,
 +};
 +
 +/* free skb and descriptor buffer */
 +static void sh_eth_ring_free(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      int i;
 +
 +      /* Free Rx skb ringbuffer */
 +      if (mdp->rx_skbuff) {
 +              for (i = 0; i < RX_RING_SIZE; i++) {
 +                      if (mdp->rx_skbuff[i])
 +                              dev_kfree_skb(mdp->rx_skbuff[i]);
 +              }
 +      }
 +      kfree(mdp->rx_skbuff);
 +
 +      /* Free Tx skb ringbuffer */
 +      if (mdp->tx_skbuff) {
 +              for (i = 0; i < TX_RING_SIZE; i++) {
 +                      if (mdp->tx_skbuff[i])
 +                              dev_kfree_skb(mdp->tx_skbuff[i]);
 +              }
 +      }
 +      kfree(mdp->tx_skbuff);
 +}
 +
 +/* format skb and descriptor buffer */
 +static void sh_eth_ring_format(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      int i;
 +      struct sk_buff *skb;
 +      struct sh_eth_rxdesc *rxdesc = NULL;
 +      struct sh_eth_txdesc *txdesc = NULL;
 +      int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
 +      int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
 +
 +      mdp->cur_rx = mdp->cur_tx = 0;
 +      mdp->dirty_rx = mdp->dirty_tx = 0;
 +
 +      memset(mdp->rx_ring, 0, rx_ringsize);
 +
 +      /* build Rx ring buffer */
 +      for (i = 0; i < RX_RING_SIZE; i++) {
 +              /* skb */
 +              mdp->rx_skbuff[i] = NULL;
 +              skb = dev_alloc_skb(mdp->rx_buf_sz);
 +              mdp->rx_skbuff[i] = skb;
 +              if (skb == NULL)
 +                      break;
 +              dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
 +                              DMA_FROM_DEVICE);
 +              skb->dev = ndev; /* Mark as being used by this device. */
 +              sh_eth_set_receive_align(skb);
 +
 +              /* RX descriptor */
 +              rxdesc = &mdp->rx_ring[i];
 +              rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 +              rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 +
 +              /* The buffer size is rounded up to a 16-byte boundary. */
 +              rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 +              /* Rx descriptor address set */
 +              if (i == 0) {
 +                      sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
 +                      if (sh_eth_is_gether(mdp))
 +                              sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
 +              }
 +      }
 +
 +      mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
 +
 +      /* Mark the last entry as wrapping the ring. */
 +      rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
 +
 +      memset(mdp->tx_ring, 0, tx_ringsize);
 +
 +      /* build Tx ring buffer */
 +      for (i = 0; i < TX_RING_SIZE; i++) {
 +              mdp->tx_skbuff[i] = NULL;
 +              txdesc = &mdp->tx_ring[i];
 +              txdesc->status = cpu_to_edmac(mdp, TD_TFP);
 +              txdesc->buffer_length = 0;
 +              if (i == 0) {
 +                      /* Tx descriptor address set */
 +                      sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
 +                      if (sh_eth_is_gether(mdp))
 +                              sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
 +              }
 +      }
 +
 +      txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
 +}
 +
 +/* Get skb and descriptor buffer */
 +static int sh_eth_ring_init(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      int rx_ringsize, tx_ringsize, ret = 0;
 +
 +      /*
 +       * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
 +       * card needs room to do 8 byte alignment, +2 so we can reserve
 +       * the first 2 bytes, and +16 gets room for the status word from the
 +       * card.
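 +       * For example, MTU 1500: ((1500 + 26 + 7) & ~7) + 2 + 16 = 1546 bytes.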
 +       */
 +      mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
 +                        (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
 +      if (mdp->cd->rpadir)
 +              mdp->rx_buf_sz += NET_IP_ALIGN;
 +
 +      /* Allocate RX and TX skb rings */
 +      mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
 +                              GFP_KERNEL);
 +      if (!mdp->rx_skbuff) {
 +              dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
 +              ret = -ENOMEM;
 +              return ret;
 +      }
 +
 +      mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
 +                              GFP_KERNEL);
 +      if (!mdp->tx_skbuff) {
 +              dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
 +              ret = -ENOMEM;
 +              goto skb_ring_free;
 +      }
 +
 +      /* Allocate all Rx descriptors. */
 +      rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
 +      mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
 +                      GFP_KERNEL);
 +
 +      if (!mdp->rx_ring) {
 +              dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
 +                      rx_ringsize);
 +              ret = -ENOMEM;
 +              goto desc_ring_free;
 +      }
 +
 +      mdp->dirty_rx = 0;
 +
 +      /* Allocate all Tx descriptors. */
 +      tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
 +      mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
 +                      GFP_KERNEL);
 +      if (!mdp->tx_ring) {
 +              dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
 +                      tx_ringsize);
 +              ret = -ENOMEM;
 +              goto desc_ring_free;
 +      }
 +      return ret;
 +
 +desc_ring_free:
 +      /* free DMA buffer */
 +      dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
 +
 +skb_ring_free:
 +      /* Free Rx and Tx skb ring buffer */
 +      sh_eth_ring_free(ndev);
 +
 +      return ret;
 +}
 +
 +static int sh_eth_dev_init(struct net_device *ndev)
 +{
 +      int ret = 0;
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      u_int32_t rx_int_var, tx_int_var;
 +      u32 val;
 +
 +      /* Soft Reset */
 +      sh_eth_reset(ndev);
 +
 +      /* Descriptor format */
 +      sh_eth_ring_format(ndev);
 +      if (mdp->cd->rpadir)
 +              sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
 +
 +      /* all sh_eth int mask */
 +      sh_eth_write(ndev, 0, EESIPR);
 +
 +#if defined(__LITTLE_ENDIAN__)
 +      if (mdp->cd->hw_swap)
 +              sh_eth_write(ndev, EDMR_EL, EDMR);
 +      else
 +#endif
 +              sh_eth_write(ndev, 0, EDMR);
 +
 +      /* FIFO size set */
 +      sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
 +      sh_eth_write(ndev, 0, TFTR);
 +
 +      /* Frame recv control */
 +      sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
 +
 +      rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
 +      tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
 +      sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
 +
 +      if (mdp->cd->bculr)
 +              sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
 +
 +      sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
 +
 +      if (!mdp->cd->no_trimd)
 +              sh_eth_write(ndev, 0, TRIMD);
 +
 +      /* Recv frame limit set register */
 +      sh_eth_write(ndev, RFLR_VALUE, RFLR);
 +
 +      sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
 +      sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 +
 +      /* PAUSE Prohibition */
 +      val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
 +              ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
 +
 +      sh_eth_write(ndev, val, ECMR);
 +
 +      if (mdp->cd->set_rate)
 +              mdp->cd->set_rate(ndev);
 +
 +      /* E-MAC Status Register clear */
 +      sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
 +
 +      /* E-MAC Interrupt Enable register */
 +      sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
 +
 +      /* Set MAC address */
 +      update_mac_address(ndev);
 +
 +      /* mask reset */
 +      if (mdp->cd->apr)
 +              sh_eth_write(ndev, APR_AP, APR);
 +      if (mdp->cd->mpr)
 +              sh_eth_write(ndev, MPR_MP, MPR);
 +      if (mdp->cd->tpauser)
 +              sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
 +
 +      /* Setting the Rx mode will start the Rx process. */
 +      sh_eth_write(ndev, EDRRR_R, EDRRR);
 +
 +      netif_start_queue(ndev);
 +
 +      return ret;
 +}
 +
 +/* free Tx skb function */
 +static int sh_eth_txfree(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      struct sh_eth_txdesc *txdesc;
 +      int freeNum = 0;
 +      int entry = 0;
 +
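 +      /* Reclaim Tx descriptors that the DMAC has completed (TD_TACT cleared) */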
 +      for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
 +              entry = mdp->dirty_tx % TX_RING_SIZE;
 +              txdesc = &mdp->tx_ring[entry];
 +              if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
 +                      break;
 +              /* Free the original skb. */
 +              if (mdp->tx_skbuff[entry]) {
 +                      dma_unmap_single(&ndev->dev, txdesc->addr,
 +                                       txdesc->buffer_length, DMA_TO_DEVICE);
 +                      dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
 +                      mdp->tx_skbuff[entry] = NULL;
 +                      freeNum++;
 +              }
 +              txdesc->status = cpu_to_edmac(mdp, TD_TFP);
 +              if (entry >= TX_RING_SIZE - 1)
 +                      txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
 +
 +              mdp->stats.tx_packets++;
 +              mdp->stats.tx_bytes += txdesc->buffer_length;
 +      }
 +      return freeNum;
 +}
 +
 +/* Packet receive function */
 +static int sh_eth_rx(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      struct sh_eth_rxdesc *rxdesc;
 +
 +      int entry = mdp->cur_rx % RX_RING_SIZE;
 +      int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
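 +      /* Limit this pass to the descriptors currently posted to the hardware */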
 +      struct sk_buff *skb;
 +      u16 pkt_len = 0;
 +      u32 desc_status;
 +
 +      rxdesc = &mdp->rx_ring[entry];
 +      while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
 +              desc_status = edmac_to_cpu(mdp, rxdesc->status);
 +              pkt_len = rxdesc->frame_length;
 +
 +              if (--boguscnt < 0)
 +                      break;
 +
 +              if (!(desc_status & RDFEND))
 +                      mdp->stats.rx_length_errors++;
 +
 +              if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
 +                                 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
 +                      mdp->stats.rx_errors++;
 +                      if (desc_status & RD_RFS1)
 +                              mdp->stats.rx_crc_errors++;
 +                      if (desc_status & RD_RFS2)
 +                              mdp->stats.rx_frame_errors++;
 +                      if (desc_status & RD_RFS3)
 +                              mdp->stats.rx_length_errors++;
 +                      if (desc_status & RD_RFS4)
 +                              mdp->stats.rx_length_errors++;
 +                      if (desc_status & RD_RFS6)
 +                              mdp->stats.rx_missed_errors++;
 +                      if (desc_status & RD_RFS10)
 +                              mdp->stats.rx_over_errors++;
 +              } else {
 +                      if (!mdp->cd->hw_swap)
 +                              sh_eth_soft_swap(
 +                                      phys_to_virt(ALIGN(rxdesc->addr, 4)),
 +                                      pkt_len + 2);
 +                      skb = mdp->rx_skbuff[entry];
 +                      mdp->rx_skbuff[entry] = NULL;
 +                      if (mdp->cd->rpadir)
 +                              skb_reserve(skb, NET_IP_ALIGN);
 +                      skb_put(skb, pkt_len);
 +                      skb->protocol = eth_type_trans(skb, ndev);
 +                      netif_rx(skb);
 +                      mdp->stats.rx_packets++;
 +                      mdp->stats.rx_bytes += pkt_len;
 +              }
 +              rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
 +              entry = (++mdp->cur_rx) % RX_RING_SIZE;
 +              rxdesc = &mdp->rx_ring[entry];
 +      }
 +
 +      /* Refill the Rx ring buffers. */
 +      for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
 +              entry = mdp->dirty_rx % RX_RING_SIZE;
 +              rxdesc = &mdp->rx_ring[entry];
 +              /* The buffer size is rounded up to a 16-byte boundary. */
 +              rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 +
 +              if (mdp->rx_skbuff[entry] == NULL) {
 +                      skb = dev_alloc_skb(mdp->rx_buf_sz);
 +                      mdp->rx_skbuff[entry] = skb;
 +                      if (skb == NULL)
 +                              break;  /* Better luck next round. */
 +                      dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
 +                                      DMA_FROM_DEVICE);
 +                      skb->dev = ndev;
 +                      sh_eth_set_receive_align(skb);
 +
 +                      skb_checksum_none_assert(skb);
 +                      rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 +              }
 +              if (entry >= RX_RING_SIZE - 1)
 +                      rxdesc->status |=
 +                              cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
 +              else
 +                      rxdesc->status |=
 +                              cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 +      }
 +
 +      /* Restart Rx engine if stopped. */
 +      /* If we don't need to check status, don't. -KDU */
 +      if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
 +              sh_eth_write(ndev, EDRRR_R, EDRRR);
 +
 +      return 0;
 +}
 +
 +static void sh_eth_rcv_snd_disable(struct net_device *ndev)
 +{
 +      /* disable tx and rx */
 +      sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
 +              ~(ECMR_RE | ECMR_TE), ECMR);
 +}
 +
 +static void sh_eth_rcv_snd_enable(struct net_device *ndev)
 +{
 +      /* enable tx and rx */
 +      sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
 +              (ECMR_RE | ECMR_TE), ECMR);
 +}
 +
 +/* error control function */
 +static void sh_eth_error(struct net_device *ndev, int intr_status)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      u32 felic_stat;
 +      u32 link_stat;
 +      u32 mask;
 +
 +      if (intr_status & EESR_ECI) {
 +              felic_stat = sh_eth_read(ndev, ECSR);
 +              sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
 +              if (felic_stat & ECSR_ICD)
 +                      mdp->stats.tx_carrier_errors++;
 +              if (felic_stat & ECSR_LCHNG) {
 +                      /* Link Changed */
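 +                      /* Use the cached link state when PSR is unusable,
 +                         otherwise read PSR (inverted if active-low) */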
 +                      if (mdp->cd->no_psr || mdp->no_ether_link) {
 +                              if (mdp->link == PHY_DOWN)
 +                                      link_stat = 0;
 +                              else
 +                                      link_stat = PHY_ST_LINK;
 +                      } else {
 +                              link_stat = (sh_eth_read(ndev, PSR));
 +                              if (mdp->ether_link_active_low)
 +                                      link_stat = ~link_stat;
 +                      }
 +                      if (!(link_stat & PHY_ST_LINK))
 +                              sh_eth_rcv_snd_disable(ndev);
 +                      else {
 +                              /* Link Up */
 +                              sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
 +                                        ~DMAC_M_ECI, EESIPR);
 +                              /*clear int */
 +                              sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
 +                                        ECSR);
 +                              sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
 +                                        DMAC_M_ECI, EESIPR);
 +                              /* enable tx and rx */
 +                              sh_eth_rcv_snd_enable(ndev);
 +                      }
 +              }
 +      }
 +
 +      if (intr_status & EESR_TWB) {
 +              /* Write back end, unused write-back interrupt */
 +              if (intr_status & EESR_TABT) {  /* Transmit Abort int */
 +                      mdp->stats.tx_aborted_errors++;
 +                      if (netif_msg_tx_err(mdp))
 +                              dev_err(&ndev->dev, "Transmit Abort\n");
 +              }
 +      }
 +
 +      if (intr_status & EESR_RABT) {
 +              /* Receive Abort int */
 +              if (intr_status & EESR_RFRMER) {
 +                      /* Receive Frame Overflow int */
 +                      mdp->stats.rx_frame_errors++;
 +                      if (netif_msg_rx_err(mdp))
 +                              dev_err(&ndev->dev, "Receive Abort\n");
 +              }
 +      }
 +
 +      if (intr_status & EESR_TDE) {
 +              /* Transmit Descriptor Empty int */
 +              mdp->stats.tx_fifo_errors++;
 +              if (netif_msg_tx_err(mdp))
 +                      dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
 +      }
 +
 +      if (intr_status & EESR_TFE) {
 +              /* FIFO under flow */
 +              mdp->stats.tx_fifo_errors++;
 +              if (netif_msg_tx_err(mdp))
 +                      dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
 +      }
 +
 +      if (intr_status & EESR_RDE) {
 +              /* Receive Descriptor Empty int */
 +              mdp->stats.rx_over_errors++;
 +
 +              if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
 +                      sh_eth_write(ndev, EDRRR_R, EDRRR);
 +              if (netif_msg_rx_err(mdp))
 +                      dev_err(&ndev->dev, "Receive Descriptor Empty\n");
 +      }
 +
 +      if (intr_status & EESR_RFE) {
 +              /* Receive FIFO Overflow int */
 +              mdp->stats.rx_fifo_errors++;
 +              if (netif_msg_rx_err(mdp))
 +                      dev_err(&ndev->dev, "Receive FIFO Overflow\n");
 +      }
 +
 +      if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
 +              /* Address Error */
 +              mdp->stats.tx_fifo_errors++;
 +              if (netif_msg_tx_err(mdp))
 +                      dev_err(&ndev->dev, "Address Error\n");
 +      }
 +
 +      mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
 +      if (mdp->cd->no_ade)
 +              mask &= ~EESR_ADE;
 +      if (intr_status & mask) {
 +              /* Tx error */
 +              u32 edtrr = sh_eth_read(ndev, EDTRR);
 +              /* dmesg */
 +              dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
 +                              intr_status, mdp->cur_tx);
 +              dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
 +                              mdp->dirty_tx, (u32) ndev->state, edtrr);
 +              /* dirty buffer free */
 +              sh_eth_txfree(ndev);
 +
 +              /* SH7712 BUG */
 +              if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
 +                      /* tx dma start */
 +                      sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
 +              }
 +              /* wakeup */
 +              netif_wake_queue(ndev);
 +      }
 +}
 +
 +static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 +{
 +      struct net_device *ndev = netdev;
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      struct sh_eth_cpu_data *cd = mdp->cd;
 +      irqreturn_t ret = IRQ_NONE;
 +      u32 intr_status = 0;
 +
 +      spin_lock(&mdp->lock);
 +
 +      /* Get interrupt status */
 +      intr_status = sh_eth_read(ndev, EESR);
 +      /* Clear interrupt */
 +      if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
 +                      EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
 +                      cd->tx_check | cd->eesr_err_check)) {
 +              sh_eth_write(ndev, intr_status, EESR);
 +              ret = IRQ_HANDLED;
 +      } else
 +              goto other_irq;
 +
 +      if (intr_status & (EESR_FRC | /* Frame recv */
 +                      EESR_RMAF | /* Multicast address recv */
 +                      EESR_RRF  | /* Residual-bit frame recv */
 +                      EESR_RTLF | /* Long frame recv */
 +                      EESR_RTSF | /* Short frame recv */
 +                      EESR_PRE  | /* PHY-LSI recv error */
 +                      EESR_CERF)) { /* Recv frame CRC error */
 +              sh_eth_rx(ndev);
 +      }
 +
 +      /* Tx Check */
 +      if (intr_status & cd->tx_check) {
 +              sh_eth_txfree(ndev);
 +              netif_wake_queue(ndev);
 +      }
 +
 +      if (intr_status & cd->eesr_err_check)
 +              sh_eth_error(ndev, intr_status);
 +
 +other_irq:
 +      spin_unlock(&mdp->lock);
 +
 +      return ret;
 +}
 +
 +static void sh_eth_timer(unsigned long data)
 +{
 +      struct net_device *ndev = (struct net_device *)data;
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      mod_timer(&mdp->timer, jiffies + (10 * HZ));
 +}
 +
 +/* PHY state control function */
 +static void sh_eth_adjust_link(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      struct phy_device *phydev = mdp->phydev;
 +      int new_state = 0;
 +
 +      if (phydev->link != PHY_DOWN) {
 +              if (phydev->duplex != mdp->duplex) {
 +                      new_state = 1;
 +                      mdp->duplex = phydev->duplex;
 +                      if (mdp->cd->set_duplex)
 +                              mdp->cd->set_duplex(ndev);
 +              }
 +
 +              if (phydev->speed != mdp->speed) {
 +                      new_state = 1;
 +                      mdp->speed = phydev->speed;
 +                      if (mdp->cd->set_rate)
 +                              mdp->cd->set_rate(ndev);
 +              }
 +              if (mdp->link == PHY_DOWN) {
 +                      sh_eth_write(ndev,
 +                              (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
 +                      new_state = 1;
 +                      mdp->link = phydev->link;
 +              }
 +      } else if (mdp->link) {
 +              new_state = 1;
 +              mdp->link = PHY_DOWN;
 +              mdp->speed = 0;
 +              mdp->duplex = -1;
 +      }
 +
 +      if (new_state && netif_msg_link(mdp))
 +              phy_print_status(phydev);
 +}
 +
 +/* PHY init function */
 +static int sh_eth_phy_init(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      char phy_id[MII_BUS_ID_SIZE + 3];
 +      struct phy_device *phydev = NULL;
 +
 +      snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
 +              mdp->mii_bus->id , mdp->phy_id);
 +
 +      mdp->link = PHY_DOWN;
 +      mdp->speed = 0;
 +      mdp->duplex = -1;
 +
 +      /* Try connect to PHY */
 +      phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
 +                              0, mdp->phy_interface);
 +      if (IS_ERR(phydev)) {
 +              dev_err(&ndev->dev, "phy_connect failed\n");
 +              return PTR_ERR(phydev);
 +      }
 +
 +      dev_info(&ndev->dev, "attached phy %i to driver %s\n",
 +              phydev->addr, phydev->drv->name);
 +
 +      mdp->phydev = phydev;
 +
 +      return 0;
 +}
 +
 +/* PHY control start function */
 +static int sh_eth_phy_start(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      int ret;
 +
 +      ret = sh_eth_phy_init(ndev);
 +      if (ret)
 +              return ret;
 +
 +      /* reset phy - this also wakes it from PDOWN */
 +      phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
 +      phy_start(mdp->phydev);
 +
 +      return 0;
 +}
 +
 +static int sh_eth_get_settings(struct net_device *ndev,
 +                      struct ethtool_cmd *ecmd)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      unsigned long flags;
 +      int ret;
 +
 +      spin_lock_irqsave(&mdp->lock, flags);
 +      ret = phy_ethtool_gset(mdp->phydev, ecmd);
 +      spin_unlock_irqrestore(&mdp->lock, flags);
 +
 +      return ret;
 +}
 +
 +static int sh_eth_set_settings(struct net_device *ndev,
 +              struct ethtool_cmd *ecmd)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      unsigned long flags;
 +      int ret;
 +
 +      spin_lock_irqsave(&mdp->lock, flags);
 +
 +      /* disable tx and rx */
 +      sh_eth_rcv_snd_disable(ndev);
 +
 +      ret = phy_ethtool_sset(mdp->phydev, ecmd);
 +      if (ret)
 +              goto error_exit;
 +
 +      if (ecmd->duplex == DUPLEX_FULL)
 +              mdp->duplex = 1;
 +      else
 +              mdp->duplex = 0;
 +
 +      if (mdp->cd->set_duplex)
 +              mdp->cd->set_duplex(ndev);
 +
 +error_exit:
 +      mdelay(1);
 +
 +      /* enable tx and rx */
 +      sh_eth_rcv_snd_enable(ndev);
 +
 +      spin_unlock_irqrestore(&mdp->lock, flags);
 +
 +      return ret;
 +}
 +
 +static int sh_eth_nway_reset(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      unsigned long flags;
 +      int ret;
 +
 +      spin_lock_irqsave(&mdp->lock, flags);
 +      ret = phy_start_aneg(mdp->phydev);
 +      spin_unlock_irqrestore(&mdp->lock, flags);
 +
 +      return ret;
 +}
 +
 +static u32 sh_eth_get_msglevel(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      return mdp->msg_enable;
 +}
 +
 +static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      mdp->msg_enable = value;
 +}
 +
 +static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
 +      "rx_current", "tx_current",
 +      "rx_dirty", "tx_dirty",
 +};
 +#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
 +
 +static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
 +{
 +      switch (sset) {
 +      case ETH_SS_STATS:
 +              return SH_ETH_STATS_LEN;
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
 +static void sh_eth_get_ethtool_stats(struct net_device *ndev,
 +                      struct ethtool_stats *stats, u64 *data)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      int i = 0;
 +
 +      /* device-specific stats */
 +      data[i++] = mdp->cur_rx;
 +      data[i++] = mdp->cur_tx;
 +      data[i++] = mdp->dirty_rx;
 +      data[i++] = mdp->dirty_tx;
 +}
 +
 +static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 +{
 +      switch (stringset) {
 +      case ETH_SS_STATS:
 +              memcpy(data, *sh_eth_gstrings_stats,
 +                                      sizeof(sh_eth_gstrings_stats));
 +              break;
 +      }
 +}
 +
 +static struct ethtool_ops sh_eth_ethtool_ops = {
 +      .get_settings   = sh_eth_get_settings,
 +      .set_settings   = sh_eth_set_settings,
 +      .nway_reset             = sh_eth_nway_reset,
 +      .get_msglevel   = sh_eth_get_msglevel,
 +      .set_msglevel   = sh_eth_set_msglevel,
 +      .get_link               = ethtool_op_get_link,
 +      .get_strings    = sh_eth_get_strings,
 +      .get_ethtool_stats  = sh_eth_get_ethtool_stats,
 +      .get_sset_count     = sh_eth_get_sset_count,
 +};
 +
 +/* network device open function */
 +static int sh_eth_open(struct net_device *ndev)
 +{
 +      int ret = 0;
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      pm_runtime_get_sync(&mdp->pdev->dev);
 +
 +      ret = request_irq(ndev->irq, sh_eth_interrupt,
 +#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
 +      defined(CONFIG_CPU_SUBTYPE_SH7764) || \
 +      defined(CONFIG_CPU_SUBTYPE_SH7757)
 +                              IRQF_SHARED,
 +#else
 +                              0,
 +#endif
 +                              ndev->name, ndev);
 +      if (ret) {
 +              dev_err(&ndev->dev, "Can not assign IRQ number\n");
 +              return ret;
 +      }
 +
 +      /* Descriptor set */
 +      ret = sh_eth_ring_init(ndev);
 +      if (ret)
 +              goto out_free_irq;
 +
 +      /* device init */
 +      ret = sh_eth_dev_init(ndev);
 +      if (ret)
 +              goto out_free_irq;
 +
 +      /* PHY control start*/
 +      /* PHY control start */
 +      if (ret)
 +              goto out_free_irq;
 +
 +      /* Set the timer to check for link beat. */
 +      init_timer(&mdp->timer);
 +      mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
 +      setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
 +
 +      return ret;
 +
 +out_free_irq:
 +      free_irq(ndev->irq, ndev);
 +      pm_runtime_put_sync(&mdp->pdev->dev);
 +      return ret;
 +}
 +
 +/* Timeout function */
 +static void sh_eth_tx_timeout(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      struct sh_eth_rxdesc *rxdesc;
 +      int i;
 +
 +      netif_stop_queue(ndev);
 +
 +      if (netif_msg_timer(mdp))
 +              dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
 +             " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
 +
 +      /* tx_errors count up */
 +      mdp->stats.tx_errors++;
 +
 +      /* timer off */
 +      del_timer_sync(&mdp->timer);
 +
 +      /* Free all the skbuffs in the Rx queue. */
 +      for (i = 0; i < RX_RING_SIZE; i++) {
 +              rxdesc = &mdp->rx_ring[i];
 +              rxdesc->status = 0;
 +              rxdesc->addr = 0xBADF00D0;
 +              if (mdp->rx_skbuff[i])
 +                      dev_kfree_skb(mdp->rx_skbuff[i]);
 +              mdp->rx_skbuff[i] = NULL;
 +      }
 +      for (i = 0; i < TX_RING_SIZE; i++) {
 +              if (mdp->tx_skbuff[i])
 +                      dev_kfree_skb(mdp->tx_skbuff[i]);
 +              mdp->tx_skbuff[i] = NULL;
 +      }
 +
 +      /* device init */
 +      sh_eth_dev_init(ndev);
 +
 +      /* timer on */
 +      mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
 +      add_timer(&mdp->timer);
 +}
 +
 +/* Packet transmit function */
 +static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      struct sh_eth_txdesc *txdesc;
 +      u32 entry;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&mdp->lock, flags);
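 +      /* Ring nearly full: reclaim completed descriptors, and stop the
 +         queue if nothing frees up */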
 +      if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
 +              if (!sh_eth_txfree(ndev)) {
 +                      if (netif_msg_tx_queued(mdp))
 +                              dev_warn(&ndev->dev, "TxFD exhausted.\n");
 +                      netif_stop_queue(ndev);
 +                      spin_unlock_irqrestore(&mdp->lock, flags);
 +                      return NETDEV_TX_BUSY;
 +              }
 +      }
 +      spin_unlock_irqrestore(&mdp->lock, flags);
 +
 +      entry = mdp->cur_tx % TX_RING_SIZE;
 +      mdp->tx_skbuff[entry] = skb;
 +      txdesc = &mdp->tx_ring[entry];
 +      /* soft swap. */
 +      if (!mdp->cd->hw_swap)
 +              sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
 +                               skb->len + 2);
 +      txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
 +                                    DMA_TO_DEVICE);
 +      if (skb->len < ETHERSMALL)
 +              txdesc->buffer_length = ETHERSMALL;
 +      else
 +              txdesc->buffer_length = skb->len;
 +
 +      if (entry >= TX_RING_SIZE - 1)
 +              txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
 +      else
 +              txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
 +
 +      mdp->cur_tx++;
 +
 +      if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
 +              sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
 +
 +      return NETDEV_TX_OK;
 +}
 +
 +/* device close function */
 +static int sh_eth_close(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      int ringsize;
 +
 +      netif_stop_queue(ndev);
 +
 +      /* Disable interrupts by clearing the interrupt mask. */
 +      sh_eth_write(ndev, 0x0000, EESIPR);
 +
 +      /* Stop the chip's Tx and Rx processes. */
 +      sh_eth_write(ndev, 0, EDTRR);
 +      sh_eth_write(ndev, 0, EDRRR);
 +
 +      /* PHY Disconnect */
 +      if (mdp->phydev) {
 +              phy_stop(mdp->phydev);
 +              phy_disconnect(mdp->phydev);
 +      }
 +
 +      free_irq(ndev->irq, ndev);
 +
 +      del_timer_sync(&mdp->timer);
 +
 +      /* Free all the skbuffs in the Rx queue. */
 +      sh_eth_ring_free(ndev);
 +
 +      /* free DMA buffer */
 +      ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
 +      dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
 +
 +      /* free DMA buffer */
 +      ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
 +      dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
 +
 +      pm_runtime_put_sync(&mdp->pdev->dev);
 +
 +      return 0;
 +}
 +
 +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      pm_runtime_get_sync(&mdp->pdev->dev);
 +
 +      mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
 +      sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
 +      mdp->stats.collisions += sh_eth_read(ndev, CDCR);
 +      sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
 +      mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
 +      sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
 +      if (sh_eth_is_gether(mdp)) {
 +              mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
 +              sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
 +              mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
 +              sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
 +      } else {
 +              mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
 +              sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
 +      }
 +      pm_runtime_put_sync(&mdp->pdev->dev);
 +
 +      return &mdp->stats;
 +}
 +
 +/* ioctl to device function */
 +static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
 +                              int cmd)
 +{
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +      struct phy_device *phydev = mdp->phydev;
 +
 +      if (!netif_running(ndev))
 +              return -EINVAL;
 +
 +      if (!phydev)
 +              return -ENODEV;
 +
 +      return phy_mii_ioctl(phydev, rq, cmd);
 +}
 +
 +#if defined(SH_ETH_HAS_TSU)
 +/* Multicast reception directions set */
 +static void sh_eth_set_multicast_list(struct net_device *ndev)
 +{
 +      if (ndev->flags & IFF_PROMISC) {
 +              /* Set promiscuous. */
 +              sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
 +                              ECMR_PRM, ECMR);
 +      } else {
 +              /* Normal, unicast/broadcast-only mode. */
 +              sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
 +                              ECMR_MCT, ECMR);
 +      }
 +}
 +#endif /* SH_ETH_HAS_TSU */
 +
 +/* SuperH's TSU register init function */
 +static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 +{
 +      sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
 +      sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
 +      sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
 +      sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
 +      sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
 +      sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
 +      sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
 +      sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
 +      sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
 +      sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
 +      if (sh_eth_is_gether(mdp)) {
 +              sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
 +              sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
 +      } else {
 +              sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
 +              sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
 +      }
 +      sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
 +      sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
 +      sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
 +      sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
 +      sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
 +      sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
 +      sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
 +}
 +
 +/* MDIO bus release function */
 +static int sh_mdio_release(struct net_device *ndev)
 +{
 +      struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
 +
 +      /* unregister mdio bus */
 +      mdiobus_unregister(bus);
 +
 +      /* remove mdio bus info from net_device */
 +      dev_set_drvdata(&ndev->dev, NULL);
 +
 +      /* free interrupts memory */
 +      kfree(bus->irq);
 +
 +      /* free bitbang info */
 +      free_mdio_bitbang(bus);
 +
 +      return 0;
 +}
 +
 +/* MDIO bus init function */
 +static int sh_mdio_init(struct net_device *ndev, int id,
 +                      struct sh_eth_plat_data *pd)
 +{
 +      int ret, i;
 +      struct bb_info *bitbang;
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      /* create bit control struct for PHY */
 +      bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
 +      if (!bitbang) {
 +              ret = -ENOMEM;
 +              goto out;
 +      }
 +
 +      /* bitbang init */
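 +      /* mask values select the MDI/MDO/MMD/MDC bits within the PIR register */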
 +      bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR];
 +      bitbang->set_gate = pd->set_mdio_gate;
 +      bitbang->mdi_msk = 0x08;
 +      bitbang->mdo_msk = 0x04;
 +      bitbang->mmd_msk = 0x02;/* MMD */
 +      bitbang->mdc_msk = 0x01;
 +      bitbang->ctrl.ops = &bb_ops;
 +
 +      /* MII controller setting */
 +      mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
 +      if (!mdp->mii_bus) {
 +              ret = -ENOMEM;
 +              goto out_free_bitbang;
 +      }
 +
 +      /* Hook up MII support for ethtool */
 +      mdp->mii_bus->name = "sh_mii";
 +      mdp->mii_bus->parent = &ndev->dev;
 +      snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
 +
 +      /* PHY IRQ */
 +      mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 +      if (!mdp->mii_bus->irq) {
 +              ret = -ENOMEM;
 +              goto out_free_bus;
 +      }
 +
 +      for (i = 0; i < PHY_MAX_ADDR; i++)
 +              mdp->mii_bus->irq[i] = PHY_POLL;
 +
 +      /* register mdio bus */
 +      ret = mdiobus_register(mdp->mii_bus);
 +      if (ret)
 +              goto out_free_irq;
 +
 +      dev_set_drvdata(&ndev->dev, mdp->mii_bus);
 +
 +      return 0;
 +
 +out_free_irq:
 +      kfree(mdp->mii_bus->irq);
 +
 +out_free_bus:
 +      free_mdio_bitbang(mdp->mii_bus);
 +
 +out_free_bitbang:
 +      kfree(bitbang);
 +
 +out:
 +      return ret;
 +}
 +
 +static const u16 *sh_eth_get_register_offset(int register_type)
 +{
 +      const u16 *reg_offset = NULL;
 +
 +      switch (register_type) {
 +      case SH_ETH_REG_GIGABIT:
 +              reg_offset = sh_eth_offset_gigabit;
 +              break;
 +      case SH_ETH_REG_FAST_SH4:
 +              reg_offset = sh_eth_offset_fast_sh4;
 +              break;
 +      case SH_ETH_REG_FAST_SH3_SH2:
 +              reg_offset = sh_eth_offset_fast_sh3_sh2;
 +              break;
 +      default:
 +              printk(KERN_ERR "Unknown register type (%d)\n", register_type);
 +              break;
 +      }
 +
 +      return reg_offset;
 +}
 +
 +static const struct net_device_ops sh_eth_netdev_ops = {
 +      .ndo_open               = sh_eth_open,
 +      .ndo_stop               = sh_eth_close,
 +      .ndo_start_xmit         = sh_eth_start_xmit,
 +      .ndo_get_stats          = sh_eth_get_stats,
 +#if defined(SH_ETH_HAS_TSU)
 +      .ndo_set_rx_mode        = sh_eth_set_multicast_list,
 +#endif
 +      .ndo_tx_timeout         = sh_eth_tx_timeout,
 +      .ndo_do_ioctl           = sh_eth_do_ioctl,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_set_mac_address    = eth_mac_addr,
 +      .ndo_change_mtu         = eth_change_mtu,
 +};
 +
 +static int sh_eth_drv_probe(struct platform_device *pdev)
 +{
 +      int ret, devno = 0;
 +      struct resource *res;
 +      struct net_device *ndev = NULL;
 +      struct sh_eth_private *mdp = NULL;
 +      struct sh_eth_plat_data *pd;
 +
 +      /* get base addr */
 +      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 +      if (unlikely(res == NULL)) {
 +              dev_err(&pdev->dev, "invalid resource\n");
 +              ret = -EINVAL;
 +              goto out;
 +      }
 +
 +      ndev = alloc_etherdev(sizeof(struct sh_eth_private));
 +      if (!ndev) {
 +              dev_err(&pdev->dev, "Could not allocate device.\n");
 +              ret = -ENOMEM;
 +              goto out;
 +      }
 +
 +      /* The sh Ether-specific entries in the device structure. */
 +      ndev->base_addr = res->start;
 +      devno = pdev->id;
 +      if (devno < 0)
 +              devno = 0;
 +
 +      ndev->dma = -1;
 +      ret = platform_get_irq(pdev, 0);
 +      if (ret < 0) {
 +              ret = -ENODEV;
 +              goto out_release;
 +      }
 +      ndev->irq = ret;
 +
 +      SET_NETDEV_DEV(ndev, &pdev->dev);
 +
 +      /* Fill in the fields of the device structure with ethernet values. */
 +      ether_setup(ndev);
 +
 +      mdp = netdev_priv(ndev);
 +      spin_lock_init(&mdp->lock);
 +      mdp->pdev = pdev;
 +      pm_runtime_enable(&pdev->dev);
 +      pm_runtime_resume(&pdev->dev);
 +
 +      pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
 +      /* get PHY ID */
 +      mdp->phy_id = pd->phy;
 +      mdp->phy_interface = pd->phy_interface;
 +      /* EDMAC endian */
 +      mdp->edmac_endian = pd->edmac_endian;
 +      mdp->no_ether_link = pd->no_ether_link;
 +      mdp->ether_link_active_low = pd->ether_link_active_low;
 +      mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
 +
 +      /* set cpu data */
 +#if defined(SH_ETH_HAS_BOTH_MODULES)
 +      mdp->cd = sh_eth_get_cpu_data(mdp);
 +#else
 +      mdp->cd = &sh_eth_my_cpu_data;
 +#endif
 +      sh_eth_set_default_cpu_data(mdp->cd);
 +
 +      /* set function */
 +      ndev->netdev_ops = &sh_eth_netdev_ops;
 +      SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
 +      ndev->watchdog_timeo = TX_TIMEOUT;
 +
 +      /* debug message level */
 +      mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
 +      mdp->post_rx = POST_RX >> (devno << 1);
 +      mdp->post_fw = POST_FW >> (devno << 1);
 +
 +      /* read and set MAC address */
 +      read_mac_address(ndev, pd->mac_addr);
 +
 +      /* First device only init */
 +      if (!devno) {
 +              if (mdp->cd->tsu) {
 +                      struct resource *rtsu;
 +                      rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 +                      if (!rtsu) {
 +                              dev_err(&pdev->dev, "TSU resource not found\n");
 +                              ret = -ENODEV;
 +                              goto out_release;
 +                      }
 +                      mdp->tsu_addr = ioremap(rtsu->start,
 +                                              resource_size(rtsu));
 +              }
 +              if (mdp->cd->chip_reset)
 +                      mdp->cd->chip_reset(ndev);
 +
 +              if (mdp->cd->tsu) {
 +                      /* TSU init (Init only)*/
 +                      sh_eth_tsu_init(mdp);
 +              }
 +      }
 +
 +      /* network device register */
 +      ret = register_netdev(ndev);
 +      if (ret)
 +              goto out_release;
 +
 +      /* mdio bus init */
 +      ret = sh_mdio_init(ndev, pdev->id, pd);
 +      if (ret)
 +              goto out_unregister;
 +
 +      /* print device information */
 +      pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
 +             (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
 +
 +      platform_set_drvdata(pdev, ndev);
 +
 +      return ret;
 +
 +out_unregister:
 +      unregister_netdev(ndev);
 +
 +out_release:
 +      /* net_dev free */
 +      if (mdp && mdp->tsu_addr)
 +              iounmap(mdp->tsu_addr);
 +      if (ndev)
 +              free_netdev(ndev);
 +
 +out:
 +      return ret;
 +}
 +
 +static int sh_eth_drv_remove(struct platform_device *pdev)
 +{
 +      struct net_device *ndev = platform_get_drvdata(pdev);
 +      struct sh_eth_private *mdp = netdev_priv(ndev);
 +
 +      iounmap(mdp->tsu_addr);
 +      sh_mdio_release(ndev);
 +      unregister_netdev(ndev);
 +      pm_runtime_disable(&pdev->dev);
 +      free_netdev(ndev);
 +      platform_set_drvdata(pdev, NULL);
 +
 +      return 0;
 +}
 +
 +static int sh_eth_runtime_nop(struct device *dev)
 +{
 +      /*
 +       * Runtime PM callback shared between ->runtime_suspend()
 +       * and ->runtime_resume(). Simply returns success.
 +       *
 +       * This driver re-initializes all registers after
 +       * pm_runtime_get_sync() anyway so there is no need
 +       * to save and restore registers here.
 +       */
 +      return 0;
 +}
 +
 +static struct dev_pm_ops sh_eth_dev_pm_ops = {
 +      .runtime_suspend = sh_eth_runtime_nop,
 +      .runtime_resume = sh_eth_runtime_nop,
 +};
 +
 +static struct platform_driver sh_eth_driver = {
 +      .probe = sh_eth_drv_probe,
 +      .remove = sh_eth_drv_remove,
 +      .driver = {
 +                 .name = CARDNAME,
 +                 .pm = &sh_eth_dev_pm_ops,
 +      },
 +};
 +
 +static int __init sh_eth_init(void)
 +{
 +      return platform_driver_register(&sh_eth_driver);
 +}
 +
 +static void __exit sh_eth_cleanup(void)
 +{
 +      platform_driver_unregister(&sh_eth_driver);
 +}
 +
 +module_init(sh_eth_init);
 +module_exit(sh_eth_cleanup);
 +
 +MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
 +MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
 +MODULE_LICENSE("GPL v2");
index 76dcadf,0000000..de9afeb
mode 100644,000000..100644
--- /dev/null
@@@ -1,2746 -1,0 +1,2732 @@@
-       bool use_wc;
 +/****************************************************************************
 + * Driver for Solarflare Solarstorm network controllers and boards
 + * Copyright 2005-2006 Fen Systems Ltd.
 + * Copyright 2005-2011 Solarflare Communications Inc.
 + *
 + * This program is free software; you can redistribute it and/or modify it
 + * under the terms of the GNU General Public License version 2 as published
 + * by the Free Software Foundation, incorporated herein by reference.
 + */
 +
 +#include <linux/module.h>
 +#include <linux/pci.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/delay.h>
 +#include <linux/notifier.h>
 +#include <linux/ip.h>
 +#include <linux/tcp.h>
 +#include <linux/in.h>
 +#include <linux/crc32.h>
 +#include <linux/ethtool.h>
 +#include <linux/topology.h>
 +#include <linux/gfp.h>
 +#include <linux/cpu_rmap.h>
 +#include "net_driver.h"
 +#include "efx.h"
 +#include "nic.h"
 +
 +#include "mcdi.h"
 +#include "workarounds.h"
 +
 +/**************************************************************************
 + *
 + * Type name strings
 + *
 + **************************************************************************
 + */
 +
 +/* Loopback mode names (see LOOPBACK_MODE()) */
 +const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
 +const char *efx_loopback_mode_names[] = {
 +      [LOOPBACK_NONE]         = "NONE",
 +      [LOOPBACK_DATA]         = "DATAPATH",
 +      [LOOPBACK_GMAC]         = "GMAC",
 +      [LOOPBACK_XGMII]        = "XGMII",
 +      [LOOPBACK_XGXS]         = "XGXS",
 +      [LOOPBACK_XAUI]         = "XAUI",
 +      [LOOPBACK_GMII]         = "GMII",
 +      [LOOPBACK_SGMII]        = "SGMII",
 +      [LOOPBACK_XGBR]         = "XGBR",
 +      [LOOPBACK_XFI]          = "XFI",
 +      [LOOPBACK_XAUI_FAR]     = "XAUI_FAR",
 +      [LOOPBACK_GMII_FAR]     = "GMII_FAR",
 +      [LOOPBACK_SGMII_FAR]    = "SGMII_FAR",
 +      [LOOPBACK_XFI_FAR]      = "XFI_FAR",
 +      [LOOPBACK_GPHY]         = "GPHY",
 +      [LOOPBACK_PHYXS]        = "PHYXS",
 +      [LOOPBACK_PCS]          = "PCS",
 +      [LOOPBACK_PMAPMD]       = "PMA/PMD",
 +      [LOOPBACK_XPORT]        = "XPORT",
 +      [LOOPBACK_XGMII_WS]     = "XGMII_WS",
 +      [LOOPBACK_XAUI_WS]      = "XAUI_WS",
 +      [LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
 +      [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
 +      [LOOPBACK_GMII_WS]      = "GMII_WS",
 +      [LOOPBACK_XFI_WS]       = "XFI_WS",
 +      [LOOPBACK_XFI_WS_FAR]   = "XFI_WS_FAR",
 +      [LOOPBACK_PHYXS_WS]     = "PHYXS_WS",
 +};
 +
 +const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
 +const char *efx_reset_type_names[] = {
 +      [RESET_TYPE_INVISIBLE]     = "INVISIBLE",
 +      [RESET_TYPE_ALL]           = "ALL",
 +      [RESET_TYPE_WORLD]         = "WORLD",
 +      [RESET_TYPE_DISABLE]       = "DISABLE",
 +      [RESET_TYPE_TX_WATCHDOG]   = "TX_WATCHDOG",
 +      [RESET_TYPE_INT_ERROR]     = "INT_ERROR",
 +      [RESET_TYPE_RX_RECOVERY]   = "RX_RECOVERY",
 +      [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
 +      [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
 +      [RESET_TYPE_TX_SKIP]       = "TX_SKIP",
 +      [RESET_TYPE_MC_FAILURE]    = "MC_FAILURE",
 +};
 +
 +#define EFX_MAX_MTU (9 * 1024)
 +
 +/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 + * queued onto this work queue. This is not a per-nic work queue, because
 + * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 + */
 +static struct workqueue_struct *reset_workqueue;
 +
 +/**************************************************************************
 + *
 + * Configurable values
 + *
 + *************************************************************************/
 +
 +/*
 + * Use separate channels for TX and RX events
 + *
 + * Set this to 1 to use separate channels for TX and RX. It allows us
 + * to control interrupt affinity separately for TX and RX.
 + *
 + * This is only used in MSI-X interrupt mode
 + */
 +static unsigned int separate_tx_channels;
 +module_param(separate_tx_channels, uint, 0444);
 +MODULE_PARM_DESC(separate_tx_channels,
 +               "Use separate channels for TX and RX");
 +
 +/* This is the weight assigned to each of the (per-channel) virtual
 + * NAPI devices.
 + */
 +static int napi_weight = 64;
 +
 +/* This is the time (in jiffies) between invocations of the hardware
 + * monitor.  On Falcon-based NICs, this will:
 + * - Check the on-board hardware monitor;
 + * - Poll the link state and reconfigure the hardware as necessary.
 + */
 +static unsigned int efx_monitor_interval = 1 * HZ;
 +
 +/* This controls whether or not the driver will initialise devices
 + * with invalid MAC addresses stored in the EEPROM or flash.  If true,
 + * such devices will be initialised with a random locally-generated
 + * MAC address.  This allows for loading the sfc_mtd driver to
 + * reprogram the flash, even if the flash contents (including the MAC
 + * address) have previously been erased.
 + */
 +static unsigned int allow_bad_hwaddr;
 +
 +/* Initial interrupt moderation settings.  They can be modified after
 + * module load with ethtool.
 + *
 + * The default for RX should strike a balance between increasing the
 + * round-trip latency and reducing overhead.
 + */
 +static unsigned int rx_irq_mod_usec = 60;
 +
 +/* Initial interrupt moderation settings.  They can be modified after
 + * module load with ethtool.
 + *
 + * This default is chosen to ensure that a 10G link does not go idle
 + * while a TX queue is stopped after it has become full.  A queue is
 + * restarted when it drops below half full.  The time this takes (assuming
 + * worst case 3 descriptors per packet and 1024 descriptors) is
 + *   512 / 3 * 1.2 = 205 usec.
 + */
 +static unsigned int tx_irq_mod_usec = 150;
 +
 +/* This is the first interrupt mode to try out of:
 + * 0 => MSI-X
 + * 1 => MSI
 + * 2 => legacy
 + */
 +static unsigned int interrupt_mode;
 +
 +/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 + * i.e. the number of CPUs among which we may distribute simultaneous
 + * interrupt handling.
 + *
 + * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 + * The default (0) means to assign an interrupt to each package (level II cache).
 + */
 +static unsigned int rss_cpus;
 +module_param(rss_cpus, uint, 0444);
 +MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
 +
 +static int phy_flash_cfg;
 +module_param(phy_flash_cfg, int, 0644);
 +MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
 +
 +static unsigned irq_adapt_low_thresh = 10000;
 +module_param(irq_adapt_low_thresh, uint, 0644);
 +MODULE_PARM_DESC(irq_adapt_low_thresh,
 +               "Threshold score for reducing IRQ moderation");
 +
 +static unsigned irq_adapt_high_thresh = 20000;
 +module_param(irq_adapt_high_thresh, uint, 0644);
 +MODULE_PARM_DESC(irq_adapt_high_thresh,
 +               "Threshold score for increasing IRQ moderation");
 +
 +static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 +                       NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
 +                       NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
 +                       NETIF_MSG_TX_ERR | NETIF_MSG_HW);
 +module_param(debug, uint, 0);
 +MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
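 +/* As a rough usage example (assuming the standard NETIF_MSG_* bit values),
 + * loading with "modprobe sfc debug=0x2000" would leave only NETIF_MSG_HW
 + * messages enabled, while debug=0 suppresses all messages gated on these
 + * flags.
 + */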
 +
 +/**************************************************************************
 + *
 + * Utility functions and prototypes
 + *
 + *************************************************************************/
 +
 +static void efx_remove_channels(struct efx_nic *efx);
 +static void efx_remove_port(struct efx_nic *efx);
 +static void efx_init_napi(struct efx_nic *efx);
 +static void efx_fini_napi(struct efx_nic *efx);
 +static void efx_fini_napi_channel(struct efx_channel *channel);
 +static void efx_fini_struct(struct efx_nic *efx);
 +static void efx_start_all(struct efx_nic *efx);
 +static void efx_stop_all(struct efx_nic *efx);
 +
 +#define EFX_ASSERT_RESET_SERIALISED(efx)              \
 +      do {                                            \
 +              if ((efx->state == STATE_RUNNING) ||    \
 +                  (efx->state == STATE_DISABLED))     \
 +                      ASSERT_RTNL();                  \
 +      } while (0)
 +
 +/**************************************************************************
 + *
 + * Event queue processing
 + *
 + *************************************************************************/
 +
 +/* Process channel's event queue
 + *
 + * This function is responsible for processing the event queue of a
 + * single channel.  The caller must guarantee that this function will
 + * never be concurrently called more than once on the same channel,
 + * though different channels may be being processed concurrently.
 + */
 +static int efx_process_channel(struct efx_channel *channel, int budget)
 +{
 +      struct efx_nic *efx = channel->efx;
 +      int spent;
 +
 +      if (unlikely(efx->reset_pending || !channel->enabled))
 +              return 0;
 +
 +      spent = efx_nic_process_eventq(channel, budget);
 +      if (spent == 0)
 +              return 0;
 +
 +      /* Deliver last RX packet. */
 +      if (channel->rx_pkt) {
 +              __efx_rx_packet(channel, channel->rx_pkt,
 +                              channel->rx_pkt_csummed);
 +              channel->rx_pkt = NULL;
 +      }
 +
 +      efx_rx_strategy(channel);
 +
 +      efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
 +
 +      return spent;
 +}
 +
 +/* Mark channel as finished processing
 + *
 + * Note that since we will not receive further interrupts for this
 + * channel before we finish processing and call the eventq_read_ack()
 + * method, there is no need to use the interrupt hold-off timers.
 + */
 +static inline void efx_channel_processed(struct efx_channel *channel)
 +{
 +      /* The interrupt handler for this channel may set work_pending
 +       * as soon as we acknowledge the events we've seen.  Make sure
 +       * it's cleared before then. */
 +      channel->work_pending = false;
 +      smp_wmb();
 +
 +      efx_nic_eventq_read_ack(channel);
 +}
 +
 +/* NAPI poll handler
 + *
 + * NAPI guarantees serialisation of polls of the same device, which
 + * provides the guarantee required by efx_process_channel().
 + */
 +static int efx_poll(struct napi_struct *napi, int budget)
 +{
 +      struct efx_channel *channel =
 +              container_of(napi, struct efx_channel, napi_str);
 +      struct efx_nic *efx = channel->efx;
 +      int spent;
 +
 +      netif_vdbg(efx, intr, efx->net_dev,
 +                 "channel %d NAPI poll executing on CPU %d\n",
 +                 channel->channel, raw_smp_processor_id());
 +
 +      spent = efx_process_channel(channel, budget);
 +
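 +      /* If the budget was not exhausted, adaptive IRQ moderation is
 +       * re-evaluated roughly once every 1000 interrupts on RX channels:
 +       * a low accumulated event score steps irq_moderation down one tick
 +       * (less hold-off), while a high score steps it up towards the
 +       * configured irq_rx_moderation ceiling.  With the default thresholds
 +       * of 10000 and 20000 checked every 1000 interrupts, that corresponds
 +       * to an average score of roughly 10 and 20 per interrupt.
 +       */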
 +      if (spent < budget) {
 +              if (channel->channel < efx->n_rx_channels &&
 +                  efx->irq_rx_adaptive &&
 +                  unlikely(++channel->irq_count == 1000)) {
 +                      if (unlikely(channel->irq_mod_score <
 +                                   irq_adapt_low_thresh)) {
 +                              if (channel->irq_moderation > 1) {
 +                                      channel->irq_moderation -= 1;
 +                                      efx->type->push_irq_moderation(channel);
 +                              }
 +                      } else if (unlikely(channel->irq_mod_score >
 +                                          irq_adapt_high_thresh)) {
 +                              if (channel->irq_moderation <
 +                                  efx->irq_rx_moderation) {
 +                                      channel->irq_moderation += 1;
 +                                      efx->type->push_irq_moderation(channel);
 +                              }
 +                      }
 +                      channel->irq_count = 0;
 +                      channel->irq_mod_score = 0;
 +              }
 +
 +              efx_filter_rfs_expire(channel);
 +
 +              /* There is no race here; although napi_disable() will
 +               * only wait for napi_complete(), this isn't a problem
 +               * since efx_channel_processed() will have no effect if
 +               * interrupts have already been disabled.
 +               */
 +              napi_complete(napi);
 +              efx_channel_processed(channel);
 +      }
 +
 +      return spent;
 +}
 +
 +/* Process the eventq of the specified channel immediately on this CPU
 + *
 + * Disable hardware generated interrupts, wait for any existing
 + * processing to finish, then directly poll (and ack) the eventq.
 + * Finally reenable NAPI and interrupts.
 + *
 + * This is for use only during a loopback self-test.  It must not
 + * deliver any packets up the stack as this can result in deadlock.
 + */
 +void efx_process_channel_now(struct efx_channel *channel)
 +{
 +      struct efx_nic *efx = channel->efx;
 +
 +      BUG_ON(channel->channel >= efx->n_channels);
 +      BUG_ON(!channel->enabled);
 +      BUG_ON(!efx->loopback_selftest);
 +
 +      /* Disable interrupts and wait for ISRs to complete */
 +      efx_nic_disable_interrupts(efx);
 +      if (efx->legacy_irq) {
 +              synchronize_irq(efx->legacy_irq);
 +              efx->legacy_irq_enabled = false;
 +      }
 +      if (channel->irq)
 +              synchronize_irq(channel->irq);
 +
 +      /* Wait for any NAPI processing to complete */
 +      napi_disable(&channel->napi_str);
 +
 +      /* Poll the channel */
 +      efx_process_channel(channel, channel->eventq_mask + 1);
 +
 +      /* Ack the eventq. This may cause an interrupt to be generated
 +       * when they are reenabled */
 +      efx_channel_processed(channel);
 +
 +      napi_enable(&channel->napi_str);
 +      if (efx->legacy_irq)
 +              efx->legacy_irq_enabled = true;
 +      efx_nic_enable_interrupts(efx);
 +}
 +
 +/* Create event queue
 + * Event queue memory allocations are done only once.  If the channel
 + * is reset, the memory buffer will be reused; this guards against
 + * errors during channel reset and also simplifies interrupt handling.
 + */
 +static int efx_probe_eventq(struct efx_channel *channel)
 +{
 +      struct efx_nic *efx = channel->efx;
 +      unsigned long entries;
 +
 +      netif_dbg(channel->efx, probe, channel->efx->net_dev,
 +                "chan %d create event queue\n", channel->channel);
 +
 +      /* Build an event queue with room for one event per tx and rx buffer,
 +       * plus some extra for link state events and MCDI completions. */
 +      entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
 +      EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
 +      channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
 +
 +      return efx_nic_probe_eventq(channel);
 +}
 +
 +/* Prepare channel's event queue */
 +static void efx_init_eventq(struct efx_channel *channel)
 +{
 +      netif_dbg(channel->efx, drv, channel->efx->net_dev,
 +                "chan %d init event queue\n", channel->channel);
 +
 +      channel->eventq_read_ptr = 0;
 +
 +      efx_nic_init_eventq(channel);
 +}
 +
 +static void efx_fini_eventq(struct efx_channel *channel)
 +{
 +      netif_dbg(channel->efx, drv, channel->efx->net_dev,
 +                "chan %d fini event queue\n", channel->channel);
 +
 +      efx_nic_fini_eventq(channel);
 +}
 +
 +static void efx_remove_eventq(struct efx_channel *channel)
 +{
 +      netif_dbg(channel->efx, drv, channel->efx->net_dev,
 +                "chan %d remove event queue\n", channel->channel);
 +
 +      efx_nic_remove_eventq(channel);
 +}
 +
 +/**************************************************************************
 + *
 + * Channel handling
 + *
 + *************************************************************************/
 +
 +/* Allocate and initialise a channel structure, optionally copying
 + * parameters (but not resources) from an old channel structure. */
 +static struct efx_channel *
 +efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 +{
 +      struct efx_channel *channel;
 +      struct efx_rx_queue *rx_queue;
 +      struct efx_tx_queue *tx_queue;
 +      int j;
 +
 +      if (old_channel) {
 +              channel = kmalloc(sizeof(*channel), GFP_KERNEL);
 +              if (!channel)
 +                      return NULL;
 +
 +              *channel = *old_channel;
 +
 +              channel->napi_dev = NULL;
 +              memset(&channel->eventq, 0, sizeof(channel->eventq));
 +
 +              rx_queue = &channel->rx_queue;
 +              rx_queue->buffer = NULL;
 +              memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
 +
 +              for (j = 0; j < EFX_TXQ_TYPES; j++) {
 +                      tx_queue = &channel->tx_queue[j];
 +                      if (tx_queue->channel)
 +                              tx_queue->channel = channel;
 +                      tx_queue->buffer = NULL;
 +                      memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
 +              }
 +      } else {
 +              channel = kzalloc(sizeof(*channel), GFP_KERNEL);
 +              if (!channel)
 +                      return NULL;
 +
 +              channel->efx = efx;
 +              channel->channel = i;
 +
 +              for (j = 0; j < EFX_TXQ_TYPES; j++) {
 +                      tx_queue = &channel->tx_queue[j];
 +                      tx_queue->efx = efx;
 +                      tx_queue->queue = i * EFX_TXQ_TYPES + j;
 +                      tx_queue->channel = channel;
 +              }
 +      }
 +
 +      rx_queue = &channel->rx_queue;
 +      rx_queue->efx = efx;
 +      setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
 +                  (unsigned long)rx_queue);
 +
 +      return channel;
 +}
 +
 +static int efx_probe_channel(struct efx_channel *channel)
 +{
 +      struct efx_tx_queue *tx_queue;
 +      struct efx_rx_queue *rx_queue;
 +      int rc;
 +
 +      netif_dbg(channel->efx, probe, channel->efx->net_dev,
 +                "creating channel %d\n", channel->channel);
 +
 +      rc = efx_probe_eventq(channel);
 +      if (rc)
 +              goto fail1;
 +
 +      efx_for_each_channel_tx_queue(tx_queue, channel) {
 +              rc = efx_probe_tx_queue(tx_queue);
 +              if (rc)
 +                      goto fail2;
 +      }
 +
 +      efx_for_each_channel_rx_queue(rx_queue, channel) {
 +              rc = efx_probe_rx_queue(rx_queue);
 +              if (rc)
 +                      goto fail3;
 +      }
 +
 +      channel->n_rx_frm_trunc = 0;
 +
 +      return 0;
 +
 + fail3:
 +      efx_for_each_channel_rx_queue(rx_queue, channel)
 +              efx_remove_rx_queue(rx_queue);
 + fail2:
 +      efx_for_each_channel_tx_queue(tx_queue, channel)
 +              efx_remove_tx_queue(tx_queue);
 + fail1:
 +      return rc;
 +}
 +
 +
 +static void efx_set_channel_names(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +      const char *type = "";
 +      int number;
 +
 +      efx_for_each_channel(channel, efx) {
 +              number = channel->channel;
 +              if (efx->n_channels > efx->n_rx_channels) {
 +                      if (channel->channel < efx->n_rx_channels) {
 +                              type = "-rx";
 +                      } else {
 +                              type = "-tx";
 +                              number -= efx->n_rx_channels;
 +                      }
 +              }
 +              snprintf(efx->channel_name[channel->channel],
 +                       sizeof(efx->channel_name[0]),
 +                       "%s%s-%d", efx->name, type, number);
 +      }
 +}
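 +
 +/* With separate TX channels the names built above come out as, for example,
 + * "eth0-rx-0" ... "eth0-rx-3" followed by "eth0-tx-0" ...; otherwise each
 + * channel is simply "eth0-0", "eth0-1", and so on (the interface name here
 + * is only illustrative).
 + */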
 +
 +static int efx_probe_channels(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +      int rc;
 +
 +      /* Restart special buffer allocation */
 +      efx->next_buffer_table = 0;
 +
 +      efx_for_each_channel(channel, efx) {
 +              rc = efx_probe_channel(channel);
 +              if (rc) {
 +                      netif_err(efx, probe, efx->net_dev,
 +                                "failed to create channel %d\n",
 +                                channel->channel);
 +                      goto fail;
 +              }
 +      }
 +      efx_set_channel_names(efx);
 +
 +      return 0;
 +
 +fail:
 +      efx_remove_channels(efx);
 +      return rc;
 +}
 +
 +/* Channels are shutdown and reinitialised whilst the NIC is running
 + * to propagate configuration changes (mtu, checksum offload), or
 + * to clear hardware error conditions
 + */
 +static void efx_init_channels(struct efx_nic *efx)
 +{
 +      struct efx_tx_queue *tx_queue;
 +      struct efx_rx_queue *rx_queue;
 +      struct efx_channel *channel;
 +
 +      /* Calculate the rx buffer allocation parameters required to
 +       * support the current MTU, including padding for header
 +       * alignment and overruns.
 +       */
 +      efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
 +                            EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
 +                            efx->type->rx_buffer_hash_size +
 +                            efx->type->rx_buffer_padding);
 +      efx->rx_buffer_order = get_order(efx->rx_buffer_len +
 +                                       sizeof(struct efx_rx_page_state));
 +
 +      /* Initialise the channels */
 +      efx_for_each_channel(channel, efx) {
 +              netif_dbg(channel->efx, drv, channel->efx->net_dev,
 +                        "init chan %d\n", channel->channel);
 +
 +              efx_init_eventq(channel);
 +
 +              efx_for_each_channel_tx_queue(tx_queue, channel)
 +                      efx_init_tx_queue(tx_queue);
 +
 +              /* The rx buffer allocation strategy is MTU dependent */
 +              efx_rx_strategy(channel);
 +
 +              efx_for_each_channel_rx_queue(rx_queue, channel)
 +                      efx_init_rx_queue(rx_queue);
 +
 +              WARN_ON(channel->rx_pkt != NULL);
 +              efx_rx_strategy(channel);
 +      }
 +}
 +
 +/* This enables event queue processing and packet transmission.
 + *
 + * Note that this function is not allowed to fail, since that would
 + * introduce too much complexity into the suspend/resume path.
 + */
 +static void efx_start_channel(struct efx_channel *channel)
 +{
 +      struct efx_rx_queue *rx_queue;
 +
 +      netif_dbg(channel->efx, ifup, channel->efx->net_dev,
 +                "starting chan %d\n", channel->channel);
 +
 +      /* The interrupt handler for this channel may set work_pending
 +       * as soon as we enable it.  Make sure it's cleared before
 +       * then.  Similarly, make sure it sees the enabled flag set. */
 +      channel->work_pending = false;
 +      channel->enabled = true;
 +      smp_wmb();
 +
 +      /* Fill the queues before enabling NAPI */
 +      efx_for_each_channel_rx_queue(rx_queue, channel)
 +              efx_fast_push_rx_descriptors(rx_queue);
 +
 +      napi_enable(&channel->napi_str);
 +}
 +
 +/* This disables event queue processing and packet transmission.
 + * This function does not guarantee that all queue processing
 + * (e.g. RX refill) is complete.
 + */
 +static void efx_stop_channel(struct efx_channel *channel)
 +{
 +      if (!channel->enabled)
 +              return;
 +
 +      netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
 +                "stop chan %d\n", channel->channel);
 +
 +      channel->enabled = false;
 +      napi_disable(&channel->napi_str);
 +}
 +
 +static void efx_fini_channels(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +      struct efx_tx_queue *tx_queue;
 +      struct efx_rx_queue *rx_queue;
 +      int rc;
 +
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +      BUG_ON(efx->port_enabled);
 +
 +      rc = efx_nic_flush_queues(efx);
 +      if (rc && EFX_WORKAROUND_7803(efx)) {
 +              /* Schedule a reset to recover from the flush failure. The
 +               * descriptor caches reference memory we're about to free,
 +               * but falcon_reconfigure_mac_wrapper() won't reconnect
 +               * the MACs because of the pending reset. */
 +              netif_err(efx, drv, efx->net_dev,
 +                        "Resetting to recover from flush failure\n");
 +              efx_schedule_reset(efx, RESET_TYPE_ALL);
 +      } else if (rc) {
 +              netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
 +      } else {
 +              netif_dbg(efx, drv, efx->net_dev,
 +                        "successfully flushed all queues\n");
 +      }
 +
 +      efx_for_each_channel(channel, efx) {
 +              netif_dbg(channel->efx, drv, channel->efx->net_dev,
 +                        "shut down chan %d\n", channel->channel);
 +
 +              efx_for_each_channel_rx_queue(rx_queue, channel)
 +                      efx_fini_rx_queue(rx_queue);
 +              efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 +                      efx_fini_tx_queue(tx_queue);
 +              efx_fini_eventq(channel);
 +      }
 +}
 +
 +static void efx_remove_channel(struct efx_channel *channel)
 +{
 +      struct efx_tx_queue *tx_queue;
 +      struct efx_rx_queue *rx_queue;
 +
 +      netif_dbg(channel->efx, drv, channel->efx->net_dev,
 +                "destroy chan %d\n", channel->channel);
 +
 +      efx_for_each_channel_rx_queue(rx_queue, channel)
 +              efx_remove_rx_queue(rx_queue);
 +      efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 +              efx_remove_tx_queue(tx_queue);
 +      efx_remove_eventq(channel);
 +}
 +
 +static void efx_remove_channels(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +
 +      efx_for_each_channel(channel, efx)
 +              efx_remove_channel(channel);
 +}
 +
 +int
 +efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 +{
 +      struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
 +      u32 old_rxq_entries, old_txq_entries;
 +      unsigned i;
 +      int rc;
 +
 +      efx_stop_all(efx);
 +      efx_fini_channels(efx);
 +
 +      /* Clone channels */
 +      memset(other_channel, 0, sizeof(other_channel));
 +      for (i = 0; i < efx->n_channels; i++) {
 +              channel = efx_alloc_channel(efx, i, efx->channel[i]);
 +              if (!channel) {
 +                      rc = -ENOMEM;
 +                      goto out;
 +              }
 +              other_channel[i] = channel;
 +      }
 +
 +      /* Swap entry counts and channel pointers */
 +      old_rxq_entries = efx->rxq_entries;
 +      old_txq_entries = efx->txq_entries;
 +      efx->rxq_entries = rxq_entries;
 +      efx->txq_entries = txq_entries;
 +      for (i = 0; i < efx->n_channels; i++) {
 +              channel = efx->channel[i];
 +              efx->channel[i] = other_channel[i];
 +              other_channel[i] = channel;
 +      }
 +
 +      rc = efx_probe_channels(efx);
 +      if (rc)
 +              goto rollback;
 +
 +      efx_init_napi(efx);
 +
 +      /* Destroy old channels */
 +      for (i = 0; i < efx->n_channels; i++) {
 +              efx_fini_napi_channel(other_channel[i]);
 +              efx_remove_channel(other_channel[i]);
 +      }
 +out:
 +      /* Free unused channel structures */
 +      for (i = 0; i < efx->n_channels; i++)
 +              kfree(other_channel[i]);
 +
 +      efx_init_channels(efx);
 +      efx_start_all(efx);
 +      return rc;
 +
 +rollback:
 +      /* Swap back */
 +      efx->rxq_entries = old_rxq_entries;
 +      efx->txq_entries = old_txq_entries;
 +      for (i = 0; i < efx->n_channels; i++) {
 +              channel = efx->channel[i];
 +              efx->channel[i] = other_channel[i];
 +              other_channel[i] = channel;
 +      }
 +      goto out;
 +}
 +
 +void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 +{
 +      mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 +}
 +
 +/**************************************************************************
 + *
 + * Port handling
 + *
 + **************************************************************************/
 +
 +/* This ensures that the kernel is kept informed (via
 + * netif_carrier_on/off) of the link status, and also keeps the port's
 + * TX queue stopped, via the carrier state, while the link is down.
 + */
 +void efx_link_status_changed(struct efx_nic *efx)
 +{
 +      struct efx_link_state *link_state = &efx->link_state;
 +
 +      /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
 +       * that no events are triggered between unregister_netdev() and the
 +       * driver unloading. A more general condition is that NETDEV_CHANGE
 +       * can only be generated between NETDEV_UP and NETDEV_DOWN */
 +      if (!netif_running(efx->net_dev))
 +              return;
 +
 +      if (link_state->up != netif_carrier_ok(efx->net_dev)) {
 +              efx->n_link_state_changes++;
 +
 +              if (link_state->up)
 +                      netif_carrier_on(efx->net_dev);
 +              else
 +                      netif_carrier_off(efx->net_dev);
 +      }
 +
 +      /* Status message for kernel log */
 +      if (link_state->up) {
 +              netif_info(efx, link, efx->net_dev,
 +                         "link up at %uMbps %s-duplex (MTU %d)%s\n",
 +                         link_state->speed, link_state->fd ? "full" : "half",
 +                         efx->net_dev->mtu,
 +                         (efx->promiscuous ? " [PROMISC]" : ""));
 +      } else {
 +              netif_info(efx, link, efx->net_dev, "link down\n");
 +      }
 +
 +}
 +
 +void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
 +{
 +      efx->link_advertising = advertising;
 +      if (advertising) {
 +              if (advertising & ADVERTISED_Pause)
 +                      efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
 +              else
 +                      efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
 +              if (advertising & ADVERTISED_Asym_Pause)
 +                      efx->wanted_fc ^= EFX_FC_TX;
 +      }
 +}
 +
 +void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
 +{
 +      efx->wanted_fc = wanted_fc;
 +      if (efx->link_advertising) {
 +              if (wanted_fc & EFX_FC_RX)
 +                      efx->link_advertising |= (ADVERTISED_Pause |
 +                                                ADVERTISED_Asym_Pause);
 +              else
 +                      efx->link_advertising &= ~(ADVERTISED_Pause |
 +                                                 ADVERTISED_Asym_Pause);
 +              if (wanted_fc & EFX_FC_TX)
 +                      efx->link_advertising ^= ADVERTISED_Asym_Pause;
 +      }
 +}
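 +
 +/* Taken together, the two helpers above keep link_advertising and wanted_fc
 + * consistent: advertising Pause alone maps to symmetric EFX_FC_TX|EFX_FC_RX,
 + * Pause plus Asym_Pause to RX-only and Asym_Pause alone to TX-only; in the
 + * other direction, wanting TX+RX advertises Pause only, RX-only advertises
 + * Pause|Asym_Pause and TX-only advertises Asym_Pause alone.
 + */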
 +
 +static void efx_fini_port(struct efx_nic *efx);
 +
 +/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 + * the MAC appropriately. All other PHY configuration changes are pushed
 + * through phy_op->set_settings(), and pushed asynchronously to the MAC
 + * through efx_monitor().
 + *
 + * Callers must hold the mac_lock
 + */
 +int __efx_reconfigure_port(struct efx_nic *efx)
 +{
 +      enum efx_phy_mode phy_mode;
 +      int rc;
 +
 +      WARN_ON(!mutex_is_locked(&efx->mac_lock));
 +
 +      /* Serialise the promiscuous flag with efx_set_multicast_list. */
 +      if (efx_dev_registered(efx)) {
 +              netif_addr_lock_bh(efx->net_dev);
 +              netif_addr_unlock_bh(efx->net_dev);
 +      }
 +
 +      /* Disable PHY transmit in mac level loopbacks */
 +      phy_mode = efx->phy_mode;
 +      if (LOOPBACK_INTERNAL(efx))
 +              efx->phy_mode |= PHY_MODE_TX_DISABLED;
 +      else
 +              efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
 +
 +      rc = efx->type->reconfigure_port(efx);
 +
 +      if (rc)
 +              efx->phy_mode = phy_mode;
 +
 +      return rc;
 +}
 +
 +/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 + * disabled. */
 +int efx_reconfigure_port(struct efx_nic *efx)
 +{
 +      int rc;
 +
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      mutex_lock(&efx->mac_lock);
 +      rc = __efx_reconfigure_port(efx);
 +      mutex_unlock(&efx->mac_lock);
 +
 +      return rc;
 +}
 +
 +/* Asynchronous work item for changing MAC promiscuity and multicast
 + * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
 + * MAC directly. */
 +static void efx_mac_work(struct work_struct *data)
 +{
 +      struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
 +
 +      mutex_lock(&efx->mac_lock);
 +      if (efx->port_enabled) {
 +              efx->type->push_multicast_hash(efx);
 +              efx->mac_op->reconfigure(efx);
 +      }
 +      mutex_unlock(&efx->mac_lock);
 +}
 +
 +static int efx_probe_port(struct efx_nic *efx)
 +{
 +      unsigned char *perm_addr;
 +      int rc;
 +
 +      netif_dbg(efx, probe, efx->net_dev, "create port\n");
 +
 +      if (phy_flash_cfg)
 +              efx->phy_mode = PHY_MODE_SPECIAL;
 +
 +      /* Connect up MAC/PHY operations table */
 +      rc = efx->type->probe_port(efx);
 +      if (rc)
 +              return rc;
 +
 +      /* Sanity check MAC address */
 +      perm_addr = efx->net_dev->perm_addr;
 +      if (is_valid_ether_addr(perm_addr)) {
 +              memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
 +      } else {
 +              netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
 +                        perm_addr);
 +              if (!allow_bad_hwaddr) {
 +                      rc = -EINVAL;
 +                      goto err;
 +              }
 +              random_ether_addr(efx->net_dev->dev_addr);
 +              netif_info(efx, probe, efx->net_dev,
 +                         "using locally-generated MAC %pM\n",
 +                         efx->net_dev->dev_addr);
 +      }
 +
 +      return 0;
 +
 + err:
 +      efx->type->remove_port(efx);
 +      return rc;
 +}
 +
 +static int efx_init_port(struct efx_nic *efx)
 +{
 +      int rc;
 +
 +      netif_dbg(efx, drv, efx->net_dev, "init port\n");
 +
 +      mutex_lock(&efx->mac_lock);
 +
 +      rc = efx->phy_op->init(efx);
 +      if (rc)
 +              goto fail1;
 +
 +      efx->port_initialized = true;
 +
 +      /* Reconfigure the MAC before creating dma queues (required for
 +       * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
 +      efx->mac_op->reconfigure(efx);
 +
 +      /* Ensure the PHY advertises the correct flow control settings */
 +      rc = efx->phy_op->reconfigure(efx);
 +      if (rc)
 +              goto fail2;
 +
 +      mutex_unlock(&efx->mac_lock);
 +      return 0;
 +
 +fail2:
 +      efx->phy_op->fini(efx);
 +fail1:
 +      mutex_unlock(&efx->mac_lock);
 +      return rc;
 +}
 +
 +static void efx_start_port(struct efx_nic *efx)
 +{
 +      netif_dbg(efx, ifup, efx->net_dev, "start port\n");
 +      BUG_ON(efx->port_enabled);
 +
 +      mutex_lock(&efx->mac_lock);
 +      efx->port_enabled = true;
 +
 +      /* efx_mac_work() might have been scheduled after efx_stop_port(),
 +       * and then cancelled by efx_flush_all() */
 +      efx->type->push_multicast_hash(efx);
 +      efx->mac_op->reconfigure(efx);
 +
 +      mutex_unlock(&efx->mac_lock);
 +}
 +
 +/* Prevent efx_mac_work() and efx_monitor() from working */
 +static void efx_stop_port(struct efx_nic *efx)
 +{
 +      netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
 +
 +      mutex_lock(&efx->mac_lock);
 +      efx->port_enabled = false;
 +      mutex_unlock(&efx->mac_lock);
 +
 +      /* Serialise against efx_set_multicast_list() */
 +      if (efx_dev_registered(efx)) {
 +              netif_addr_lock_bh(efx->net_dev);
 +              netif_addr_unlock_bh(efx->net_dev);
 +      }
 +}
 +
 +static void efx_fini_port(struct efx_nic *efx)
 +{
 +      netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
 +
 +      if (!efx->port_initialized)
 +              return;
 +
 +      efx->phy_op->fini(efx);
 +      efx->port_initialized = false;
 +
 +      efx->link_state.up = false;
 +      efx_link_status_changed(efx);
 +}
 +
 +static void efx_remove_port(struct efx_nic *efx)
 +{
 +      netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
 +
 +      efx->type->remove_port(efx);
 +}
 +
 +/**************************************************************************
 + *
 + * NIC handling
 + *
 + **************************************************************************/
 +
 +/* This configures the PCI device to enable I/O and DMA. */
 +static int efx_init_io(struct efx_nic *efx)
 +{
 +      struct pci_dev *pci_dev = efx->pci_dev;
 +      dma_addr_t dma_mask = efx->type->max_dma_mask;
-       /* bug22643: If SR-IOV is enabled then tx push over a write combined
-        * mapping is unsafe. We need to disable write combining in this case.
-        * MSI is unsupported when SR-IOV is enabled, and the firmware will
-        * have removed the MSI capability. So write combining is safe if
-        * there is an MSI capability.
-        */
-       use_wc = (!EFX_WORKAROUND_22643(efx) ||
-                 pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
-       if (use_wc)
-               efx->membase = ioremap_wc(efx->membase_phys,
-                                         efx->type->mem_map_size);
-       else
-               efx->membase = ioremap_nocache(efx->membase_phys,
-                                              efx->type->mem_map_size);
 +      int rc;
 +
 +      netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
 +
 +      rc = pci_enable_device(pci_dev);
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "failed to enable PCI device\n");
 +              goto fail1;
 +      }
 +
 +      pci_set_master(pci_dev);
 +
 +      /* Set the PCI DMA mask.  Try all possibilities from our
 +       * genuine mask down to 32 bits, because some architectures
 +       * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
 +       * masks even though they reject 46 bit masks.
 +       */
 +      while (dma_mask > 0x7fffffffUL) {
 +              if (pci_dma_supported(pci_dev, dma_mask) &&
 +                  ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
 +                      break;
 +              dma_mask >>= 1;
 +      }
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "could not find a suitable DMA mask\n");
 +              goto fail2;
 +      }
 +      netif_dbg(efx, probe, efx->net_dev,
 +                "using DMA mask %llx\n", (unsigned long long) dma_mask);
 +      rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
 +      if (rc) {
 +              /* pci_set_consistent_dma_mask() is not *allowed* to
 +               * fail with a mask that pci_set_dma_mask() accepted,
 +               * but just in case...
 +               */
 +              netif_err(efx, probe, efx->net_dev,
 +                        "failed to set consistent DMA mask\n");
 +              goto fail2;
 +      }
 +
 +      efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
 +      rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "request for memory BAR failed\n");
 +              rc = -EIO;
 +              goto fail3;
 +      }
++      efx->membase = ioremap_nocache(efx->membase_phys,
++                                     efx->type->mem_map_size);
 +      if (!efx->membase) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "could not map memory BAR at %llx+%x\n",
 +                        (unsigned long long)efx->membase_phys,
 +                        efx->type->mem_map_size);
 +              rc = -ENOMEM;
 +              goto fail4;
 +      }
 +      netif_dbg(efx, probe, efx->net_dev,
 +                "memory BAR at %llx+%x (virtual %p)\n",
 +                (unsigned long long)efx->membase_phys,
 +                efx->type->mem_map_size, efx->membase);
 +
 +      return 0;
 +
 + fail4:
 +      pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 + fail3:
 +      efx->membase_phys = 0;
 + fail2:
 +      pci_disable_device(efx->pci_dev);
 + fail1:
 +      return rc;
 +}
 +
 +static void efx_fini_io(struct efx_nic *efx)
 +{
 +      netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
 +
 +      if (efx->membase) {
 +              iounmap(efx->membase);
 +              efx->membase = NULL;
 +      }
 +
 +      if (efx->membase_phys) {
 +              pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 +              efx->membase_phys = 0;
 +      }
 +
 +      pci_disable_device(efx->pci_dev);
 +}
 +
 +/* Get number of channels wanted.  Each channel will have its own IRQ,
 + * 1 RX queue and/or 2 TX queues. */
 +static int efx_wanted_channels(void)
 +{
 +      cpumask_var_t core_mask;
 +      int count;
 +      int cpu;
 +
 +      if (rss_cpus)
 +              return rss_cpus;
 +
 +      if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 +              printk(KERN_WARNING
 +                     "sfc: RSS disabled due to allocation failure\n");
 +              return 1;
 +      }
 +
 +      count = 0;
 +      for_each_online_cpu(cpu) {
 +              if (!cpumask_test_cpu(cpu, core_mask)) {
 +                      ++count;
 +                      cpumask_or(core_mask, core_mask,
 +                                 topology_core_cpumask(cpu));
 +              }
 +      }
 +
 +      free_cpumask_var(core_mask);
 +      return count;
 +}
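 +
 +/* Without the rss_cpus override this works out to one channel per physical
 + * package: for example a two-socket box with eight hyper-threaded cores per
 + * socket still yields two channels, because topology_core_cpumask() folds
 + * every CPU sharing a package into a single pass of the loop above.
 + */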
 +
 +static int
 +efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
 +{
 +#ifdef CONFIG_RFS_ACCEL
 +      int i, rc;
 +
 +      efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
 +      if (!efx->net_dev->rx_cpu_rmap)
 +              return -ENOMEM;
 +      for (i = 0; i < efx->n_rx_channels; i++) {
 +              rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
 +                                    xentries[i].vector);
 +              if (rc) {
 +                      free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
 +                      efx->net_dev->rx_cpu_rmap = NULL;
 +                      return rc;
 +              }
 +      }
 +#endif
 +      return 0;
 +}
 +
 +/* Probe the number and type of interrupts we are able to obtain, and
 + * the resulting numbers of channels and RX queues.
 + */
 +static int efx_probe_interrupts(struct efx_nic *efx)
 +{
 +      int max_channels =
 +              min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
 +      int rc, i;
 +
 +      if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
 +              struct msix_entry xentries[EFX_MAX_CHANNELS];
 +              int n_channels;
 +
 +              n_channels = efx_wanted_channels();
 +              if (separate_tx_channels)
 +                      n_channels *= 2;
 +              n_channels = min(n_channels, max_channels);
 +
 +              for (i = 0; i < n_channels; i++)
 +                      xentries[i].entry = i;
 +              rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
 +              if (rc > 0) {
 +                      netif_err(efx, drv, efx->net_dev,
 +                                "WARNING: Insufficient MSI-X vectors"
 +                                " available (%d < %d).\n", rc, n_channels);
 +                      netif_err(efx, drv, efx->net_dev,
 +                                "WARNING: Performance may be reduced.\n");
 +                      EFX_BUG_ON_PARANOID(rc >= n_channels);
 +                      n_channels = rc;
 +                      rc = pci_enable_msix(efx->pci_dev, xentries,
 +                                           n_channels);
 +              }
 +
 +              if (rc == 0) {
 +                      efx->n_channels = n_channels;
 +                      if (separate_tx_channels) {
 +                              efx->n_tx_channels =
 +                                      max(efx->n_channels / 2, 1U);
 +                              efx->n_rx_channels =
 +                                      max(efx->n_channels -
 +                                          efx->n_tx_channels, 1U);
 +                      } else {
 +                              efx->n_tx_channels = efx->n_channels;
 +                              efx->n_rx_channels = efx->n_channels;
 +                      }
 +                      rc = efx_init_rx_cpu_rmap(efx, xentries);
 +                      if (rc) {
 +                              pci_disable_msix(efx->pci_dev);
 +                              return rc;
 +                      }
 +                      for (i = 0; i < n_channels; i++)
 +                              efx_get_channel(efx, i)->irq =
 +                                      xentries[i].vector;
 +              } else {
 +                      /* Fall back to single channel MSI */
 +                      efx->interrupt_mode = EFX_INT_MODE_MSI;
 +                      netif_err(efx, drv, efx->net_dev,
 +                                "could not enable MSI-X\n");
 +              }
 +      }
 +
 +      /* Try single interrupt MSI */
 +      if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
 +              efx->n_channels = 1;
 +              efx->n_rx_channels = 1;
 +              efx->n_tx_channels = 1;
 +              rc = pci_enable_msi(efx->pci_dev);
 +              if (rc == 0) {
 +                      efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
 +              } else {
 +                      netif_err(efx, drv, efx->net_dev,
 +                                "could not enable MSI\n");
 +                      efx->interrupt_mode = EFX_INT_MODE_LEGACY;
 +              }
 +      }
 +
 +      /* Assume legacy interrupts */
 +      if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
 +              efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
 +              efx->n_rx_channels = 1;
 +              efx->n_tx_channels = 1;
 +              efx->legacy_irq = efx->pci_dev->irq;
 +      }
 +
 +      return 0;
 +}
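 +
 +/* The net effect is a graceful fall-back: MSI-X with one vector per channel
 + * where possible, then a single MSI vector, then the legacy INTx line, with
 + * n_channels shrunk to match whatever the platform actually granted.
 + */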
 +
 +static void efx_remove_interrupts(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +
 +      /* Remove MSI/MSI-X interrupts */
 +      efx_for_each_channel(channel, efx)
 +              channel->irq = 0;
 +      pci_disable_msi(efx->pci_dev);
 +      pci_disable_msix(efx->pci_dev);
 +
 +      /* Remove legacy interrupt */
 +      efx->legacy_irq = 0;
 +}
 +
 +static void efx_set_channels(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +      struct efx_tx_queue *tx_queue;
 +
 +      efx->tx_channel_offset =
 +              separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 +
 +      /* We need to adjust the TX queue numbers if we have separate
 +       * RX-only and TX-only channels.
 +       */
 +      efx_for_each_channel(channel, efx) {
 +              efx_for_each_channel_tx_queue(tx_queue, channel)
 +                      tx_queue->queue -= (efx->tx_channel_offset *
 +                                          EFX_TXQ_TYPES);
 +      }
 +}
 +
 +static int efx_probe_nic(struct efx_nic *efx)
 +{
 +      size_t i;
 +      int rc;
 +
 +      netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
 +
 +      /* Carry out hardware-type specific initialisation */
 +      rc = efx->type->probe(efx);
 +      if (rc)
 +              return rc;
 +
 +      /* Determine the number of channels and queues by trying to hook
 +       * in MSI-X interrupts. */
 +      rc = efx_probe_interrupts(efx);
 +      if (rc)
 +              goto fail;
 +
 +      if (efx->n_channels > 1)
 +              get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
 +      for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
 +              efx->rx_indir_table[i] = i % efx->n_rx_channels;
 +
 +      efx_set_channels(efx);
 +      netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
 +      netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
 +
 +      /* Initialise the interrupt moderation settings */
 +      efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
 +                              true);
 +
 +      return 0;
 +
 +fail:
 +      efx->type->remove(efx);
 +      return rc;
 +}
 +
 +static void efx_remove_nic(struct efx_nic *efx)
 +{
 +      netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
 +
 +      efx_remove_interrupts(efx);
 +      efx->type->remove(efx);
 +}
 +
 +/**************************************************************************
 + *
 + * NIC startup/shutdown
 + *
 + *************************************************************************/
 +
 +static int efx_probe_all(struct efx_nic *efx)
 +{
 +      int rc;
 +
 +      rc = efx_probe_nic(efx);
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
 +              goto fail1;
 +      }
 +
 +      rc = efx_probe_port(efx);
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev, "failed to create port\n");
 +              goto fail2;
 +      }
 +
 +      efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
 +      rc = efx_probe_channels(efx);
 +      if (rc)
 +              goto fail3;
 +
 +      rc = efx_probe_filters(efx);
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "failed to create filter tables\n");
 +              goto fail4;
 +      }
 +
 +      return 0;
 +
 + fail4:
 +      efx_remove_channels(efx);
 + fail3:
 +      efx_remove_port(efx);
 + fail2:
 +      efx_remove_nic(efx);
 + fail1:
 +      return rc;
 +}
 +
 +/* Called after previous invocation(s) of efx_stop_all, restarts the
 + * port, kernel transmit queue, NAPI processing and hardware interrupts,
 + * and ensures that the port is scheduled to be reconfigured.
 + * This function is safe to call multiple times when the NIC is in any
 + * state. */
 +static void efx_start_all(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      /* Check that it is appropriate to restart the interface. All
 +       * of these flags are safe to read under just the rtnl lock */
 +      if (efx->port_enabled)
 +              return;
 +      if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
 +              return;
 +      if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
 +              return;
 +
 +      /* Mark the port as enabled so port reconfigurations can start, then
 +       * restart the transmit interface early so the watchdog timer stops */
 +      efx_start_port(efx);
 +
 +      if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
 +              netif_tx_wake_all_queues(efx->net_dev);
 +
 +      efx_for_each_channel(channel, efx)
 +              efx_start_channel(channel);
 +
 +      if (efx->legacy_irq)
 +              efx->legacy_irq_enabled = true;
 +      efx_nic_enable_interrupts(efx);
 +
 +      /* Switch to event based MCDI completions after enabling interrupts.
 +       * If a reset has been scheduled, then we need to stay in polled mode.
 +       * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
 +       * reset_pending [modified from an atomic context], we instead guarantee
 +       * that efx_mcdi_mode_poll() isn't reverted erroneously */
 +      efx_mcdi_mode_event(efx);
 +      if (efx->reset_pending)
 +              efx_mcdi_mode_poll(efx);
 +
 +      /* Start the hardware monitor if there is one. Otherwise (we're link
 +       * event driven), we have to poll the PHY because after an event queue
 +       * flush, we could have missed a link state change */
 +      if (efx->type->monitor != NULL) {
 +              queue_delayed_work(efx->workqueue, &efx->monitor_work,
 +                                 efx_monitor_interval);
 +      } else {
 +              mutex_lock(&efx->mac_lock);
 +              if (efx->phy_op->poll(efx))
 +                      efx_link_status_changed(efx);
 +              mutex_unlock(&efx->mac_lock);
 +      }
 +
 +      efx->type->start_stats(efx);
 +}
 +
 +/* Flush all delayed work. Should only be called when no more delayed work
 + * will be scheduled. This doesn't flush pending online resets (efx_reset),
 + * since we're holding the rtnl_lock at this point. */
 +static void efx_flush_all(struct efx_nic *efx)
 +{
 +      /* Make sure the hardware monitor is stopped */
 +      cancel_delayed_work_sync(&efx->monitor_work);
 +      /* Stop scheduled port reconfigurations */
 +      cancel_work_sync(&efx->mac_work);
 +}
 +
 +/* Quiesce hardware and software without bringing the link down.
 + * Safe to call multiple times, when the nic and interface is in any
 + * state. The caller is guaranteed to subsequently be in a position
 + * to modify any hardware and software state they see fit without
 + * taking locks. */
 +static void efx_stop_all(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      /* port_enabled can be read safely under the rtnl lock */
 +      if (!efx->port_enabled)
 +              return;
 +
 +      efx->type->stop_stats(efx);
 +
 +      /* Switch to MCDI polling on Siena before disabling interrupts */
 +      efx_mcdi_mode_poll(efx);
 +
 +      /* Disable interrupts and wait for ISR to complete */
 +      efx_nic_disable_interrupts(efx);
 +      if (efx->legacy_irq) {
 +              synchronize_irq(efx->legacy_irq);
 +              efx->legacy_irq_enabled = false;
 +      }
 +      efx_for_each_channel(channel, efx) {
 +              if (channel->irq)
 +                      synchronize_irq(channel->irq);
 +      }
 +
 +      /* Stop all NAPI processing and synchronous rx refills */
 +      efx_for_each_channel(channel, efx)
 +              efx_stop_channel(channel);
 +
 +      /* Stop all asynchronous port reconfigurations. Since all
 +       * event processing has already been stopped, there is no
 +       * window to lose phy events */
 +      efx_stop_port(efx);
 +
 +      /* Flush efx_mac_work(), refill_workqueue, monitor_work */
 +      efx_flush_all(efx);
 +
 +      /* Stop the kernel transmit interface late, so the watchdog
 +       * timer isn't ticking over the flush */
 +      if (efx_dev_registered(efx)) {
 +              netif_tx_stop_all_queues(efx->net_dev);
 +              netif_tx_lock_bh(efx->net_dev);
 +              netif_tx_unlock_bh(efx->net_dev);
 +      }
 +}
 +
 +static void efx_remove_all(struct efx_nic *efx)
 +{
 +      efx_remove_filters(efx);
 +      efx_remove_channels(efx);
 +      efx_remove_port(efx);
 +      efx_remove_nic(efx);
 +}
 +
 +/**************************************************************************
 + *
 + * Interrupt moderation
 + *
 + **************************************************************************/
 +
 +static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution)
 +{
 +      if (usecs == 0)
 +              return 0;
 +      if (usecs < resolution)
 +              return 1; /* never round down to 0 */
 +      return usecs / resolution;
 +}
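 +
 +/* For example, with a (hypothetical) 5 usec timer resolution a request for
 + * 60 usec becomes 12 ticks, while any non-zero request below the resolution
 + * is rounded up to one tick rather than down to zero.
 + */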
 +
 +/* Set interrupt moderation parameters */
 +int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 +                          unsigned int rx_usecs, bool rx_adaptive,
 +                          bool rx_may_override_tx)
 +{
 +      struct efx_channel *channel;
 +      unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
 +      unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
 +
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX)
 +              return -EINVAL;
 +
 +      if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
 +          !rx_may_override_tx) {
 +              netif_err(efx, drv, efx->net_dev, "Channels are shared. "
 +                        "RX and TX IRQ moderation must be equal\n");
 +              return -EINVAL;
 +      }
 +
 +      efx->irq_rx_adaptive = rx_adaptive;
 +      efx->irq_rx_moderation = rx_ticks;
 +      efx_for_each_channel(channel, efx) {
 +              if (efx_channel_has_rx_queue(channel))
 +                      channel->irq_moderation = rx_ticks;
 +              else if (efx_channel_has_tx_queues(channel))
 +                      channel->irq_moderation = tx_ticks;
 +      }
 +
 +      return 0;
 +}
 +
 +void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
 +                          unsigned int *rx_usecs, bool *rx_adaptive)
 +{
 +      *rx_adaptive = efx->irq_rx_adaptive;
 +      *rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION;
 +
 +      /* If channels are shared between RX and TX, so is IRQ
 +       * moderation.  Otherwise, IRQ moderation is the same for all
 +       * TX channels and is not adaptive.
 +       */
 +      if (efx->tx_channel_offset == 0)
 +              *tx_usecs = *rx_usecs;
 +      else
 +              *tx_usecs =
 +                      efx->channel[efx->tx_channel_offset]->irq_moderation *
 +                      EFX_IRQ_MOD_RESOLUTION;
 +}
 +
 +/**************************************************************************
 + *
 + * Hardware monitor
 + *
 + **************************************************************************/
 +
 +/* Run periodically off the general workqueue */
 +static void efx_monitor(struct work_struct *data)
 +{
 +      struct efx_nic *efx = container_of(data, struct efx_nic,
 +                                         monitor_work.work);
 +
 +      netif_vdbg(efx, timer, efx->net_dev,
 +                 "hardware monitor executing on CPU %d\n",
 +                 raw_smp_processor_id());
 +      BUG_ON(efx->type->monitor == NULL);
 +
 +      /* If the mac_lock is already held then it is likely a port
 +       * reconfiguration is already in place, which will likely do
 +       * most of the work of monitor() anyway. */
 +      if (mutex_trylock(&efx->mac_lock)) {
 +              if (efx->port_enabled)
 +                      efx->type->monitor(efx);
 +              mutex_unlock(&efx->mac_lock);
 +      }
 +
 +      queue_delayed_work(efx->workqueue, &efx->monitor_work,
 +                         efx_monitor_interval);
 +}
 +
 +/**************************************************************************
 + *
 + * ioctls
 + *
 + *************************************************************************/
 +
 +/* Net device ioctl
 + * Context: process, rtnl_lock() held.
 + */
 +static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +      struct mii_ioctl_data *data = if_mii(ifr);
 +
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      /* Convert phy_id from older PRTAD/DEVAD format */
 +      if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
 +          (data->phy_id & 0xfc00) == 0x0400)
 +              data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
 +
 +      return mdio_mii_ioctl(&efx->mdio, data, cmd);
 +}
 +
 +/**************************************************************************
 + *
 + * NAPI interface
 + *
 + **************************************************************************/
 +
 +static void efx_init_napi(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +
 +      efx_for_each_channel(channel, efx) {
 +              channel->napi_dev = efx->net_dev;
 +              netif_napi_add(channel->napi_dev, &channel->napi_str,
 +                             efx_poll, napi_weight);
 +      }
 +}
 +
 +static void efx_fini_napi_channel(struct efx_channel *channel)
 +{
 +      if (channel->napi_dev)
 +              netif_napi_del(&channel->napi_str);
 +      channel->napi_dev = NULL;
 +}
 +
 +static void efx_fini_napi(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +
 +      efx_for_each_channel(channel, efx)
 +              efx_fini_napi_channel(channel);
 +}
 +
 +/**************************************************************************
 + *
 + * Kernel netpoll interface
 + *
 + *************************************************************************/
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +
 +/* Although in the common case interrupts will be disabled, this is not
 + * guaranteed. However, all our work happens inside the NAPI callback,
 + * so no locking is required.
 + */
 +static void efx_netpoll(struct net_device *net_dev)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +      struct efx_channel *channel;
 +
 +      efx_for_each_channel(channel, efx)
 +              efx_schedule_channel(channel);
 +}
 +
 +#endif
 +
 +/**************************************************************************
 + *
 + * Kernel net device interface
 + *
 + *************************************************************************/
 +
 +/* Context: process, rtnl_lock() held. */
 +static int efx_net_open(struct net_device *net_dev)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
 +                raw_smp_processor_id());
 +
 +      if (efx->state == STATE_DISABLED)
 +              return -EIO;
 +      if (efx->phy_mode & PHY_MODE_SPECIAL)
 +              return -EBUSY;
 +      if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
 +              return -EIO;
 +
 +      /* Notify the kernel of the link state polled during driver load,
 +       * before the monitor starts running */
 +      efx_link_status_changed(efx);
 +
 +      efx_start_all(efx);
 +      return 0;
 +}
 +
 +/* Context: process, rtnl_lock() held.
 + * Note that the kernel will ignore our return code; this method
 + * should really be a void.
 + */
 +static int efx_net_stop(struct net_device *net_dev)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +
 +      netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
 +                raw_smp_processor_id());
 +
 +      if (efx->state != STATE_DISABLED) {
 +              /* Stop the device and flush all the channels */
 +              efx_stop_all(efx);
 +              efx_fini_channels(efx);
 +              efx_init_channels(efx);
 +      }
 +
 +      return 0;
 +}
 +
 +/* Context: process, dev_base_lock or RTNL held, non-blocking. */
 +static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +      struct efx_mac_stats *mac_stats = &efx->mac_stats;
 +
 +      spin_lock_bh(&efx->stats_lock);
 +      efx->type->update_stats(efx);
 +      spin_unlock_bh(&efx->stats_lock);
 +
 +      stats->rx_packets = mac_stats->rx_packets;
 +      stats->tx_packets = mac_stats->tx_packets;
 +      stats->rx_bytes = mac_stats->rx_bytes;
 +      stats->tx_bytes = mac_stats->tx_bytes;
 +      stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
 +      stats->multicast = mac_stats->rx_multicast;
 +      stats->collisions = mac_stats->tx_collision;
 +      stats->rx_length_errors = (mac_stats->rx_gtjumbo +
 +                                 mac_stats->rx_length_error);
 +      stats->rx_crc_errors = mac_stats->rx_bad;
 +      stats->rx_frame_errors = mac_stats->rx_align_error;
 +      stats->rx_fifo_errors = mac_stats->rx_overflow;
 +      stats->rx_missed_errors = mac_stats->rx_missed;
 +      stats->tx_window_errors = mac_stats->tx_late_collision;
 +
 +      stats->rx_errors = (stats->rx_length_errors +
 +                          stats->rx_crc_errors +
 +                          stats->rx_frame_errors +
 +                          mac_stats->rx_symbol_error);
 +      stats->tx_errors = (stats->tx_window_errors +
 +                          mac_stats->tx_bad);
 +
 +      return stats;
 +}
 +
 +/* Context: netif_tx_lock held, BHs disabled. */
 +static void efx_watchdog(struct net_device *net_dev)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +
 +      netif_err(efx, tx_err, efx->net_dev,
 +                "TX stuck with port_enabled=%d: resetting channels\n",
 +                efx->port_enabled);
 +
 +      efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
 +}
 +
 +
 +/* Context: process, rtnl_lock() held. */
 +static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +      int rc = 0;
 +
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      if (new_mtu > EFX_MAX_MTU)
 +              return -EINVAL;
 +
 +      efx_stop_all(efx);
 +
 +      netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 +
 +      efx_fini_channels(efx);
 +
 +      mutex_lock(&efx->mac_lock);
 +      /* Reconfigure the MAC before enabling the dma queues so that
 +       * the RX buffers don't overflow */
 +      net_dev->mtu = new_mtu;
 +      efx->mac_op->reconfigure(efx);
 +      mutex_unlock(&efx->mac_lock);
 +
 +      efx_init_channels(efx);
 +
 +      efx_start_all(efx);
 +      return rc;
 +}
 +
 +static int efx_set_mac_address(struct net_device *net_dev, void *data)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +      struct sockaddr *addr = data;
 +      char *new_addr = addr->sa_data;
 +
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      if (!is_valid_ether_addr(new_addr)) {
 +              netif_err(efx, drv, efx->net_dev,
 +                        "invalid ethernet MAC address requested: %pM\n",
 +                        new_addr);
 +              return -EINVAL;
 +      }
 +
 +      memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
 +
 +      /* Reconfigure the MAC */
 +      mutex_lock(&efx->mac_lock);
 +      efx->mac_op->reconfigure(efx);
 +      mutex_unlock(&efx->mac_lock);
 +
 +      return 0;
 +}
 +
 +/* Context: netif_addr_lock held, BHs disabled. */
 +static void efx_set_multicast_list(struct net_device *net_dev)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +      struct netdev_hw_addr *ha;
 +      union efx_multicast_hash *mc_hash = &efx->multicast_hash;
 +      u32 crc;
 +      int bit;
 +
 +      efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
 +
 +      /* Build multicast hash table */
 +      if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
 +              memset(mc_hash, 0xff, sizeof(*mc_hash));
 +      } else {
 +              memset(mc_hash, 0x00, sizeof(*mc_hash));
 +              netdev_for_each_mc_addr(ha, net_dev) {
 +                      crc = ether_crc_le(ETH_ALEN, ha->addr);
 +                      bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
 +                      set_bit_le(bit, mc_hash->byte);
 +              }
 +
 +              /* Broadcast packets go through the multicast hash filter.
 +               * ether_crc_le() of the broadcast address is 0xbe2612ff
 +               * so we always add bit 0xff to the mask.
 +               */
 +              set_bit_le(0xff, mc_hash->byte);
 +      }
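 +
 +      /* Illustrative sketch, not part of the driver: the hash bit for any
 +       * address is simply the low byte of its little-endian CRC-32, which
 +       * implies EFX_MCAST_HASH_ENTRIES is 256 here.  For the broadcast
 +       * address the arithmetic above works out as
 +       *
 +       *      crc = ether_crc_le(ETH_ALEN, "\xff\xff\xff\xff\xff\xff");
 +       *      bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
 +       *          = 0xbe2612ff & 0xff
 +       *          = 0xff;
 +       *
 +       * which is why bit 0xff is set unconditionally.
 +       */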
 +
 +      if (efx->port_enabled)
 +              queue_work(efx->workqueue, &efx->mac_work);
 +      /* Otherwise efx_start_port() will do this */
 +}
 +
 +static int efx_set_features(struct net_device *net_dev, u32 data)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +
 +      /* If disabling RX n-tuple filtering, clear existing filters */
 +      if (net_dev->features & ~data & NETIF_F_NTUPLE)
 +              efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
 +
 +      return 0;
 +}
 +
 +static const struct net_device_ops efx_netdev_ops = {
 +      .ndo_open               = efx_net_open,
 +      .ndo_stop               = efx_net_stop,
 +      .ndo_get_stats64        = efx_net_stats,
 +      .ndo_tx_timeout         = efx_watchdog,
 +      .ndo_start_xmit         = efx_hard_start_xmit,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_do_ioctl           = efx_ioctl,
 +      .ndo_change_mtu         = efx_change_mtu,
 +      .ndo_set_mac_address    = efx_set_mac_address,
 +      .ndo_set_rx_mode        = efx_set_multicast_list,
 +      .ndo_set_features       = efx_set_features,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller = efx_netpoll,
 +#endif
 +      .ndo_setup_tc           = efx_setup_tc,
 +#ifdef CONFIG_RFS_ACCEL
 +      .ndo_rx_flow_steer      = efx_filter_rfs,
 +#endif
 +};
 +
 +static void efx_update_name(struct efx_nic *efx)
 +{
 +      strcpy(efx->name, efx->net_dev->name);
 +      efx_mtd_rename(efx);
 +      efx_set_channel_names(efx);
 +}
 +
 +static int efx_netdev_event(struct notifier_block *this,
 +                          unsigned long event, void *ptr)
 +{
 +      struct net_device *net_dev = ptr;
 +
 +      if (net_dev->netdev_ops == &efx_netdev_ops &&
 +          event == NETDEV_CHANGENAME)
 +              efx_update_name(netdev_priv(net_dev));
 +
 +      return NOTIFY_DONE;
 +}
 +
 +static struct notifier_block efx_netdev_notifier = {
 +      .notifier_call = efx_netdev_event,
 +};
 +
 +static ssize_t
 +show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
 +{
 +      struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 +      return sprintf(buf, "%d\n", efx->phy_type);
 +}
 +static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
 +
 +static int efx_register_netdev(struct efx_nic *efx)
 +{
 +      struct net_device *net_dev = efx->net_dev;
 +      struct efx_channel *channel;
 +      int rc;
 +
 +      net_dev->watchdog_timeo = 5 * HZ;
 +      net_dev->irq = efx->pci_dev->irq;
 +      net_dev->netdev_ops = &efx_netdev_ops;
 +      SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
 +
 +      /* Clear MAC statistics */
 +      efx->mac_op->update_stats(efx);
 +      memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
 +
 +      rtnl_lock();
 +
 +      rc = dev_alloc_name(net_dev, net_dev->name);
 +      if (rc < 0)
 +              goto fail_locked;
 +      efx_update_name(efx);
 +
 +      rc = register_netdevice(net_dev);
 +      if (rc)
 +              goto fail_locked;
 +
 +      efx_for_each_channel(channel, efx) {
 +              struct efx_tx_queue *tx_queue;
 +              efx_for_each_channel_tx_queue(tx_queue, channel)
 +                      efx_init_tx_queue_core_txq(tx_queue);
 +      }
 +
 +      /* Always start with carrier off; PHY events will detect the link */
 +      netif_carrier_off(efx->net_dev);
 +
 +      rtnl_unlock();
 +
 +      rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
 +      if (rc) {
 +              netif_err(efx, drv, efx->net_dev,
 +                        "failed to init net dev attributes\n");
 +              goto fail_registered;
 +      }
 +
 +      return 0;
 +
 +fail_locked:
 +      rtnl_unlock();
 +      netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
 +      return rc;
 +
 +fail_registered:
 +      unregister_netdev(net_dev);
 +      return rc;
 +}
 +
 +static void efx_unregister_netdev(struct efx_nic *efx)
 +{
 +      struct efx_channel *channel;
 +      struct efx_tx_queue *tx_queue;
 +
 +      if (!efx->net_dev)
 +              return;
 +
 +      BUG_ON(netdev_priv(efx->net_dev) != efx);
 +
 +      /* Free up any skbs still remaining. This has to happen before
 +       * we try to unregister the netdev as running their destructors
 +       * may be needed to get the device ref. count to 0. */
 +      efx_for_each_channel(channel, efx) {
 +              efx_for_each_channel_tx_queue(tx_queue, channel)
 +                      efx_release_tx_buffers(tx_queue);
 +      }
 +
 +      if (efx_dev_registered(efx)) {
 +              strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
 +              device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
 +              unregister_netdev(efx->net_dev);
 +      }
 +}
 +
 +/**************************************************************************
 + *
 + * Device reset and suspend
 + *
 + **************************************************************************/
 +
 +/* Tears down the entire software state and most of the hardware state
 + * before reset.  */
 +void efx_reset_down(struct efx_nic *efx, enum reset_type method)
 +{
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      efx_stop_all(efx);
 +      mutex_lock(&efx->mac_lock);
 +
 +      efx_fini_channels(efx);
 +      if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
 +              efx->phy_op->fini(efx);
 +      efx->type->fini(efx);
 +}
 +
 +/* This function will always ensure that the locks acquired in
 + * efx_reset_down() are released. A failure return code indicates
 + * that we were unable to reinitialise the hardware, and the
 + * driver should be disabled. If ok is false, then the rx and tx
 + * engines are not restarted, pending a RESET_DISABLE. */
 +int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 +{
 +      int rc;
 +
 +      EFX_ASSERT_RESET_SERIALISED(efx);
 +
 +      rc = efx->type->init(efx);
 +      if (rc) {
 +              netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
 +              goto fail;
 +      }
 +
 +      if (!ok)
 +              goto fail;
 +
 +      if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
 +              rc = efx->phy_op->init(efx);
 +              if (rc)
 +                      goto fail;
 +              if (efx->phy_op->reconfigure(efx))
 +                      netif_err(efx, drv, efx->net_dev,
 +                                "could not restore PHY settings\n");
 +      }
 +
 +      efx->mac_op->reconfigure(efx);
 +
 +      efx_init_channels(efx);
 +      efx_restore_filters(efx);
 +
 +      mutex_unlock(&efx->mac_lock);
 +
 +      efx_start_all(efx);
 +
 +      return 0;
 +
 +fail:
 +      efx->port_initialized = false;
 +
 +      mutex_unlock(&efx->mac_lock);
 +
 +      return rc;
 +}
 +
 +/* Reset the NIC using the specified method.  Note that the reset may
 + * fail, in which case the card will be left in an unusable state.
 + *
 + * Caller must hold the rtnl_lock.
 + */
 +int efx_reset(struct efx_nic *efx, enum reset_type method)
 +{
 +      int rc, rc2;
 +      bool disabled;
 +
 +      netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
 +                 RESET_TYPE(method));
 +
 +      netif_device_detach(efx->net_dev);
 +      efx_reset_down(efx, method);
 +
 +      rc = efx->type->reset(efx, method);
 +      if (rc) {
 +              netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
 +              goto out;
 +      }
 +
 +      /* Clear flags for the scopes we covered.  We assume the NIC and
 +       * driver are now quiescent so that there is no race here.
 +       */
 +      efx->reset_pending &= -(1 << (method + 1));
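 +      /* Worked example, illustrative only: reset types are ordered so that
 +       * a larger value has a wider scope and covers every smaller type.  In
 +       * two's complement -(1 << (method + 1)) == ~((1 << (method + 1)) - 1),
 +       * so for method == 2:
 +       *
 +       *      1 << (2 + 1)    == ...0001000b
 +       *      -(1 << (2 + 1)) == ...1111000b
 +       *
 +       * i.e. bits 0..2 are cleared while any higher-severity requests stay
 +       * pending for efx_reset_work() to service later.
 +       */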
 +
 +      /* Reinitialise bus-mastering, which may have been turned off before
 +       * the reset was scheduled. This is still appropriate, even in the
 +       * RESET_TYPE_DISABLE since this driver generally assumes the hardware
 +       * can respond to requests. */
 +      pci_set_master(efx->pci_dev);
 +
 +out:
 +      /* Leave device stopped if necessary */
 +      disabled = rc || method == RESET_TYPE_DISABLE;
 +      rc2 = efx_reset_up(efx, method, !disabled);
 +      if (rc2) {
 +              disabled = true;
 +              if (!rc)
 +                      rc = rc2;
 +      }
 +
 +      if (disabled) {
 +              dev_close(efx->net_dev);
 +              netif_err(efx, drv, efx->net_dev, "has been disabled\n");
 +              efx->state = STATE_DISABLED;
 +      } else {
 +              netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
 +              netif_device_attach(efx->net_dev);
 +      }
 +      return rc;
 +}
 +
 +/* The worker thread exists so that code that cannot sleep can
 + * schedule a reset for later.
 + */
 +static void efx_reset_work(struct work_struct *data)
 +{
 +      struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
 +      unsigned long pending = ACCESS_ONCE(efx->reset_pending);
 +
 +      if (!pending)
 +              return;
 +
 +      /* If we're not RUNNING then don't reset. Leave the reset_pending
 +       * flags set so that efx_pci_probe_main will be retried */
 +      if (efx->state != STATE_RUNNING) {
 +              netif_info(efx, drv, efx->net_dev,
 +                         "scheduled reset quenched. NIC not RUNNING\n");
 +              return;
 +      }
 +
 +      rtnl_lock();
 +      (void)efx_reset(efx, fls(pending) - 1);
 +      rtnl_unlock();
 +}
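 +
 +/* Illustrative sketch, not part of the driver: fls() returns the 1-based
 + * index of the most significant set bit, so fls(pending) - 1 selects the
 + * highest-numbered, i.e. most severe, reset type that has been requested.
 + * Assuming the enum ordering implied by efx_schedule_reset() below
 + * (INVISIBLE < ALL < WORLD < DISABLE), two queued requests collapse into
 + * a single reset:
 + *
 + *      pending = (1 << RESET_TYPE_INVISIBLE) | (1 << RESET_TYPE_ALL);
 + *      method  = fls(pending) - 1;     // == RESET_TYPE_ALL
 + *
 + * and efx_reset() then clears every pending bit of equal or lower severity.
 + */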
 +
 +void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 +{
 +      enum reset_type method;
 +
 +      switch (type) {
 +      case RESET_TYPE_INVISIBLE:
 +      case RESET_TYPE_ALL:
 +      case RESET_TYPE_WORLD:
 +      case RESET_TYPE_DISABLE:
 +              method = type;
 +              netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
 +                        RESET_TYPE(method));
 +              break;
 +      default:
 +              method = efx->type->map_reset_reason(type);
 +              netif_dbg(efx, drv, efx->net_dev,
 +                        "scheduling %s reset for %s\n",
 +                        RESET_TYPE(method), RESET_TYPE(type));
 +              break;
 +      }
 +
 +      set_bit(method, &efx->reset_pending);
 +
 +      /* efx_process_channel() will no longer read events once a
 +       * reset is scheduled. So switch back to polled MCDI completions. */
 +      efx_mcdi_mode_poll(efx);
 +
 +      queue_work(reset_workqueue, &efx->reset_work);
 +}
 +
 +/**************************************************************************
 + *
 + * List of NICs we support
 + *
 + **************************************************************************/
 +
 +/* PCI device ID table */
 +static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
 +      {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
 +       .driver_data = (unsigned long) &falcon_a1_nic_type},
 +      {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
 +       .driver_data = (unsigned long) &falcon_b0_nic_type},
 +      {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID),
 +       .driver_data = (unsigned long) &siena_a0_nic_type},
 +      {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID),
 +       .driver_data = (unsigned long) &siena_a0_nic_type},
 +      {0}                     /* end of list */
 +};
 +
 +/**************************************************************************
 + *
 + * Dummy PHY/MAC operations
 + *
 + * Can be used for some unimplemented operations
 + * Needed so all function pointers are valid and do not have to be tested
 + * before use
 + *
 + **************************************************************************/
 +int efx_port_dummy_op_int(struct efx_nic *efx)
 +{
 +      return 0;
 +}
 +void efx_port_dummy_op_void(struct efx_nic *efx) {}
 +
 +static bool efx_port_dummy_op_poll(struct efx_nic *efx)
 +{
 +      return false;
 +}
 +
 +static const struct efx_phy_operations efx_dummy_phy_operations = {
 +      .init            = efx_port_dummy_op_int,
 +      .reconfigure     = efx_port_dummy_op_int,
 +      .poll            = efx_port_dummy_op_poll,
 +      .fini            = efx_port_dummy_op_void,
 +};
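 +
 +/* Illustrative note: efx_init_struct() installs this table as the default
 + * efx->phy_op, so callers may invoke the ops unconditionally, e.g.
 + *
 + *      if (efx->phy_op->poll(efx))
 + *              efx_link_status_changed(efx);
 + *
 + * without NULL-pointer checks, even before a real PHY driver is probed.
 + */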
 +
 +/**************************************************************************
 + *
 + * Data housekeeping
 + *
 + **************************************************************************/
 +
 +/* This zeroes out and then fills in the invariants in a struct
 + * efx_nic (including all sub-structures).
 + */
 +static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
 +                         struct pci_dev *pci_dev, struct net_device *net_dev)
 +{
 +      int i;
 +
 +      /* Initialise common structures */
 +      memset(efx, 0, sizeof(*efx));
 +      spin_lock_init(&efx->biu_lock);
 +#ifdef CONFIG_SFC_MTD
 +      INIT_LIST_HEAD(&efx->mtd_list);
 +#endif
 +      INIT_WORK(&efx->reset_work, efx_reset_work);
 +      INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
 +      efx->pci_dev = pci_dev;
 +      efx->msg_enable = debug;
 +      efx->state = STATE_INIT;
 +      strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 +
 +      efx->net_dev = net_dev;
 +      spin_lock_init(&efx->stats_lock);
 +      mutex_init(&efx->mac_lock);
 +      efx->mac_op = type->default_mac_ops;
 +      efx->phy_op = &efx_dummy_phy_operations;
 +      efx->mdio.dev = net_dev;
 +      INIT_WORK(&efx->mac_work, efx_mac_work);
 +
 +      for (i = 0; i < EFX_MAX_CHANNELS; i++) {
 +              efx->channel[i] = efx_alloc_channel(efx, i, NULL);
 +              if (!efx->channel[i])
 +                      goto fail;
 +      }
 +
 +      efx->type = type;
 +
 +      EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 +
 +      /* Higher numbered interrupt modes are less capable! */
 +      efx->interrupt_mode = max(efx->type->max_interrupt_mode,
 +                                interrupt_mode);
 +
 +      /* Would be good to use the net_dev name, but we're too early */
 +      snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
 +               pci_name(pci_dev));
 +      efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
 +      if (!efx->workqueue)
 +              goto fail;
 +
 +      return 0;
 +
 +fail:
 +      efx_fini_struct(efx);
 +      return -ENOMEM;
 +}
 +
 +static void efx_fini_struct(struct efx_nic *efx)
 +{
 +      int i;
 +
 +      for (i = 0; i < EFX_MAX_CHANNELS; i++)
 +              kfree(efx->channel[i]);
 +
 +      if (efx->workqueue) {
 +              destroy_workqueue(efx->workqueue);
 +              efx->workqueue = NULL;
 +      }
 +}
 +
 +/**************************************************************************
 + *
 + * PCI interface
 + *
 + **************************************************************************/
 +
 +/* Main body of final NIC shutdown code
 + * This is called only at module unload (or hotplug removal).
 + */
 +static void efx_pci_remove_main(struct efx_nic *efx)
 +{
 +#ifdef CONFIG_RFS_ACCEL
 +      free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
 +      efx->net_dev->rx_cpu_rmap = NULL;
 +#endif
 +      efx_nic_fini_interrupt(efx);
 +      efx_fini_channels(efx);
 +      efx_fini_port(efx);
 +      efx->type->fini(efx);
 +      efx_fini_napi(efx);
 +      efx_remove_all(efx);
 +}
 +
 +/* Final NIC shutdown
 + * This is called only at module unload (or hotplug removal).
 + */
 +static void efx_pci_remove(struct pci_dev *pci_dev)
 +{
 +      struct efx_nic *efx;
 +
 +      efx = pci_get_drvdata(pci_dev);
 +      if (!efx)
 +              return;
 +
 +      /* Mark the NIC as fini, then stop the interface */
 +      rtnl_lock();
 +      efx->state = STATE_FINI;
 +      dev_close(efx->net_dev);
 +
 +      /* Allow any queued efx_resets() to complete */
 +      rtnl_unlock();
 +
 +      efx_unregister_netdev(efx);
 +
 +      efx_mtd_remove(efx);
 +
 +      /* Wait for any scheduled resets to complete. No more will be
 +       * scheduled from this point because efx_stop_all() has been
 +       * called, we are no longer registered with driverlink, and
 +       * the net_devices have been removed. */
 +      cancel_work_sync(&efx->reset_work);
 +
 +      efx_pci_remove_main(efx);
 +
 +      efx_fini_io(efx);
 +      netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
 +
 +      pci_set_drvdata(pci_dev, NULL);
 +      efx_fini_struct(efx);
 +      free_netdev(efx->net_dev);
 +};
 +
 +/* Main body of NIC initialisation
 + * This is called at module load (or hotplug insertion, theoretically).
 + */
 +static int efx_pci_probe_main(struct efx_nic *efx)
 +{
 +      int rc;
 +
 +      /* Do start-of-day initialisation */
 +      rc = efx_probe_all(efx);
 +      if (rc)
 +              goto fail1;
 +
 +      efx_init_napi(efx);
 +
 +      rc = efx->type->init(efx);
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "failed to initialise NIC\n");
 +              goto fail3;
 +      }
 +
 +      rc = efx_init_port(efx);
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "failed to initialise port\n");
 +              goto fail4;
 +      }
 +
 +      efx_init_channels(efx);
 +
 +      rc = efx_nic_init_interrupt(efx);
 +      if (rc)
 +              goto fail5;
 +
 +      return 0;
 +
 + fail5:
 +      efx_fini_channels(efx);
 +      efx_fini_port(efx);
 + fail4:
 +      efx->type->fini(efx);
 + fail3:
 +      efx_fini_napi(efx);
 +      efx_remove_all(efx);
 + fail1:
 +      return rc;
 +}
 +
 +/* NIC initialisation
 + *
 + * This is called at module load (or hotplug insertion,
 + * theoretically).  It sets up PCI mappings, tests and resets the NIC,
 + * sets up and registers the network devices with the kernel and hooks
 + * the interrupt service routine.  It does not prepare the device for
 + * transmission; this is left to the first time one of the network
 + * interfaces is brought up (i.e. efx_net_open).
 + */
 +static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 +                                 const struct pci_device_id *entry)
 +{
 +      const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
 +      struct net_device *net_dev;
 +      struct efx_nic *efx;
 +      int i, rc;
 +
 +      /* Allocate and initialise a struct net_device and struct efx_nic */
 +      net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
 +                                   EFX_MAX_RX_QUEUES);
 +      if (!net_dev)
 +              return -ENOMEM;
 +      net_dev->features |= (type->offload_features | NETIF_F_SG |
 +                            NETIF_F_HIGHDMA | NETIF_F_TSO |
 +                            NETIF_F_RXCSUM);
 +      if (type->offload_features & NETIF_F_V6_CSUM)
 +              net_dev->features |= NETIF_F_TSO6;
 +      /* Mask for features that also apply to VLAN devices */
 +      net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
 +                                 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
 +                                 NETIF_F_RXCSUM);
 +      /* All offloads can be toggled */
 +      net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
 +      efx = netdev_priv(net_dev);
 +      pci_set_drvdata(pci_dev, efx);
 +      SET_NETDEV_DEV(net_dev, &pci_dev->dev);
 +      rc = efx_init_struct(efx, type, pci_dev, net_dev);
 +      if (rc)
 +              goto fail1;
 +
 +      netif_info(efx, probe, efx->net_dev,
 +                 "Solarflare NIC detected\n");
 +
 +      /* Set up basic I/O (BAR mappings etc) */
 +      rc = efx_init_io(efx);
 +      if (rc)
 +              goto fail2;
 +
 +      /* No serialisation is required with the reset path because
 +       * we're in STATE_INIT. */
 +      for (i = 0; i < 5; i++) {
 +              rc = efx_pci_probe_main(efx);
 +
 +              /* Serialise against efx_reset(). No more resets will be
 +               * scheduled since efx_stop_all() has been called, and we
 +               * have not and never have been registered with either
 +               * the rtnetlink or driverlink layers. */
 +              cancel_work_sync(&efx->reset_work);
 +
 +              if (rc == 0) {
 +                      if (efx->reset_pending) {
 +                              /* If there was a scheduled reset during
 +                               * probe, the NIC is probably hosed anyway */
 +                              efx_pci_remove_main(efx);
 +                              rc = -EIO;
 +                      } else {
 +                              break;
 +                      }
 +              }
 +
 +              /* Retry if a recoverable reset event has been scheduled */
 +              if (efx->reset_pending &
 +                  ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
 +                  !efx->reset_pending)
 +                      goto fail3;
 +
 +              efx->reset_pending = 0;
 +      }
 +
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
 +              goto fail4;
 +      }
 +
 +      /* Switch to the running state before we expose the device to the OS,
 +       * so that dev_open()|efx_start_all() will actually start the device */
 +      efx->state = STATE_RUNNING;
 +
 +      rc = efx_register_netdev(efx);
 +      if (rc)
 +              goto fail5;
 +
 +      netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
 +
 +      rtnl_lock();
 +      efx_mtd_probe(efx); /* allowed to fail */
 +      rtnl_unlock();
 +      return 0;
 +
 + fail5:
 +      efx_pci_remove_main(efx);
 + fail4:
 + fail3:
 +      efx_fini_io(efx);
 + fail2:
 +      efx_fini_struct(efx);
 + fail1:
 +      WARN_ON(rc > 0);
 +      netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
 +      free_netdev(net_dev);
 +      return rc;
 +}
 +
 +static int efx_pm_freeze(struct device *dev)
 +{
 +      struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 +
 +      efx->state = STATE_FINI;
 +
 +      netif_device_detach(efx->net_dev);
 +
 +      efx_stop_all(efx);
 +      efx_fini_channels(efx);
 +
 +      return 0;
 +}
 +
 +static int efx_pm_thaw(struct device *dev)
 +{
 +      struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 +
 +      efx->state = STATE_INIT;
 +
 +      efx_init_channels(efx);
 +
 +      mutex_lock(&efx->mac_lock);
 +      efx->phy_op->reconfigure(efx);
 +      mutex_unlock(&efx->mac_lock);
 +
 +      efx_start_all(efx);
 +
 +      netif_device_attach(efx->net_dev);
 +
 +      efx->state = STATE_RUNNING;
 +
 +      efx->type->resume_wol(efx);
 +
 +      /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
 +      queue_work(reset_workqueue, &efx->reset_work);
 +
 +      return 0;
 +}
 +
 +static int efx_pm_poweroff(struct device *dev)
 +{
 +      struct pci_dev *pci_dev = to_pci_dev(dev);
 +      struct efx_nic *efx = pci_get_drvdata(pci_dev);
 +
 +      efx->type->fini(efx);
 +
 +      efx->reset_pending = 0;
 +
 +      pci_save_state(pci_dev);
 +      return pci_set_power_state(pci_dev, PCI_D3hot);
 +}
 +
 +/* Used for both resume and restore */
 +static int efx_pm_resume(struct device *dev)
 +{
 +      struct pci_dev *pci_dev = to_pci_dev(dev);
 +      struct efx_nic *efx = pci_get_drvdata(pci_dev);
 +      int rc;
 +
 +      rc = pci_set_power_state(pci_dev, PCI_D0);
 +      if (rc)
 +              return rc;
 +      pci_restore_state(pci_dev);
 +      rc = pci_enable_device(pci_dev);
 +      if (rc)
 +              return rc;
 +      pci_set_master(efx->pci_dev);
 +      rc = efx->type->reset(efx, RESET_TYPE_ALL);
 +      if (rc)
 +              return rc;
 +      rc = efx->type->init(efx);
 +      if (rc)
 +              return rc;
 +      efx_pm_thaw(dev);
 +      return 0;
 +}
 +
 +static int efx_pm_suspend(struct device *dev)
 +{
 +      int rc;
 +
 +      efx_pm_freeze(dev);
 +      rc = efx_pm_poweroff(dev);
 +      if (rc)
 +              efx_pm_resume(dev);
 +      return rc;
 +}
 +
 +static struct dev_pm_ops efx_pm_ops = {
 +      .suspend        = efx_pm_suspend,
 +      .resume         = efx_pm_resume,
 +      .freeze         = efx_pm_freeze,
 +      .thaw           = efx_pm_thaw,
 +      .poweroff       = efx_pm_poweroff,
 +      .restore        = efx_pm_resume,
 +};
 +
 +static struct pci_driver efx_pci_driver = {
 +      .name           = KBUILD_MODNAME,
 +      .id_table       = efx_pci_table,
 +      .probe          = efx_pci_probe,
 +      .remove         = efx_pci_remove,
 +      .driver.pm      = &efx_pm_ops,
 +};
 +
 +/**************************************************************************
 + *
 + * Kernel module interface
 + *
 + *************************************************************************/
 +
 +module_param(interrupt_mode, uint, 0444);
 +MODULE_PARM_DESC(interrupt_mode,
 +               "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
 +
 +static int __init efx_init_module(void)
 +{
 +      int rc;
 +
 +      printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
 +
 +      rc = register_netdevice_notifier(&efx_netdev_notifier);
 +      if (rc)
 +              goto err_notifier;
 +
 +      reset_workqueue = create_singlethread_workqueue("sfc_reset");
 +      if (!reset_workqueue) {
 +              rc = -ENOMEM;
 +              goto err_reset;
 +      }
 +
 +      rc = pci_register_driver(&efx_pci_driver);
 +      if (rc < 0)
 +              goto err_pci;
 +
 +      return 0;
 +
 + err_pci:
 +      destroy_workqueue(reset_workqueue);
 + err_reset:
 +      unregister_netdevice_notifier(&efx_netdev_notifier);
 + err_notifier:
 +      return rc;
 +}
 +
 +static void __exit efx_exit_module(void)
 +{
 +      printk(KERN_INFO "Solarflare NET driver unloading\n");
 +
 +      pci_unregister_driver(&efx_pci_driver);
 +      destroy_workqueue(reset_workqueue);
 +      unregister_netdevice_notifier(&efx_netdev_notifier);
 +
 +}
 +
 +module_init(efx_init_module);
 +module_exit(efx_exit_module);
 +
 +MODULE_AUTHOR("Solarflare Communications and "
 +            "Michael Brown <mbrown@fensystems.co.uk>");
 +MODULE_DESCRIPTION("Solarflare Communications network driver");
 +MODULE_LICENSE("GPL");
 +MODULE_DEVICE_TABLE(pci, efx_pci_table);
Simple merge
Simple merge
Simple merge
index b5b2886,0000000..5fb24d3
mode 100644,000000..100644
--- /dev/null
@@@ -1,274 -1,0 +1,272 @@@
-  * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
 +/****************************************************************************
 + * Driver for Solarflare Solarstorm network controllers and boards
 + * Copyright 2005-2006 Fen Systems Ltd.
 + * Copyright 2006-2011 Solarflare Communications Inc.
 + *
 + * This program is free software; you can redistribute it and/or modify it
 + * under the terms of the GNU General Public License version 2 as published
 + * by the Free Software Foundation, incorporated herein by reference.
 + */
 +
 +#ifndef EFX_NIC_H
 +#define EFX_NIC_H
 +
 +#include <linux/i2c-algo-bit.h>
 +#include "net_driver.h"
 +#include "efx.h"
 +#include "mcdi.h"
 +#include "spi.h"
 +
 +/*
 + * Falcon hardware control
 + */
 +
 +enum {
 +      EFX_REV_FALCON_A0 = 0,
 +      EFX_REV_FALCON_A1 = 1,
 +      EFX_REV_FALCON_B0 = 2,
 +      EFX_REV_SIENA_A0 = 3,
 +};
 +
 +static inline int efx_nic_rev(struct efx_nic *efx)
 +{
 +      return efx->type->revision;
 +}
 +
 +extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
 +
 +static inline bool efx_nic_has_mc(struct efx_nic *efx)
 +{
 +      return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
 +}
 +/* NIC has two interlinked PCI functions for the same port. */
 +static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
 +{
 +      return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
 +}
 +
 +enum {
 +      PHY_TYPE_NONE = 0,
 +      PHY_TYPE_TXC43128 = 1,
 +      PHY_TYPE_88E1111 = 2,
 +      PHY_TYPE_SFX7101 = 3,
 +      PHY_TYPE_QT2022C2 = 4,
 +      PHY_TYPE_PM8358 = 6,
 +      PHY_TYPE_SFT9001A = 8,
 +      PHY_TYPE_QT2025C = 9,
 +      PHY_TYPE_SFT9001B = 10,
 +};
 +
 +#define FALCON_XMAC_LOOPBACKS                 \
 +      ((1 << LOOPBACK_XGMII) |                \
 +       (1 << LOOPBACK_XGXS) |                 \
 +       (1 << LOOPBACK_XAUI))
 +
 +#define FALCON_GMAC_LOOPBACKS                 \
 +      (1 << LOOPBACK_GMAC)
 +
 +/**
 + * struct falcon_board_type - board operations and type information
 + * @id: Board type id, as found in NVRAM
 + * @ref_model: Model number of Solarflare reference design
 + * @gen_type: Generic board type description
 + * @init: Allocate resources and initialise peripheral hardware
 + * @init_phy: Do board-specific PHY initialisation
 + * @fini: Shut down hardware and free resources
 + * @set_id_led: Set state of identifying LED or revert to automatic function
 + * @monitor: Board-specific health check function
 + */
 +struct falcon_board_type {
 +      u8 id;
 +      const char *ref_model;
 +      const char *gen_type;
 +      int (*init) (struct efx_nic *nic);
 +      void (*init_phy) (struct efx_nic *efx);
 +      void (*fini) (struct efx_nic *nic);
 +      void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
 +      int (*monitor) (struct efx_nic *nic);
 +};
 +
 +/**
 + * struct falcon_board - board information
 + * @type: Type of board
 + * @major: Major rev. ('A', 'B' ...)
 + * @minor: Minor rev. (0, 1, ...)
 + * @i2c_adap: I2C adapter for on-board peripherals
 + * @i2c_data: Data for bit-banging algorithm
 + * @hwmon_client: I2C client for hardware monitor
 + * @ioexp_client: I2C client for power/port control
 + */
 +struct falcon_board {
 +      const struct falcon_board_type *type;
 +      int major;
 +      int minor;
 +      struct i2c_adapter i2c_adap;
 +      struct i2c_algo_bit_data i2c_data;
 +      struct i2c_client *hwmon_client, *ioexp_client;
 +};
 +
 +/**
 + * struct falcon_nic_data - Falcon NIC state
 + * @pci_dev2: Secondary function of Falcon A
 + * @board: Board state and functions
 + * @stats_disable_count: Nest count for disabling statistics fetches
 + * @stats_pending: Is there a pending DMA of MAC statistics.
 + * @stats_timer: A timer for regularly fetching MAC statistics.
 + * @stats_dma_done: Pointer to the flag which indicates DMA completion.
 + * @spi_flash: SPI flash device
 + * @spi_eeprom: SPI EEPROM device
 + * @spi_lock: SPI bus lock
 + * @mdio_lock: MDIO bus lock
 + * @xmac_poll_required: XMAC link state needs polling
 + */
 +struct falcon_nic_data {
 +      struct pci_dev *pci_dev2;
 +      struct falcon_board board;
 +      unsigned int stats_disable_count;
 +      bool stats_pending;
 +      struct timer_list stats_timer;
 +      u32 *stats_dma_done;
 +      struct efx_spi_device spi_flash;
 +      struct efx_spi_device spi_eeprom;
 +      struct mutex spi_lock;
 +      struct mutex mdio_lock;
 +      bool xmac_poll_required;
 +};
 +
 +static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 +{
 +      struct falcon_nic_data *data = efx->nic_data;
 +      return &data->board;
 +}
 +
 +/**
 + * struct siena_nic_data - Siena NIC state
 + * @mcdi: Management-Controller-to-Driver Interface
-       void __iomem *mcdi_smem;
 + * @wol_filter_id: Wake-on-LAN packet filter id
 + */
 +struct siena_nic_data {
 +      struct efx_mcdi_iface mcdi;
 +      int wol_filter_id;
 +};
 +
 +extern const struct efx_nic_type falcon_a1_nic_type;
 +extern const struct efx_nic_type falcon_b0_nic_type;
 +extern const struct efx_nic_type siena_a0_nic_type;
 +
 +/**************************************************************************
 + *
 + * Externs
 + *
 + **************************************************************************
 + */
 +
 +extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
 +
 +/* TX data path */
 +extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
 +extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
 +extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
 +extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
 +extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
 +
 +/* RX data path */
 +extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
 +extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
 +extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
 +extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
 +extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
 +
 +/* Event data path */
 +extern int efx_nic_probe_eventq(struct efx_channel *channel);
 +extern void efx_nic_init_eventq(struct efx_channel *channel);
 +extern void efx_nic_fini_eventq(struct efx_channel *channel);
 +extern void efx_nic_remove_eventq(struct efx_channel *channel);
 +extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
 +extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
 +extern bool efx_nic_event_present(struct efx_channel *channel);
 +
 +/* MAC/PHY */
 +extern void falcon_drain_tx_fifo(struct efx_nic *efx);
 +extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
 +
 +/* Interrupts and test events */
 +extern int efx_nic_init_interrupt(struct efx_nic *efx);
 +extern void efx_nic_enable_interrupts(struct efx_nic *efx);
 +extern void efx_nic_generate_test_event(struct efx_channel *channel);
 +extern void efx_nic_generate_fill_event(struct efx_channel *channel);
 +extern void efx_nic_generate_interrupt(struct efx_nic *efx);
 +extern void efx_nic_disable_interrupts(struct efx_nic *efx);
 +extern void efx_nic_fini_interrupt(struct efx_nic *efx);
 +extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
 +extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
 +extern void falcon_irq_ack_a1(struct efx_nic *efx);
 +
 +#define EFX_IRQ_MOD_RESOLUTION        5
 +#define EFX_IRQ_MOD_MAX               0x1000
 +
 +/* Global Resources */
 +extern int efx_nic_flush_queues(struct efx_nic *efx);
 +extern void falcon_start_nic_stats(struct efx_nic *efx);
 +extern void falcon_stop_nic_stats(struct efx_nic *efx);
 +extern void falcon_setup_xaui(struct efx_nic *efx);
 +extern int falcon_reset_xaui(struct efx_nic *efx);
 +extern void efx_nic_init_common(struct efx_nic *efx);
 +extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
 +
 +int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 +                       unsigned int len);
 +void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
 +
 +/* Tests */
 +struct efx_nic_register_test {
 +      unsigned address;
 +      efx_oword_t mask;
 +};
 +extern int efx_nic_test_registers(struct efx_nic *efx,
 +                                const struct efx_nic_register_test *regs,
 +                                size_t n_regs);
 +
 +extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
 +extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
 +
 +/**************************************************************************
 + *
 + * Falcon MAC stats
 + *
 + **************************************************************************
 + */
 +
 +#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
 +#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
 +
 +/* Retrieve statistic from statistics block */
 +#define FALCON_STAT(efx, falcon_stat, efx_stat) do {          \
 +      if (FALCON_STAT_WIDTH(falcon_stat) == 16)               \
 +              (efx)->mac_stats.efx_stat += le16_to_cpu(       \
 +                      *((__force __le16 *)                            \
 +                        (efx->stats_buffer.addr +             \
 +                         FALCON_STAT_OFFSET(falcon_stat))));  \
 +      else if (FALCON_STAT_WIDTH(falcon_stat) == 32)          \
 +              (efx)->mac_stats.efx_stat += le32_to_cpu(       \
 +                      *((__force __le32 *)                            \
 +                        (efx->stats_buffer.addr +             \
 +                         FALCON_STAT_OFFSET(falcon_stat))));  \
 +      else                                                    \
 +              (efx)->mac_stats.efx_stat += le64_to_cpu(       \
 +                      *((__force __le64 *)                            \
 +                        (efx->stats_buffer.addr +             \
 +                         FALCON_STAT_OFFSET(falcon_stat))));  \
 +      } while (0)
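 +
 +/* Illustrative usage only; 'XgRxOctets' is a placeholder statistic name
 + * rather than one taken from the register headers.  Falcon MAC code would
 + * accumulate a field of the DMA'd statistics block into the generic
 + * software counters like this:
 + *
 + *      FALCON_STAT(efx, XgRxOctets, rx_bytes);
 + *
 + * The macro selects the le16/le32/le64 conversion from the field width
 + * encoded in the register definition, so callers never hard-code sizes.
 + */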
 +
 +#define FALCON_MAC_STATS_SIZE 0x100
 +
 +#define MAC_DATA_LBN 0
 +#define MAC_DATA_WIDTH 32
 +
 +extern void efx_nic_generate_event(struct efx_channel *channel,
 +                                 efx_qword_t *event);
 +
 +extern void falcon_poll_xmac(struct efx_nic *efx);
 +
 +#endif /* EFX_NIC_H */
index 4fdd148,0000000..cc2549c
mode 100644,000000..100644
--- /dev/null
@@@ -1,678 -1,0 +1,661 @@@
-       /* Initialise MCDI */
-       nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
-                                             FR_CZ_MC_TREG_SMEM,
-                                             FR_CZ_MC_TREG_SMEM_STEP *
-                                             FR_CZ_MC_TREG_SMEM_ROWS);
-       if (!nic_data->mcdi_smem) {
-               netif_err(efx, probe, efx->net_dev,
-                         "could not map MCDI at %llx+%x\n",
-                         (unsigned long long)efx->membase_phys +
-                         FR_CZ_MC_TREG_SMEM,
-                         FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
-               rc = -ENOMEM;
-               goto fail1;
-       }
 +/****************************************************************************
 + * Driver for Solarflare Solarstorm network controllers and boards
 + * Copyright 2005-2006 Fen Systems Ltd.
 + * Copyright 2006-2010 Solarflare Communications Inc.
 + *
 + * This program is free software; you can redistribute it and/or modify it
 + * under the terms of the GNU General Public License version 2 as published
 + * by the Free Software Foundation, incorporated herein by reference.
 + */
 +
 +#include <linux/bitops.h>
 +#include <linux/delay.h>
 +#include <linux/pci.h>
 +#include <linux/module.h>
 +#include <linux/slab.h>
 +#include <linux/random.h>
 +#include "net_driver.h"
 +#include "bitfield.h"
 +#include "efx.h"
 +#include "nic.h"
 +#include "mac.h"
 +#include "spi.h"
 +#include "regs.h"
 +#include "io.h"
 +#include "phy.h"
 +#include "workarounds.h"
 +#include "mcdi.h"
 +#include "mcdi_pcol.h"
 +
 +/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
 +
 +static void siena_init_wol(struct efx_nic *efx);
 +
 +
 +static void siena_push_irq_moderation(struct efx_channel *channel)
 +{
 +      efx_dword_t timer_cmd;
 +
 +      BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_CZ_TC_TIMER_VAL_WIDTH));
 +
 +      if (channel->irq_moderation)
 +              EFX_POPULATE_DWORD_2(timer_cmd,
 +                                   FRF_CZ_TC_TIMER_MODE,
 +                                   FFE_CZ_TIMER_MODE_INT_HLDOFF,
 +                                   FRF_CZ_TC_TIMER_VAL,
 +                                   channel->irq_moderation - 1);
 +      else
 +              EFX_POPULATE_DWORD_2(timer_cmd,
 +                                   FRF_CZ_TC_TIMER_MODE,
 +                                   FFE_CZ_TIMER_MODE_DIS,
 +                                   FRF_CZ_TC_TIMER_VAL, 0);
 +      efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
 +                             channel->channel);
 +}
 +
 +static void siena_push_multicast_hash(struct efx_nic *efx)
 +{
 +      WARN_ON(!mutex_is_locked(&efx->mac_lock));
 +
 +      efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
 +                   efx->multicast_hash.byte, sizeof(efx->multicast_hash),
 +                   NULL, 0, NULL);
 +}
 +
 +static int siena_mdio_write(struct net_device *net_dev,
 +                          int prtad, int devad, u16 addr, u16 value)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +      uint32_t status;
 +      int rc;
 +
 +      rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
 +                               addr, value, &status);
 +      if (rc)
 +              return rc;
 +      if (status != MC_CMD_MDIO_STATUS_GOOD)
 +              return -EIO;
 +
 +      return 0;
 +}
 +
 +static int siena_mdio_read(struct net_device *net_dev,
 +                         int prtad, int devad, u16 addr)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +      uint16_t value;
 +      uint32_t status;
 +      int rc;
 +
 +      rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
 +                              addr, &value, &status);
 +      if (rc)
 +              return rc;
 +      if (status != MC_CMD_MDIO_STATUS_GOOD)
 +              return -EIO;
 +
 +      return (int)value;
 +}
 +
 +/* This call is responsible for hooking in the MAC and PHY operations */
 +static int siena_probe_port(struct efx_nic *efx)
 +{
 +      int rc;
 +
 +      /* Hook in PHY operations table */
 +      efx->phy_op = &efx_mcdi_phy_ops;
 +
 +      /* Set up MDIO structure for PHY */
 +      efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
 +      efx->mdio.mdio_read = siena_mdio_read;
 +      efx->mdio.mdio_write = siena_mdio_write;
 +
 +      /* Fill out MDIO structure, loopback modes, and initial link state */
 +      rc = efx->phy_op->probe(efx);
 +      if (rc != 0)
 +              return rc;
 +
 +      /* Allocate buffer for stats */
 +      rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
 +                                MC_CMD_MAC_NSTATS * sizeof(u64));
 +      if (rc)
 +              return rc;
 +      netif_dbg(efx, probe, efx->net_dev,
 +                "stats buffer at %llx (virt %p phys %llx)\n",
 +                (u64)efx->stats_buffer.dma_addr,
 +                efx->stats_buffer.addr,
 +                (u64)virt_to_phys(efx->stats_buffer.addr));
 +
 +      efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
 +
 +      return 0;
 +}
 +
 +static void siena_remove_port(struct efx_nic *efx)
 +{
 +      efx->phy_op->remove(efx);
 +      efx_nic_free_buffer(efx, &efx->stats_buffer);
 +}
 +
 +static const struct efx_nic_register_test siena_register_tests[] = {
 +      { FR_AZ_ADR_REGION,
 +        EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
 +      { FR_CZ_USR_EV_CFG,
 +        EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
 +      { FR_AZ_RX_CFG,
 +        EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
 +      { FR_AZ_TX_CFG,
 +        EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
 +      { FR_AZ_TX_RESERVED,
 +        EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
 +      { FR_AZ_SRM_TX_DC_CFG,
 +        EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
 +      { FR_AZ_RX_DC_CFG,
 +        EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
 +      { FR_AZ_RX_DC_PF_WM,
 +        EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
 +      { FR_BZ_DP_CTRL,
 +        EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
 +      { FR_BZ_RX_RSS_TKEY,
 +        EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
 +      { FR_CZ_RX_RSS_IPV6_REG1,
 +        EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
 +      { FR_CZ_RX_RSS_IPV6_REG2,
 +        EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
 +      { FR_CZ_RX_RSS_IPV6_REG3,
 +        EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
 +};
 +
 +static int siena_test_registers(struct efx_nic *efx)
 +{
 +      return efx_nic_test_registers(efx, siena_register_tests,
 +                                    ARRAY_SIZE(siena_register_tests));
 +}
 +
 +/**************************************************************************
 + *
 + * Device reset
 + *
 + **************************************************************************
 + */
 +
 +static enum reset_type siena_map_reset_reason(enum reset_type reason)
 +{
 +      return RESET_TYPE_ALL;
 +}
 +
 +static int siena_map_reset_flags(u32 *flags)
 +{
 +      enum {
 +              SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
 +                                  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
 +                                  ETH_RESET_PHY),
 +              SIENA_RESET_MC = (SIENA_RESET_PORT |
 +                                ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
 +      };
 +
 +      if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
 +              *flags &= ~SIENA_RESET_MC;
 +              return RESET_TYPE_WORLD;
 +      }
 +
 +      if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
 +              *flags &= ~SIENA_RESET_PORT;
 +              return RESET_TYPE_ALL;
 +      }
 +
 +      /* no invisible reset implemented */
 +
 +      return -EINVAL;
 +}
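 +
 +/* Illustrative example, not part of the driver: a caller handling
 + * ETHTOOL_RESET would pass the user's flag word in and act on whatever
 + * this function consumes, e.g.
 + *
 + *      u32 flags = ETH_RESET_DMA | ETH_RESET_FILTER | ETH_RESET_OFFLOAD |
 + *                  ETH_RESET_MAC | ETH_RESET_PHY;
 + *      int method = siena_map_reset_flags(&flags);
 + *      // method == RESET_TYPE_ALL and flags == 0: the port-level reset
 + *      // covers every component requested.
 + *
 + * Adding ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT to the input selects
 + * RESET_TYPE_WORLD instead, which also reboots the management controller.
 + */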
 +
 +static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
 +{
 +      int rc;
 +
 +      /* Recover from a failed assertion pre-reset */
 +      rc = efx_mcdi_handle_assertion(efx);
 +      if (rc)
 +              return rc;
 +
 +      if (method == RESET_TYPE_WORLD)
 +              return efx_mcdi_reset_mc(efx);
 +      else
 +              return efx_mcdi_reset_port(efx);
 +}
 +
 +static int siena_probe_nvconfig(struct efx_nic *efx)
 +{
 +      return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
 +}
 +
 +static int siena_probe_nic(struct efx_nic *efx)
 +{
 +      struct siena_nic_data *nic_data;
 +      bool already_attached = false;
 +      efx_oword_t reg;
 +      int rc;
 +
 +      /* Allocate storage for hardware specific data */
 +      nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
 +      if (!nic_data)
 +              return -ENOMEM;
 +      efx->nic_data = nic_data;
 +
 +      if (efx_nic_fpga_ver(efx) != 0) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "Siena FPGA not supported\n");
 +              rc = -ENODEV;
 +              goto fail1;
 +      }
 +
 +      efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
 +      efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
 +
-               goto fail2;
 +      efx_mcdi_init(efx);
 +
 +      /* Recover from a failed assertion before probing */
 +      rc = efx_mcdi_handle_assertion(efx);
 +      if (rc)
-       iounmap(nic_data->mcdi_smem);
++              goto fail1;
 +
 +      /* Let the BMC know that the driver is now in charge of link and
 +       * filter settings. We must do this before we reset the NIC */
 +      rc = efx_mcdi_drv_attach(efx, true, &already_attached);
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "Unable to register driver with MCPU\n");
 +              goto fail2;
 +      }
 +      if (already_attached)
 +              /* Not a fatal error */
 +              netif_err(efx, probe, efx->net_dev,
 +                        "Host already registered with MCPU\n");
 +
 +      /* Now we can reset the NIC */
 +      rc = siena_reset_hw(efx, RESET_TYPE_ALL);
 +      if (rc) {
 +              netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
 +              goto fail3;
 +      }
 +
 +      siena_init_wol(efx);
 +
 +      /* Allocate memory for INT_KER */
 +      rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
 +      if (rc)
 +              goto fail4;
 +      BUG_ON(efx->irq_status.dma_addr & 0x0f);
 +
 +      netif_dbg(efx, probe, efx->net_dev,
 +                "INT_KER at %llx (virt %p phys %llx)\n",
 +                (unsigned long long)efx->irq_status.dma_addr,
 +                efx->irq_status.addr,
 +                (unsigned long long)virt_to_phys(efx->irq_status.addr));
 +
 +      /* Read in the non-volatile configuration */
 +      rc = siena_probe_nvconfig(efx);
 +      if (rc == -EINVAL) {
 +              netif_err(efx, probe, efx->net_dev,
 +                        "NVRAM is invalid therefore using defaults\n");
 +              efx->phy_type = PHY_TYPE_NONE;
 +              efx->mdio.prtad = MDIO_PRTAD_NONE;
 +      } else if (rc) {
 +              goto fail5;
 +      }
 +
 +      return 0;
 +
 +fail5:
 +      efx_nic_free_buffer(efx, &efx->irq_status);
 +fail4:
 +fail3:
 +      efx_mcdi_drv_attach(efx, false, NULL);
 +fail2:
-       struct siena_nic_data *nic_data = efx->nic_data;
 +fail1:
 +      kfree(efx->nic_data);
 +      return rc;
 +}
 +
 +/* This call performs hardware-specific global initialisation, such as
 + * defining the descriptor cache sizes and number of RSS channels.
 + * It does not set up any buffers, descriptor rings or event queues.
 + */
 +static int siena_init_nic(struct efx_nic *efx)
 +{
 +      efx_oword_t temp;
 +      int rc;
 +
 +      /* Recover from a failed assertion post-reset */
 +      rc = efx_mcdi_handle_assertion(efx);
 +      if (rc)
 +              return rc;
 +
 +      /* Squash TX of packets of 16 bytes or less */
 +      efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
 +      EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 +      efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
 +
 +      /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
 +       * descriptors (which is bad).
 +       */
 +      efx_reado(efx, &temp, FR_AZ_TX_CFG);
 +      EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
 +      EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
 +      efx_writeo(efx, &temp, FR_AZ_TX_CFG);
 +
 +      efx_reado(efx, &temp, FR_AZ_RX_CFG);
 +      EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
 +      EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
 +      /* Enable hash insertion. This is broken for the 'Falcon' hash
 +       * if IPv6 hashing is also enabled, so also select Toeplitz
 +       * TCP/IPv4 and IPv4 hashes. */
 +      EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
 +      EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
 +      EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
 +      efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 +
 +      /* Set hash key for IPv4 */
 +      memcpy(&temp, efx->rx_hash_key, sizeof(temp));
 +      efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
 +
 +      /* Enable IPv6 RSS */
 +      BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
 +                   2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
 +                   FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
 +      memcpy(&temp, efx->rx_hash_key, sizeof(temp));
 +      efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
 +      memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
 +      efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
 +      EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
 +                           FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
 +      memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
 +             FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
 +      efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
 +
 +      /* Enable event logging */
 +      rc = efx_mcdi_log_ctrl(efx, true, false, 0);
 +      if (rc)
 +              return rc;
 +
 +      /* Set destination of both TX and RX Flush events */
 +      EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
 +      efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
 +
 +      EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
 +      efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
 +
 +      efx_nic_init_common(efx);
 +      return 0;
 +}
 +
 +static void siena_remove_nic(struct efx_nic *efx)
 +{
-       iounmap(nic_data->mcdi_smem);
-       kfree(nic_data);
 +      efx_nic_free_buffer(efx, &efx->irq_status);
 +
 +      siena_reset_hw(efx, RESET_TYPE_ALL);
 +
 +      /* Relinquish the device back to the BMC */
 +      if (efx_nic_has_mc(efx))
 +              efx_mcdi_drv_attach(efx, false, NULL);
 +
 +      /* Tear down the private nic state */
-       .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
++      kfree(efx->nic_data);
 +      efx->nic_data = NULL;
 +}
 +
 +#define STATS_GENERATION_INVALID ((__force __le64)(-1))
 +
 +static int siena_try_update_nic_stats(struct efx_nic *efx)
 +{
 +      __le64 *dma_stats;
 +      struct efx_mac_stats *mac_stats;
 +      __le64 generation_start, generation_end;
 +
 +      mac_stats = &efx->mac_stats;
 +      dma_stats = efx->stats_buffer.addr;
 +
 +      generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
 +      if (generation_end == STATS_GENERATION_INVALID)
 +              return 0;
 +      rmb();
 +
 +#define MAC_STAT(M, D) \
 +      mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
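 +/* For reference, the first invocation below expands (token-pasting D onto
 + * MC_CMD_MAC_) to
 + *
 + *      mac_stats->tx_bytes = le64_to_cpu(dma_stats[MC_CMD_MAC_TX_BYTES]);
 + *
 + * i.e. each statistic occupies a 64-bit little-endian slot in the DMA
 + * buffer, indexed by its MC_CMD_MAC_* enumerator.
 + */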
 +
 +      MAC_STAT(tx_bytes, TX_BYTES);
 +      MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
 +      mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
 +                                  mac_stats->tx_bad_bytes);
 +      MAC_STAT(tx_packets, TX_PKTS);
 +      MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
 +      MAC_STAT(tx_pause, TX_PAUSE_PKTS);
 +      MAC_STAT(tx_control, TX_CONTROL_PKTS);
 +      MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
 +      MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
 +      MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
 +      MAC_STAT(tx_lt64, TX_LT64_PKTS);
 +      MAC_STAT(tx_64, TX_64_PKTS);
 +      MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
 +      MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
 +      MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
 +      MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
 +      MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
 +      MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
 +      MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
 +      mac_stats->tx_collision = 0;
 +      MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
 +      MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
 +      MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
 +      MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
 +      MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
 +      mac_stats->tx_collision = (mac_stats->tx_single_collision +
 +                                 mac_stats->tx_multiple_collision +
 +                                 mac_stats->tx_excessive_collision +
 +                                 mac_stats->tx_late_collision);
 +      MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
 +      MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
 +      MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
 +      MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
 +      MAC_STAT(rx_bytes, RX_BYTES);
 +      MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
 +      mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
 +                                  mac_stats->rx_bad_bytes);
 +      MAC_STAT(rx_packets, RX_PKTS);
 +      MAC_STAT(rx_good, RX_GOOD_PKTS);
 +      MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
 +      MAC_STAT(rx_pause, RX_PAUSE_PKTS);
 +      MAC_STAT(rx_control, RX_CONTROL_PKTS);
 +      MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
 +      MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
 +      MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
 +      MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
 +      MAC_STAT(rx_64, RX_64_PKTS);
 +      MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
 +      MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
 +      MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
 +      MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
 +      MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
 +      MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
 +      MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
 +      mac_stats->rx_bad_lt64 = 0;
 +      mac_stats->rx_bad_64_to_15xx = 0;
 +      mac_stats->rx_bad_15xx_to_jumbo = 0;
 +      MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
 +      MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
 +      mac_stats->rx_missed = 0;
 +      MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
 +      MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
 +      MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
 +      MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
 +      MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
 +      mac_stats->rx_good_lt64 = 0;
 +
 +      efx->n_rx_nodesc_drop_cnt =
 +              le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
 +
 +#undef MAC_STAT
 +
 +      rmb();
 +      generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
 +      if (generation_end != generation_start)
 +              return -EAGAIN;
 +
 +      return 0;
 +}
 +
 +static void siena_update_nic_stats(struct efx_nic *efx)
 +{
 +      int retry;
 +
 +      /* If we're unlucky enough to read statistics during the DMA, wait
 +       * up to 10ms for it to finish (typically takes <500us) */
 +      for (retry = 0; retry < 100; ++retry) {
 +              if (siena_try_update_nic_stats(efx) == 0)
 +                      return;
 +              udelay(100);
 +      }
 +
 +      /* Use the old values instead */
 +}
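/*
 * Editor's sketch (not part of the driver): the generation-count check
 * used by siena_try_update_nic_stats() above, reduced to a standalone
 * program.  The writer bumps generation_start, updates the counters,
 * then bumps generation_end; a reader only accepts a snapshot when the
 * two match, otherwise it retries (cf. the 100 x udelay(100) loop).
 * All structure and field names below are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stats_block {
        uint64_t generation_start;
        uint64_t tx_bytes;
        uint64_t rx_bytes;
        uint64_t generation_end;
};

/* returns 0 on a consistent snapshot, -1 if the writer raced us */
static int try_snapshot(const struct stats_block *src, struct stats_block *out)
{
        uint64_t end = src->generation_end;

        memcpy(out, src, sizeof(*out));
        if (out->generation_start != end)
                return -1;              /* caller retries */
        return 0;
}

int main(void)
{
        struct stats_block dma = { 7, 1000, 2000, 7 };
        struct stats_block snap = { 0 };
        int retry;

        for (retry = 0; retry < 100; retry++)
                if (try_snapshot(&dma, &snap) == 0)
                        break;
        printf("tx=%llu rx=%llu after %d tries\n",
               (unsigned long long)snap.tx_bytes,
               (unsigned long long)snap.rx_bytes, retry + 1);
        return 0;
}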
 +
 +static void siena_start_nic_stats(struct efx_nic *efx)
 +{
 +      __le64 *dma_stats = efx->stats_buffer.addr;
 +
 +      dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
 +
 +      efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
 +                         MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
 +}
 +
 +static void siena_stop_nic_stats(struct efx_nic *efx)
 +{
 +      efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
 +}
 +
 +/**************************************************************************
 + *
 + * Wake on LAN
 + *
 + **************************************************************************
 + */
 +
 +static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
 +{
 +      struct siena_nic_data *nic_data = efx->nic_data;
 +
 +      wol->supported = WAKE_MAGIC;
 +      if (nic_data->wol_filter_id != -1)
 +              wol->wolopts = WAKE_MAGIC;
 +      else
 +              wol->wolopts = 0;
 +      memset(&wol->sopass, 0, sizeof(wol->sopass));
 +}
 +
 +
 +static int siena_set_wol(struct efx_nic *efx, u32 type)
 +{
 +      struct siena_nic_data *nic_data = efx->nic_data;
 +      int rc;
 +
 +      if (type & ~WAKE_MAGIC)
 +              return -EINVAL;
 +
 +      if (type & WAKE_MAGIC) {
 +              if (nic_data->wol_filter_id != -1)
 +                      efx_mcdi_wol_filter_remove(efx,
 +                                                 nic_data->wol_filter_id);
 +              rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
 +                                                 &nic_data->wol_filter_id);
 +              if (rc)
 +                      goto fail;
 +
 +              pci_wake_from_d3(efx->pci_dev, true);
 +      } else {
 +              rc = efx_mcdi_wol_filter_reset(efx);
 +              nic_data->wol_filter_id = -1;
 +              pci_wake_from_d3(efx->pci_dev, false);
 +              if (rc)
 +                      goto fail;
 +      }
 +
 +      return 0;
 + fail:
 +      netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
 +                __func__, type, rc);
 +      return rc;
 +}
 +
 +
 +static void siena_init_wol(struct efx_nic *efx)
 +{
 +      struct siena_nic_data *nic_data = efx->nic_data;
 +      int rc;
 +
 +      rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
 +
 +      if (rc != 0) {
 +              /* If it failed, attempt to get into a synchronised
 +               * state with MC by resetting any set WoL filters */
 +              efx_mcdi_wol_filter_reset(efx);
 +              nic_data->wol_filter_id = -1;
 +      } else if (nic_data->wol_filter_id != -1) {
 +              pci_wake_from_d3(efx->pci_dev, true);
 +      }
 +}
 +
 +
 +/**************************************************************************
 + *
 + * Revision-dependent attributes used by efx.c and nic.c
 + *
 + **************************************************************************
 + */
 +
 +const struct efx_nic_type siena_a0_nic_type = {
 +      .probe = siena_probe_nic,
 +      .remove = siena_remove_nic,
 +      .init = siena_init_nic,
 +      .fini = efx_port_dummy_op_void,
 +      .monitor = NULL,
 +      .map_reset_reason = siena_map_reset_reason,
 +      .map_reset_flags = siena_map_reset_flags,
 +      .reset = siena_reset_hw,
 +      .probe_port = siena_probe_port,
 +      .remove_port = siena_remove_port,
 +      .prepare_flush = efx_port_dummy_op_void,
 +      .update_stats = siena_update_nic_stats,
 +      .start_stats = siena_start_nic_stats,
 +      .stop_stats = siena_stop_nic_stats,
 +      .set_id_led = efx_mcdi_set_id_led,
 +      .push_irq_moderation = siena_push_irq_moderation,
 +      .push_multicast_hash = siena_push_multicast_hash,
 +      .reconfigure_port = efx_mcdi_phy_reconfigure,
 +      .get_wol = siena_get_wol,
 +      .set_wol = siena_set_wol,
 +      .resume_wol = siena_init_wol,
 +      .test_registers = siena_test_registers,
 +      .test_nvram = efx_mcdi_nvram_test_all,
 +      .default_mac_ops = &efx_mcdi_mac_operations,
 +
 +      .revision = EFX_REV_SIENA_A0,
++      .mem_map_size = (FR_CZ_MC_TREG_SMEM +
++                       FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
 +      .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
 +      .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
 +      .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
 +      .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
 +      .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
 +      .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
 +      .rx_buffer_hash_size = 0x10,
 +      .rx_buffer_padding = 0,
 +      .max_interrupt_mode = EFX_INT_MODE_MSIX,
 +      .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
 +                                 * interrupt handler only supports 32
 +                                 * channels */
 +      .tx_dc_base = 0x88000,
 +      .rx_dc_base = 0x68000,
 +      .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 +                           NETIF_F_RXHASH | NETIF_F_NTUPLE),
 +};
index f07a721,0000000..1206821
mode 100644,000000..100644
--- /dev/null
@@@ -1,5304 -1,0 +1,5303 @@@
-       int ring;
 +/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 + *
 + * Copyright (C) 2004 Sun Microsystems Inc.
 + * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License as
 + * published by the Free Software Foundation; either version 2 of the
 + * License, or (at your option) any later version.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 + * 02111-1307, USA.
 + *
 + * This driver uses the sungem driver (c) David Miller
 + * (davem@redhat.com) as its basis.
 + *
 + * The cassini chip has a number of features that distinguish it from
 + * the gem chip:
 + *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 + *      load balancing (non-VLAN mode)
 + *  batching of multiple packets
 + *  multiple CPU dispatching
 + *  page-based RX descriptor engine with separate completion rings
 + *  Gigabit support (GMII and PCS interface)
 + *  MIF link up/down detection works
 + *
 + * RX is handled by page sized buffers that are attached as fragments to
 + * the skb. here's what's done:
 + *  -- driver allocates pages at a time and keeps reference counts
 + *     on them.
 + *  -- the upper protocol layers assume that the header is in the skb
 + *     itself. as a result, cassini will copy a small amount (64 bytes)
 + *     to make them happy.
 + *  -- driver appends the rest of the data pages as frags to skbuffs
 + *     and increments the reference count
 + *  -- on page reclamation, the driver swaps the page with a spare page.
 + *     if that page is still in use, it frees its reference to that page,
 + *     and allocates a new page for use. otherwise, it just recycles
 + *     the page.
 + *
 + * NOTE: cassini can parse the header. however, it's not worth it
 + *       as long as the network stack requires a header copy.
 + *
 + * TX has 4 queues. currently these queues are used in a round-robin
 + * fashion for load balancing. They can also be used for QoS. for that
 + * to work, however, QoS information needs to be exposed down to the driver
 + * level so that subqueues get targeted to particular transmit rings.
 + * alternatively, the queues can be configured via use of the all-purpose
 + * ioctl.
 + *
 + * RX DATA: the rx completion ring has all the info, but the rx desc
 + * ring has all of the data. RX can conceivably come in under multiple
 + * interrupts, but the INT# assignment needs to be set up properly by
 + * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 + * that. also, the two descriptor rings are designed to distinguish between
 + * encrypted and non-encrypted packets, but we use them for buffering
 + * instead.
 + *
 + * by default, the selective clear mask is set up to process rx packets.
 + */
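/*
 * Editor's sketch (not part of the driver): the round-robin transmit
 * ring selection described in the comment above, as a tiny standalone
 * program.  The ring count and names are illustrative only.
 */
#include <stdio.h>

#define N_TX_RINGS_SKETCH 4             /* must be a power of two here */

int main(void)
{
        unsigned int next_ring = 0;
        int pkt;

        for (pkt = 0; pkt < 10; pkt++) {
                unsigned int ring = next_ring;

                /* advance for the next packet, wrapping at the ring count */
                next_ring = (next_ring + 1) & (N_TX_RINGS_SKETCH - 1);
                printf("packet %d -> tx ring %u\n", pkt, ring);
        }
        return 0;
}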
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/module.h>
 +#include <linux/kernel.h>
 +#include <linux/types.h>
 +#include <linux/compiler.h>
 +#include <linux/slab.h>
 +#include <linux/delay.h>
 +#include <linux/init.h>
 +#include <linux/interrupt.h>
 +#include <linux/vmalloc.h>
 +#include <linux/ioport.h>
 +#include <linux/pci.h>
 +#include <linux/mm.h>
 +#include <linux/highmem.h>
 +#include <linux/list.h>
 +#include <linux/dma-mapping.h>
 +
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/skbuff.h>
 +#include <linux/ethtool.h>
 +#include <linux/crc32.h>
 +#include <linux/random.h>
 +#include <linux/mii.h>
 +#include <linux/ip.h>
 +#include <linux/tcp.h>
 +#include <linux/mutex.h>
 +#include <linux/firmware.h>
 +
 +#include <net/checksum.h>
 +
 +#include <linux/atomic.h>
 +#include <asm/system.h>
 +#include <asm/io.h>
 +#include <asm/byteorder.h>
 +#include <asm/uaccess.h>
 +
 +#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
 +#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
 +#define CAS_NCPUS            num_online_cpus()
 +
 +#define cas_skb_release(x)  netif_rx(x)
 +
 +/* select which firmware to use */
 +#define USE_HP_WORKAROUND
 +#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
 +#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
 +
 +#include "cassini.h"
 +
 +#define USE_TX_COMPWB      /* use completion writeback registers */
 +#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
 +#define USE_RX_BLANK       /* hw interrupt mitigation */
 +#undef USE_ENTROPY_DEV     /* don't test for entropy device */
 +
 +/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 + * also, we need to make cp->lock finer-grained.
 + */
 +#undef  USE_PCI_INTB
 +#undef  USE_PCI_INTC
 +#undef  USE_PCI_INTD
 +#undef  USE_QOS
 +
 +#undef  USE_VPD_DEBUG       /* debug vpd information if defined */
 +
 +/* rx processing options */
 +#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
 +#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
 +#define RX_COPY_ALWAYS 0    /* if 0, use frags */
 +#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
 +#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
 +
 +#define DRV_MODULE_NAME               "cassini"
 +#define DRV_MODULE_VERSION    "1.6"
 +#define DRV_MODULE_RELDATE    "21 May 2008"
 +
 +#define CAS_DEF_MSG_ENABLE      \
 +      (NETIF_MSG_DRV          | \
 +       NETIF_MSG_PROBE        | \
 +       NETIF_MSG_LINK         | \
 +       NETIF_MSG_TIMER        | \
 +       NETIF_MSG_IFDOWN       | \
 +       NETIF_MSG_IFUP         | \
 +       NETIF_MSG_RX_ERR       | \
 +       NETIF_MSG_TX_ERR)
 +
 +/* length of time before we decide the hardware is borked,
 + * and dev->tx_timeout() should be called to fix the problem
 + */
 +#define CAS_TX_TIMEOUT                        (HZ)
 +#define CAS_LINK_TIMEOUT                (22*HZ/10)
 +#define CAS_LINK_FAST_TIMEOUT           (1)
 +
 +/* timeout values for state changing. these specify the number
 + * of 10us delays to be used before giving up.
 + */
 +#define STOP_TRIES_PHY 1000
 +#define STOP_TRIES     5000
 +
 +/* specify a minimum frame size to deal with some fifo issues
 + * max mtu == 2 * page size - ethernet header - 64 - swivel =
 + *            2 * page_size - 0x50
 + */
 +#define CAS_MIN_FRAME                 97
 +#define CAS_1000MB_MIN_FRAME            255
 +#define CAS_MIN_MTU                     60
 +#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
 +
 +#if 1
 +/*
 + * Eliminate these and use separate atomic counters for each, to
 + * avoid a race condition.
 + */
 +#else
 +#define CAS_RESET_MTU                   1
 +#define CAS_RESET_ALL                   2
 +#define CAS_RESET_SPARE                 3
 +#endif
 +
 +static char version[] __devinitdata =
 +      DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 +
 +static int cassini_debug = -1;        /* -1 == use CAS_DEF_MSG_ENABLE as value */
 +static int link_mode;
 +
 +MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
 +MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
 +MODULE_LICENSE("GPL");
 +MODULE_FIRMWARE("sun/cassini.bin");
 +module_param(cassini_debug, int, 0);
 +MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
 +module_param(link_mode, int, 0);
 +MODULE_PARM_DESC(link_mode, "default link mode");
 +
 +/*
 + * Work around for a PCS bug in which the link goes down due to the chip
 + * being confused and never showing a link status of "up."
 + */
 +#define DEFAULT_LINKDOWN_TIMEOUT 5
 +/*
 + * Value in seconds, for user input.
 + */
 +static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
 +module_param(linkdown_timeout, int, 0);
 +MODULE_PARM_DESC(linkdown_timeout,
 +"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
 +
 +/*
 + * value in 'ticks' (units used by jiffies). Set when we init the
 + * module because 'HZ' is actually a function call on some flavors of
 + * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 + */
 +static int link_transition_timeout;
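/*
 * Editor's sketch (not part of the driver): converting the user's
 * timeout in seconds into timer ticks once at init time, as the module
 * does for link_transition_timeout.  TICKS_PER_SEC stands in for HZ
 * and is an invented constant here.
 */
#include <stdio.h>

#define TICKS_PER_SEC 250               /* stand-in for HZ */

int main(void)
{
        int linkdown_timeout_sec = 5;   /* cf. DEFAULT_LINKDOWN_TIMEOUT */
        int timeout_ticks = 0;

        if (linkdown_timeout_sec > 0)
                timeout_ticks = linkdown_timeout_sec * TICKS_PER_SEC;
        printf("%d s -> %d ticks\n", linkdown_timeout_sec, timeout_ticks);
        return 0;
}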
 +
 +
 +
 +static u16 link_modes[] __devinitdata = {
 +      BMCR_ANENABLE,                   /* 0 : autoneg */
 +      0,                               /* 1 : 10bt half duplex */
 +      BMCR_SPEED100,                   /* 2 : 100bt half duplex */
 +      BMCR_FULLDPLX,                   /* 3 : 10bt full duplex */
 +      BMCR_SPEED100|BMCR_FULLDPLX,     /* 4 : 100bt full duplex */
 +      CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
 +};
 +
 +static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
 +      { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
 +        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 +      { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
 +        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 +      { 0, }
 +};
 +
 +MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
 +
 +static void cas_set_link_modes(struct cas *cp);
 +
 +static inline void cas_lock_tx(struct cas *cp)
 +{
 +      int i;
 +
 +      for (i = 0; i < N_TX_RINGS; i++)
 +              spin_lock(&cp->tx_lock[i]);
 +}
 +
 +static inline void cas_lock_all(struct cas *cp)
 +{
 +      spin_lock_irq(&cp->lock);
 +      cas_lock_tx(cp);
 +}
 +
 +/* WTZ: QA was finding deadlock problems with the previous
 + * versions after long test runs with multiple cards per machine.
 + * See if replacing cas_lock_all with safer versions helps. The
 + * symptoms QA is reporting match those we'd expect if interrupts
 + * aren't being properly restored, and we fixed a previous deadlock
 + * with similar symptoms by using save/restore versions in other
 + * places.
 + */
 +#define cas_lock_all_save(cp, flags) \
 +do { \
 +      struct cas *xxxcp = (cp); \
 +      spin_lock_irqsave(&xxxcp->lock, flags); \
 +      cas_lock_tx(xxxcp); \
 +} while (0)
 +
 +static inline void cas_unlock_tx(struct cas *cp)
 +{
 +      int i;
 +
 +      for (i = N_TX_RINGS; i > 0; i--)
 +              spin_unlock(&cp->tx_lock[i - 1]);
 +}
 +
 +static inline void cas_unlock_all(struct cas *cp)
 +{
 +      cas_unlock_tx(cp);
 +      spin_unlock_irq(&cp->lock);
 +}
 +
 +#define cas_unlock_all_restore(cp, flags) \
 +do { \
 +      struct cas *xxxcp = (cp); \
 +      cas_unlock_tx(xxxcp); \
 +      spin_unlock_irqrestore(&xxxcp->lock, flags); \
 +} while (0)
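/*
 * Editor's sketch (not part of the driver): the lock ordering used by
 * cas_lock_tx()/cas_unlock_tx() above -- take the per-ring locks in
 * ascending index order and drop them in reverse order -- written
 * against plain pthread mutexes so it runs standalone.  Ring count and
 * names are invented; build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_RINGS 4

static pthread_mutex_t ring_lock[NR_RINGS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void lock_all_rings(void)
{
        for (int i = 0; i < NR_RINGS; i++)
                pthread_mutex_lock(&ring_lock[i]);
}

static void unlock_all_rings(void)
{
        for (int i = NR_RINGS; i > 0; i--)
                pthread_mutex_unlock(&ring_lock[i - 1]);
}

int main(void)
{
        lock_all_rings();
        printf("all %d ring locks held\n", NR_RINGS);
        unlock_all_rings();
        return 0;
}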
 +
 +static void cas_disable_irq(struct cas *cp, const int ring)
 +{
 +      /* Make sure we won't get any more interrupts */
 +      if (ring == 0) {
 +              writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
 +              return;
 +      }
 +
 +      /* disable completion interrupts and selectively mask */
 +      if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 +              switch (ring) {
 +#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 +#ifdef USE_PCI_INTB
 +              case 1:
 +#endif
 +#ifdef USE_PCI_INTC
 +              case 2:
 +#endif
 +#ifdef USE_PCI_INTD
 +              case 3:
 +#endif
 +                      writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
 +                             cp->regs + REG_PLUS_INTRN_MASK(ring));
 +                      break;
 +#endif
 +              default:
 +                      writel(INTRN_MASK_CLEAR_ALL, cp->regs +
 +                             REG_PLUS_INTRN_MASK(ring));
 +                      break;
 +              }
 +      }
 +}
 +
 +static inline void cas_mask_intr(struct cas *cp)
 +{
 +      int i;
 +
 +      for (i = 0; i < N_RX_COMP_RINGS; i++)
 +              cas_disable_irq(cp, i);
 +}
 +
 +static void cas_enable_irq(struct cas *cp, const int ring)
 +{
 +      if (ring == 0) { /* all but TX_DONE */
 +              writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
 +              return;
 +      }
 +
 +      if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 +              switch (ring) {
 +#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 +#ifdef USE_PCI_INTB
 +              case 1:
 +#endif
 +#ifdef USE_PCI_INTC
 +              case 2:
 +#endif
 +#ifdef USE_PCI_INTD
 +              case 3:
 +#endif
 +                      writel(INTRN_MASK_RX_EN, cp->regs +
 +                             REG_PLUS_INTRN_MASK(ring));
 +                      break;
 +#endif
 +              default:
 +                      break;
 +              }
 +      }
 +}
 +
 +static inline void cas_unmask_intr(struct cas *cp)
 +{
 +      int i;
 +
 +      for (i = 0; i < N_RX_COMP_RINGS; i++)
 +              cas_enable_irq(cp, i);
 +}
 +
 +static inline void cas_entropy_gather(struct cas *cp)
 +{
 +#ifdef USE_ENTROPY_DEV
 +      if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 +              return;
 +
 +      batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
 +                          readl(cp->regs + REG_ENTROPY_IV),
 +                          sizeof(uint64_t)*8);
 +#endif
 +}
 +
 +static inline void cas_entropy_reset(struct cas *cp)
 +{
 +#ifdef USE_ENTROPY_DEV
 +      if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 +              return;
 +
 +      writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
 +             cp->regs + REG_BIM_LOCAL_DEV_EN);
 +      writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
 +      writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
 +
 +      /* if we read back 0x0, we don't have an entropy device */
 +      if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
 +              cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
 +#endif
 +}
 +
 +/* access to the phy. the following assumes that we've initialized the MIF to
 + * be in frame rather than bit-bang mode
 + */
 +static u16 cas_phy_read(struct cas *cp, int reg)
 +{
 +      u32 cmd;
 +      int limit = STOP_TRIES_PHY;
 +
 +      cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
 +      cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 +      cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 +      cmd |= MIF_FRAME_TURN_AROUND_MSB;
 +      writel(cmd, cp->regs + REG_MIF_FRAME);
 +
 +      /* poll for completion */
 +      while (limit-- > 0) {
 +              udelay(10);
 +              cmd = readl(cp->regs + REG_MIF_FRAME);
 +              if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 +                      return cmd & MIF_FRAME_DATA_MASK;
 +      }
 +      return 0xFFFF; /* -1 */
 +}
 +
 +static int cas_phy_write(struct cas *cp, int reg, u16 val)
 +{
 +      int limit = STOP_TRIES_PHY;
 +      u32 cmd;
 +
 +      cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
 +      cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 +      cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 +      cmd |= MIF_FRAME_TURN_AROUND_MSB;
 +      cmd |= val & MIF_FRAME_DATA_MASK;
 +      writel(cmd, cp->regs + REG_MIF_FRAME);
 +
 +      /* poll for completion */
 +      while (limit-- > 0) {
 +              udelay(10);
 +              cmd = readl(cp->regs + REG_MIF_FRAME);
 +              if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 +                      return 0;
 +      }
 +      return -1;
 +}
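/*
 * Editor's sketch (not part of the driver): the bounded polling loop
 * that cas_phy_read()/cas_phy_write() use while waiting for the MIF
 * frame's turn-around bit.  The "register" below is simulated so the
 * program runs standalone; bit names and the retry count are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_DONE_BIT  0x10000u
#define FAKE_DATA_MASK 0xffffu
#define POLL_TRIES     1000

static uint32_t fake_reg;
static int fake_countdown = 3;          /* pretend completion takes 3 polls */

static uint32_t fake_read(void)
{
        if (fake_countdown > 0 && --fake_countdown == 0)
                fake_reg |= FAKE_DONE_BIT | 0x1234;
        return fake_reg;
}

int main(void)
{
        int limit = POLL_TRIES;

        while (limit-- > 0) {
                uint32_t v = fake_read();

                if (v & FAKE_DONE_BIT) {
                        printf("completed, data=0x%x\n", v & FAKE_DATA_MASK);
                        return 0;
                }
        }
        printf("timed out\n");
        return 1;
}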
 +
 +static void cas_phy_powerup(struct cas *cp)
 +{
 +      u16 ctl = cas_phy_read(cp, MII_BMCR);
 +
 +      if ((ctl & BMCR_PDOWN) == 0)
 +              return;
 +      ctl &= ~BMCR_PDOWN;
 +      cas_phy_write(cp, MII_BMCR, ctl);
 +}
 +
 +static void cas_phy_powerdown(struct cas *cp)
 +{
 +      u16 ctl = cas_phy_read(cp, MII_BMCR);
 +
 +      if (ctl & BMCR_PDOWN)
 +              return;
 +      ctl |= BMCR_PDOWN;
 +      cas_phy_write(cp, MII_BMCR, ctl);
 +}
 +
 +/* cp->lock held. note: the last put_page will free the buffer */
 +static int cas_page_free(struct cas *cp, cas_page_t *page)
 +{
 +      pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
 +                     PCI_DMA_FROMDEVICE);
 +      __free_pages(page->buffer, cp->page_order);
 +      kfree(page);
 +      return 0;
 +}
 +
 +#ifdef RX_COUNT_BUFFERS
 +#define RX_USED_ADD(x, y)       ((x)->used += (y))
 +#define RX_USED_SET(x, y)       ((x)->used  = (y))
 +#else
 +#define RX_USED_ADD(x, y)
 +#define RX_USED_SET(x, y)
 +#endif
 +
 +/* local page allocation routines for the receive buffers. jumbo pages
 + * require at least 8K contiguous and 8K aligned buffers.
 + */
 +static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 +{
 +      cas_page_t *page;
 +
 +      page = kmalloc(sizeof(cas_page_t), flags);
 +      if (!page)
 +              return NULL;
 +
 +      INIT_LIST_HEAD(&page->list);
 +      RX_USED_SET(page, 0);
 +      page->buffer = alloc_pages(flags, cp->page_order);
 +      if (!page->buffer)
 +              goto page_err;
 +      page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
 +                                    cp->page_size, PCI_DMA_FROMDEVICE);
 +      return page;
 +
 +page_err:
 +      kfree(page);
 +      return NULL;
 +}
 +
 +/* initialize spare pool of rx buffers, but allocate during the open */
 +static void cas_spare_init(struct cas *cp)
 +{
 +      spin_lock(&cp->rx_inuse_lock);
 +      INIT_LIST_HEAD(&cp->rx_inuse_list);
 +      spin_unlock(&cp->rx_inuse_lock);
 +
 +      spin_lock(&cp->rx_spare_lock);
 +      INIT_LIST_HEAD(&cp->rx_spare_list);
 +      cp->rx_spares_needed = RX_SPARE_COUNT;
 +      spin_unlock(&cp->rx_spare_lock);
 +}
 +
 +/* used on close. free all the spare buffers. */
 +static void cas_spare_free(struct cas *cp)
 +{
 +      struct list_head list, *elem, *tmp;
 +
 +      /* free spare buffers */
 +      INIT_LIST_HEAD(&list);
 +      spin_lock(&cp->rx_spare_lock);
 +      list_splice_init(&cp->rx_spare_list, &list);
 +      spin_unlock(&cp->rx_spare_lock);
 +      list_for_each_safe(elem, tmp, &list) {
 +              cas_page_free(cp, list_entry(elem, cas_page_t, list));
 +      }
 +
 +      INIT_LIST_HEAD(&list);
 +#if 1
 +      /*
 +       * Looks like Adrian had protected this with a different
 +       * lock than used everywhere else to manipulate this list.
 +       */
 +      spin_lock(&cp->rx_inuse_lock);
 +      list_splice_init(&cp->rx_inuse_list, &list);
 +      spin_unlock(&cp->rx_inuse_lock);
 +#else
 +      spin_lock(&cp->rx_spare_lock);
 +      list_splice_init(&cp->rx_inuse_list, &list);
 +      spin_unlock(&cp->rx_spare_lock);
 +#endif
 +      list_for_each_safe(elem, tmp, &list) {
 +              cas_page_free(cp, list_entry(elem, cas_page_t, list));
 +      }
 +}
 +
 +/* replenish spares if needed */
 +static void cas_spare_recover(struct cas *cp, const gfp_t flags)
 +{
 +      struct list_head list, *elem, *tmp;
 +      int needed, i;
 +
 +      /* check inuse list. if we don't need any more free buffers,
 +       * just free it
 +       */
 +
 +      /* make a local copy of the list */
 +      INIT_LIST_HEAD(&list);
 +      spin_lock(&cp->rx_inuse_lock);
 +      list_splice_init(&cp->rx_inuse_list, &list);
 +      spin_unlock(&cp->rx_inuse_lock);
 +
 +      list_for_each_safe(elem, tmp, &list) {
 +              cas_page_t *page = list_entry(elem, cas_page_t, list);
 +
 +              /*
 +               * With the lockless pagecache, the cassini buffering scheme gets
 +               * slightly less accurate: we might find that a page has an
 +               * elevated reference count here, due to a speculative ref,
 +               * and skip it as in-use. Ideally we would be able to reclaim
 +               * it. However this would be such a rare case, it doesn't
 +               * matter too much as we should pick it up the next time round.
 +               *
 +               * Importantly, if we find that the page has a refcount of 1
 +               * here (our refcount), then we know it is definitely not inuse
 +               * so we can reuse it.
 +               */
 +              if (page_count(page->buffer) > 1)
 +                      continue;
 +
 +              list_del(elem);
 +              spin_lock(&cp->rx_spare_lock);
 +              if (cp->rx_spares_needed > 0) {
 +                      list_add(elem, &cp->rx_spare_list);
 +                      cp->rx_spares_needed--;
 +                      spin_unlock(&cp->rx_spare_lock);
 +              } else {
 +                      spin_unlock(&cp->rx_spare_lock);
 +                      cas_page_free(cp, page);
 +              }
 +      }
 +
 +      /* put any inuse buffers back on the list */
 +      if (!list_empty(&list)) {
 +              spin_lock(&cp->rx_inuse_lock);
 +              list_splice(&list, &cp->rx_inuse_list);
 +              spin_unlock(&cp->rx_inuse_lock);
 +      }
 +
 +      spin_lock(&cp->rx_spare_lock);
 +      needed = cp->rx_spares_needed;
 +      spin_unlock(&cp->rx_spare_lock);
 +      if (!needed)
 +              return;
 +
 +      /* we still need spares, so try to allocate some */
 +      INIT_LIST_HEAD(&list);
 +      i = 0;
 +      while (i < needed) {
 +              cas_page_t *spare = cas_page_alloc(cp, flags);
 +              if (!spare)
 +                      break;
 +              list_add(&spare->list, &list);
 +              i++;
 +      }
 +
 +      spin_lock(&cp->rx_spare_lock);
 +      list_splice(&list, &cp->rx_spare_list);
 +      cp->rx_spares_needed -= i;
 +      spin_unlock(&cp->rx_spare_lock);
 +}
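/*
 * Editor's sketch (not part of the driver): the reclaim decision made
 * in cas_spare_recover() above -- a buffer whose reference count is
 * back to 1 (only ours) is safe to reuse, anything higher is skipped
 * until a later pass.  Structures and counts below are invented.
 */
#include <stdio.h>

struct fake_buf {
        int refcount;
};

int main(void)
{
        struct fake_buf bufs[] = { { 1 }, { 3 }, { 1 }, { 2 } };
        int reclaimed = 0;

        for (unsigned i = 0; i < sizeof(bufs) / sizeof(bufs[0]); i++) {
                if (bufs[i].refcount > 1)
                        continue;       /* still in use elsewhere, try later */
                reclaimed++;
        }
        printf("reclaimed %d of %zu buffers\n",
               reclaimed, sizeof(bufs) / sizeof(bufs[0]));
        return 0;
}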
 +
 +/* pull a page from the list. */
 +static cas_page_t *cas_page_dequeue(struct cas *cp)
 +{
 +      struct list_head *entry;
 +      int recover;
 +
 +      spin_lock(&cp->rx_spare_lock);
 +      if (list_empty(&cp->rx_spare_list)) {
 +              /* try to do a quick recovery */
 +              spin_unlock(&cp->rx_spare_lock);
 +              cas_spare_recover(cp, GFP_ATOMIC);
 +              spin_lock(&cp->rx_spare_lock);
 +              if (list_empty(&cp->rx_spare_list)) {
 +                      netif_err(cp, rx_err, cp->dev,
 +                                "no spare buffers available\n");
 +                      spin_unlock(&cp->rx_spare_lock);
 +                      return NULL;
 +              }
 +      }
 +
 +      entry = cp->rx_spare_list.next;
 +      list_del(entry);
 +      recover = ++cp->rx_spares_needed;
 +      spin_unlock(&cp->rx_spare_lock);
 +
 +      /* trigger the timer to do the recovery */
 +      if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
 +#if 1
 +              atomic_inc(&cp->reset_task_pending);
 +              atomic_inc(&cp->reset_task_pending_spare);
 +              schedule_work(&cp->reset_task);
 +#else
 +              atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
 +              schedule_work(&cp->reset_task);
 +#endif
 +      }
 +      return list_entry(entry, cas_page_t, list);
 +}
 +
 +
 +static void cas_mif_poll(struct cas *cp, const int enable)
 +{
 +      u32 cfg;
 +
 +      cfg  = readl(cp->regs + REG_MIF_CFG);
 +      cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
 +
 +      if (cp->phy_type & CAS_PHY_MII_MDIO1)
 +              cfg |= MIF_CFG_PHY_SELECT;
 +
 +      /* poll and interrupt on link status change. */
 +      if (enable) {
 +              cfg |= MIF_CFG_POLL_EN;
 +              cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
 +              cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
 +      }
 +      writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
 +             cp->regs + REG_MIF_MASK);
 +      writel(cfg, cp->regs + REG_MIF_CFG);
 +}
 +
 +/* Must be invoked under cp->lock */
 +static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
 +{
 +      u16 ctl;
 +#if 1
 +      int lcntl;
 +      int changed = 0;
 +      int oldstate = cp->lstate;
 +      int link_was_not_down = !(oldstate == link_down);
 +#endif
 +      /* Setup link parameters */
 +      if (!ep)
 +              goto start_aneg;
 +      lcntl = cp->link_cntl;
 +      if (ep->autoneg == AUTONEG_ENABLE)
 +              cp->link_cntl = BMCR_ANENABLE;
 +      else {
 +              u32 speed = ethtool_cmd_speed(ep);
 +              cp->link_cntl = 0;
 +              if (speed == SPEED_100)
 +                      cp->link_cntl |= BMCR_SPEED100;
 +              else if (speed == SPEED_1000)
 +                      cp->link_cntl |= CAS_BMCR_SPEED1000;
 +              if (ep->duplex == DUPLEX_FULL)
 +                      cp->link_cntl |= BMCR_FULLDPLX;
 +      }
 +#if 1
 +      changed = (lcntl != cp->link_cntl);
 +#endif
 +start_aneg:
 +      if (cp->lstate == link_up) {
 +              netdev_info(cp->dev, "PCS link down\n");
 +      } else {
 +              if (changed) {
 +                      netdev_info(cp->dev, "link configuration changed\n");
 +              }
 +      }
 +      cp->lstate = link_down;
 +      cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 +      if (!cp->hw_running)
 +              return;
 +#if 1
 +      /*
 +       * WTZ: If the old state was link_up, we turn off the carrier
 +       * to replicate everything we do elsewhere on a link-down
 +       * event when we were already in a link-up state.
 +       */
 +      if (oldstate == link_up)
 +              netif_carrier_off(cp->dev);
 +      if (changed  && link_was_not_down) {
 +              /*
 +               * WTZ: This branch will simply schedule a full reset after
 +               * we explicitly changed link modes in an ioctl. See if this
 +               * fixes the link-problems we were having for forced mode.
 +               */
 +              atomic_inc(&cp->reset_task_pending);
 +              atomic_inc(&cp->reset_task_pending_all);
 +              schedule_work(&cp->reset_task);
 +              cp->timer_ticks = 0;
 +              mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 +              return;
 +      }
 +#endif
 +      if (cp->phy_type & CAS_PHY_SERDES) {
 +              u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
 +
 +              if (cp->link_cntl & BMCR_ANENABLE) {
 +                      val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
 +                      cp->lstate = link_aneg;
 +              } else {
 +                      if (cp->link_cntl & BMCR_FULLDPLX)
 +                              val |= PCS_MII_CTRL_DUPLEX;
 +                      val &= ~PCS_MII_AUTONEG_EN;
 +                      cp->lstate = link_force_ok;
 +              }
 +              cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 +              writel(val, cp->regs + REG_PCS_MII_CTRL);
 +
 +      } else {
 +              cas_mif_poll(cp, 0);
 +              ctl = cas_phy_read(cp, MII_BMCR);
 +              ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
 +                       CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
 +              ctl |= cp->link_cntl;
 +              if (ctl & BMCR_ANENABLE) {
 +                      ctl |= BMCR_ANRESTART;
 +                      cp->lstate = link_aneg;
 +              } else {
 +                      cp->lstate = link_force_ok;
 +              }
 +              cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 +              cas_phy_write(cp, MII_BMCR, ctl);
 +              cas_mif_poll(cp, 1);
 +      }
 +
 +      cp->timer_ticks = 0;
 +      mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 +}
 +
 +/* Must be invoked under cp->lock. */
 +static int cas_reset_mii_phy(struct cas *cp)
 +{
 +      int limit = STOP_TRIES_PHY;
 +      u16 val;
 +
 +      cas_phy_write(cp, MII_BMCR, BMCR_RESET);
 +      udelay(100);
 +      while (--limit) {
 +              val = cas_phy_read(cp, MII_BMCR);
 +              if ((val & BMCR_RESET) == 0)
 +                      break;
 +              udelay(10);
 +      }
 +      return limit <= 0;
 +}
 +
 +static int cas_saturn_firmware_init(struct cas *cp)
 +{
 +      const struct firmware *fw;
 +      const char fw_name[] = "sun/cassini.bin";
 +      int err;
 +
 +      if (PHY_NS_DP83065 != cp->phy_id)
 +              return 0;
 +
 +      err = request_firmware(&fw, fw_name, &cp->pdev->dev);
 +      if (err) {
 +              pr_err("Failed to load firmware \"%s\"\n",
 +                     fw_name);
 +              return err;
 +      }
 +      if (fw->size < 2) {
 +              pr_err("bogus length %zu in \"%s\"\n",
 +                     fw->size, fw_name);
 +              err = -EINVAL;
 +              goto out;
 +      }
 +      cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
 +      cp->fw_size = fw->size - 2;
 +      cp->fw_data = vmalloc(cp->fw_size);
 +      if (!cp->fw_data) {
 +              err = -ENOMEM;
 +              pr_err("\"%s\" Failed %d\n", fw_name, err);
 +              goto out;
 +      }
 +      memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
 +out:
 +      release_firmware(fw);
 +      return err;
 +}
 +
 +static void cas_saturn_firmware_load(struct cas *cp)
 +{
 +      int i;
 +
 +      cas_phy_powerdown(cp);
 +
 +      /* expanded memory access mode */
 +      cas_phy_write(cp, DP83065_MII_MEM, 0x0);
 +
 +      /* pointer configuration for new firmware */
 +      cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
 +      cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
 +      cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
 +      cas_phy_write(cp, DP83065_MII_REGD, 0x82);
 +      cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
 +      cas_phy_write(cp, DP83065_MII_REGD, 0x0);
 +      cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
 +      cas_phy_write(cp, DP83065_MII_REGD, 0x39);
 +
 +      /* download new firmware */
 +      cas_phy_write(cp, DP83065_MII_MEM, 0x1);
 +      cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
 +      for (i = 0; i < cp->fw_size; i++)
 +              cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
 +
 +      /* enable firmware */
 +      cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
 +      cas_phy_write(cp, DP83065_MII_REGD, 0x1);
 +}
 +
 +
 +/* phy initialization */
 +static void cas_phy_init(struct cas *cp)
 +{
 +      u16 val;
 +
 +      /* if we're in MII/GMII mode, set up phy */
 +      if (CAS_PHY_MII(cp->phy_type)) {
 +              writel(PCS_DATAPATH_MODE_MII,
 +                     cp->regs + REG_PCS_DATAPATH_MODE);
 +
 +              cas_mif_poll(cp, 0);
 +              cas_reset_mii_phy(cp); /* take out of isolate mode */
 +
 +              if (PHY_LUCENT_B0 == cp->phy_id) {
 +                      /* workaround link up/down issue with lucent */
 +                      cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
 +                      cas_phy_write(cp, MII_BMCR, 0x00f1);
 +                      cas_phy_write(cp, LUCENT_MII_REG, 0x0);
 +
 +              } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
 +                      /* workarounds for broadcom phy */
 +                      cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
 +                      cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
 +                      cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
 +                      cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
 +                      cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
 +                      cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 +                      cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
 +                      cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 +                      cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
 +                      cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
 +                      cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
 +
 +              } else if (PHY_BROADCOM_5411 == cp->phy_id) {
 +                      val = cas_phy_read(cp, BROADCOM_MII_REG4);
 +                      val = cas_phy_read(cp, BROADCOM_MII_REG4);
 +                      if (val & 0x0080) {
 +                              /* link workaround */
 +                              cas_phy_write(cp, BROADCOM_MII_REG4,
 +                                            val & ~0x0080);
 +                      }
 +
 +              } else if (cp->cas_flags & CAS_FLAG_SATURN) {
 +                      writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
 +                             SATURN_PCFG_FSI : 0x0,
 +                             cp->regs + REG_SATURN_PCFG);
 +
 +                      /* load firmware to address 10Mbps auto-negotiation
 +                       * issue. NOTE: this will need to be changed if the
 +                       * default firmware gets fixed.
 +                       */
 +                      if (PHY_NS_DP83065 == cp->phy_id) {
 +                              cas_saturn_firmware_load(cp);
 +                      }
 +                      cas_phy_powerup(cp);
 +              }
 +
 +              /* advertise capabilities */
 +              val = cas_phy_read(cp, MII_BMCR);
 +              val &= ~BMCR_ANENABLE;
 +              cas_phy_write(cp, MII_BMCR, val);
 +              udelay(10);
 +
 +              cas_phy_write(cp, MII_ADVERTISE,
 +                            cas_phy_read(cp, MII_ADVERTISE) |
 +                            (ADVERTISE_10HALF | ADVERTISE_10FULL |
 +                             ADVERTISE_100HALF | ADVERTISE_100FULL |
 +                             CAS_ADVERTISE_PAUSE |
 +                             CAS_ADVERTISE_ASYM_PAUSE));
 +
 +              if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
 +                      /* make sure that we don't advertise half
 +                       * duplex to avoid a chip issue
 +                       */
 +                      val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
 +                      val &= ~CAS_ADVERTISE_1000HALF;
 +                      val |= CAS_ADVERTISE_1000FULL;
 +                      cas_phy_write(cp, CAS_MII_1000_CTRL, val);
 +              }
 +
 +      } else {
 +              /* reset pcs for serdes */
 +              u32 val;
 +              int limit;
 +
 +              writel(PCS_DATAPATH_MODE_SERDES,
 +                     cp->regs + REG_PCS_DATAPATH_MODE);
 +
 +              /* enable serdes pins on saturn */
 +              if (cp->cas_flags & CAS_FLAG_SATURN)
 +                      writel(0, cp->regs + REG_SATURN_PCFG);
 +
 +              /* Reset PCS unit. */
 +              val = readl(cp->regs + REG_PCS_MII_CTRL);
 +              val |= PCS_MII_RESET;
 +              writel(val, cp->regs + REG_PCS_MII_CTRL);
 +
 +              limit = STOP_TRIES;
 +              while (--limit > 0) {
 +                      udelay(10);
 +                      if ((readl(cp->regs + REG_PCS_MII_CTRL) &
 +                           PCS_MII_RESET) == 0)
 +                              break;
 +              }
 +              if (limit <= 0)
 +                      netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
 +                                  readl(cp->regs + REG_PCS_STATE_MACHINE));
 +
 +              /* Make sure PCS is disabled while changing advertisement
 +               * configuration.
 +               */
 +              writel(0x0, cp->regs + REG_PCS_CFG);
 +
 +              /* Advertise all capabilities except half-duplex. */
 +              val  = readl(cp->regs + REG_PCS_MII_ADVERT);
 +              val &= ~PCS_MII_ADVERT_HD;
 +              val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
 +                      PCS_MII_ADVERT_ASYM_PAUSE);
 +              writel(val, cp->regs + REG_PCS_MII_ADVERT);
 +
 +              /* enable PCS */
 +              writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
 +
 +              /* pcs workaround: enable sync detect */
 +              writel(PCS_SERDES_CTRL_SYNCD_EN,
 +                     cp->regs + REG_PCS_SERDES_CTRL);
 +      }
 +}
 +
 +
 +static int cas_pcs_link_check(struct cas *cp)
 +{
 +      u32 stat, state_machine;
 +      int retval = 0;
 +
 +      /* The link status bit latches on zero, so you must
 +       * read it twice in such a case to see a transition
 +       * to the link being up.
 +       */
 +      stat = readl(cp->regs + REG_PCS_MII_STATUS);
 +      if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
 +              stat = readl(cp->regs + REG_PCS_MII_STATUS);
 +
 +      /* The remote-fault indication is only valid
 +       * when autoneg has completed.
 +       */
 +      if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
 +                   PCS_MII_STATUS_REMOTE_FAULT)) ==
 +          (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
 +              netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
 +
 +      /* work around link detection issue by querying the PCS state
 +       * machine directly.
 +       */
 +      state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
 +      if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
 +              stat &= ~PCS_MII_STATUS_LINK_STATUS;
 +      } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
 +              stat |= PCS_MII_STATUS_LINK_STATUS;
 +      }
 +
 +      if (stat & PCS_MII_STATUS_LINK_STATUS) {
 +              if (cp->lstate != link_up) {
 +                      if (cp->opened) {
 +                              cp->lstate = link_up;
 +                              cp->link_transition = LINK_TRANSITION_LINK_UP;
 +
 +                              cas_set_link_modes(cp);
 +                              netif_carrier_on(cp->dev);
 +                      }
 +              }
 +      } else if (cp->lstate == link_up) {
 +              cp->lstate = link_down;
 +              if (link_transition_timeout != 0 &&
 +                  cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
 +                  !cp->link_transition_jiffies_valid) {
 +                      /*
 +                       * force a reset, as a workaround for the
 +                       * link-failure problem. May want to move this to a
 +                       * point a bit earlier in the sequence. If we had
 +                       * generated a reset a short time ago, we'll wait for
 +                       * the link timer to check the status until a
 +                       * timer expires (link_transition_jiffies_valid is
 +                       * true when the timer is running.)  Instead of using
 +                       * a system timer, we just do a check whenever the
 +                       * link timer is running - this clears the flag after
 +                       * a suitable delay.
 +                       */
 +                      retval = 1;
 +                      cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
 +                      cp->link_transition_jiffies = jiffies;
 +                      cp->link_transition_jiffies_valid = 1;
 +              } else {
 +                      cp->link_transition = LINK_TRANSITION_ON_FAILURE;
 +              }
 +              netif_carrier_off(cp->dev);
 +              if (cp->opened)
 +                      netif_info(cp, link, cp->dev, "PCS link down\n");
 +
 +              /* Cassini only: if you force a mode, there can be
 +               * sync problems on link down. to fix that, the following
 +               * things need to be checked:
 +               * 1) read serialink state register
 +               * 2) read pcs status register to verify link down.
 +               * 3) if link down and serial link == 0x03, then you need
 +               *    to global reset the chip.
 +               */
 +              if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
 +                      /* should check to see if we're in a forced mode */
 +                      stat = readl(cp->regs + REG_PCS_SERDES_STATE);
 +                      if (stat == 0x03)
 +                              return 1;
 +              }
 +      } else if (cp->lstate == link_down) {
 +              if (link_transition_timeout != 0 &&
 +                  cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
 +                  !cp->link_transition_jiffies_valid) {
 +                      /* force a reset, as a workaround for the
 +                       * link-failure problem.  May want to move
 +                       * this to a point a bit earlier in the
 +                       * sequence.
 +                       */
 +                      retval = 1;
 +                      cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
 +                      cp->link_transition_jiffies = jiffies;
 +                      cp->link_transition_jiffies_valid = 1;
 +              } else {
 +                      cp->link_transition = LINK_TRANSITION_STILL_FAILED;
 +              }
 +      }
 +
 +      return retval;
 +}
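/*
 * Editor's sketch (not part of the driver): why cas_pcs_link_check()
 * reads the status register twice.  A latch-on-zero bit keeps showing
 * the old "down" state until it has been read once, so only a second
 * read reflects the current link.  The register below is simulated and
 * the bit name is invented.
 */
#include <stdint.h>
#include <stdio.h>

#define LINK_UP_BIT 0x4u

static uint32_t latched;                        /* latched value: link was down */
static uint32_t current_state = LINK_UP_BIT;    /* link is really up now */

static uint32_t read_status(void)
{
        uint32_t v = latched;

        latched = current_state;        /* reading clears the latch */
        return v;
}

int main(void)
{
        uint32_t stat = read_status();

        if (!(stat & LINK_UP_BIT))
                stat = read_status();   /* second read sees the real state */
        printf("link %s\n", (stat & LINK_UP_BIT) ? "up" : "down");
        return 0;
}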
 +
 +static int cas_pcs_interrupt(struct net_device *dev,
 +                           struct cas *cp, u32 status)
 +{
 +      u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
 +
 +      if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
 +              return 0;
 +      return cas_pcs_link_check(cp);
 +}
 +
 +static int cas_txmac_interrupt(struct net_device *dev,
 +                             struct cas *cp, u32 status)
 +{
 +      u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
 +
 +      if (!txmac_stat)
 +              return 0;
 +
 +      netif_printk(cp, intr, KERN_DEBUG, cp->dev,
 +                   "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
 +
 +      /* Defer timer expiration is quite normal,
 +       * don't even log the event.
 +       */
 +      if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
 +          !(txmac_stat & ~MAC_TX_DEFER_TIMER))
 +              return 0;
 +
 +      spin_lock(&cp->stat_lock[0]);
 +      if (txmac_stat & MAC_TX_UNDERRUN) {
 +              netdev_err(dev, "TX MAC xmit underrun\n");
 +              cp->net_stats[0].tx_fifo_errors++;
 +      }
 +
 +      if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
 +              netdev_err(dev, "TX MAC max packet size error\n");
 +              cp->net_stats[0].tx_errors++;
 +      }
 +
 +      /* The rest are all cases of one of the 16-bit TX
 +       * counters expiring.
 +       */
 +      if (txmac_stat & MAC_TX_COLL_NORMAL)
 +              cp->net_stats[0].collisions += 0x10000;
 +
 +      if (txmac_stat & MAC_TX_COLL_EXCESS) {
 +              cp->net_stats[0].tx_aborted_errors += 0x10000;
 +              cp->net_stats[0].collisions += 0x10000;
 +      }
 +
 +      if (txmac_stat & MAC_TX_COLL_LATE) {
 +              cp->net_stats[0].tx_aborted_errors += 0x10000;
 +              cp->net_stats[0].collisions += 0x10000;
 +      }
 +      spin_unlock(&cp->stat_lock[0]);
 +
 +      /* We do not keep track of MAC_TX_COLL_FIRST and
 +       * MAC_TX_PEAK_ATTEMPTS events.
 +       */
 +      return 0;
 +}
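/*
 * Editor's sketch (not part of the driver): accumulating a hardware
 * counter that is only 16 bits wide, as cas_txmac_interrupt() does by
 * adding 0x10000 each time the chip reports an expiry.  The values
 * below are invented.
 */
#include <stdio.h>

int main(void)
{
        unsigned long collisions = 0;
        int expiries = 3;               /* chip said the counter wrapped 3x */

        while (expiries--)
                collisions += 0x10000;  /* one full 16-bit counter's worth */
        printf("collisions accumulated: %lu\n", collisions);
        return 0;
}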
 +
 +static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
 +{
 +      cas_hp_inst_t *inst;
 +      u32 val;
 +      int i;
 +
 +      i = 0;
 +      while ((inst = firmware) && inst->note) {
 +              writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
 +
 +              val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
 +              val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
 +              writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
 +
 +              val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
 +              val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
 +              val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
 +              val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
 +              val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
 +              val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
 +              val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
 +              writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
 +
 +              val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
 +              val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
 +              val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
 +              val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
 +              writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
 +              ++firmware;
 +              ++i;
 +      }
 +}
 +
 +static void cas_init_rx_dma(struct cas *cp)
 +{
 +      u64 desc_dma = cp->block_dvma;
 +      u32 val;
 +      int i, size;
 +
 +      /* rx free descriptors */
 +      val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
 +      val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
 +      val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
 +      if ((N_RX_DESC_RINGS > 1) &&
 +          (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
 +              val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
 +      writel(val, cp->regs + REG_RX_CFG);
 +
 +      val = (unsigned long) cp->init_rxds[0] -
 +              (unsigned long) cp->init_block;
 +      writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
 +      writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
 +      writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
 +
 +      if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 +              /* rx desc 2 is for IPSEC packets. however,
 +               * we don't use it for that purpose.
 +               */
 +              val = (unsigned long) cp->init_rxds[1] -
 +                      (unsigned long) cp->init_block;
 +              writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
 +              writel((desc_dma + val) & 0xffffffff, cp->regs +
 +                     REG_PLUS_RX_DB1_LOW);
 +              writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
 +                     REG_PLUS_RX_KICK1);
 +      }
 +
 +      /* rx completion registers */
 +      val = (unsigned long) cp->init_rxcs[0] -
 +              (unsigned long) cp->init_block;
 +      writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
 +      writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
 +
 +      if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 +              /* rx comp 2-4 */
 +              for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
 +                      val = (unsigned long) cp->init_rxcs[i] -
 +                              (unsigned long) cp->init_block;
 +                      writel((desc_dma + val) >> 32, cp->regs +
 +                             REG_PLUS_RX_CBN_HI(i));
 +                      writel((desc_dma + val) & 0xffffffff, cp->regs +
 +                             REG_PLUS_RX_CBN_LOW(i));
 +              }
 +      }
 +
 +      /* read selective clear regs to prevent spurious interrupts
 +       * on reset because complete == kick.
 +       * selective clear set up to prevent interrupts on resets
 +       */
 +      readl(cp->regs + REG_INTR_STATUS_ALIAS);
 +      writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
 +      if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 +              for (i = 1; i < N_RX_COMP_RINGS; i++)
 +                      readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
 +
 +              /* 2 is different from 3 and 4 */
 +              if (N_RX_COMP_RINGS > 1)
 +                      writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
 +                             cp->regs + REG_PLUS_ALIASN_CLEAR(1));
 +
 +              for (i = 2; i < N_RX_COMP_RINGS; i++)
 +                      writel(INTR_RX_DONE_ALT,
 +                             cp->regs + REG_PLUS_ALIASN_CLEAR(i));
 +      }
 +
 +      /* set up pause thresholds */
 +      val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
 +                      cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
 +      val |= CAS_BASE(RX_PAUSE_THRESH_ON,
 +                      cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
 +      writel(val, cp->regs + REG_RX_PAUSE_THRESH);
 +
 +      /* zero out dma reassembly buffers */
 +      for (i = 0; i < 64; i++) {
 +              writel(i, cp->regs + REG_RX_TABLE_ADDR);
 +              writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
 +              writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
 +              writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
 +      }
 +
 +      /* make sure address register is 0 for normal operation */
 +      writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
 +      writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
 +
 +      /* interrupt mitigation */
 +#ifdef USE_RX_BLANK
 +      val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
 +      val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
 +      writel(val, cp->regs + REG_RX_BLANK);
 +#else
 +      writel(0x0, cp->regs + REG_RX_BLANK);
 +#endif
 +
 +      /* interrupt generation as a function of low water marks for
 +       * free desc and completion entries. these are used to trigger
 +       * housekeeping for rx descs. we don't use the free interrupt
 +       * as it's not very useful
 +       */
 +      /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
 +      val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
 +      writel(val, cp->regs + REG_RX_AE_THRESH);
 +      if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 +              val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
 +              writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
 +      }
 +
 +      /* Random early detect registers. useful for congestion avoidance.
 +       * this should be tunable.
 +       */
 +      writel(0x0, cp->regs + REG_RX_RED);
 +
 +      /* receive page sizes. default == 2K (0x800) */
 +      val = 0;
 +      if (cp->page_size == 0x1000)
 +              val = 0x1;
 +      else if (cp->page_size == 0x2000)
 +              val = 0x2;
 +      else if (cp->page_size == 0x4000)
 +              val = 0x3;
 +
 +      /* round mtu + offset. constrain to page size. */
 +      size = cp->dev->mtu + 64;
 +      if (size > cp->page_size)
 +              size = cp->page_size;
 +
 +      if (size <= 0x400)
 +              i = 0x0;
 +      else if (size <= 0x800)
 +              i = 0x1;
 +      else if (size <= 0x1000)
 +              i = 0x2;
 +      else
 +              i = 0x3;
 +
 +      cp->mtu_stride = 1 << (i + 10);
 +      val  = CAS_BASE(RX_PAGE_SIZE, val);
 +      val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
 +      val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
 +      val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
 +      writel(val, cp->regs + REG_RX_PAGE_SIZE);
 +
 +      /* enable the header parser if desired */
 +      if (CAS_HP_FIRMWARE == cas_prog_null)
 +              return;
 +
 +      val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
 +      val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
 +      val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
 +      writel(val, cp->regs + REG_HP_CFG);
 +}
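 +
 +/* Worked example for the RX page-size setup in cas_init_rx_dma() above,
 + * numbers assumed: a 1500-byte MTU on 8 KB (0x2000) pages gives
 + * val = 0x2, size = 1500 + 64 = 1564 <= 0x800 so i = 0x1,
 + * mtu_stride = 1 << 11 = 2048 and MTU_COUNT = 0x2000 >> 11 = 4,
 + * i.e. each receive page is carved into four 2 KB MTU strides.
 + */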
 +
 +static inline void cas_rxc_init(struct cas_rx_comp *rxc)
 +{
 +      memset(rxc, 0, sizeof(*rxc));
 +      rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
 +}
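 +
 +/* RX_COMP4_ZERO marks a completion entry as not yet written by the
 + * hardware; cas_rx_ringN() below stops as soon as it sees an entry whose
 + * zero bit is still set.
 + */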
 +
 +/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 + * flipping is protected by the fact that the chip will not
 + * hand back the same page index while it's being processed.
 + */
 +static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
 +{
 +      cas_page_t *page = cp->rx_pages[1][index];
 +      cas_page_t *new;
 +
 +      if (page_count(page->buffer) == 1)
 +              return page;
 +
 +      new = cas_page_dequeue(cp);
 +      if (new) {
 +              spin_lock(&cp->rx_inuse_lock);
 +              list_add(&page->list, &cp->rx_inuse_list);
 +              spin_unlock(&cp->rx_inuse_lock);
 +      }
 +      return new;
 +}
 +
 +/* this needs to be changed if we actually use the ENC RX DESC ring */
 +static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
 +                               const int index)
 +{
 +      cas_page_t **page0 = cp->rx_pages[0];
 +      cas_page_t **page1 = cp->rx_pages[1];
 +
 +      /* swap if buffer is in use */
 +      if (page_count(page0[index]->buffer) > 1) {
 +              cas_page_t *new = cas_page_spare(cp, index);
 +              if (new) {
 +                      page1[index] = page0[index];
 +                      page0[index] = new;
 +              }
 +      }
 +      RX_USED_SET(page0[index], 0);
 +      return page0[index];
 +}
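 +
 +/* In the two helpers above, rx_pages[0] holds the pages currently on the
 + * descriptor ring and rx_pages[1] their spares.  A page whose
 + * page_count() is still > 1 belongs to an skb somewhere up the stack, so
 + * cas_page_swap() rotates it into the spare slot and uses the spare
 + * instead; if the spare itself is busy, cas_page_spare() dequeues a
 + * fresh page and parks the busy spare on rx_inuse_list until its
 + * refcount drops.
 + */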
 +
 +static void cas_clean_rxds(struct cas *cp)
 +{
 +      /* only clean ring 0 as ring 1 is used for spare buffers */
 +      struct cas_rx_desc *rxd = cp->init_rxds[0];
 +      int i, size;
 +
 +      /* release all rx flows */
 +      for (i = 0; i < N_RX_FLOWS; i++) {
 +              struct sk_buff *skb;
 +              while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
 +                      cas_skb_release(skb);
 +              }
 +      }
 +
 +      /* initialize descriptors */
 +      size = RX_DESC_RINGN_SIZE(0);
 +      for (i = 0; i < size; i++) {
 +              cas_page_t *page = cas_page_swap(cp, 0, i);
 +              rxd[i].buffer = cpu_to_le64(page->dma_addr);
 +              rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
 +                                          CAS_BASE(RX_INDEX_RING, 0));
 +      }
 +
 +      cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
 +      cp->rx_last[0] = 0;
 +      cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
 +}
 +
 +static void cas_clean_rxcs(struct cas *cp)
 +{
 +      int i, j;
 +
 +      /* take ownership of rx comp descriptors */
 +      memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
 +      memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
 +      for (i = 0; i < N_RX_COMP_RINGS; i++) {
 +              struct cas_rx_comp *rxc = cp->init_rxcs[i];
 +              for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
 +                      cas_rxc_init(rxc + j);
 +              }
 +      }
 +}
 +
 +#if 0
 +/* When we get a RX fifo overflow, the RX unit is probably hung
 + * so we do the following.
 + *
 + * If any part of the reset goes wrong, we return 1 and that causes the
 + * whole chip to be reset.
 + */
 +static int cas_rxmac_reset(struct cas *cp)
 +{
 +      struct net_device *dev = cp->dev;
 +      int limit;
 +      u32 val;
 +
 +      /* First, reset MAC RX. */
 +      writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
 +      for (limit = 0; limit < STOP_TRIES; limit++) {
 +              if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
 +                      break;
 +              udelay(10);
 +      }
 +      if (limit == STOP_TRIES) {
 +              netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
 +              return 1;
 +      }
 +
 +      /* Second, disable RX DMA. */
 +      writel(0, cp->regs + REG_RX_CFG);
 +      for (limit = 0; limit < STOP_TRIES; limit++) {
 +              if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
 +                      break;
 +              udelay(10);
 +      }
 +      if (limit == STOP_TRIES) {
 +              netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
 +              return 1;
 +      }
 +
 +      mdelay(5);
 +
 +      /* Execute RX reset command. */
 +      writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
 +      for (limit = 0; limit < STOP_TRIES; limit++) {
 +              if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
 +                      break;
 +              udelay(10);
 +      }
 +      if (limit == STOP_TRIES) {
 +              netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
 +              return 1;
 +      }
 +
 +      /* reset driver rx state */
 +      cas_clean_rxds(cp);
 +      cas_clean_rxcs(cp);
 +
 +      /* Now, reprogram the rest of RX unit. */
 +      cas_init_rx_dma(cp);
 +
 +      /* re-enable */
 +      val = readl(cp->regs + REG_RX_CFG);
 +      writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
 +      writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
 +      val = readl(cp->regs + REG_MAC_RX_CFG);
 +      writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
 +      return 0;
 +}
 +#endif
 +
 +static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
 +                             u32 status)
 +{
 +      u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
 +
 +      if (!stat)
 +              return 0;
 +
 +      netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
 +
 +      /* these are all rollovers */
 +      spin_lock(&cp->stat_lock[0]);
 +      if (stat & MAC_RX_ALIGN_ERR)
 +              cp->net_stats[0].rx_frame_errors += 0x10000;
 +
 +      if (stat & MAC_RX_CRC_ERR)
 +              cp->net_stats[0].rx_crc_errors += 0x10000;
 +
 +      if (stat & MAC_RX_LEN_ERR)
 +              cp->net_stats[0].rx_length_errors += 0x10000;
 +
 +      if (stat & MAC_RX_OVERFLOW) {
 +              cp->net_stats[0].rx_over_errors++;
 +              cp->net_stats[0].rx_fifo_errors++;
 +      }
 +
 +      /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
 +       * events.
 +       */
 +      spin_unlock(&cp->stat_lock[0]);
 +      return 0;
 +}
 +
 +static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
 +                           u32 status)
 +{
 +      u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
 +
 +      if (!stat)
 +              return 0;
 +
 +      netif_printk(cp, intr, KERN_DEBUG, cp->dev,
 +                   "mac interrupt, stat: 0x%x\n", stat);
 +
 +      /* This interrupt is just for pause frame and pause
 +       * tracking.  It is useful for diagnostics and debug
 +       * but probably by default we will mask these events.
 +       */
 +      if (stat & MAC_CTRL_PAUSE_STATE)
 +              cp->pause_entered++;
 +
 +      if (stat & MAC_CTRL_PAUSE_RECEIVED)
 +              cp->pause_last_time_recvd = (stat >> 16);
 +
 +      return 0;
 +}
 +
 +
 +/* Must be invoked under cp->lock. */
 +static inline int cas_mdio_link_not_up(struct cas *cp)
 +{
 +      u16 val;
 +
 +      switch (cp->lstate) {
 +      case link_force_ret:
 +              netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
 +              cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
 +              cp->timer_ticks = 5;
 +              cp->lstate = link_force_ok;
 +              cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 +              break;
 +
 +      case link_aneg:
 +              val = cas_phy_read(cp, MII_BMCR);
 +
 +              /* Try forced modes. we try things in the following order:
 +               * 1000 full -> 100 full/half -> 10 half
 +               */
 +              val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
 +              val |= BMCR_FULLDPLX;
 +              val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
 +                      CAS_BMCR_SPEED1000 : BMCR_SPEED100;
 +              cas_phy_write(cp, MII_BMCR, val);
 +              cp->timer_ticks = 5;
 +              cp->lstate = link_force_try;
 +              cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 +              break;
 +
 +      case link_force_try:
 +              /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
 +              val = cas_phy_read(cp, MII_BMCR);
 +              cp->timer_ticks = 5;
 +              if (val & CAS_BMCR_SPEED1000) { /* gigabit */
 +                      val &= ~CAS_BMCR_SPEED1000;
 +                      val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
 +                      cas_phy_write(cp, MII_BMCR, val);
 +                      break;
 +              }
 +
 +              if (val & BMCR_SPEED100) {
 +                      if (val & BMCR_FULLDPLX) { /* fd failed */
 +                              val &= ~BMCR_FULLDPLX;
 +                      } else { /* 100Mbps failed */
 +                              val &= ~BMCR_SPEED100;
 +                      }
 +                      cas_phy_write(cp, MII_BMCR, val);
 +                      break;
 +              }
 +      default:
 +              break;
 +      }
 +      return 0;
 +}
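 +
 +/* The fallback ladder above steps 1000 full duplex (when
 + * CAS_FLAG_1000MB_CAP) -> 100 full -> 100 half -> 10 half, advancing one
 + * rung each time timer_ticks (reset to 5 at every step) climbs past 10
 + * in cas_mii_link_check() below.
 + */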
 +
 +
 +/* must be invoked with cp->lock held */
 +static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
 +{
 +      int restart;
 +
 +      if (bmsr & BMSR_LSTATUS) {
 +              /* Ok, here we got a link. If we had it due to a forced
 +               * fallback, and we were configured for autoneg, we
 +               * retry a short autoneg pass. If you know your hub is
 +               * broken, use ethtool ;)
 +               */
 +              if ((cp->lstate == link_force_try) &&
 +                  (cp->link_cntl & BMCR_ANENABLE)) {
 +                      cp->lstate = link_force_ret;
 +                      cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 +                      cas_mif_poll(cp, 0);
 +                      cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
 +                      cp->timer_ticks = 5;
 +                      if (cp->opened)
 +                              netif_info(cp, link, cp->dev,
 +                                         "Got link after fallback, retrying autoneg once...\n");
 +                      cas_phy_write(cp, MII_BMCR,
 +                                    cp->link_fcntl | BMCR_ANENABLE |
 +                                    BMCR_ANRESTART);
 +                      cas_mif_poll(cp, 1);
 +
 +              } else if (cp->lstate != link_up) {
 +                      cp->lstate = link_up;
 +                      cp->link_transition = LINK_TRANSITION_LINK_UP;
 +
 +                      if (cp->opened) {
 +                              cas_set_link_modes(cp);
 +                              netif_carrier_on(cp->dev);
 +                      }
 +              }
 +              return 0;
 +      }
 +
 +      /* link not up. if the link was previously up, we restart the
 +       * whole process
 +       */
 +      restart = 0;
 +      if (cp->lstate == link_up) {
 +              cp->lstate = link_down;
 +              cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 +
 +              netif_carrier_off(cp->dev);
 +              if (cp->opened)
 +                      netif_info(cp, link, cp->dev, "Link down\n");
 +              restart = 1;
 +
 +      } else if (++cp->timer_ticks > 10)
 +              cas_mdio_link_not_up(cp);
 +
 +      return restart;
 +}
 +
 +static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
 +                           u32 status)
 +{
 +      u32 stat = readl(cp->regs + REG_MIF_STATUS);
 +      u16 bmsr;
 +
 +      /* check for a link change */
 +      if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
 +              return 0;
 +
 +      bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
 +      return cas_mii_link_check(cp, bmsr);
 +}
 +
 +static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
 +                           u32 status)
 +{
 +      u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
 +
 +      if (!stat)
 +              return 0;
 +
 +      netdev_err(dev, "PCI error [%04x:%04x]",
 +                 stat, readl(cp->regs + REG_BIM_DIAG));
 +
 +      /* cassini+ has this reserved */
 +      if ((stat & PCI_ERR_BADACK) &&
 +          ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
 +              pr_cont(" <No ACK64# during ABS64 cycle>");
 +
 +      if (stat & PCI_ERR_DTRTO)
 +              pr_cont(" <Delayed transaction timeout>");
 +      if (stat & PCI_ERR_OTHER)
 +              pr_cont(" <other>");
 +      if (stat & PCI_ERR_BIM_DMA_WRITE)
 +              pr_cont(" <BIM DMA 0 write req>");
 +      if (stat & PCI_ERR_BIM_DMA_READ)
 +              pr_cont(" <BIM DMA 0 read req>");
 +      pr_cont("\n");
 +
 +      if (stat & PCI_ERR_OTHER) {
 +              u16 cfg;
 +
 +              /* Interrogate PCI config space for the
 +               * true cause.
 +               */
 +              pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
 +              netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
 +              if (cfg & PCI_STATUS_PARITY)
 +                      netdev_err(dev, "PCI parity error detected\n");
 +              if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
 +                      netdev_err(dev, "PCI target abort\n");
 +              if (cfg & PCI_STATUS_REC_TARGET_ABORT)
 +                      netdev_err(dev, "PCI master acks target abort\n");
 +              if (cfg & PCI_STATUS_REC_MASTER_ABORT)
 +                      netdev_err(dev, "PCI master abort\n");
 +              if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
 +                      netdev_err(dev, "PCI system error SERR#\n");
 +              if (cfg & PCI_STATUS_DETECTED_PARITY)
 +                      netdev_err(dev, "PCI parity error\n");
 +
 +              /* Write the error bits back to clear them. */
 +              cfg &= (PCI_STATUS_PARITY |
 +                      PCI_STATUS_SIG_TARGET_ABORT |
 +                      PCI_STATUS_REC_TARGET_ABORT |
 +                      PCI_STATUS_REC_MASTER_ABORT |
 +                      PCI_STATUS_SIG_SYSTEM_ERROR |
 +                      PCI_STATUS_DETECTED_PARITY);
 +              pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
 +      }
 +
 +      /* For all PCI errors, we should reset the chip. */
 +      return 1;
 +}
 +
 +/* All non-normal interrupt conditions get serviced here.
 + * Returns non-zero if we should just exit the interrupt
 + * handler right now (ie. if we reset the card which invalidates
 + * all of the other original irq status bits).
 + */
 +static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
 +                          u32 status)
 +{
 +      if (status & INTR_RX_TAG_ERROR) {
 +              /* corrupt RX tag framing */
 +              netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
 +                           "corrupt rx tag framing\n");
 +              spin_lock(&cp->stat_lock[0]);
 +              cp->net_stats[0].rx_errors++;
 +              spin_unlock(&cp->stat_lock[0]);
 +              goto do_reset;
 +      }
 +
 +      if (status & INTR_RX_LEN_MISMATCH) {
 +              /* length mismatch. */
 +              netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
 +                           "length mismatch for rx frame\n");
 +              spin_lock(&cp->stat_lock[0]);
 +              cp->net_stats[0].rx_errors++;
 +              spin_unlock(&cp->stat_lock[0]);
 +              goto do_reset;
 +      }
 +
 +      if (status & INTR_PCS_STATUS) {
 +              if (cas_pcs_interrupt(dev, cp, status))
 +                      goto do_reset;
 +      }
 +
 +      if (status & INTR_TX_MAC_STATUS) {
 +              if (cas_txmac_interrupt(dev, cp, status))
 +                      goto do_reset;
 +      }
 +
 +      if (status & INTR_RX_MAC_STATUS) {
 +              if (cas_rxmac_interrupt(dev, cp, status))
 +                      goto do_reset;
 +      }
 +
 +      if (status & INTR_MAC_CTRL_STATUS) {
 +              if (cas_mac_interrupt(dev, cp, status))
 +                      goto do_reset;
 +      }
 +
 +      if (status & INTR_MIF_STATUS) {
 +              if (cas_mif_interrupt(dev, cp, status))
 +                      goto do_reset;
 +      }
 +
 +      if (status & INTR_PCI_ERROR_STATUS) {
 +              if (cas_pci_interrupt(dev, cp, status))
 +                      goto do_reset;
 +      }
 +      return 0;
 +
 +do_reset:
 +#if 1
 +      atomic_inc(&cp->reset_task_pending);
 +      atomic_inc(&cp->reset_task_pending_all);
 +      netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
 +      schedule_work(&cp->reset_task);
 +#else
 +      atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
 +      netdev_err(dev, "reset called in cas_abnormal_irq\n");
 +      schedule_work(&cp->reset_task);
 +#endif
 +      return 1;
 +}
 +
 +/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 + *       determining whether to do a netif_stop/wakeup
 + */
 +#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
 +#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
 +static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
 +                                const int len)
 +{
 +      unsigned long off = addr + len;
 +
 +      if (CAS_TABORT(cp) == 1)
 +              return 0;
 +      if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
 +              return 0;
 +      return TX_TARGET_ABORT_LEN;
 +}
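 +
 +/* Illustration of the target-abort workaround above (page size assumed):
 + * with 4 KB pages, a buffer that ends 96 bytes short of the next page
 + * boundary has CAS_ROUND_PAGE(off) - off == 96.  If that gap is no more
 + * than TX_TARGET_ABORT_LEN, cas_calc_tabort() returns
 + * TX_TARGET_ABORT_LEN and cas_xmit_tx_ringN() below copies that many
 + * trailing bytes into a tiny bounce buffer rather than DMA-mapping them
 + * from the original location; on chips without CAS_FLAG_TARGET_ABORT it
 + * always returns 0.
 + */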
 +
 +static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
 +{
 +      struct cas_tx_desc *txds;
 +      struct sk_buff **skbs;
 +      struct net_device *dev = cp->dev;
 +      int entry, count;
 +
 +      spin_lock(&cp->tx_lock[ring]);
 +      txds = cp->init_txds[ring];
 +      skbs = cp->tx_skbs[ring];
 +      entry = cp->tx_old[ring];
 +
 +      count = TX_BUFF_COUNT(ring, entry, limit);
 +      while (entry != limit) {
 +              struct sk_buff *skb = skbs[entry];
 +              dma_addr_t daddr;
 +              u32 dlen;
 +              int frag;
 +
 +              if (!skb) {
 +                      /* this should never occur */
 +                      entry = TX_DESC_NEXT(ring, entry);
 +                      continue;
 +              }
 +
 +              /* however, we might get only a partial skb release. */
 +              count -= skb_shinfo(skb)->nr_frags +
 +                      cp->tx_tiny_use[ring][entry].nbufs + 1;
 +              if (count < 0)
 +                      break;
 +
 +              netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
 +                           "tx[%d] done, slot %d\n", ring, entry);
 +
 +              skbs[entry] = NULL;
 +              cp->tx_tiny_use[ring][entry].nbufs = 0;
 +
 +              for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
 +                      struct cas_tx_desc *txd = txds + entry;
 +
 +                      daddr = le64_to_cpu(txd->buffer);
 +                      dlen = CAS_VAL(TX_DESC_BUFLEN,
 +                                     le64_to_cpu(txd->control));
 +                      pci_unmap_page(cp->pdev, daddr, dlen,
 +                                     PCI_DMA_TODEVICE);
 +                      entry = TX_DESC_NEXT(ring, entry);
 +
 +                      /* tiny buffer may follow */
 +                      if (cp->tx_tiny_use[ring][entry].used) {
 +                              cp->tx_tiny_use[ring][entry].used = 0;
 +                              entry = TX_DESC_NEXT(ring, entry);
 +                      }
 +              }
 +
 +              spin_lock(&cp->stat_lock[ring]);
 +              cp->net_stats[ring].tx_packets++;
 +              cp->net_stats[ring].tx_bytes += skb->len;
 +              spin_unlock(&cp->stat_lock[ring]);
 +              dev_kfree_skb_irq(skb);
 +      }
 +      cp->tx_old[ring] = entry;
 +
 +      /* this is wrong for multiple tx rings. the net device needs
 +       * multiple queues for this to do the right thing.  we wait
 +       * for 2*packets to be available when using tiny buffers
 +       */
 +      if (netif_queue_stopped(dev) &&
 +          (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
 +              netif_wake_queue(dev);
 +      spin_unlock(&cp->tx_lock[ring]);
 +}
 +
 +static void cas_tx(struct net_device *dev, struct cas *cp,
 +                 u32 status)
 +{
 +      int limit, ring;
 +#ifdef USE_TX_COMPWB
 +      u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
 +#else
 +      u64 compwb = 0; /* keep the unconditional debug printk below building */
 +#endif
 +      netif_printk(cp, intr, KERN_DEBUG, cp->dev,
 +                   "tx interrupt, status: 0x%x, %llx\n",
 +                   status, (unsigned long long)compwb);
 +      /* process all the rings */
 +      for (ring = 0; ring < N_TX_RINGS; ring++) {
 +#ifdef USE_TX_COMPWB
 +              /* use the completion writeback registers */
 +              limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
 +                      CAS_VAL(TX_COMPWB_LSB, compwb);
 +              compwb = TX_COMPWB_NEXT(compwb);
 +#else
 +              limit = readl(cp->regs + REG_TX_COMPN(ring));
 +#endif
 +              if (cp->tx_old[ring] != limit)
 +                      cas_tx_ringN(cp, ring, limit);
 +      }
 +}
 +
 +
 +static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 +                            int entry, const u64 *words,
 +                            struct sk_buff **skbref)
 +{
 +      int dlen, hlen, len, i, alloclen;
 +      int off, swivel = RX_SWIVEL_OFF_VAL;
 +      struct cas_page *page;
 +      struct sk_buff *skb;
 +      void *addr, *crcaddr;
 +      __sum16 csum;
 +      char *p;
 +
 +      hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
 +      dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
 +      len  = hlen + dlen;
 +
 +      if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
 +              alloclen = len;
 +      else
 +              alloclen = max(hlen, RX_COPY_MIN);
 +
 +      skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
 +      if (skb == NULL)
 +              return -1;
 +
 +      *skbref = skb;
 +      skb_reserve(skb, swivel);
 +
 +      p = skb->data;
 +      addr = crcaddr = NULL;
 +      if (hlen) { /* always copy header pages */
 +              i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
 +              page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
 +              off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
 +                      swivel;
 +
 +              i = hlen;
 +              if (!dlen) /* attach FCS */
 +                      i += cp->crc_size;
 +              pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
 +                                  PCI_DMA_FROMDEVICE);
 +              addr = cas_page_map(page->buffer);
 +              memcpy(p, addr + off, i);
 +              pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
 +                                  PCI_DMA_FROMDEVICE);
 +              cas_page_unmap(addr);
 +              RX_USED_ADD(page, 0x100);
 +              p += hlen;
 +              swivel = 0;
 +      }
 +
 +
 +      if (alloclen < (hlen + dlen)) {
 +              skb_frag_t *frag = skb_shinfo(skb)->frags;
 +
 +              /* normal or jumbo packets. we use frags */
 +              i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
 +              page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
 +              off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
 +
 +              hlen = min(cp->page_size - off, dlen);
 +              if (hlen < 0) {
 +                      netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
 +                                   "rx page overflow: %d\n", hlen);
 +                      dev_kfree_skb_irq(skb);
 +                      return -1;
 +              }
 +              i = hlen;
 +              if (i == dlen)  /* attach FCS */
 +                      i += cp->crc_size;
 +              pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
 +                                  PCI_DMA_FROMDEVICE);
 +
 +              /* make sure we always copy a header */
 +              swivel = 0;
 +              if (p == (char *) skb->data) { /* not split */
 +                      addr = cas_page_map(page->buffer);
 +                      memcpy(p, addr + off, RX_COPY_MIN);
 +                      pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
 +                                      PCI_DMA_FROMDEVICE);
 +                      cas_page_unmap(addr);
 +                      off += RX_COPY_MIN;
 +                      swivel = RX_COPY_MIN;
 +                      RX_USED_ADD(page, cp->mtu_stride);
 +              } else {
 +                      RX_USED_ADD(page, hlen);
 +              }
 +              skb_put(skb, alloclen);
 +
 +              skb_shinfo(skb)->nr_frags++;
 +              skb->data_len += hlen - swivel;
 +              skb->truesize += hlen - swivel;
 +              skb->len      += hlen - swivel;
 +
 +              __skb_frag_set_page(frag, page->buffer);
 +              __skb_frag_ref(frag);
 +              frag->page_offset = off;
 +              frag->size = hlen - swivel;
 +
 +              /* any more data? */
 +              if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
 +                      hlen = dlen;
 +                      off = 0;
 +
 +                      i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
 +                      page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
 +                      pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
 +                                          hlen + cp->crc_size,
 +                                          PCI_DMA_FROMDEVICE);
 +                      pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
 +                                          hlen + cp->crc_size,
 +                                          PCI_DMA_FROMDEVICE);
 +
 +                      skb_shinfo(skb)->nr_frags++;
 +                      skb->data_len += hlen;
 +                      skb->len      += hlen;
 +                      frag++;
 +
 +                      __skb_frag_set_page(frag, page->buffer);
 +                      __skb_frag_ref(frag);
 +                      frag->page_offset = 0;
 +                      frag->size = hlen;
 +                      RX_USED_ADD(page, hlen + cp->crc_size);
 +              }
 +
 +              if (cp->crc_size) {
 +                      addr = cas_page_map(page->buffer);
 +                      crcaddr  = addr + off + hlen;
 +              }
 +
 +      } else {
 +              /* copying packet */
 +              if (!dlen)
 +                      goto end_copy_pkt;
 +
 +              i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
 +              page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
 +              off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
 +              hlen = min(cp->page_size - off, dlen);
 +              if (hlen < 0) {
 +                      netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
 +                                   "rx page overflow: %d\n", hlen);
 +                      dev_kfree_skb_irq(skb);
 +                      return -1;
 +              }
 +              i = hlen;
 +              if (i == dlen) /* attach FCS */
 +                      i += cp->crc_size;
 +              pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
 +                                  PCI_DMA_FROMDEVICE);
 +              addr = cas_page_map(page->buffer);
 +              memcpy(p, addr + off, i);
 +              pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
 +                                  PCI_DMA_FROMDEVICE);
 +              cas_page_unmap(addr);
 +              if (p == (char *) skb->data) /* not split */
 +                      RX_USED_ADD(page, cp->mtu_stride);
 +              else
 +                      RX_USED_ADD(page, i);
 +
 +              /* any more data? */
 +              if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
 +                      p += hlen;
 +                      i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
 +                      page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
 +                      pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
 +                                          dlen + cp->crc_size,
 +                                          PCI_DMA_FROMDEVICE);
 +                      addr = cas_page_map(page->buffer);
 +                      memcpy(p, addr, dlen + cp->crc_size);
 +                      pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
 +                                          dlen + cp->crc_size,
 +                                          PCI_DMA_FROMDEVICE);
 +                      cas_page_unmap(addr);
 +                      RX_USED_ADD(page, dlen + cp->crc_size);
 +              }
 +end_copy_pkt:
 +              if (cp->crc_size) {
 +                      addr    = NULL;
 +                      crcaddr = skb->data + alloclen;
 +              }
 +              skb_put(skb, alloclen);
 +      }
 +
 +      csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
 +      if (cp->crc_size) {
 +              /* checksum includes FCS. strip it out. */
 +              csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
 +                                            csum_unfold(csum)));
 +              if (addr)
 +                      cas_page_unmap(addr);
 +      }
 +      skb->protocol = eth_type_trans(skb, cp->dev);
 +      if (skb->protocol == htons(ETH_P_IP)) {
 +              skb->csum = csum_unfold(~csum);
 +              skb->ip_summed = CHECKSUM_COMPLETE;
 +      } else
 +              skb_checksum_none_assert(skb);
 +      return len;
 +}
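 +
 +/* Receive buffer layout handled above, in short: small packets (and
 + * RX_COPY_ALWAYS builds) are copied whole into the skb; larger packets
 + * get a linear area of max(header len, RX_COPY_MIN) bytes, with the rest
 + * of the data attached as page fragments (a second fragment is used for
 + * split packets that spill into another page).
 + */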
 +
 +
 +/* we can handle up to 64 rx flows at a time. we do the same thing
 + * as nonreassm except that we batch up the buffers.
 + * NOTE: we currently just treat each flow as a bunch of packets that
 + *       we pass up. a better way would be to coalesce the packets
 + *       into a jumbo packet. to do that, we need to do the following:
 + *       1) the first packet will have a clean split between header and
 + *          data. save both.
 + *       2) each time the next flow packet comes in, extend the
 + *          data length and merge the checksums.
 + *       3) on flow release, fix up the header.
 + *       4) make sure the higher layer doesn't care.
 + * because packets get coalesced, we shouldn't run into fragment count
 + * issues.
 + */
 +static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
 +                                 struct sk_buff *skb)
 +{
 +      int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
 +      struct sk_buff_head *flow = &cp->rx_flows[flowid];
 +
 +      /* this is protected at a higher layer, so no need to
 +       * do any additional locking here. stick the buffer
 +       * at the end.
 +       */
 +      __skb_queue_tail(flow, skb);
 +      if (words[0] & RX_COMP1_RELEASE_FLOW) {
 +              while ((skb = __skb_dequeue(flow))) {
 +                      cas_skb_release(skb);
 +              }
 +      }
 +}
 +
 +/* put rx descriptor back on ring. if a buffer is in use by a higher
 + * layer, this will need to put in a replacement.
 + */
 +static void cas_post_page(struct cas *cp, const int ring, const int index)
 +{
 +      cas_page_t *new;
 +      int entry;
 +
 +      entry = cp->rx_old[ring];
 +
 +      new = cas_page_swap(cp, ring, index);
 +      cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
 +      cp->init_rxds[ring][entry].index  =
 +              cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
 +                          CAS_BASE(RX_INDEX_RING, ring));
 +
 +      entry = RX_DESC_ENTRY(ring, entry + 1);
 +      cp->rx_old[ring] = entry;
 +
 +      if (entry % 4)
 +              return;
 +
 +      if (ring == 0)
 +              writel(entry, cp->regs + REG_RX_KICK);
 +      else if ((N_RX_DESC_RINGS > 1) &&
 +               (cp->cas_flags & CAS_FLAG_REG_PLUS))
 +              writel(entry, cp->regs + REG_PLUS_RX_KICK1);
 +}
 +
 +
 +/* only when things are bad */
 +static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
 +{
 +      unsigned int entry, last, count, released;
 +      int cluster;
 +      cas_page_t **page = cp->rx_pages[ring];
 +
 +      entry = cp->rx_old[ring];
 +
 +      netif_printk(cp, intr, KERN_DEBUG, cp->dev,
 +                   "rxd[%d] interrupt, done: %d\n", ring, entry);
 +
 +      cluster = -1;
 +      count = entry & 0x3;
 +      last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
 +      released = 0;
 +      while (entry != last) {
 +              /* make a new buffer if it's still in use */
 +              if (page_count(page[entry]->buffer) > 1) {
 +                      cas_page_t *new = cas_page_dequeue(cp);
 +                      if (!new) {
 +                              /* let the timer know that we need to
 +                               * do this again
 +                               */
 +                              cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
 +                              if (!timer_pending(&cp->link_timer))
 +                                      mod_timer(&cp->link_timer, jiffies +
 +                                                CAS_LINK_FAST_TIMEOUT);
 +                              cp->rx_old[ring]  = entry;
 +                              cp->rx_last[ring] = num ? num - released : 0;
 +                              return -ENOMEM;
 +                      }
 +                      spin_lock(&cp->rx_inuse_lock);
 +                      list_add(&page[entry]->list, &cp->rx_inuse_list);
 +                      spin_unlock(&cp->rx_inuse_lock);
 +                      cp->init_rxds[ring][entry].buffer =
 +                              cpu_to_le64(new->dma_addr);
 +                      page[entry] = new;
 +
 +              }
 +
 +              if (++count == 4) {
 +                      cluster = entry;
 +                      count = 0;
 +              }
 +              released++;
 +              entry = RX_DESC_ENTRY(ring, entry + 1);
 +      }
 +      cp->rx_old[ring] = entry;
 +
 +      if (cluster < 0)
 +              return 0;
 +
 +      if (ring == 0)
 +              writel(cluster, cp->regs + REG_RX_KICK);
 +      else if ((N_RX_DESC_RINGS > 1) &&
 +               (cp->cas_flags & CAS_FLAG_REG_PLUS))
 +              writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
 +      return 0;
 +}
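 +
 +/* Note on the kick writes above and in cas_post_page(): the RX kick
 + * pointer is only advanced on 4-descriptor boundaries (the entry % 4
 + * test, the count/cluster bookkeeping here, and the "ring size - 4"
 + * initial fill), so free descriptors are handed back to the chip in
 + * groups of four.
 + */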
 +
 +
 +/* process a completion ring. packets are set up in three basic ways:
 + * small packets: should be copied header + data in single buffer.
 + * large packets: header and data in a single buffer.
 + * split packets: header in a separate buffer from data.
 + *                data may be in multiple pages. data may be > 256
 + *                bytes but in a single page.
 + *
 + * NOTE: RX page posting is done in this routine as well. while there's
 + *       the capability of using multiple RX completion rings, it isn't
 + *       really worthwhile due to the fact that the page posting will
 + *       force serialization on the single descriptor ring.
 + */
 +static int cas_rx_ringN(struct cas *cp, int ring, int budget)
 +{
 +      struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
 +      int entry, drops;
 +      int npackets = 0;
 +
 +      netif_printk(cp, intr, KERN_DEBUG, cp->dev,
 +                   "rx[%d] interrupt, done: %d/%d\n",
 +                   ring,
 +                   readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
 +
 +      entry = cp->rx_new[ring];
 +      drops = 0;
 +      while (1) {
 +              struct cas_rx_comp *rxc = rxcs + entry;
 +              struct sk_buff *uninitialized_var(skb);
 +              int type, len;
 +              u64 words[4];
 +              int i, dring;
 +
 +              words[0] = le64_to_cpu(rxc->word1);
 +              words[1] = le64_to_cpu(rxc->word2);
 +              words[2] = le64_to_cpu(rxc->word3);
 +              words[3] = le64_to_cpu(rxc->word4);
 +
 +              /* don't touch if still owned by hw */
 +              type = CAS_VAL(RX_COMP1_TYPE, words[0]);
 +              if (type == 0)
 +                      break;
 +
 +              /* hw hasn't cleared the zero bit yet */
 +              if (words[3] & RX_COMP4_ZERO) {
 +                      break;
 +              }
 +
 +              /* get info on the packet */
 +              if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
 +                      spin_lock(&cp->stat_lock[ring]);
 +                      cp->net_stats[ring].rx_errors++;
 +                      if (words[3] & RX_COMP4_LEN_MISMATCH)
 +                              cp->net_stats[ring].rx_length_errors++;
 +                      if (words[3] & RX_COMP4_BAD)
 +                              cp->net_stats[ring].rx_crc_errors++;
 +                      spin_unlock(&cp->stat_lock[ring]);
 +
 +                      /* We'll just return it to Cassini. */
 +              drop_it:
 +                      spin_lock(&cp->stat_lock[ring]);
 +                      ++cp->net_stats[ring].rx_dropped;
 +                      spin_unlock(&cp->stat_lock[ring]);
 +                      goto next;
 +              }
 +
 +              len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
 +              if (len < 0) {
 +                      ++drops;
 +                      goto drop_it;
 +              }
 +
 +              /* see if it's a flow re-assembly or not. the driver
 +               * itself handles release back up.
 +               */
 +              if (RX_DONT_BATCH || (type == 0x2)) {
 +                      /* non-reassm: these always get released */
 +                      cas_skb_release(skb);
 +              } else {
 +                      cas_rx_flow_pkt(cp, words, skb);
 +              }
 +
 +              spin_lock(&cp->stat_lock[ring]);
 +              cp->net_stats[ring].rx_packets++;
 +              cp->net_stats[ring].rx_bytes += len;
 +              spin_unlock(&cp->stat_lock[ring]);
 +
 +      next:
 +              npackets++;
 +
 +              /* should it be released? */
 +              if (words[0] & RX_COMP1_RELEASE_HDR) {
 +                      i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
 +                      dring = CAS_VAL(RX_INDEX_RING, i);
 +                      i = CAS_VAL(RX_INDEX_NUM, i);
 +                      cas_post_page(cp, dring, i);
 +              }
 +
 +              if (words[0] & RX_COMP1_RELEASE_DATA) {
 +                      i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
 +                      dring = CAS_VAL(RX_INDEX_RING, i);
 +                      i = CAS_VAL(RX_INDEX_NUM, i);
 +                      cas_post_page(cp, dring, i);
 +              }
 +
 +              if (words[0] & RX_COMP1_RELEASE_NEXT) {
 +                      i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
 +                      dring = CAS_VAL(RX_INDEX_RING, i);
 +                      i = CAS_VAL(RX_INDEX_NUM, i);
 +                      cas_post_page(cp, dring, i);
 +              }
 +
 +              /* skip to the next entry */
 +              entry = RX_COMP_ENTRY(ring, entry + 1 +
 +                                    CAS_VAL(RX_COMP1_SKIP, words[0]));
 +#ifdef USE_NAPI
 +              if (budget && (npackets >= budget))
 +                      break;
 +#endif
 +      }
 +      cp->rx_new[ring] = entry;
 +
 +      if (drops)
 +              netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
 +      return npackets;
 +}
 +
 +
 +/* put completion entries back on the ring */
 +static void cas_post_rxcs_ringN(struct net_device *dev,
 +                              struct cas *cp, int ring)
 +{
 +      struct cas_rx_comp *rxc = cp->init_rxcs[ring];
 +      int last, entry;
 +
 +      last = cp->rx_cur[ring];
 +      entry = cp->rx_new[ring];
 +      netif_printk(cp, intr, KERN_DEBUG, dev,
 +                   "rxc[%d] interrupt, done: %d/%d\n",
 +                   ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
 +
 +      /* zero and re-mark descriptors */
 +      while (last != entry) {
 +              cas_rxc_init(rxc + last);
 +              last = RX_COMP_ENTRY(ring, last + 1);
 +      }
 +      cp->rx_cur[ring] = last;
 +
 +      if (ring == 0)
 +              writel(last, cp->regs + REG_RX_COMP_TAIL);
 +      else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
 +              writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
 +}
 +
 +/* cassini can use all four PCI interrupts for the completion ring.
 + * rings 3 and 4 are identical
 + */
 +#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 +static inline void cas_handle_irqN(struct net_device *dev,
 +                                 struct cas *cp, const u32 status,
 +                                 const int ring)
 +{
 +      if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
 +              cas_post_rxcs_ringN(dev, cp, ring);
 +}
 +
 +static irqreturn_t cas_interruptN(int irq, void *dev_id)
 +{
 +      struct net_device *dev = dev_id;
 +      struct cas *cp = netdev_priv(dev);
 +      unsigned long flags;
-       ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
++      int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
 +      u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
 +
 +      /* check for shared irq */
 +      if (status == 0)
 +              return IRQ_NONE;
 +
 +      spin_lock_irqsave(&cp->lock, flags);
 +      if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
 +#ifdef USE_NAPI
 +              cas_mask_intr(cp);
 +              napi_schedule(&cp->napi);
 +#else
 +              cas_rx_ringN(cp, ring, 0);
 +#endif
 +              status &= ~INTR_RX_DONE_ALT;
 +      }
 +
 +      if (status)
 +              cas_handle_irqN(dev, cp, status, ring);
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +      return IRQ_HANDLED;
 +}
 +#endif
 +
 +#ifdef USE_PCI_INTB
 +/* everything but rx packets */
 +static inline void cas_handle_irq1(struct cas *cp, const u32 status)
 +{
 +      if (status & INTR_RX_BUF_UNAVAIL_1) {
 +              /* Frame arrived, no free RX buffers available.
 +               * NOTE: we can get this on a link transition. */
 +              cas_post_rxds_ringN(cp, 1, 0);
 +              spin_lock(&cp->stat_lock[1]);
 +              cp->net_stats[1].rx_dropped++;
 +              spin_unlock(&cp->stat_lock[1]);
 +      }
 +
 +      if (status & INTR_RX_BUF_AE_1)
 +              cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
 +                                  RX_AE_FREEN_VAL(1));
 +
 +      if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
 +              cas_post_rxcs_ringN(cp, 1);
 +}
 +
 +/* ring 2 handles a few more events than 3 and 4 */
 +static irqreturn_t cas_interrupt1(int irq, void *dev_id)
 +{
 +      struct net_device *dev = dev_id;
 +      struct cas *cp = netdev_priv(dev);
 +      unsigned long flags;
 +      u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
 +
 +      /* check for shared interrupt */
 +      if (status == 0)
 +              return IRQ_NONE;
 +
 +      spin_lock_irqsave(&cp->lock, flags);
 +      if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
 +#ifdef USE_NAPI
 +              cas_mask_intr(cp);
 +              napi_schedule(&cp->napi);
 +#else
 +              cas_rx_ringN(cp, 1, 0);
 +#endif
 +              status &= ~INTR_RX_DONE_ALT;
 +      }
 +      if (status)
 +              cas_handle_irq1(cp, status);
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +      return IRQ_HANDLED;
 +}
 +#endif
 +
 +static inline void cas_handle_irq(struct net_device *dev,
 +                                struct cas *cp, const u32 status)
 +{
 +      /* housekeeping interrupts */
 +      if (status & INTR_ERROR_MASK)
 +              cas_abnormal_irq(dev, cp, status);
 +
 +      if (status & INTR_RX_BUF_UNAVAIL) {
 +              /* Frame arrived, no free RX buffers available.
 +               * NOTE: we can get this on a link transition.
 +               */
 +              cas_post_rxds_ringN(cp, 0, 0);
 +              spin_lock(&cp->stat_lock[0]);
 +              cp->net_stats[0].rx_dropped++;
 +              spin_unlock(&cp->stat_lock[0]);
 +      } else if (status & INTR_RX_BUF_AE) {
 +              cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
 +                                  RX_AE_FREEN_VAL(0));
 +      }
 +
 +      if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
 +              cas_post_rxcs_ringN(dev, cp, 0);
 +}
 +
 +static irqreturn_t cas_interrupt(int irq, void *dev_id)
 +{
 +      struct net_device *dev = dev_id;
 +      struct cas *cp = netdev_priv(dev);
 +      unsigned long flags;
 +      u32 status = readl(cp->regs + REG_INTR_STATUS);
 +
 +      if (status == 0)
 +              return IRQ_NONE;
 +
 +      spin_lock_irqsave(&cp->lock, flags);
 +      if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
 +              cas_tx(dev, cp, status);
 +              status &= ~(INTR_TX_ALL | INTR_TX_INTME);
 +      }
 +
 +      if (status & INTR_RX_DONE) {
 +#ifdef USE_NAPI
 +              cas_mask_intr(cp);
 +              napi_schedule(&cp->napi);
 +#else
 +              cas_rx_ringN(cp, 0, 0);
 +#endif
 +              status &= ~INTR_RX_DONE;
 +      }
 +
 +      if (status)
 +              cas_handle_irq(dev, cp, status);
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +      return IRQ_HANDLED;
 +}
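 +
 +/* Dispatch order in cas_interrupt() above: TX completions first, then RX
 + * (inline or deferred to NAPI via cas_poll()), and finally the
 + * housekeeping and error bits through cas_handle_irq().
 + */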
 +
 +
 +#ifdef USE_NAPI
 +static int cas_poll(struct napi_struct *napi, int budget)
 +{
 +      struct cas *cp = container_of(napi, struct cas, napi);
 +      struct net_device *dev = cp->dev;
 +      int i, enable_intr, credits;
 +      u32 status = readl(cp->regs + REG_INTR_STATUS);
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&cp->lock, flags);
 +      cas_tx(dev, cp, status);
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +
 +      /* NAPI rx packets. we spread the credits across all of the
 +       * rxc rings
 +       *
 +       * to make sure we're fair with the work we loop through each
 +       * ring N_RX_COMP_RING times with a request of
 +       * budget / N_RX_COMP_RINGS
 +       */
 +      enable_intr = 1;
 +      credits = 0;
 +      for (i = 0; i < N_RX_COMP_RINGS; i++) {
 +              int j;
 +              for (j = 0; j < N_RX_COMP_RINGS; j++) {
 +                      credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
 +                      if (credits >= budget) {
 +                              enable_intr = 0;
 +                              goto rx_comp;
 +                      }
 +              }
 +      }
 +
 +rx_comp:
 +      /* final rx completion */
 +      spin_lock_irqsave(&cp->lock, flags);
 +      if (status)
 +              cas_handle_irq(dev, cp, status);
 +
 +#ifdef USE_PCI_INTB
 +      if (N_RX_COMP_RINGS > 1) {
 +              status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
 +              if (status)
 +                      cas_handle_irq1(cp, status);
 +      }
 +#endif
 +
 +#ifdef USE_PCI_INTC
 +      if (N_RX_COMP_RINGS > 2) {
 +              status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
 +              if (status)
 +                      cas_handle_irqN(dev, cp, status, 2);
 +      }
 +#endif
 +
 +#ifdef USE_PCI_INTD
 +      if (N_RX_COMP_RINGS > 3) {
 +              status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
 +              if (status)
 +                      cas_handle_irqN(dev, cp, status, 3);
 +      }
 +#endif
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +      if (enable_intr) {
 +              napi_complete(napi);
 +              cas_unmask_intr(cp);
 +      }
 +      return credits;
 +}
 +#endif
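 +
 +/* Budget accounting sketch for cas_poll() above (numbers assumed): with
 + * a NAPI budget of 64 and N_RX_COMP_RINGS == 4, each cas_rx_ringN() call
 + * is asked for 16 packets and every ring gets up to four passes per
 + * poll; once the summed credits reach the budget, enable_intr stays
 + * clear so NAPI keeps polling instead of re-enabling interrupts.
 + */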
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void cas_netpoll(struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +
 +      cas_disable_irq(cp, 0);
 +      cas_interrupt(cp->pdev->irq, dev);
 +      cas_enable_irq(cp, 0);
 +
 +#ifdef USE_PCI_INTB
 +      if (N_RX_COMP_RINGS > 1) {
 +              /* cas_interrupt1(); */
 +      }
 +#endif
 +#ifdef USE_PCI_INTC
 +      if (N_RX_COMP_RINGS > 2) {
 +              /* cas_interruptN(); */
 +      }
 +#endif
 +#ifdef USE_PCI_INTD
 +      if (N_RX_COMP_RINGS > 3) {
 +              /* cas_interruptN(); */
 +      }
 +#endif
 +}
 +#endif
 +
 +static void cas_tx_timeout(struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +
 +      netdev_err(dev, "transmit timed out, resetting\n");
 +      if (!cp->hw_running) {
 +              netdev_err(dev, "hrm.. hw not running!\n");
 +              return;
 +      }
 +
 +      netdev_err(dev, "MIF_STATE[%08x]\n",
 +                 readl(cp->regs + REG_MIF_STATE_MACHINE));
 +
 +      netdev_err(dev, "MAC_STATE[%08x]\n",
 +                 readl(cp->regs + REG_MAC_STATE_MACHINE));
 +
 +      netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
 +                 readl(cp->regs + REG_TX_CFG),
 +                 readl(cp->regs + REG_MAC_TX_STATUS),
 +                 readl(cp->regs + REG_MAC_TX_CFG),
 +                 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
 +                 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
 +                 readl(cp->regs + REG_TX_FIFO_READ_PTR),
 +                 readl(cp->regs + REG_TX_SM_1),
 +                 readl(cp->regs + REG_TX_SM_2));
 +
 +      netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
 +                 readl(cp->regs + REG_RX_CFG),
 +                 readl(cp->regs + REG_MAC_RX_STATUS),
 +                 readl(cp->regs + REG_MAC_RX_CFG));
 +
 +      netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
 +                 readl(cp->regs + REG_HP_STATE_MACHINE),
 +                 readl(cp->regs + REG_HP_STATUS0),
 +                 readl(cp->regs + REG_HP_STATUS1),
 +                 readl(cp->regs + REG_HP_STATUS2));
 +
 +#if 1
 +      atomic_inc(&cp->reset_task_pending);
 +      atomic_inc(&cp->reset_task_pending_all);
 +      schedule_work(&cp->reset_task);
 +#else
 +      atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
 +      schedule_work(&cp->reset_task);
 +#endif
 +}
 +
 +static inline int cas_intme(int ring, int entry)
 +{
 +      /* Algorithm: IRQ every 1/2 of descriptors. */
 +      if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
 +              return 1;
 +      return 0;
 +}
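 +
 +/* cas_intme() example (ring size assumed): for a 128-entry TX ring the
 + * mask is (128 >> 1) - 1 = 63, so only entries 0 and 64 set
 + * TX_DESC_INTME, i.e. a TX completion interrupt is requested at most
 + * twice per trip around the ring.
 + */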
 +
 +
 +static void cas_write_txd(struct cas *cp, int ring, int entry,
 +                        dma_addr_t mapping, int len, u64 ctrl, int last)
 +{
 +      struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
 +
 +      ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
 +      if (cas_intme(ring, entry))
 +              ctrl |= TX_DESC_INTME;
 +      if (last)
 +              ctrl |= TX_DESC_EOF;
 +      txd->control = cpu_to_le64(ctrl);
 +      txd->buffer = cpu_to_le64(mapping);
 +}
 +
 +static inline void *tx_tiny_buf(struct cas *cp, const int ring,
 +                              const int entry)
 +{
 +      return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
 +}
 +
 +static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
 +                                   const int entry, const int tentry)
 +{
 +      cp->tx_tiny_use[ring][tentry].nbufs++;
 +      cp->tx_tiny_use[ring][entry].used = 1;
 +      return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
 +}
 +
 +static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 +                                  struct sk_buff *skb)
 +{
 +      struct net_device *dev = cp->dev;
 +      int entry, nr_frags, frag, tabort, tentry;
 +      dma_addr_t mapping;
 +      unsigned long flags;
 +      u64 ctrl;
 +      u32 len;
 +
 +      spin_lock_irqsave(&cp->tx_lock[ring], flags);
 +
 +      /* This is a hard error, log it. */
 +      if (TX_BUFFS_AVAIL(cp, ring) <=
 +          CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
 +              netif_stop_queue(dev);
 +              spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
 +              netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 +              return 1;
 +      }
 +
 +      ctrl = 0;
 +      if (skb->ip_summed == CHECKSUM_PARTIAL) {
 +              const u64 csum_start_off = skb_checksum_start_offset(skb);
 +              const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
 +
 +              ctrl =  TX_DESC_CSUM_EN |
 +                      CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
 +                      CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
 +      }
 +
 +      entry = cp->tx_new[ring];
 +      cp->tx_skbs[ring][entry] = skb;
 +
 +      nr_frags = skb_shinfo(skb)->nr_frags;
 +      len = skb_headlen(skb);
 +      mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
 +                             offset_in_page(skb->data), len,
 +                             PCI_DMA_TODEVICE);
 +
 +      tentry = entry;
 +      tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
 +      if (unlikely(tabort)) {
 +              /* NOTE: len is always >  tabort */
 +              cas_write_txd(cp, ring, entry, mapping, len - tabort,
 +                            ctrl | TX_DESC_SOF, 0);
 +              entry = TX_DESC_NEXT(ring, entry);
 +
 +              skb_copy_from_linear_data_offset(skb, len - tabort,
 +                            tx_tiny_buf(cp, ring, entry), tabort);
 +              mapping = tx_tiny_map(cp, ring, entry, tentry);
 +              cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
 +                            (nr_frags == 0));
 +      } else {
 +              cas_write_txd(cp, ring, entry, mapping, len, ctrl |
 +                            TX_DESC_SOF, (nr_frags == 0));
 +      }
 +      entry = TX_DESC_NEXT(ring, entry);
 +
 +      for (frag = 0; frag < nr_frags; frag++) {
 +              skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
 +
 +              len = fragp->size;
 +              mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
 +                                         PCI_DMA_TODEVICE);
 +
 +              tabort = cas_calc_tabort(cp, fragp->page_offset, len);
 +              if (unlikely(tabort)) {
 +                      void *addr;
 +
 +                      /* NOTE: len is always > tabort */
 +                      cas_write_txd(cp, ring, entry, mapping, len - tabort,
 +                                    ctrl, 0);
 +                      entry = TX_DESC_NEXT(ring, entry);
 +
 +                      addr = cas_page_map(skb_frag_page(fragp));
 +                      memcpy(tx_tiny_buf(cp, ring, entry),
 +                             addr + fragp->page_offset + len - tabort,
 +                             tabort);
 +                      cas_page_unmap(addr);
 +                      mapping = tx_tiny_map(cp, ring, entry, tentry);
 +                      len     = tabort;
 +              }
 +
 +              cas_write_txd(cp, ring, entry, mapping, len, ctrl,
 +                            (frag + 1 == nr_frags));
 +              entry = TX_DESC_NEXT(ring, entry);
 +      }
 +
 +      cp->tx_new[ring] = entry;
 +      if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
 +              netif_stop_queue(dev);
 +
 +      netif_printk(cp, tx_queued, KERN_DEBUG, dev,
 +                   "tx[%d] queued, slot %d, skblen %d, avail %d\n",
 +                   ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
 +      writel(entry, cp->regs + REG_TX_KICKN(ring));
 +      spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
 +      return 0;
 +}
 +
 +static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +
 +      /* this is only used as a load-balancing hint, so it doesn't
 +       * need to be SMP safe
 +       */
 +      static int ring;
 +
 +      if (skb_padto(skb, cp->min_frame_size))
 +              return NETDEV_TX_OK;
 +
 +      /* XXX: we need some higher-level QoS hooks to steer packets to
 +       *      individual queues.
 +       */
 +      if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
 +              return NETDEV_TX_BUSY;
 +      return NETDEV_TX_OK;
 +}
 +
 +static void cas_init_tx_dma(struct cas *cp)
 +{
 +      u64 desc_dma = cp->block_dvma;
 +      unsigned long off;
 +      u32 val;
 +      int i;
 +
 +      /* set up tx completion writeback registers. must be 8-byte aligned */
 +#ifdef USE_TX_COMPWB
 +      off = offsetof(struct cas_init_block, tx_compwb);
 +      writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
 +      writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
 +#endif
 +
 +      /* enable completion writebacks, enable paced mode,
 +       * disable read pipe, and disable pre-interrupt compwbs
 +       */
 +      val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
 +              TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
 +              TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
 +              TX_CFG_INTR_COMPWB_DIS;
 +
 +      /* write out tx ring info and tx desc bases */
 +      for (i = 0; i < MAX_TX_RINGS; i++) {
 +              off = (unsigned long) cp->init_txds[i] -
 +                      (unsigned long) cp->init_block;
 +
 +              val |= CAS_TX_RINGN_BASE(i);
 +              writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
 +              writel((desc_dma + off) & 0xffffffff, cp->regs +
 +                     REG_TX_DBN_LOW(i));
 +              /* don't zero out the kick register here as the system
 +               * will wedge
 +               */
 +      }
 +      writel(val, cp->regs + REG_TX_CFG);
 +
 +      /* program max burst sizes. these numbers should be different
 +       * if doing QoS.
 +       */
 +#ifdef USE_QOS
 +      writel(0x800, cp->regs + REG_TX_MAXBURST_0);
 +      writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
 +      writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
 +      writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
 +#else
 +      writel(0x800, cp->regs + REG_TX_MAXBURST_0);
 +      writel(0x800, cp->regs + REG_TX_MAXBURST_1);
 +      writel(0x800, cp->regs + REG_TX_MAXBURST_2);
 +      writel(0x800, cp->regs + REG_TX_MAXBURST_3);
 +#endif
 +}
 +
 +/* Must be invoked under cp->lock. */
 +static inline void cas_init_dma(struct cas *cp)
 +{
 +      cas_init_tx_dma(cp);
 +      cas_init_rx_dma(cp);
 +}
 +
 +static void cas_process_mc_list(struct cas *cp)
 +{
 +      u16 hash_table[16];
 +      u32 crc;
 +      struct netdev_hw_addr *ha;
 +      int i = 1;
 +
 +      memset(hash_table, 0, sizeof(hash_table));
 +      netdev_for_each_mc_addr(ha, cp->dev) {
 +              if (i <= CAS_MC_EXACT_MATCH_SIZE) {
 +                      /* use the alternate mac address registers for the
 +                       * first 15 multicast addresses
 +                       */
 +                      writel((ha->addr[4] << 8) | ha->addr[5],
 +                             cp->regs + REG_MAC_ADDRN(i*3 + 0));
 +                      writel((ha->addr[2] << 8) | ha->addr[3],
 +                             cp->regs + REG_MAC_ADDRN(i*3 + 1));
 +                      writel((ha->addr[0] << 8) | ha->addr[1],
 +                             cp->regs + REG_MAC_ADDRN(i*3 + 2));
 +                      i++;
 +              }
 +              else {
 +                      /* use hw hash table for the next series of
 +                       * multicast addresses
 +                       */
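 +                      /* The hash filter is 256 bits: indexed by the top 8
 +                       * bits of the little-endian CRC, stored as sixteen
 +                       * 16-bit words, MSB first within each word.
 +                       */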
 +                      crc = ether_crc_le(ETH_ALEN, ha->addr);
 +                      crc >>= 24;
 +                      hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
 +              }
 +      }
 +      for (i = 0; i < 16; i++)
 +              writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
 +}
 +
 +/* Must be invoked under cp->lock. */
 +static u32 cas_setup_multicast(struct cas *cp)
 +{
 +      u32 rxcfg = 0;
 +      int i;
 +
 +      if (cp->dev->flags & IFF_PROMISC) {
 +              rxcfg |= MAC_RX_CFG_PROMISC_EN;
 +
 +      } else if (cp->dev->flags & IFF_ALLMULTI) {
 +              for (i=0; i < 16; i++)
 +                      writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
 +              rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
 +
 +      } else {
 +              cas_process_mc_list(cp);
 +              rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
 +      }
 +
 +      return rxcfg;
 +}
 +
 +/* must be invoked under cp->stat_lock[N_TX_RINGS] */
 +static void cas_clear_mac_err(struct cas *cp)
 +{
 +      writel(0, cp->regs + REG_MAC_COLL_NORMAL);
 +      writel(0, cp->regs + REG_MAC_COLL_FIRST);
 +      writel(0, cp->regs + REG_MAC_COLL_EXCESS);
 +      writel(0, cp->regs + REG_MAC_COLL_LATE);
 +      writel(0, cp->regs + REG_MAC_TIMER_DEFER);
 +      writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
 +      writel(0, cp->regs + REG_MAC_RECV_FRAME);
 +      writel(0, cp->regs + REG_MAC_LEN_ERR);
 +      writel(0, cp->regs + REG_MAC_ALIGN_ERR);
 +      writel(0, cp->regs + REG_MAC_FCS_ERR);
 +      writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
 +}
 +
 +
 +static void cas_mac_reset(struct cas *cp)
 +{
 +      int i;
 +
 +      /* do both TX and RX reset */
 +      writel(0x1, cp->regs + REG_MAC_TX_RESET);
 +      writel(0x1, cp->regs + REG_MAC_RX_RESET);
 +
 +      /* wait for TX */
 +      i = STOP_TRIES;
 +      while (i-- > 0) {
 +              if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
 +                      break;
 +              udelay(10);
 +      }
 +
 +      /* wait for RX */
 +      i = STOP_TRIES;
 +      while (i-- > 0) {
 +              if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
 +                      break;
 +              udelay(10);
 +      }
 +
 +      if (readl(cp->regs + REG_MAC_TX_RESET) |
 +          readl(cp->regs + REG_MAC_RX_RESET))
 +              netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
 +                         readl(cp->regs + REG_MAC_TX_RESET),
 +                         readl(cp->regs + REG_MAC_RX_RESET),
 +                         readl(cp->regs + REG_MAC_STATE_MACHINE));
 +}
 +
 +
 +/* Must be invoked under cp->lock. */
 +static void cas_init_mac(struct cas *cp)
 +{
 +      unsigned char *e = &cp->dev->dev_addr[0];
 +      int i;
 +      cas_mac_reset(cp);
 +
 +      /* setup core arbitration weight register */
 +      writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
 +
 +      /* XXX Use pci_dma_burst_advice() */
 +#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
 +      /* set the infinite burst register for chips that don't have
 +       * pci issues.
 +       */
 +      if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
 +              writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
 +#endif
 +
 +      writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
 +
 +      writel(0x00, cp->regs + REG_MAC_IPG0);
 +      writel(0x08, cp->regs + REG_MAC_IPG1);
 +      writel(0x04, cp->regs + REG_MAC_IPG2);
 +
 +      /* change later for 802.3z */
 +      writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
 +
 +      /* min frame + FCS */
 +      writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
 +
 +      /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
 +       * specify the maximum frame size to prevent RX tag errors on
 +       * oversized frames.
 +       */
 +      writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
 +             CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
 +                      (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
 +             cp->regs + REG_MAC_FRAMESIZE_MAX);
 +
 +      /* NOTE: crc_size is used as a surrogate for half-duplex.
 +       * workaround saturn half-duplex issue by increasing preamble
 +       * size to 65 bytes.
 +       */
 +      if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
 +              writel(0x41, cp->regs + REG_MAC_PA_SIZE);
 +      else
 +              writel(0x07, cp->regs + REG_MAC_PA_SIZE);
 +      writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
 +      writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
 +      writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
 +
 +      writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
 +
 +      writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
 +      writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
 +      writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
 +      writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
 +      writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
 +
 +      /* setup mac address in perfect filter array */
 +      for (i = 0; i < 45; i++)
 +              writel(0x0, cp->regs + REG_MAC_ADDRN(i));
 +
 +      writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
 +      writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
 +      writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
 +
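 +      /* address filter entry for the 802.3x flow-control (PAUSE)
 +       * destination address 01:80:c2:00:00:01
 +       */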
 +      writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
 +      writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
 +      writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
 +
 +      cp->mac_rx_cfg = cas_setup_multicast(cp);
 +
 +      spin_lock(&cp->stat_lock[N_TX_RINGS]);
 +      cas_clear_mac_err(cp);
 +      spin_unlock(&cp->stat_lock[N_TX_RINGS]);
 +
 +      /* Setup MAC interrupts.  We want to get all of the interesting
 +       * counter expiration events, but we do not want to hear about
 +       * normal rx/tx as the DMA engine tells us that.
 +       */
 +      writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
 +      writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
 +
 +      /* Don't enable even the PAUSE interrupts for now, we
 +       * make no use of those events other than to record them.
 +       */
 +      writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
 +}
 +
 +/* Must be invoked under cp->lock. */
 +static void cas_init_pause_thresholds(struct cas *cp)
 +{
 +      /* Calculate pause thresholds.  Setting the OFF threshold to the
 +       * full RX fifo size effectively disables PAUSE generation
 +       */
 +      if (cp->rx_fifo_size <= (2 * 1024)) {
 +              cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
 +      } else {
 +              int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
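 +              /* keep two max-size frames of headroom above the OFF
 +               * threshold and three above the ON threshold; fall back
 +               * to fixed values when the FIFO is too small for that
 +               */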
 +              if (max_frame * 3 > cp->rx_fifo_size) {
 +                      cp->rx_pause_off = 7104;
 +                      cp->rx_pause_on  = 960;
 +              } else {
 +                      int off = (cp->rx_fifo_size - (max_frame * 2));
 +                      int on = off - max_frame;
 +                      cp->rx_pause_off = off;
 +                      cp->rx_pause_on = on;
 +              }
 +      }
 +}
 +
 +static int cas_vpd_match(const void __iomem *p, const char *str)
 +{
 +      int len = strlen(str) + 1;
 +      int i;
 +
 +      for (i = 0; i < len; i++) {
 +              if (readb(p + i) != str[i])
 +                      return 0;
 +      }
 +      return 1;
 +}
 +
 +
 +/* get the mac address by reading the vpd information in the rom.
 + * also get the phy type and determine if there's an entropy generator.
 + * NOTE: this is a bit convoluted for the following reasons:
 + *  1) vpd info has order-dependent mac addresses for multinic cards
 + *  2) the only way to determine the nic order is to use the slot
 + *     number.
 + *  3) fiber cards don't have bridges, so their slot numbers don't
 + *     mean anything.
 + *  4) we don't actually know we have a fiber card until after
 + *     the mac addresses are parsed.
 + */
 +static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
 +                          const int offset)
 +{
 +      void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
 +      void __iomem *base, *kstart;
 +      int i, len;
 +      int found = 0;
 +#define VPD_FOUND_MAC        0x01
 +#define VPD_FOUND_PHY        0x02
 +
 +      int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
 +      int mac_off  = 0;
 +
 +#if defined(CONFIG_SPARC)
 +      const unsigned char *addr;
 +#endif
 +
 +      /* give us access to the PROM */
 +      writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
 +             cp->regs + REG_BIM_LOCAL_DEV_EN);
 +
 +      /* check for an expansion rom */
 +      if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
 +              goto use_random_mac_addr;
 +
 +      /* search for beginning of vpd */
 +      base = NULL;
 +      for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
 +              /* check for PCIR */
 +              if ((readb(p + i + 0) == 0x50) &&
 +                  (readb(p + i + 1) == 0x43) &&
 +                  (readb(p + i + 2) == 0x49) &&
 +                  (readb(p + i + 3) == 0x52)) {
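 +                      /* bytes 8-9 of the PCI data structure give the
 +                       * offset of the VPD area
 +                       */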
 +                      base = p + (readb(p + i + 8) |
 +                                  (readb(p + i + 9) << 8));
 +                      break;
 +              }
 +      }
 +
 +      if (!base || (readb(base) != 0x82))
 +              goto use_random_mac_addr;
 +
 +      i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
 +      while (i < EXPANSION_ROM_SIZE) {
 +              if (readb(base + i) != 0x90) /* no vpd found */
 +                      goto use_random_mac_addr;
 +
 +              /* found a vpd field */
 +              len = readb(base + i + 1) | (readb(base + i + 2) << 8);
 +
 +              /* extract keywords */
 +              kstart = base + i + 3;
 +              p = kstart;
 +              while ((p - kstart) < len) {
 +                      int klen = readb(p + 2);
 +                      int j;
 +                      char type;
 +
 +                      p += 3;
 +
 +                      /* look for the following things:
 +                       * -- correct length == 29
 +                       * 3 (type) + 2 (size) +
 +                       * 18 (strlen("local-mac-address") + 1) +
 +                       * 6 (mac addr)
 +                       * -- VPD Instance 'I'
 +                       * -- VPD Type Bytes 'B'
 +                       * -- VPD data length == 6
 +                       * -- property string == local-mac-address
 +                       *
 +                       * -- correct length == 24
 +                       * 3 (type) + 2 (size) +
 +                       * 12 (strlen("entropy-dev") + 1) +
 +                       * 7 (strlen("vms110") + 1)
 +                       * -- VPD Instance 'I'
 +                       * -- VPD Type String 'B'
 +                       * -- VPD data length == 7
 +                       * -- property string == entropy-dev
 +                       *
 +                       * -- correct length == 18
 +                       * 3 (type) + 2 (size) +
 +                       * 9 (strlen("phy-type") + 1) +
 +                       * 4 (strlen("pcs") + 1)
 +                       * -- VPD Instance 'I'
 +                       * -- VPD Type String 'S'
 +                       * -- VPD data length == 4
 +                       * -- property string == phy-type
 +                       *
 +                       * -- correct length == 23
 +                       * 3 (type) + 2 (size) +
 +                       * 14 (strlen("phy-interface") + 1) +
 +                       * 4 (strlen("pcs") + 1)
 +                       * -- VPD Instance 'I'
 +                       * -- VPD Type String 'S'
 +                       * -- VPD data length == 4
 +                       * -- property string == phy-interface
 +                       */
 +                      if (readb(p) != 'I')
 +                              goto next;
 +
 +                      /* finally, check string and length */
 +                      type = readb(p + 3);
 +                      if (type == 'B') {
 +                              if ((klen == 29) && readb(p + 4) == 6 &&
 +                                  cas_vpd_match(p + 5,
 +                                                "local-mac-address")) {
 +                                      if (mac_off++ > offset)
 +                                              goto next;
 +
 +                                      /* set mac address */
 +                                      for (j = 0; j < 6; j++)
 +                                              dev_addr[j] =
 +                                                      readb(p + 23 + j);
 +                                      goto found_mac;
 +                              }
 +                      }
 +
 +                      if (type != 'S')
 +                              goto next;
 +
 +#ifdef USE_ENTROPY_DEV
 +                      if ((klen == 24) &&
 +                          cas_vpd_match(p + 5, "entropy-dev") &&
 +                          cas_vpd_match(p + 17, "vms110")) {
 +                              cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
 +                              goto next;
 +                      }
 +#endif
 +
 +                      if (found & VPD_FOUND_PHY)
 +                              goto next;
 +
 +                      if ((klen == 18) && readb(p + 4) == 4 &&
 +                          cas_vpd_match(p + 5, "phy-type")) {
 +                              if (cas_vpd_match(p + 14, "pcs")) {
 +                                      phy_type = CAS_PHY_SERDES;
 +                                      goto found_phy;
 +                              }
 +                      }
 +
 +                      if ((klen == 23) && readb(p + 4) == 4 &&
 +                          cas_vpd_match(p + 5, "phy-interface")) {
 +                              if (cas_vpd_match(p + 19, "pcs")) {
 +                                      phy_type = CAS_PHY_SERDES;
 +                                      goto found_phy;
 +                              }
 +                      }
 +found_mac:
 +                      found |= VPD_FOUND_MAC;
 +                      goto next;
 +
 +found_phy:
 +                      found |= VPD_FOUND_PHY;
 +
 +next:
 +                      p += klen;
 +              }
 +              i += len + 3;
 +      }
 +
 +use_random_mac_addr:
 +      if (found & VPD_FOUND_MAC)
 +              goto done;
 +
 +#if defined(CONFIG_SPARC)
 +      addr = of_get_property(cp->of_node, "local-mac-address", NULL);
 +      if (addr != NULL) {
 +              memcpy(dev_addr, addr, 6);
 +              goto done;
 +      }
 +#endif
 +
 +      /* Sun MAC prefix then 3 random bytes. */
 +      pr_info("MAC address not found in ROM VPD\n");
 +      dev_addr[0] = 0x08;
 +      dev_addr[1] = 0x00;
 +      dev_addr[2] = 0x20;
 +      get_random_bytes(dev_addr + 3, 3);
 +
 +done:
 +      writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
 +      return phy_type;
 +}
 +
 +/* check pci invariants */
 +static void cas_check_pci_invariants(struct cas *cp)
 +{
 +      struct pci_dev *pdev = cp->pdev;
 +
 +      cp->cas_flags = 0;
 +      if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
 +          (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
 +              if (pdev->revision >= CAS_ID_REVPLUS)
 +                      cp->cas_flags |= CAS_FLAG_REG_PLUS;
 +              if (pdev->revision < CAS_ID_REVPLUS02u)
 +                      cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
 +
 +              /* Original Cassini supports HW CSUM, but it's not
 +               * enabled by default as it can trigger TX hangs.
 +               */
 +              if (pdev->revision < CAS_ID_REV2)
 +                      cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
 +      } else {
 +              /* Only sun has original cassini chips.  */
 +              cp->cas_flags |= CAS_FLAG_REG_PLUS;
 +
 +              /* We use a flag because the same phy might be externally
 +               * connected.
 +               */
 +              if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
 +                  (pdev->device == PCI_DEVICE_ID_NS_SATURN))
 +                      cp->cas_flags |= CAS_FLAG_SATURN;
 +      }
 +}
 +
 +
 +static int cas_check_invariants(struct cas *cp)
 +{
 +      struct pci_dev *pdev = cp->pdev;
 +      u32 cfg;
 +      int i;
 +
 +      /* get page size for rx buffers. */
 +      cp->page_order = 0;
 +#ifdef USE_PAGE_ORDER
 +      if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
 +              /* see if we can allocate larger pages */
 +              struct page *page = alloc_pages(GFP_ATOMIC,
 +                                              CAS_JUMBO_PAGE_SHIFT -
 +                                              PAGE_SHIFT);
 +              if (page) {
 +                      __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
 +                      cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
 +              } else {
 +                      printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
 +              }
 +      }
 +#endif
 +      cp->page_size = (PAGE_SIZE << cp->page_order);
 +
 +      /* Fetch the FIFO configurations. */
 +      cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
 +      cp->rx_fifo_size = RX_FIFO_SIZE;
 +
 +      /* finish phy determination. MDIO1 takes precedence over MDIO0 if
 +       * they're both connected.
 +       */
 +      cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
 +                                      PCI_SLOT(pdev->devfn));
 +      if (cp->phy_type & CAS_PHY_SERDES) {
 +              cp->cas_flags |= CAS_FLAG_1000MB_CAP;
 +              return 0; /* no more checking needed */
 +      }
 +
 +      /* MII */
 +      cfg = readl(cp->regs + REG_MIF_CFG);
 +      if (cfg & MIF_CFG_MDIO_1) {
 +              cp->phy_type = CAS_PHY_MII_MDIO1;
 +      } else if (cfg & MIF_CFG_MDIO_0) {
 +              cp->phy_type = CAS_PHY_MII_MDIO0;
 +      }
 +
 +      cas_mif_poll(cp, 0);
 +      writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
 +
 +      for (i = 0; i < 32; i++) {
 +              u32 phy_id;
 +              int j;
 +
 +              for (j = 0; j < 3; j++) {
 +                      cp->phy_addr = i;
 +                      phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
 +                      phy_id |= cas_phy_read(cp, MII_PHYSID2);
 +                      if (phy_id && (phy_id != 0xFFFFFFFF)) {
 +                              cp->phy_id = phy_id;
 +                              goto done;
 +                      }
 +              }
 +      }
 +      pr_err("MII phy did not respond [%08x]\n",
 +             readl(cp->regs + REG_MIF_STATE_MACHINE));
 +      return -1;
 +
 +done:
 +      /* see if we can do gigabit */
 +      cfg = cas_phy_read(cp, MII_BMSR);
 +      if ((cfg & CAS_BMSR_1000_EXTEND) &&
 +          cas_phy_read(cp, CAS_MII_1000_EXTEND))
 +              cp->cas_flags |= CAS_FLAG_1000MB_CAP;
 +      return 0;
 +}
 +
 +/* Must be invoked under cp->lock. */
 +static inline void cas_start_dma(struct cas *cp)
 +{
 +      int i;
 +      u32 val;
 +      int txfailed = 0;
 +
 +      /* enable dma */
 +      val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
 +      writel(val, cp->regs + REG_TX_CFG);
 +      val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
 +      writel(val, cp->regs + REG_RX_CFG);
 +
 +      /* enable the mac */
 +      val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
 +      writel(val, cp->regs + REG_MAC_TX_CFG);
 +      val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
 +      writel(val, cp->regs + REG_MAC_RX_CFG);
 +
 +      i = STOP_TRIES;
 +      while (i-- > 0) {
 +              val = readl(cp->regs + REG_MAC_TX_CFG);
 +              if ((val & MAC_TX_CFG_EN))
 +                      break;
 +              udelay(10);
 +      }
 +      if (i < 0)
 +              txfailed = 1;
 +      i = STOP_TRIES;
 +      while (i-- > 0) {
 +              val = readl(cp->regs + REG_MAC_RX_CFG);
 +              if ((val & MAC_RX_CFG_EN)) {
 +                      if (txfailed) {
 +                              netdev_err(cp->dev,
 +                                         "enabling mac failed [tx:%08x:%08x]\n",
 +                                         readl(cp->regs + REG_MIF_STATE_MACHINE),
 +                                         readl(cp->regs + REG_MAC_STATE_MACHINE));
 +                      }
 +                      goto enable_rx_done;
 +              }
 +              udelay(10);
 +      }
 +      netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
 +                 (txfailed ? "tx,rx" : "rx"),
 +                 readl(cp->regs + REG_MIF_STATE_MACHINE),
 +                 readl(cp->regs + REG_MAC_STATE_MACHINE));
 +
 +enable_rx_done:
 +      cas_unmask_intr(cp); /* enable interrupts */
 +      writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
 +      writel(0, cp->regs + REG_RX_COMP_TAIL);
 +
 +      if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 +              if (N_RX_DESC_RINGS > 1)
 +                      writel(RX_DESC_RINGN_SIZE(1) - 4,
 +                             cp->regs + REG_PLUS_RX_KICK1);
 +
 +              for (i = 1; i < N_RX_COMP_RINGS; i++)
 +                      writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
 +      }
 +}
 +
 +/* Must be invoked under cp->lock. */
 +static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
 +                                 int *pause)
 +{
 +      u32 val = readl(cp->regs + REG_PCS_MII_LPA);
 +      *fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
 +      *pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
 +      if (val & PCS_MII_LPA_ASYM_PAUSE)
 +              *pause |= 0x10;
 +      *spd = 1000;
 +}
 +
 +/* Must be invoked under cp->lock. */
 +static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
 +                                 int *pause)
 +{
 +      u32 val;
 +
 +      *fd = 0;
 +      *spd = 10;
 +      *pause = 0;
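 +      /* pause encoding: 0x01 = symmetric pause, 0x10 = asymmetric
 +       * (TX-only) pause; cas_set_link_modes() decodes these bits
 +       */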
 +
 +      /* use GMII registers */
 +      val = cas_phy_read(cp, MII_LPA);
 +      if (val & CAS_LPA_PAUSE)
 +              *pause = 0x01;
 +
 +      if (val & CAS_LPA_ASYM_PAUSE)
 +              *pause |= 0x10;
 +
 +      if (val & LPA_DUPLEX)
 +              *fd = 1;
 +      if (val & LPA_100)
 +              *spd = 100;
 +
 +      if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
 +              val = cas_phy_read(cp, CAS_MII_1000_STATUS);
 +              if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
 +                      *spd = 1000;
 +              if (val & CAS_LPA_1000FULL)
 +                      *fd = 1;
 +      }
 +}
 +
 +/* A link-up condition has occurred, initialize and enable the
 + * rest of the chip.
 + *
 + * Must be invoked under cp->lock.
 + */
 +static void cas_set_link_modes(struct cas *cp)
 +{
 +      u32 val;
 +      int full_duplex, speed, pause;
 +
 +      full_duplex = 0;
 +      speed = 10;
 +      pause = 0;
 +
 +      if (CAS_PHY_MII(cp->phy_type)) {
 +              cas_mif_poll(cp, 0);
 +              val = cas_phy_read(cp, MII_BMCR);
 +              if (val & BMCR_ANENABLE) {
 +                      cas_read_mii_link_mode(cp, &full_duplex, &speed,
 +                                             &pause);
 +              } else {
 +                      if (val & BMCR_FULLDPLX)
 +                              full_duplex = 1;
 +
 +                      if (val & BMCR_SPEED100)
 +                              speed = 100;
 +                      else if (val & CAS_BMCR_SPEED1000)
 +                              speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
 +                                      1000 : 100;
 +              }
 +              cas_mif_poll(cp, 1);
 +
 +      } else {
 +              val = readl(cp->regs + REG_PCS_MII_CTRL);
 +              cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
 +              if ((val & PCS_MII_AUTONEG_EN) == 0) {
 +                      if (val & PCS_MII_CTRL_DUPLEX)
 +                              full_duplex = 1;
 +              }
 +      }
 +
 +      netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
 +                 speed, full_duplex ? "full" : "half");
 +
 +      val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
 +      if (CAS_PHY_MII(cp->phy_type)) {
 +              val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
 +              if (!full_duplex)
 +                      val |= MAC_XIF_DISABLE_ECHO;
 +      }
 +      if (full_duplex)
 +              val |= MAC_XIF_FDPLX_LED;
 +      if (speed == 1000)
 +              val |= MAC_XIF_GMII_MODE;
 +      writel(val, cp->regs + REG_MAC_XIF_CFG);
 +
 +      /* deal with carrier and collision detect. */
 +      val = MAC_TX_CFG_IPG_EN;
 +      if (full_duplex) {
 +              val |= MAC_TX_CFG_IGNORE_CARRIER;
 +              val |= MAC_TX_CFG_IGNORE_COLL;
 +      } else {
 +#ifndef USE_CSMA_CD_PROTO
 +              val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
 +              val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
 +#endif
 +      }
 +      /* val now set up for REG_MAC_TX_CFG */
 +
 +      /* If gigabit and half-duplex, enable carrier extension
 +       * mode.  increase slot time to 512 bytes as well.
 +       * else, disable it and make sure slot time is 64 bytes.
 +       * also activate checksum bug workaround
 +       */
 +      if ((speed == 1000) && !full_duplex) {
 +              writel(val | MAC_TX_CFG_CARRIER_EXTEND,
 +                     cp->regs + REG_MAC_TX_CFG);
 +
 +              val = readl(cp->regs + REG_MAC_RX_CFG);
 +              val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
 +              writel(val | MAC_RX_CFG_CARRIER_EXTEND,
 +                     cp->regs + REG_MAC_RX_CFG);
 +
 +              writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
 +
 +              cp->crc_size = 4;
 +              /* minimum size gigabit frame at half duplex */
 +              cp->min_frame_size = CAS_1000MB_MIN_FRAME;
 +
 +      } else {
 +              writel(val, cp->regs + REG_MAC_TX_CFG);
 +
 +              /* checksum bug workaround. don't strip FCS when in
 +               * half-duplex mode
 +               */
 +              val = readl(cp->regs + REG_MAC_RX_CFG);
 +              if (full_duplex) {
 +                      val |= MAC_RX_CFG_STRIP_FCS;
 +                      cp->crc_size = 0;
 +                      cp->min_frame_size = CAS_MIN_MTU;
 +              } else {
 +                      val &= ~MAC_RX_CFG_STRIP_FCS;
 +                      cp->crc_size = 4;
 +                      cp->min_frame_size = CAS_MIN_FRAME;
 +              }
 +              writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
 +                     cp->regs + REG_MAC_RX_CFG);
 +              writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
 +      }
 +
 +      if (netif_msg_link(cp)) {
 +              if (pause & 0x01) {
 +                      netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
 +                                  cp->rx_fifo_size,
 +                                  cp->rx_pause_off,
 +                                  cp->rx_pause_on);
 +              } else if (pause & 0x10) {
 +                      netdev_info(cp->dev, "TX pause enabled\n");
 +              } else {
 +                      netdev_info(cp->dev, "Pause is disabled\n");
 +              }
 +      }
 +
 +      val = readl(cp->regs + REG_MAC_CTRL_CFG);
 +      val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
 +      if (pause) { /* symmetric or asymmetric pause */
 +              val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
 +              if (pause & 0x01) { /* symmetric pause */
 +                      val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
 +              }
 +      }
 +      writel(val, cp->regs + REG_MAC_CTRL_CFG);
 +      cas_start_dma(cp);
 +}
 +
 +/* Must be invoked under cp->lock. */
 +static void cas_init_hw(struct cas *cp, int restart_link)
 +{
 +      if (restart_link)
 +              cas_phy_init(cp);
 +
 +      cas_init_pause_thresholds(cp);
 +      cas_init_mac(cp);
 +      cas_init_dma(cp);
 +
 +      if (restart_link) {
 +              /* Default aneg parameters */
 +              cp->timer_ticks = 0;
 +              cas_begin_auto_negotiation(cp, NULL);
 +      } else if (cp->lstate == link_up) {
 +              cas_set_link_modes(cp);
 +              netif_carrier_on(cp->dev);
 +      }
 +}
 +
 +/* Must be invoked under cp->lock. on earlier cassini boards,
 + * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
 + * let it settle out, and then restore pci state.
 + */
 +static void cas_hard_reset(struct cas *cp)
 +{
 +      writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
 +      udelay(20);
 +      pci_restore_state(cp->pdev);
 +}
 +
 +
 +static void cas_global_reset(struct cas *cp, int blkflag)
 +{
 +      int limit;
 +
 +      /* issue a global reset. don't use RSTOUT. */
 +      if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
 +              /* For PCS, when the blkflag is set, we should set the
 +               * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
 +               * the last autonegotiation from being cleared.  We'll
 +               * need some special handling if the chip is set into a
 +               * loopback mode.
 +               */
 +              writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
 +                     cp->regs + REG_SW_RESET);
 +      } else {
 +              writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
 +      }
 +
 +      /* need to wait at least 3ms before polling register */
 +      mdelay(3);
 +
 +      limit = STOP_TRIES;
 +      while (limit-- > 0) {
 +              u32 val = readl(cp->regs + REG_SW_RESET);
 +              if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
 +                      goto done;
 +              udelay(10);
 +      }
 +      netdev_err(cp->dev, "sw reset failed\n");
 +
 +done:
 +      /* enable various BIM interrupts */
 +      writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
 +             BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
 +
 +      /* clear out pci error status mask for handled errors.
 +       * we don't deal with DMA counter overflows as they happen
 +       * all the time.
 +       */
 +      writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
 +                             PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
 +                             PCI_ERR_BIM_DMA_READ), cp->regs +
 +             REG_PCI_ERR_STATUS_MASK);
 +
 +      /* set up for MII by default to address mac rx reset timeout
 +       * issue
 +       */
 +      writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
 +}
 +
 +static void cas_reset(struct cas *cp, int blkflag)
 +{
 +      u32 val;
 +
 +      cas_mask_intr(cp);
 +      cas_global_reset(cp, blkflag);
 +      cas_mac_reset(cp);
 +      cas_entropy_reset(cp);
 +
 +      /* disable dma engines. */
 +      val = readl(cp->regs + REG_TX_CFG);
 +      val &= ~TX_CFG_DMA_EN;
 +      writel(val, cp->regs + REG_TX_CFG);
 +
 +      val = readl(cp->regs + REG_RX_CFG);
 +      val &= ~RX_CFG_DMA_EN;
 +      writel(val, cp->regs + REG_RX_CFG);
 +
 +      /* program the header parser: fall back to the standard firmware
 +       * when the target-abort workaround is active or no alternate
 +       * image is available
 +       */
 +      if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
 +          (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
 +              cas_load_firmware(cp, CAS_HP_FIRMWARE);
 +      } else {
 +              cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
 +      }
 +
 +      /* clear out error registers */
 +      spin_lock(&cp->stat_lock[N_TX_RINGS]);
 +      cas_clear_mac_err(cp);
 +      spin_unlock(&cp->stat_lock[N_TX_RINGS]);
 +}
 +
 +/* Shut down the chip, must be called with pm_mutex held.  */
 +static void cas_shutdown(struct cas *cp)
 +{
 +      unsigned long flags;
 +
 +      /* Make us not-running to avoid timers respawning */
 +      cp->hw_running = 0;
 +
 +      del_timer_sync(&cp->link_timer);
 +
 +      /* Stop the reset task */
 +#if 0
 +      while (atomic_read(&cp->reset_task_pending_mtu) ||
 +             atomic_read(&cp->reset_task_pending_spare) ||
 +             atomic_read(&cp->reset_task_pending_all))
 +              schedule();
 +
 +#else
 +      while (atomic_read(&cp->reset_task_pending))
 +              schedule();
 +#endif
 +      /* Actually stop the chip */
 +      cas_lock_all_save(cp, flags);
 +      cas_reset(cp, 0);
 +      if (cp->cas_flags & CAS_FLAG_SATURN)
 +              cas_phy_powerdown(cp);
 +      cas_unlock_all_restore(cp, flags);
 +}
 +
 +static int cas_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +
 +      if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
 +              return -EINVAL;
 +
 +      dev->mtu = new_mtu;
 +      if (!netif_running(dev) || !netif_device_present(dev))
 +              return 0;
 +
 +      /* let the reset task handle it */
 +#if 1
 +      atomic_inc(&cp->reset_task_pending);
 +      if ((cp->phy_type & CAS_PHY_SERDES)) {
 +              atomic_inc(&cp->reset_task_pending_all);
 +      } else {
 +              atomic_inc(&cp->reset_task_pending_mtu);
 +      }
 +      schedule_work(&cp->reset_task);
 +#else
 +      atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
 +                 CAS_RESET_ALL : CAS_RESET_MTU);
 +      pr_err("reset called in cas_change_mtu\n");
 +      schedule_work(&cp->reset_task);
 +#endif
 +
 +      flush_work_sync(&cp->reset_task);
 +      return 0;
 +}
 +
 +static void cas_clean_txd(struct cas *cp, int ring)
 +{
 +      struct cas_tx_desc *txd = cp->init_txds[ring];
 +      struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
 +      u64 daddr, dlen;
 +      int i, size;
 +
 +      size = TX_DESC_RINGN_SIZE(ring);
 +      for (i = 0; i < size; i++) {
 +              int frag;
 +
 +              if (skbs[i] == NULL)
 +                      continue;
 +
 +              skb = skbs[i];
 +              skbs[i] = NULL;
 +
 +              for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
 +                      int ent = i & (size - 1);
 +
 +                      /* first buffer is never a tiny buffer and so
 +                       * needs to be unmapped.
 +                       */
 +                      daddr = le64_to_cpu(txd[ent].buffer);
 +                      dlen  =  CAS_VAL(TX_DESC_BUFLEN,
 +                                       le64_to_cpu(txd[ent].control));
 +                      pci_unmap_page(cp->pdev, daddr, dlen,
 +                                     PCI_DMA_TODEVICE);
 +
 +                      if (frag != skb_shinfo(skb)->nr_frags) {
 +                              i++;
 +
 +                              /* next buffer might be a tiny buffer.
 +                               * skip past it.
 +                               */
 +                              ent = i & (size - 1);
 +                              if (cp->tx_tiny_use[ring][ent].used)
 +                                      i++;
 +                      }
 +              }
 +              dev_kfree_skb_any(skb);
 +      }
 +
 +      /* zero out tiny buf usage */
 +      memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
 +}
 +
 +/* freed on close */
 +static inline void cas_free_rx_desc(struct cas *cp, int ring)
 +{
 +      cas_page_t **page = cp->rx_pages[ring];
 +      int i, size;
 +
 +      size = RX_DESC_RINGN_SIZE(ring);
 +      for (i = 0; i < size; i++) {
 +              if (page[i]) {
 +                      cas_page_free(cp, page[i]);
 +                      page[i] = NULL;
 +              }
 +      }
 +}
 +
 +static void cas_free_rxds(struct cas *cp)
 +{
 +      int i;
 +
 +      for (i = 0; i < N_RX_DESC_RINGS; i++)
 +              cas_free_rx_desc(cp, i);
 +}
 +
 +/* Must be invoked under cp->lock. */
 +static void cas_clean_rings(struct cas *cp)
 +{
 +      int i;
 +
 +      /* need to clean all tx rings */
 +      memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
 +      memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
 +      for (i = 0; i < N_TX_RINGS; i++)
 +              cas_clean_txd(cp, i);
 +
 +      /* zero out init block */
 +      memset(cp->init_block, 0, sizeof(struct cas_init_block));
 +      cas_clean_rxds(cp);
 +      cas_clean_rxcs(cp);
 +}
 +
 +/* allocated on open */
 +static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
 +{
 +      cas_page_t **page = cp->rx_pages[ring];
 +      int size, i = 0;
 +
 +      size = RX_DESC_RINGN_SIZE(ring);
 +      for (i = 0; i < size; i++) {
 +              if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
 +                      return -1;
 +      }
 +      return 0;
 +}
 +
 +static int cas_alloc_rxds(struct cas *cp)
 +{
 +      int i;
 +
 +      for (i = 0; i < N_RX_DESC_RINGS; i++) {
 +              if (cas_alloc_rx_desc(cp, i) < 0) {
 +                      cas_free_rxds(cp);
 +                      return -1;
 +              }
 +      }
 +      return 0;
 +}
 +
 +static void cas_reset_task(struct work_struct *work)
 +{
 +      struct cas *cp = container_of(work, struct cas, reset_task);
 +#if 0
 +      int pending = atomic_read(&cp->reset_task_pending);
 +#else
 +      int pending_all = atomic_read(&cp->reset_task_pending_all);
 +      int pending_spare = atomic_read(&cp->reset_task_pending_spare);
 +      int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
 +
 +      if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
 +              /* We can have more tasks scheduled than actually
 +               * needed.
 +               */
 +              atomic_dec(&cp->reset_task_pending);
 +              return;
 +      }
 +#endif
 +      /* The link went down, we reset the ring, but keep
 +       * DMA stopped. Use this function for reset
 +       * on error as well.
 +       */
 +      if (cp->hw_running) {
 +              unsigned long flags;
 +
 +              /* Make sure we don't get interrupts or tx packets */
 +              netif_device_detach(cp->dev);
 +              cas_lock_all_save(cp, flags);
 +
 +              if (cp->opened) {
 +                      /* We call cas_spare_recover when we call cas_open,
 +                       * but we do not initialize the lists cas_spare_recover
 +                       * uses until cas_open is called.
 +                       */
 +                      cas_spare_recover(cp, GFP_ATOMIC);
 +              }
 +#if 1
 +              /* test => only pending_spare set */
 +              if (!pending_all && !pending_mtu)
 +                      goto done;
 +#else
 +              if (pending == CAS_RESET_SPARE)
 +                      goto done;
 +#endif
 +              /* when pending == CAS_RESET_ALL, the following
 +               * call to cas_init_hw will restart auto negotiation.
 +               * Setting the second argument of cas_reset to
 +               * !(pending == CAS_RESET_ALL) will set this argument
 +               * to 1 (avoiding reinitializing the PHY for the normal
 +               * PCS case) when auto negotiation is not restarted.
 +               */
 +#if 1
 +              cas_reset(cp, !(pending_all > 0));
 +              if (cp->opened)
 +                      cas_clean_rings(cp);
 +              cas_init_hw(cp, (pending_all > 0));
 +#else
 +              cas_reset(cp, !(pending == CAS_RESET_ALL));
 +              if (cp->opened)
 +                      cas_clean_rings(cp);
 +              cas_init_hw(cp, pending == CAS_RESET_ALL);
 +#endif
 +
 +done:
 +              cas_unlock_all_restore(cp, flags);
 +              netif_device_attach(cp->dev);
 +      }
 +#if 1
 +      atomic_sub(pending_all, &cp->reset_task_pending_all);
 +      atomic_sub(pending_spare, &cp->reset_task_pending_spare);
 +      atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
 +      atomic_dec(&cp->reset_task_pending);
 +#else
 +      atomic_set(&cp->reset_task_pending, 0);
 +#endif
 +}
 +
 +static void cas_link_timer(unsigned long data)
 +{
 +      struct cas *cp = (struct cas *) data;
 +      int mask, pending = 0, reset = 0;
 +      unsigned long flags;
 +
 +      if (link_transition_timeout != 0 &&
 +          cp->link_transition_jiffies_valid &&
 +          ((jiffies - cp->link_transition_jiffies) >
 +            (link_transition_timeout))) {
 +              /* One-second counter so link-down workaround doesn't
 +               * cause resets to occur so fast as to fool the switch
 +               * into thinking the link is down.
 +               */
 +              cp->link_transition_jiffies_valid = 0;
 +      }
 +
 +      if (!cp->hw_running)
 +              return;
 +
 +      spin_lock_irqsave(&cp->lock, flags);
 +      cas_lock_tx(cp);
 +      cas_entropy_gather(cp);
 +
 +      /* If the link task is still pending, we just
 +       * reschedule the link timer
 +       */
 +#if 1
 +      if (atomic_read(&cp->reset_task_pending_all) ||
 +          atomic_read(&cp->reset_task_pending_spare) ||
 +          atomic_read(&cp->reset_task_pending_mtu))
 +              goto done;
 +#else
 +      if (atomic_read(&cp->reset_task_pending))
 +              goto done;
 +#endif
 +
 +      /* check for rx cleaning */
 +      if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
 +              int i, rmask;
 +
 +              for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
 +                      rmask = CAS_FLAG_RXD_POST(i);
 +                      if ((mask & rmask) == 0)
 +                              continue;
 +
 +                      /* post_rxds will do a mod_timer */
 +                      if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
 +                              pending = 1;
 +                              continue;
 +                      }
 +                      cp->cas_flags &= ~rmask;
 +              }
 +      }
 +
 +      if (CAS_PHY_MII(cp->phy_type)) {
 +              u16 bmsr;
 +              cas_mif_poll(cp, 0);
 +              bmsr = cas_phy_read(cp, MII_BMSR);
 +              /* WTZ: Solaris driver reads this twice, but that
 +               * may be due to the PCS case and the use of a
 +               * common implementation. Read it twice here to be
 +               * safe.
 +               */
 +              bmsr = cas_phy_read(cp, MII_BMSR);
 +              cas_mif_poll(cp, 1);
 +              readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
 +              reset = cas_mii_link_check(cp, bmsr);
 +      } else {
 +              reset = cas_pcs_link_check(cp);
 +      }
 +
 +      if (reset)
 +              goto done;
 +
 +      /* check for tx state machine confusion */
 +      if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
 +              u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
 +              u32 wptr, rptr;
 +              int tlm  = CAS_VAL(MAC_SM_TLM, val);
 +
 +              if (((tlm == 0x5) || (tlm == 0x3)) &&
 +                  (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
 +                      netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
 +                                   "tx err: MAC_STATE[%08x]\n", val);
 +                      reset = 1;
 +                      goto done;
 +              }
 +
 +              val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
 +              wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
 +              rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
 +              if ((val == 0) && (wptr != rptr)) {
 +                      netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
 +                                   "tx err: TX_FIFO[%08x:%08x:%08x]\n",
 +                                   val, wptr, rptr);
 +                      reset = 1;
 +              }
 +
 +              if (reset)
 +                      cas_hard_reset(cp);
 +      }
 +
 +done:
 +      if (reset) {
 +#if 1
 +              atomic_inc(&cp->reset_task_pending);
 +              atomic_inc(&cp->reset_task_pending_all);
 +              schedule_work(&cp->reset_task);
 +#else
 +              atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
 +              pr_err("reset called in cas_link_timer\n");
 +              schedule_work(&cp->reset_task);
 +#endif
 +      }
 +
 +      if (!pending)
 +              mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 +      cas_unlock_tx(cp);
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +}
 +
 +/* tiny buffers are used to avoid target abort issues with
 + * older Cassini chips
 + */
 +static void cas_tx_tiny_free(struct cas *cp)
 +{
 +      struct pci_dev *pdev = cp->pdev;
 +      int i;
 +
 +      for (i = 0; i < N_TX_RINGS; i++) {
 +              if (!cp->tx_tiny_bufs[i])
 +                      continue;
 +
 +              pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
 +                                  cp->tx_tiny_bufs[i],
 +                                  cp->tx_tiny_dvma[i]);
 +              cp->tx_tiny_bufs[i] = NULL;
 +      }
 +}
 +
 +static int cas_tx_tiny_alloc(struct cas *cp)
 +{
 +      struct pci_dev *pdev = cp->pdev;
 +      int i;
 +
 +      for (i = 0; i < N_TX_RINGS; i++) {
 +              cp->tx_tiny_bufs[i] =
 +                      pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
 +                                           &cp->tx_tiny_dvma[i]);
 +              if (!cp->tx_tiny_bufs[i]) {
 +                      cas_tx_tiny_free(cp);
 +                      return -1;
 +              }
 +      }
 +      return 0;
 +}
 +
 +
 +static int cas_open(struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      int hw_was_up, err;
 +      unsigned long flags;
 +
 +      mutex_lock(&cp->pm_mutex);
 +
 +      hw_was_up = cp->hw_running;
 +
 +      /* The power-management mutex protects the hw_running
 +       * etc. state so it is safe to do this bit without cp->lock
 +       */
 +      if (!cp->hw_running) {
 +              /* Reset the chip */
 +              cas_lock_all_save(cp, flags);
 +              /* We set the second arg to cas_reset to zero
 +               * because cas_init_hw below will have its second
 +               * argument set to non-zero, which will force
 +               * autonegotiation to start.
 +               */
 +              cas_reset(cp, 0);
 +              cp->hw_running = 1;
 +              cas_unlock_all_restore(cp, flags);
 +      }
 +
 +      err = -ENOMEM;
 +      if (cas_tx_tiny_alloc(cp) < 0)
 +              goto err_unlock;
 +
 +      /* alloc rx descriptors */
 +      if (cas_alloc_rxds(cp) < 0)
 +              goto err_tx_tiny;
 +
 +      /* allocate spares */
 +      cas_spare_init(cp);
 +      cas_spare_recover(cp, GFP_KERNEL);
 +
 +      /* We can now request the interrupt as we know it's masked
 +       * on the controller. cassini+ has up to 4 interrupts
 +       * that can be used, but you need to do explicit pci interrupt
 +       * mapping to expose them
 +       */
 +      if (request_irq(cp->pdev->irq, cas_interrupt,
 +                      IRQF_SHARED, dev->name, (void *) dev)) {
 +              netdev_err(cp->dev, "failed to request irq !\n");
 +              err = -EAGAIN;
 +              goto err_spare;
 +      }
 +
 +#ifdef USE_NAPI
 +      napi_enable(&cp->napi);
 +#endif
 +      /* init hw */
 +      cas_lock_all_save(cp, flags);
 +      cas_clean_rings(cp);
 +      cas_init_hw(cp, !hw_was_up);
 +      cp->opened = 1;
 +      cas_unlock_all_restore(cp, flags);
 +
 +      netif_start_queue(dev);
 +      mutex_unlock(&cp->pm_mutex);
 +      return 0;
 +
 +err_spare:
 +      cas_spare_free(cp);
 +      cas_free_rxds(cp);
 +err_tx_tiny:
 +      cas_tx_tiny_free(cp);
 +err_unlock:
 +      mutex_unlock(&cp->pm_mutex);
 +      return err;
 +}
 +
 +static int cas_close(struct net_device *dev)
 +{
 +      unsigned long flags;
 +      struct cas *cp = netdev_priv(dev);
 +
 +#ifdef USE_NAPI
 +      napi_disable(&cp->napi);
 +#endif
 +      /* Make sure we don't get distracted by suspend/resume */
 +      mutex_lock(&cp->pm_mutex);
 +
 +      netif_stop_queue(dev);
 +
 +      /* Stop traffic, mark us closed */
 +      cas_lock_all_save(cp, flags);
 +      cp->opened = 0;
 +      cas_reset(cp, 0);
 +      cas_phy_init(cp);
 +      cas_begin_auto_negotiation(cp, NULL);
 +      cas_clean_rings(cp);
 +      cas_unlock_all_restore(cp, flags);
 +
 +      free_irq(cp->pdev->irq, (void *) dev);
 +      cas_spare_free(cp);
 +      cas_free_rxds(cp);
 +      cas_tx_tiny_free(cp);
 +      mutex_unlock(&cp->pm_mutex);
 +      return 0;
 +}
 +
 +static struct {
 +      const char name[ETH_GSTRING_LEN];
 +} ethtool_cassini_statnames[] = {
 +      {"collisions"},
 +      {"rx_bytes"},
 +      {"rx_crc_errors"},
 +      {"rx_dropped"},
 +      {"rx_errors"},
 +      {"rx_fifo_errors"},
 +      {"rx_frame_errors"},
 +      {"rx_length_errors"},
 +      {"rx_over_errors"},
 +      {"rx_packets"},
 +      {"tx_aborted_errors"},
 +      {"tx_bytes"},
 +      {"tx_dropped"},
 +      {"tx_errors"},
 +      {"tx_fifo_errors"},
 +      {"tx_packets"}
 +};
 +#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
 +
 +static struct {
 +      const int offsets;      /* neg. values for 2nd arg to cas_phy_read */
 +} ethtool_register_table[] = {
 +      {-MII_BMSR},
 +      {-MII_BMCR},
 +      {REG_CAWR},
 +      {REG_INF_BURST},
 +      {REG_BIM_CFG},
 +      {REG_RX_CFG},
 +      {REG_HP_CFG},
 +      {REG_MAC_TX_CFG},
 +      {REG_MAC_RX_CFG},
 +      {REG_MAC_CTRL_CFG},
 +      {REG_MAC_XIF_CFG},
 +      {REG_MIF_CFG},
 +      {REG_PCS_CFG},
 +      {REG_SATURN_PCFG},
 +      {REG_PCS_MII_STATUS},
 +      {REG_PCS_STATE_MACHINE},
 +      {REG_MAC_COLL_EXCESS},
 +      {REG_MAC_COLL_LATE}
 +};
 +#define CAS_REG_LEN   ARRAY_SIZE(ethtool_register_table)
 +#define CAS_MAX_REGS  (sizeof (u32)*CAS_REG_LEN)
 +
 +static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
 +{
 +      u8 *p;
 +      int i;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&cp->lock, flags);
 +      for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
 +              u16 hval;
 +              u32 val;
 +              if (ethtool_register_table[i].offsets < 0) {
 +                      hval = cas_phy_read(cp,
 +                                  -ethtool_register_table[i].offsets);
 +                      val = hval;
 +              } else {
 +                      val= readl(cp->regs+ethtool_register_table[i].offsets);
 +              }
 +              memcpy(p, (u8 *)&val, sizeof(u32));
 +      }
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +}
 +
 +static struct net_device_stats *cas_get_stats(struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      struct net_device_stats *stats = cp->net_stats;
 +      unsigned long flags;
 +      int i;
 +      unsigned long tmp;
 +
 +      /* we collate all of the stats into net_stats[N_TX_RINGS] */
 +      if (!cp->hw_running)
 +              return stats + N_TX_RINGS;
 +
 +      /* collect outstanding stats */
 +      /* WTZ: the Cassini spec gives these as 16 bit counters but
 +       * stored in 32-bit words.  Added a mask of 0xffff to be safe,
 +       * in case the chip somehow puts any garbage in the other bits.
 +       * Also, counter usage didn't seem to match what Adrian did
 +       * in the parts of the code that set these quantities. Made
 +       * that consistent.
 +       */
 +      spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
 +      stats[N_TX_RINGS].rx_crc_errors +=
 +        readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
 +      stats[N_TX_RINGS].rx_frame_errors +=
 +              readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
 +      stats[N_TX_RINGS].rx_length_errors +=
 +              readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
 +#if 1
 +      tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
 +              (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
 +      stats[N_TX_RINGS].tx_aborted_errors += tmp;
 +      stats[N_TX_RINGS].collisions +=
 +        tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
 +#else
 +      stats[N_TX_RINGS].tx_aborted_errors +=
 +              readl(cp->regs + REG_MAC_COLL_EXCESS);
 +      stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
 +              readl(cp->regs + REG_MAC_COLL_LATE);
 +#endif
 +      cas_clear_mac_err(cp);
 +
 +      /* saved bits that are unique to ring 0 */
 +      spin_lock(&cp->stat_lock[0]);
 +      stats[N_TX_RINGS].collisions        += stats[0].collisions;
 +      stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
 +      stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
 +      stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
 +      stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
 +      stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
 +      spin_unlock(&cp->stat_lock[0]);
 +
 +      for (i = 0; i < N_TX_RINGS; i++) {
 +              spin_lock(&cp->stat_lock[i]);
 +              stats[N_TX_RINGS].rx_length_errors +=
 +                      stats[i].rx_length_errors;
 +              stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
 +              stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
 +              stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
 +              stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
 +              stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
 +              stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
 +              stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
 +              stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
 +              stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
 +              memset(stats + i, 0, sizeof(struct net_device_stats));
 +              spin_unlock(&cp->stat_lock[i]);
 +      }
 +      spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
 +      return stats + N_TX_RINGS;
 +}
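
cas_get_stats() above folds the per-ring counters (slots 0 .. N_TX_RINGS-1) into the summary slot at index N_TX_RINGS and then zeroes each ring slot under its own lock. A minimal, driver-agnostic sketch of that collate-and-clear pattern follows; the struct and field set are illustrative, not the full struct net_device_stats, and locking is omitted.

	#include <string.h>

	struct ring_stats {
		unsigned long rx_packets;
		unsigned long tx_packets;
		unsigned long rx_errors;
	};

	/* stats[] holds nrings per-ring slots followed by one summary slot.
	 * Fold each ring into the summary and reset the ring, mirroring the
	 * accumulation loop in cas_get_stats(). */
	static void fold_ring_stats(struct ring_stats *stats, int nrings)
	{
		for (int i = 0; i < nrings; i++) {
			stats[nrings].rx_packets += stats[i].rx_packets;
			stats[nrings].tx_packets += stats[i].tx_packets;
			stats[nrings].rx_errors  += stats[i].rx_errors;
			memset(&stats[i], 0, sizeof(stats[i]));
		}
	}
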
 +
 +
 +static void cas_set_multicast(struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      u32 rxcfg, rxcfg_new;
 +      unsigned long flags;
 +      int limit = STOP_TRIES;
 +
 +      if (!cp->hw_running)
 +              return;
 +
 +      spin_lock_irqsave(&cp->lock, flags);
 +      rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
 +
 +      /* disable RX MAC and wait for completion */
 +      writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
 +      while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
 +              if (!limit--)
 +                      break;
 +              udelay(10);
 +      }
 +
 +      /* disable hash filter and wait for completion */
 +      limit = STOP_TRIES;
 +      rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
 +      writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
 +      while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
 +              if (!limit--)
 +                      break;
 +              udelay(10);
 +      }
 +
 +      /* program hash filters */
 +      cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
 +      rxcfg |= rxcfg_new;
 +      writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +}
 +
 +static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
 +      strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
 +      info->fw_version[0] = '\0';
 +      strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
 +      info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
 +              cp->casreg_len : CAS_MAX_REGS;
 +      info->n_stats = CAS_NUM_STAT_KEYS;
 +}
 +
 +static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      u16 bmcr;
 +      int full_duplex, speed, pause;
 +      unsigned long flags;
 +      enum link_state linkstate = link_up;
 +
 +      cmd->advertising = 0;
 +      cmd->supported = SUPPORTED_Autoneg;
 +      if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
 +              cmd->supported |= SUPPORTED_1000baseT_Full;
 +              cmd->advertising |= ADVERTISED_1000baseT_Full;
 +      }
 +
 +      /* Record PHY settings if HW is on. */
 +      spin_lock_irqsave(&cp->lock, flags);
 +      bmcr = 0;
 +      linkstate = cp->lstate;
 +      if (CAS_PHY_MII(cp->phy_type)) {
 +              cmd->port = PORT_MII;
 +              cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
 +                      XCVR_INTERNAL : XCVR_EXTERNAL;
 +              cmd->phy_address = cp->phy_addr;
 +              cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
 +                      ADVERTISED_10baseT_Half |
 +                      ADVERTISED_10baseT_Full |
 +                      ADVERTISED_100baseT_Half |
 +                      ADVERTISED_100baseT_Full;
 +
 +              cmd->supported |=
 +                      (SUPPORTED_10baseT_Half |
 +                       SUPPORTED_10baseT_Full |
 +                       SUPPORTED_100baseT_Half |
 +                       SUPPORTED_100baseT_Full |
 +                       SUPPORTED_TP | SUPPORTED_MII);
 +
 +              if (cp->hw_running) {
 +                      cas_mif_poll(cp, 0);
 +                      bmcr = cas_phy_read(cp, MII_BMCR);
 +                      cas_read_mii_link_mode(cp, &full_duplex,
 +                                             &speed, &pause);
 +                      cas_mif_poll(cp, 1);
 +              }
 +
 +      } else {
 +              cmd->port = PORT_FIBRE;
 +              cmd->transceiver = XCVR_INTERNAL;
 +              cmd->phy_address = 0;
 +              cmd->supported   |= SUPPORTED_FIBRE;
 +              cmd->advertising |= ADVERTISED_FIBRE;
 +
 +              if (cp->hw_running) {
 +                      /* pcs uses the same bits as mii */
 +                      bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
 +                      cas_read_pcs_link_mode(cp, &full_duplex,
 +                                             &speed, &pause);
 +              }
 +      }
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +
 +      if (bmcr & BMCR_ANENABLE) {
 +              cmd->advertising |= ADVERTISED_Autoneg;
 +              cmd->autoneg = AUTONEG_ENABLE;
 +              ethtool_cmd_speed_set(cmd, ((speed == 10) ?
 +                                          SPEED_10 :
 +                                          ((speed == 1000) ?
 +                                           SPEED_1000 : SPEED_100)));
 +              cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
 +      } else {
 +              cmd->autoneg = AUTONEG_DISABLE;
 +              ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
 +                                          SPEED_1000 :
 +                                          ((bmcr & BMCR_SPEED100) ?
 +                                           SPEED_100 : SPEED_10)));
 +              cmd->duplex =
 +                      (bmcr & BMCR_FULLDPLX) ?
 +                      DUPLEX_FULL : DUPLEX_HALF;
 +      }
 +      if (linkstate != link_up) {
 +              /* Force these to "unknown" if the link is not up and
  +               * autonegotiation is enabled. We can set the link
 +               * speed to 0, but not cmd->duplex,
 +               * because its legal values are 0 and 1.  Ethtool will
 +               * print the value reported in parentheses after the
 +               * word "Unknown" for unrecognized values.
 +               *
 +               * If in forced mode, we report the speed and duplex
 +               * settings that we configured.
 +               */
 +              if (cp->link_cntl & BMCR_ANENABLE) {
 +                      ethtool_cmd_speed_set(cmd, 0);
 +                      cmd->duplex = 0xff;
 +              } else {
 +                      ethtool_cmd_speed_set(cmd, SPEED_10);
 +                      if (cp->link_cntl & BMCR_SPEED100) {
 +                              ethtool_cmd_speed_set(cmd, SPEED_100);
 +                      } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
 +                              ethtool_cmd_speed_set(cmd, SPEED_1000);
 +                      }
  +                      cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
 +                              DUPLEX_FULL : DUPLEX_HALF;
 +              }
 +      }
 +      return 0;
 +}
 +
 +static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      unsigned long flags;
 +      u32 speed = ethtool_cmd_speed(cmd);
 +
 +      /* Verify the settings we care about. */
 +      if (cmd->autoneg != AUTONEG_ENABLE &&
 +          cmd->autoneg != AUTONEG_DISABLE)
 +              return -EINVAL;
 +
 +      if (cmd->autoneg == AUTONEG_DISABLE &&
 +          ((speed != SPEED_1000 &&
 +            speed != SPEED_100 &&
 +            speed != SPEED_10) ||
 +           (cmd->duplex != DUPLEX_HALF &&
 +            cmd->duplex != DUPLEX_FULL)))
 +              return -EINVAL;
 +
 +      /* Apply settings and restart link process. */
 +      spin_lock_irqsave(&cp->lock, flags);
 +      cas_begin_auto_negotiation(cp, cmd);
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +      return 0;
 +}
 +
 +static int cas_nway_reset(struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      unsigned long flags;
 +
 +      if ((cp->link_cntl & BMCR_ANENABLE) == 0)
 +              return -EINVAL;
 +
 +      /* Restart link process. */
 +      spin_lock_irqsave(&cp->lock, flags);
 +      cas_begin_auto_negotiation(cp, NULL);
 +      spin_unlock_irqrestore(&cp->lock, flags);
 +
 +      return 0;
 +}
 +
 +static u32 cas_get_link(struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      return cp->lstate == link_up;
 +}
 +
 +static u32 cas_get_msglevel(struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      return cp->msg_enable;
 +}
 +
 +static void cas_set_msglevel(struct net_device *dev, u32 value)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      cp->msg_enable = value;
 +}
 +
 +static int cas_get_regs_len(struct net_device *dev)
 +{
 +      struct cas *cp = netdev_priv(dev);
  +      return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
 +}
 +
 +static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 +                           void *p)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      regs->version = 0;
 +      /* cas_read_regs handles locks (cp->lock).  */
 +      cas_read_regs(cp, p, regs->len / sizeof(u32));
 +}
 +
 +static int cas_get_sset_count(struct net_device *dev, int sset)
 +{
 +      switch (sset) {
 +      case ETH_SS_STATS:
 +              return CAS_NUM_STAT_KEYS;
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
 +static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 +{
  +      memcpy(data, &ethtool_cassini_statnames,
  +             CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
 +}
 +
 +static void cas_get_ethtool_stats(struct net_device *dev,
 +                                    struct ethtool_stats *estats, u64 *data)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      struct net_device_stats *stats = cas_get_stats(cp->dev);
 +      int i = 0;
 +      data[i++] = stats->collisions;
 +      data[i++] = stats->rx_bytes;
 +      data[i++] = stats->rx_crc_errors;
 +      data[i++] = stats->rx_dropped;
 +      data[i++] = stats->rx_errors;
 +      data[i++] = stats->rx_fifo_errors;
 +      data[i++] = stats->rx_frame_errors;
 +      data[i++] = stats->rx_length_errors;
 +      data[i++] = stats->rx_over_errors;
 +      data[i++] = stats->rx_packets;
 +      data[i++] = stats->tx_aborted_errors;
 +      data[i++] = stats->tx_bytes;
 +      data[i++] = stats->tx_dropped;
 +      data[i++] = stats->tx_errors;
 +      data[i++] = stats->tx_fifo_errors;
 +      data[i++] = stats->tx_packets;
 +      BUG_ON(i != CAS_NUM_STAT_KEYS);
 +}
 +
 +static const struct ethtool_ops cas_ethtool_ops = {
 +      .get_drvinfo            = cas_get_drvinfo,
 +      .get_settings           = cas_get_settings,
 +      .set_settings           = cas_set_settings,
 +      .nway_reset             = cas_nway_reset,
 +      .get_link               = cas_get_link,
 +      .get_msglevel           = cas_get_msglevel,
 +      .set_msglevel           = cas_set_msglevel,
 +      .get_regs_len           = cas_get_regs_len,
 +      .get_regs               = cas_get_regs,
 +      .get_sset_count         = cas_get_sset_count,
 +      .get_strings            = cas_get_strings,
 +      .get_ethtool_stats      = cas_get_ethtool_stats,
 +};
 +
 +static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 +{
 +      struct cas *cp = netdev_priv(dev);
 +      struct mii_ioctl_data *data = if_mii(ifr);
 +      unsigned long flags;
 +      int rc = -EOPNOTSUPP;
 +
 +      /* Hold the PM mutex while doing ioctl's or we may collide
 +       * with open/close and power management and oops.
 +       */
 +      mutex_lock(&cp->pm_mutex);
 +      switch (cmd) {
 +      case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
 +              data->phy_id = cp->phy_addr;
 +              /* Fallthrough... */
 +
 +      case SIOCGMIIREG:               /* Read MII PHY register. */
 +              spin_lock_irqsave(&cp->lock, flags);
 +              cas_mif_poll(cp, 0);
 +              data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
 +              cas_mif_poll(cp, 1);
 +              spin_unlock_irqrestore(&cp->lock, flags);
 +              rc = 0;
 +              break;
 +
 +      case SIOCSMIIREG:               /* Write MII PHY register. */
 +              spin_lock_irqsave(&cp->lock, flags);
 +              cas_mif_poll(cp, 0);
 +              rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
 +              cas_mif_poll(cp, 1);
 +              spin_unlock_irqrestore(&cp->lock, flags);
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      mutex_unlock(&cp->pm_mutex);
 +      return rc;
 +}
 +
 +/* When this chip sits underneath an Intel 31154 bridge, it is the
 + * only subordinate device and we can tweak the bridge settings to
 + * reflect that fact.
 + */
 +static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
 +{
 +      struct pci_dev *pdev = cas_pdev->bus->self;
 +      u32 val;
 +
 +      if (!pdev)
 +              return;
 +
 +      if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
 +              return;
 +
 +      /* Clear bit 10 (Bus Parking Control) in the Secondary
 +       * Arbiter Control/Status Register which lives at offset
 +       * 0x41.  Using a 32-bit word read/modify/write at 0x40
 +       * is much simpler so that's how we do this.
 +       */
 +      pci_read_config_dword(pdev, 0x40, &val);
 +      val &= ~0x00040000;
 +      pci_write_config_dword(pdev, 0x40, val);
 +
 +      /* Max out the Multi-Transaction Timer settings since
 +       * Cassini is the only device present.
 +       *
 +       * The register is 16-bit and lives at 0x50.  When the
 +       * settings are enabled, it extends the GRANT# signal
 +       * for a requestor after a transaction is complete.  This
 +       * allows the next request to run without first needing
 +       * to negotiate the GRANT# signal back.
 +       *
 +       * Bits 12:10 define the grant duration:
 +       *
 +       *      1       --      16 clocks
 +       *      2       --      32 clocks
 +       *      3       --      64 clocks
 +       *      4       --      128 clocks
 +       *      5       --      256 clocks
 +       *
 +       * All other values are illegal.
 +       *
 +       * Bits 09:00 define which REQ/GNT signal pairs get the
 +       * GRANT# signal treatment.  We set them all.
 +       */
 +      pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
 +
  +      /* The Read Prefetch Policy register is 16-bit and sits at
 +       * offset 0x52.  It enables a "smart" pre-fetch policy.  We
 +       * enable it and max out all of the settings since only one
 +       * device is sitting underneath and thus bandwidth sharing is
 +       * not an issue.
 +       *
  +       * The register has several 3-bit fields, which indicate a
 +       * multiplier applied to the base amount of prefetching the
 +       * chip would do.  These fields are at:
 +       *
 +       *      15:13   ---     ReRead Primary Bus
 +       *      12:10   ---     FirstRead Primary Bus
 +       *      09:07   ---     ReRead Secondary Bus
 +       *      06:04   ---     FirstRead Secondary Bus
 +       *
 +       * Bits 03:00 control which REQ/GNT pairs the prefetch settings
 +       * get enabled on.  Bit 3 is a grouped enabler which controls
 +       * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
 +       * the individual REQ/GNT pairs [2:0].
 +       */
 +      pci_write_config_word(pdev, 0x52,
 +                            (0x7 << 13) |
 +                            (0x7 << 10) |
 +                            (0x7 <<  7) |
 +                            (0x7 <<  4) |
 +                            (0xf <<  0));
 +
 +      /* Force cacheline size to 0x8 */
 +      pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
 +
 +      /* Force latency timer to maximum setting so Cassini can
 +       * sit on the bus as long as it likes.
 +       */
 +      pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
 +}
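
For clarity, the two 16-bit values written above can be checked against the bit-field comments with a small stand-alone snippet (host-side arithmetic only, not driver code): the Multi-Transaction Timer write encodes to 0x17ff and the Read Prefetch Policy write to 0xffff.

	#include <stdio.h>

	int main(void)
	{
		/* 0x50: grant duration 5 (256 clocks) in bits 12:10, all ten
		 * REQ/GNT pairs enabled in bits 9:0. */
		unsigned short mtt = (5 << 10) | 0x3ff;

		/* 0x52: all four 3-bit prefetch multipliers maxed (0x7), and
		 * prefetch enabled for every REQ/GNT pair in bits 3:0. */
		unsigned short rpp = (0x7 << 13) | (0x7 << 10) |
				     (0x7 <<  7) | (0x7 <<  4) | 0xf;

		printf("MTT = 0x%04x, RPP = 0x%04x\n", mtt, rpp); /* 0x17ff, 0xffff */
		return 0;
	}
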
 +
 +static const struct net_device_ops cas_netdev_ops = {
 +      .ndo_open               = cas_open,
 +      .ndo_stop               = cas_close,
 +      .ndo_start_xmit         = cas_start_xmit,
 +      .ndo_get_stats          = cas_get_stats,
 +      .ndo_set_rx_mode        = cas_set_multicast,
 +      .ndo_do_ioctl           = cas_ioctl,
 +      .ndo_tx_timeout         = cas_tx_timeout,
 +      .ndo_change_mtu         = cas_change_mtu,
 +      .ndo_set_mac_address    = eth_mac_addr,
 +      .ndo_validate_addr      = eth_validate_addr,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = cas_netpoll,
 +#endif
 +};
 +
 +static int __devinit cas_init_one(struct pci_dev *pdev,
 +                                const struct pci_device_id *ent)
 +{
 +      static int cas_version_printed = 0;
 +      unsigned long casreg_len;
 +      struct net_device *dev;
 +      struct cas *cp;
 +      int i, err, pci_using_dac;
 +      u16 pci_cmd;
 +      u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
 +
 +      if (cas_version_printed++ == 0)
 +              pr_info("%s", version);
 +
 +      err = pci_enable_device(pdev);
 +      if (err) {
 +              dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 +              return err;
 +      }
 +
 +      if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 +              dev_err(&pdev->dev, "Cannot find proper PCI device "
 +                     "base address, aborting\n");
 +              err = -ENODEV;
 +              goto err_out_disable_pdev;
 +      }
 +
 +      dev = alloc_etherdev(sizeof(*cp));
 +      if (!dev) {
 +              dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
 +              err = -ENOMEM;
 +              goto err_out_disable_pdev;
 +      }
 +      SET_NETDEV_DEV(dev, &pdev->dev);
 +
 +      err = pci_request_regions(pdev, dev->name);
 +      if (err) {
 +              dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 +              goto err_out_free_netdev;
 +      }
 +      pci_set_master(pdev);
 +
 +      /* we must always turn on parity response or else parity
 +       * doesn't get generated properly. disable SERR/PERR as well.
 +       * in addition, we want to turn MWI on.
 +       */
 +      pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
 +      pci_cmd &= ~PCI_COMMAND_SERR;
 +      pci_cmd |= PCI_COMMAND_PARITY;
 +      pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
 +      if (pci_try_set_mwi(pdev))
 +              pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
 +
 +      cas_program_bridge(pdev);
 +
 +      /*
 +       * On some architectures, the default cache line size set
  +       * by pci_try_set_mwi reduces performance.  We have to increase
 +       * it for this case.  To start, we'll print some configuration
 +       * data.
 +       */
 +#if 1
 +      pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
 +                           &orig_cacheline_size);
 +      if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
 +              cas_cacheline_size =
 +                      (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
 +                      CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
 +              if (pci_write_config_byte(pdev,
 +                                        PCI_CACHE_LINE_SIZE,
 +                                        cas_cacheline_size)) {
 +                      dev_err(&pdev->dev, "Could not set PCI cache "
 +                             "line size\n");
 +                      goto err_write_cacheline;
 +              }
 +      }
 +#endif
 +
 +
 +      /* Configure DMA attributes. */
 +      if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 +              pci_using_dac = 1;
 +              err = pci_set_consistent_dma_mask(pdev,
 +                                                DMA_BIT_MASK(64));
 +              if (err < 0) {
 +                      dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
 +                             "for consistent allocations\n");
 +                      goto err_out_free_res;
 +              }
 +
 +      } else {
 +              err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +              if (err) {
 +                      dev_err(&pdev->dev, "No usable DMA configuration, "
 +                             "aborting\n");
 +                      goto err_out_free_res;
 +              }
 +              pci_using_dac = 0;
 +      }
 +
 +      casreg_len = pci_resource_len(pdev, 0);
 +
 +      cp = netdev_priv(dev);
 +      cp->pdev = pdev;
 +#if 1
 +      /* A value of 0 indicates we never explicitly set it */
  +      cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
 +#endif
 +      cp->dev = dev;
 +      cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
 +        cassini_debug;
 +
 +#if defined(CONFIG_SPARC)
 +      cp->of_node = pci_device_to_OF_node(pdev);
 +#endif
 +
 +      cp->link_transition = LINK_TRANSITION_UNKNOWN;
 +      cp->link_transition_jiffies_valid = 0;
 +
 +      spin_lock_init(&cp->lock);
 +      spin_lock_init(&cp->rx_inuse_lock);
 +      spin_lock_init(&cp->rx_spare_lock);
 +      for (i = 0; i < N_TX_RINGS; i++) {
 +              spin_lock_init(&cp->stat_lock[i]);
 +              spin_lock_init(&cp->tx_lock[i]);
 +      }
 +      spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
 +      mutex_init(&cp->pm_mutex);
 +
 +      init_timer(&cp->link_timer);
 +      cp->link_timer.function = cas_link_timer;
 +      cp->link_timer.data = (unsigned long) cp;
 +
 +#if 1
 +      /* Just in case the implementation of atomic operations
  +       * changes so that an explicit initialization is necessary.
 +       */
 +      atomic_set(&cp->reset_task_pending, 0);
 +      atomic_set(&cp->reset_task_pending_all, 0);
 +      atomic_set(&cp->reset_task_pending_spare, 0);
 +      atomic_set(&cp->reset_task_pending_mtu, 0);
 +#endif
 +      INIT_WORK(&cp->reset_task, cas_reset_task);
 +
 +      /* Default link parameters */
 +      if (link_mode >= 0 && link_mode < 6)
 +              cp->link_cntl = link_modes[link_mode];
 +      else
 +              cp->link_cntl = BMCR_ANENABLE;
 +      cp->lstate = link_down;
 +      cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 +      netif_carrier_off(cp->dev);
 +      cp->timer_ticks = 0;
 +
 +      /* give us access to cassini registers */
 +      cp->regs = pci_iomap(pdev, 0, casreg_len);
 +      if (!cp->regs) {
 +              dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 +              goto err_out_free_res;
 +      }
 +      cp->casreg_len = casreg_len;
 +
 +      pci_save_state(pdev);
 +      cas_check_pci_invariants(cp);
 +      cas_hard_reset(cp);
 +      cas_reset(cp, 0);
 +      if (cas_check_invariants(cp))
 +              goto err_out_iounmap;
 +      if (cp->cas_flags & CAS_FLAG_SATURN)
 +              if (cas_saturn_firmware_init(cp))
 +                      goto err_out_iounmap;
 +
 +      cp->init_block = (struct cas_init_block *)
 +              pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
 +                                   &cp->block_dvma);
 +      if (!cp->init_block) {
 +              dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
 +              goto err_out_iounmap;
 +      }
 +
 +      for (i = 0; i < N_TX_RINGS; i++)
 +              cp->init_txds[i] = cp->init_block->txds[i];
 +
 +      for (i = 0; i < N_RX_DESC_RINGS; i++)
 +              cp->init_rxds[i] = cp->init_block->rxds[i];
 +
 +      for (i = 0; i < N_RX_COMP_RINGS; i++)
 +              cp->init_rxcs[i] = cp->init_block->rxcs[i];
 +
 +      for (i = 0; i < N_RX_FLOWS; i++)
 +              skb_queue_head_init(&cp->rx_flows[i]);
 +
 +      dev->netdev_ops = &cas_netdev_ops;
 +      dev->ethtool_ops = &cas_ethtool_ops;
 +      dev->watchdog_timeo = CAS_TX_TIMEOUT;
 +
 +#ifdef USE_NAPI
 +      netif_napi_add(dev, &cp->napi, cas_poll, 64);
 +#endif
 +      dev->irq = pdev->irq;
 +      dev->dma = 0;
 +
 +      /* Cassini features. */
 +      if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
 +              dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 +
 +      if (pci_using_dac)
 +              dev->features |= NETIF_F_HIGHDMA;
 +
 +      if (register_netdev(dev)) {
 +              dev_err(&pdev->dev, "Cannot register net device, aborting\n");
 +              goto err_out_free_consistent;
 +      }
 +
 +      i = readl(cp->regs + REG_BIM_CFG);
 +      netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
 +                  (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
 +                  (i & BIM_CFG_32BIT) ? "32" : "64",
 +                  (i & BIM_CFG_66MHZ) ? "66" : "33",
 +                  (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
 +                  dev->dev_addr);
 +
 +      pci_set_drvdata(pdev, dev);
 +      cp->hw_running = 1;
 +      cas_entropy_reset(cp);
 +      cas_phy_init(cp);
 +      cas_begin_auto_negotiation(cp, NULL);
 +      return 0;
 +
 +err_out_free_consistent:
 +      pci_free_consistent(pdev, sizeof(struct cas_init_block),
 +                          cp->init_block, cp->block_dvma);
 +
 +err_out_iounmap:
 +      mutex_lock(&cp->pm_mutex);
 +      if (cp->hw_running)
 +              cas_shutdown(cp);
 +      mutex_unlock(&cp->pm_mutex);
 +
 +      pci_iounmap(pdev, cp->regs);
 +
 +
 +err_out_free_res:
 +      pci_release_regions(pdev);
 +
 +err_write_cacheline:
 +      /* Try to restore it in case the error occurred after we
 +       * set it.
 +       */
 +      pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
 +
 +err_out_free_netdev:
 +      free_netdev(dev);
 +
 +err_out_disable_pdev:
 +      pci_disable_device(pdev);
 +      pci_set_drvdata(pdev, NULL);
 +      return -ENODEV;
 +}
 +
 +static void __devexit cas_remove_one(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct cas *cp;
 +      if (!dev)
 +              return;
 +
 +      cp = netdev_priv(dev);
 +      unregister_netdev(dev);
 +
 +      if (cp->fw_data)
 +              vfree(cp->fw_data);
 +
 +      mutex_lock(&cp->pm_mutex);
 +      cancel_work_sync(&cp->reset_task);
 +      if (cp->hw_running)
 +              cas_shutdown(cp);
 +      mutex_unlock(&cp->pm_mutex);
 +
 +#if 1
 +      if (cp->orig_cacheline_size) {
 +              /* Restore the cache line size if we had modified
 +               * it.
 +               */
 +              pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
 +                                    cp->orig_cacheline_size);
 +      }
 +#endif
 +      pci_free_consistent(pdev, sizeof(struct cas_init_block),
 +                          cp->init_block, cp->block_dvma);
 +      pci_iounmap(pdev, cp->regs);
 +      free_netdev(dev);
 +      pci_release_regions(pdev);
 +      pci_disable_device(pdev);
 +      pci_set_drvdata(pdev, NULL);
 +}
 +
 +#ifdef CONFIG_PM
 +static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct cas *cp = netdev_priv(dev);
 +      unsigned long flags;
 +
 +      mutex_lock(&cp->pm_mutex);
 +
 +      /* If the driver is opened, we stop the DMA */
 +      if (cp->opened) {
 +              netif_device_detach(dev);
 +
 +              cas_lock_all_save(cp, flags);
 +
 +              /* We can set the second arg of cas_reset to 0
 +               * because on resume, we'll call cas_init_hw with
 +               * its second arg set so that autonegotiation is
 +               * restarted.
 +               */
 +              cas_reset(cp, 0);
 +              cas_clean_rings(cp);
 +              cas_unlock_all_restore(cp, flags);
 +      }
 +
 +      if (cp->hw_running)
 +              cas_shutdown(cp);
 +      mutex_unlock(&cp->pm_mutex);
 +
 +      return 0;
 +}
 +
 +static int cas_resume(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct cas *cp = netdev_priv(dev);
 +
 +      netdev_info(dev, "resuming\n");
 +
 +      mutex_lock(&cp->pm_mutex);
 +      cas_hard_reset(cp);
 +      if (cp->opened) {
 +              unsigned long flags;
 +              cas_lock_all_save(cp, flags);
 +              cas_reset(cp, 0);
 +              cp->hw_running = 1;
 +              cas_clean_rings(cp);
 +              cas_init_hw(cp, 1);
 +              cas_unlock_all_restore(cp, flags);
 +
 +              netif_device_attach(dev);
 +      }
 +      mutex_unlock(&cp->pm_mutex);
 +      return 0;
 +}
 +#endif /* CONFIG_PM */
 +
 +static struct pci_driver cas_driver = {
 +      .name           = DRV_MODULE_NAME,
 +      .id_table       = cas_pci_tbl,
 +      .probe          = cas_init_one,
 +      .remove         = __devexit_p(cas_remove_one),
 +#ifdef CONFIG_PM
 +      .suspend        = cas_suspend,
 +      .resume         = cas_resume
 +#endif
 +};
 +
 +static int __init cas_init(void)
 +{
 +      if (linkdown_timeout > 0)
 +              link_transition_timeout = linkdown_timeout * HZ;
 +      else
 +              link_transition_timeout = 0;
 +
 +      return pci_register_driver(&cas_driver);
 +}
 +
 +static void __exit cas_cleanup(void)
 +{
 +      pci_unregister_driver(&cas_driver);
 +}
 +
 +module_init(cas_init);
 +module_exit(cas_cleanup);
Simple merge
Simple merge
Simple merge
@@@ -925,8 -771,9 +925,10 @@@ void iwl_tx_cmd_complete(struct iwl_tra
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];
  
 -      iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+       txq->time_stamp = jiffies;
 +      iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
 +                       DMA_BIDIRECTIONAL);
  
        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
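
The single-line addition in the hunk above (txq->time_stamp = jiffies) refreshes the command queue's activity timestamp when a command completes, which is what the driver's stuck-queue watchdog compares against. A hedged sketch of that kind of check follows; the helper name and the wd_timeout parameter are illustrative assumptions, not the exact iwlwifi code.

	#include <linux/jiffies.h>
	#include <linux/types.h>

	/* Illustrative only: how a refreshed per-queue time_stamp typically
	 * feeds a watchdog.  A zero wd_timeout disables the check. */
	static bool queue_looks_stuck(unsigned long time_stamp,
				      unsigned long wd_timeout)
	{
		return wd_timeout &&
		       time_after(jiffies, time_stamp + wd_timeout);
	}
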
@@@ -529,13 -538,11 +538,12 @@@ static void rt2800usb_txdone(struct rt2
                        entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
                        if (rt2800usb_txdone_entry_check(entry, reg))
                                break;
+                       entry = NULL;
                }
  
-               if (!entry || rt2x00queue_empty(queue))
-                       break;
-               rt2800_txdone_entry(entry, reg,
-                                   rt2800usb_get_txwi(entry));
+               if (entry)
 -                      rt2800_txdone_entry(entry, reg);
++                      rt2800_txdone_entry(entry, reg,
++                                          rt2800usb_get_txwi(entry));
        }
  }
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc net/core/dev.c
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -898,10 -909,9 +911,9 @@@ int pskb_expand_head(struct sk_buff *sk
                if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                        if (skb_copy_ubufs(skb, gfp_mask))
                                goto nofrags;
-                       skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
                }
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 -                      get_page(skb_shinfo(skb)->frags[i].page);
 +                      skb_frag_ref(skb, i);
  
                if (skb_has_frag_list(skb))
                        skb_clone_fraglist(skb);
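
The second change in the hunk above swaps the open-coded get_page() on each fragment's page for the skb_frag_ref() accessor from the paged-frag API rework. Conceptually the accessor is a thin wrapper over the same page refcount bump; the sketch below is a simplified rendering of that idea, not a verbatim copy of the include/linux/skbuff.h helpers.

	#include <linux/skbuff.h>
	#include <linux/mm.h>

	/* Simplified sketch: take a reference on the page backing fragment f
	 * of skb, which is what the skb_frag_ref(skb, i) calls above do. */
	static inline void frag_ref_sketch(struct sk_buff *skb, int f)
	{
		get_page(skb_frag_page(&skb_shinfo(skb)->frags[f]));
	}
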
diff --cc net/ipv4/igmp.c
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc net/ipv6/raw.c
Simple merge
Simple merge
diff --cc net/ipv6/udp.c
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc net/socket.c
Simple merge
Simple merge
Simple merge