Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>
          Thu, 19 Jul 2012 18:17:30 +0000 (11:17 -0700)
committer David S. Miller <davem@davemloft.net>
          Thu, 19 Jul 2012 18:17:30 +0000 (11:17 -0700)
Conflicts:
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

13 files changed:
Documentation/devicetree/bindings/net/fsl-fec.txt
MAINTAINERS
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/scsi/bnx2i/bnx2i.h
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
net/caif/caif_dev.c
net/core/dev.c
net/core/skbuff.c
net/sctp/input.c
net/sctp/socket.c
security/selinux/hooks.c

@@@ -7,14 -7,10 +7,14 @@@ Required properties
  - phy-mode : String, operation mode of the PHY interface.
    Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
    "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
 -- phy-reset-gpios : Should specify the gpio for phy reset
  
  Optional properties:
  - local-mac-address : 6 bytes, mac address
 +- phy-reset-gpios : Should specify the gpio for phy reset
 +- phy-reset-duration : Reset duration in milliseconds.  Should be present
 +  only if the property "phy-reset-gpios" is available.  If the property is
 +  missing, the duration defaults to 1 millisecond.  Values greater than
 +  1000 are invalid and 1 millisecond will be used instead.
  
  Example:
  
@@@ -23,6 -19,6 +23,6 @@@ ethernet@83fec000 
        reg = <0x83fec000 0x4000>;
        interrupts = <87>;
        phy-mode = "mii";
-       phy-reset-gpios = <&gpio1 14 0>; /* GPIO2_14 */
+       phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
        local-mac-address = [00 04 9F 01 1B B9];
  };
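
For illustration only, here is a minimal sketch of how a driver could consume
the two optional properties described above, using the standard OF and GPIO
helpers; it is not the actual fec driver code, and the function name is
hypothetical.  It applies the clamping rule from the binding: a missing or
out-of-range "phy-reset-duration" falls back to 1 millisecond.

#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/delay.h>

static void example_phy_reset(struct device_node *np)
{
	u32 msec = 1;			/* default when the property is absent */
	int gpio;

	gpio = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (!gpio_is_valid(gpio))
		return;			/* phy-reset-gpios is optional */

	of_property_read_u32(np, "phy-reset-duration", &msec);
	if (msec > 1000)		/* values above 1000 are invalid */
		msec = 1;

	if (gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "phy-reset"))
		return;

	msleep(msec);
	gpio_set_value(gpio, 1);	/* release the PHY from reset */
	gpio_free(gpio);
}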
diff --combined MAINTAINERS
@@@ -329,7 -329,7 +329,7 @@@ F: drivers/hwmon/adm1029.
  
  ADM8211 WIRELESS DRIVER
  L:    linux-wireless@vger.kernel.org
 -W:    http://linuxwireless.org/
 +W:    http://wireless.kernel.org/
  S:    Orphan
  F:    drivers/net/wireless/adm8211.*
  
@@@ -1423,7 -1423,7 +1423,7 @@@ B43 WIRELESS DRIVE
  M:    Stefano Brivio <stefano.brivio@polimi.it>
  L:    linux-wireless@vger.kernel.org
  L:    b43-dev@lists.infradead.org
 -W:    http://linuxwireless.org/en/users/Drivers/b43
 +W:    http://wireless.kernel.org/en/users/Drivers/b43
  S:    Maintained
  F:    drivers/net/wireless/b43/
  
@@@ -1432,7 -1432,7 +1432,7 @@@ M:      Larry Finger <Larry.Finger@lwfinger.
  M:    Stefano Brivio <stefano.brivio@polimi.it>
  L:    linux-wireless@vger.kernel.org
  L:    b43-dev@lists.infradead.org
 -W:    http://linuxwireless.org/en/users/Drivers/b43
 +W:    http://wireless.kernel.org/en/users/Drivers/b43
  S:    Maintained
  F:    drivers/net/wireless/b43legacy/
  
@@@ -1595,7 -1595,6 +1595,7 @@@ M:      Arend van Spriel <arend@broadcom.com
  M:    Franky (Zhenhui) Lin <frankyl@broadcom.com>
  M:    Kan Yan <kanyan@broadcom.com>
  L:    linux-wireless@vger.kernel.org
 +L:    brcm80211-dev-list@broadcom.com
  S:    Supported
  F:    drivers/net/wireless/brcm80211/
  
@@@ -3434,13 -3433,14 +3434,14 @@@ S:   Supporte
  F:    drivers/idle/i7300_idle.c
  
  IEEE 802.15.4 SUBSYSTEM
+ M:    Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
  M:    Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- M:    Sergey Lapin <slapin@ossfans.org>
  L:    linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
  W:    http://apps.sourceforge.net/trac/linux-zigbee
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
  S:    Maintained
  F:    net/ieee802154/
+ F:    net/mac802154/
  F:    drivers/ieee802154/
  
  IIO SUBSYSTEM AND DRIVERS
@@@ -3661,6 -3661,14 +3662,6 @@@ T:     git git://git.kernel.org/pub/scm/lin
  S:    Supported
  F:    drivers/net/wireless/iwlwifi/
  
 -INTEL WIRELESS MULTICOMM 3200 WIFI (iwmc3200wifi)
 -M:    Samuel Ortiz <samuel.ortiz@intel.com>
 -M:    Intel Linux Wireless <ilw@linux.intel.com>
 -L:    linux-wireless@vger.kernel.org
 -S:    Supported
 -W:    http://wireless.kernel.org/en/users/Drivers/iwmc3200wifi
 -F:    drivers/net/wireless/iwmc3200wifi/
 -
  INTEL MANAGEMENT ENGINE (mei)
  M:    Tomas Winkler <tomas.winkler@intel.com>
  L:    linux-kernel@vger.kernel.org
@@@ -4344,7 -4352,7 +4345,7 @@@ F:      arch/m68k/hp300
  MAC80211
  M:    Johannes Berg <johannes@sipsolutions.net>
  L:    linux-wireless@vger.kernel.org
 -W:    http://linuxwireless.org/
 +W:    http://wireless.kernel.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
  S:    Maintained
@@@ -4356,7 -4364,7 +4357,7 @@@ MAC80211 PID RATE CONTRO
  M:    Stefano Brivio <stefano.brivio@polimi.it>
  M:    Mattias Nissler <mattias.nissler@gmx.de>
  L:    linux-wireless@vger.kernel.org
 -W:    http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
 +W:    http://wireless.kernel.org/en/developers/Documentation/mac80211/RateControl/PID
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
  S:    Maintained
@@@ -4630,6 -4638,8 +4631,6 @@@ F:      net/sched/sch_netem.
  NETERION 10GbE DRIVERS (s2io/vxge)
  M:    Jon Mason <jdmason@kudzu.us>
  L:    netdev@vger.kernel.org
 -W:    http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
 -W:    http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
  S:    Supported
  F:    Documentation/networking/s2io.txt
  F:    Documentation/networking/vxge.txt
@@@ -4848,6 -4858,7 +4849,7 @@@ M:      Kevin Hilman <khilman@ti.com
  L:    linux-omap@vger.kernel.org
  S:    Maintained
  F:    arch/arm/*omap*/*pm*
+ F:    drivers/cpufreq/omap-cpufreq.c
  
  OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
  M:    Rajendra Nayak <rnayak@ti.com>
@@@ -5039,7 -5050,7 +5041,7 @@@ F:      fs/ocfs2
  
  ORINOCO DRIVER
  L:    linux-wireless@vger.kernel.org
 -W:    http://linuxwireless.org/en/users/Drivers/orinoco
 +W:    http://wireless.kernel.org/en/users/Drivers/orinoco
  W:    http://www.nongnu.org/orinoco/
  S:    Orphan
  F:    drivers/net/wireless/orinoco/
@@@ -5554,7 -5565,7 +5556,7 @@@ F:      Documentation/networking/LICENSE.qla
  F:    drivers/net/ethernet/qlogic/qla3xxx.*
  
  QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
- M:    Anirban Chakraborty <anirban.chakraborty@qlogic.com>
+ M:    Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
  M:    Sony Chacko <sony.chacko@qlogic.com>
  M:    linux-driver@qlogic.com
  L:    netdev@vger.kernel.org
@@@ -5562,7 -5573,6 +5564,6 @@@ S:      Supporte
  F:    drivers/net/ethernet/qlogic/qlcnic/
  
  QLOGIC QLGE 10Gb ETHERNET DRIVER
- M:    Anirban Chakraborty <anirban.chakraborty@qlogic.com>
  M:    Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
  M:    Ron Mercer <ron.mercer@qlogic.com>
  M:    linux-driver@qlogic.com
@@@ -5744,7 -5754,7 +5745,7 @@@ F:      net/rose
  RTL8180 WIRELESS DRIVER
  M:    "John W. Linville" <linville@tuxdriver.com>
  L:    linux-wireless@vger.kernel.org
 -W:    http://linuxwireless.org/
 +W:    http://wireless.kernel.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
  S:    Maintained
  F:    drivers/net/wireless/rtl818x/rtl8180/
@@@ -5754,7 -5764,7 +5755,7 @@@ M:      Herton Ronaldo Krzesinski <herton@ca
  M:    Hin-Tak Leung <htl10@users.sourceforge.net>
  M:    Larry Finger <Larry.Finger@lwfinger.net>
  L:    linux-wireless@vger.kernel.org
 -W:    http://linuxwireless.org/
 +W:    http://wireless.kernel.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
  S:    Maintained
  F:    drivers/net/wireless/rtl818x/rtl8187/
@@@ -5763,7 -5773,7 +5764,7 @@@ RTL8192CE WIRELESS DRIVE
  M:    Larry Finger <Larry.Finger@lwfinger.net>
  M:    Chaoming Li <chaoming_li@realsil.com.cn>
  L:    linux-wireless@vger.kernel.org
 -W:    http://linuxwireless.org/
 +W:    http://wireless.kernel.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
  S:    Maintained
  F:    drivers/net/wireless/rtlwifi/
@@@ -5900,7 -5910,7 +5901,7 @@@ M:      Ingo Molnar <mingo@redhat.com
  M:    Peter Zijlstra <peterz@infradead.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
  S:    Maintained
- F:    kernel/sched*
+ F:    kernel/sched/
  F:    include/linux/sched.h
  
  SCORE ARCHITECTURE
@@@ -1572,6 -1572,9 +1572,9 @@@ static s32 e1000_check_for_serdes_link_
        ctrl = er32(CTRL);
        status = er32(STATUS);
        rxcw = er32(RXCW);
+       /* SYNCH bit and IV bit are sticky */
+       udelay(10);
+       rxcw = er32(RXCW);
  
        if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
  
                        e_dbg("ANYSTATE  -> DOWN\n");
                } else {
                        /*
 -                       * Check several times, if Sync and Config
 -                       * both are consistently 1 then simply ignore
 -                       * the Invalid bit and restart Autoneg
 +                       * Check several times, if SYNCH bit and CONFIG
 +                       * bit both are consistently 1 then simply ignore
 +                       * the IV bit and restart Autoneg
                         */
                        for (i = 0; i < AN_RETRY_COUNT; i++) {
                                udelay(10);
                                rxcw = er32(RXCW);
 -                              if ((rxcw & E1000_RXCW_IV) &&
 -                                  !((rxcw & E1000_RXCW_SYNCH) &&
 -                                    (rxcw & E1000_RXCW_C))) {
 +                              if ((rxcw & E1000_RXCW_SYNCH) &&
 +                                  (rxcw & E1000_RXCW_C))
 +                                      continue;
 +
 +                              if (rxcw & E1000_RXCW_IV) {
                                        mac->serdes_has_link = false;
                                        mac->serdes_link_state =
                                            e1000_serdes_link_down;
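
As a stand-alone illustration of the retry logic introduced in the hunk above
(not the driver code itself; the bit positions and the stubbed register read
are assumptions made for this sketch), the loop below samples RXCW several
times and only reports the link as down when the IV bit is seen while SYNCH
and C are not consistently set:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RXCW_IV        (1u << 27)	/* invalid symbol flag (sticky)    */
#define RXCW_C         (1u << 29)	/* /C/ ordered sets being received */
#define RXCW_SYNCH     (1u << 30)	/* receiver synchronized (sticky)  */
#define AN_RETRY_COUNT 5

/* Stand-in for er32(RXCW); a real driver reads the hardware register. */
static uint32_t read_rxcw(void)
{
	return RXCW_SYNCH | RXCW_C | RXCW_IV;	/* stale IV, link actually fine */
}

static bool serdes_link_down(void)
{
	for (int i = 0; i < AN_RETRY_COUNT; i++) {
		uint32_t rxcw = read_rxcw();

		/* SYNCH and C consistently 1: ignore the sticky IV bit */
		if ((rxcw & RXCW_SYNCH) && (rxcw & RXCW_C))
			continue;

		if (rxcw & RXCW_IV)
			return true;	/* genuinely invalid, mark link down */
	}
	return false;			/* restart autonegotiation instead */
}

int main(void)
{
	printf("link down: %s\n", serdes_link_down() ? "yes" : "no");
	return 0;
}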
@@@ -42,7 -42,6 +42,7 @@@
  #include <linux/in.h>
  #include <linux/ip.h>
  #include <linux/tcp.h>
 +#include <linux/sctp.h>
  #include <linux/ipv6.h>
  #include <linux/slab.h>
  #include <net/checksum.h>
@@@ -98,7 -97,9 +98,7 @@@ module_param(debug, int, 0)
  MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  
  /* forward decls */
 -static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
 -static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
 -                             u32 itr_reg);
 +static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
  
  static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
                                           struct ixgbevf_ring *rx_ring,
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
  }
  
 -/*
 +/**
   * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
   * @adapter: pointer to adapter struct
   * @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@@ -145,18 -146,18 +145,18 @@@ static void ixgbevf_set_ivar(struct ixg
        }
  }
  
 -static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
 +static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
                                               struct ixgbevf_tx_buffer
                                               *tx_buffer_info)
  {
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
 -                      dma_unmap_page(&adapter->pdev->dev,
 +                      dma_unmap_page(tx_ring->dev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       DMA_TO_DEVICE);
                else
 -                      dma_unmap_single(&adapter->pdev->dev,
 +                      dma_unmap_single(tx_ring->dev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         DMA_TO_DEVICE);
  #define IXGBE_MAX_DATA_PER_TXD        (1 << IXGBE_MAX_TXD_PWR)
  
  /* Tx Descriptors needed, worst case */
 -#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
 -                       (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
 -#ifdef MAX_SKB_FRAGS
 -#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
 -      MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
 -#else
 -#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
 -#endif
 +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
 +#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
  
  static void ixgbevf_tx_timeout(struct net_device *netdev);
  
  /**
   * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 - * @adapter: board private structure
 + * @q_vector: board private structure
   * @tx_ring: tx ring to clean
   **/
 -static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
 +static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *tx_ring)
  {
 -      struct net_device *netdev = adapter->netdev;
 -      struct ixgbe_hw *hw = &adapter->hw;
 +      struct ixgbevf_adapter *adapter = q_vector->adapter;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
  
+       if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+               return true;
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
 -      eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
 +      eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
  
        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
 -             (count < tx_ring->work_limit)) {
 +             (count < tx_ring->count)) {
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                /* eop could change between read and DD-check */
                        goto cont_loop;
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;
 -                      tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
 +                      tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;
                                total_bytes += bytecount;
                        }
  
 -                      ixgbevf_unmap_and_free_tx_resource(adapter,
 +                      ixgbevf_unmap_and_free_tx_resource(tx_ring,
                                                           tx_buffer_info);
  
                        tx_desc->wb.status = 0;
  
  cont_loop:
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
 -              eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
 +              eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
        }
  
        tx_ring->next_to_clean = i;
  
  #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 -      if (unlikely(count && netif_carrier_ok(netdev) &&
 +      if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
 -#ifdef HAVE_TX_MQ
 -              if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 -                  !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
 -                      netif_wake_subqueue(netdev, tx_ring->queue_index);
 -                      ++adapter->restart_queue;
 -              }
 -#else
 -              if (netif_queue_stopped(netdev) &&
 +              if (__netif_subqueue_stopped(tx_ring->netdev,
 +                                           tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
 -                      netif_wake_queue(netdev);
 +                      netif_wake_subqueue(tx_ring->netdev,
 +                                          tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
 -#endif
 -      }
 -
 -      /* re-arm the interrupt */
 -      if ((count >= tx_ring->work_limit) &&
 -          (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
 -              IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
        }
  
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->total_packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
  
 -      return count < tx_ring->work_limit;
 +      return count < tx_ring->count;
  }
  
  /**
@@@ -281,10 -304,13 +284,10 @@@ static void ixgbevf_receive_skb(struct 
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
  
 -      if (is_vlan && test_bit(tag, adapter->active_vlans))
 +      if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
                __vlan_hwaccel_put_tag(skb, tag);
  
 -      if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
 -                      napi_gro_receive(&q_vector->napi, skb);
 -      else
 -                      netif_rx(skb);
 +      napi_gro_receive(&q_vector->napi, skb);
  }
  
  /**
   * @skb: skb currently being received and modified
   **/
  static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
 +                                     struct ixgbevf_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
  {
        skb_checksum_none_assert(skb);
  
        /* Rx csum disabled */
 -      if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
 +      if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;
  
        /* if IP and error */
@@@ -335,21 -360,52 +338,21 @@@ static void ixgbevf_alloc_rx_buffers(st
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        struct sk_buff *skb;
 -      unsigned int i;
 -      unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
 +      unsigned int i = rx_ring->next_to_use;
  
 -      i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];
  
        while (cleaned_count--) {
 -              rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 -
 -              if (!bi->page_dma &&
 -                  (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
 -                      if (!bi->page) {
 -                              bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
 -                              if (!bi->page) {
 -                                      adapter->alloc_rx_page_failed++;
 -                                      goto no_buffers;
 -                              }
 -                              bi->page_offset = 0;
 -                      } else {
 -                              /* use a half page if we're re-using */
 -                              bi->page_offset ^= (PAGE_SIZE / 2);
 -                      }
 -
 -                      bi->page_dma = dma_map_page(&pdev->dev, bi->page,
 -                                                  bi->page_offset,
 -                                                  (PAGE_SIZE / 2),
 -                                                  DMA_FROM_DEVICE);
 -              }
 -
 +              rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
                skb = bi->skb;
                if (!skb) {
 -                      skb = netdev_alloc_skb(adapter->netdev,
 -                                                             bufsz);
 -
 +                      skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
 +                                                      rx_ring->rx_buf_len);
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
  
 -                      /*
 -                       * Make buffer alignment 2 beyond a 16 byte boundary
 -                       * this will result in a 16 byte aligned IP header after
 -                       * the 14 byte MAC header is removed
 -                       */
 -                      skb_reserve(skb, NET_IP_ALIGN);
 -
                        bi->skb = skb;
                }
                if (!bi->dma) {
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                }
 -              /* Refresh the desc even if buffer_addrs didn't change because
 -               * each write-back erases this info. */
 -              if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
 -                      rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
 -                      rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 -              } else {
 -                      rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 -              }
 +              rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
  
                i++;
                if (i == rx_ring->count)
  no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
 -              if (i-- == 0)
 -                      i = (rx_ring->count - 1);
  
                ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
        }
  }
  
  static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
 -                                           u64 qmask)
 +                                           u32 qmask)
  {
 -      u32 mask;
        struct ixgbe_hw *hw = &adapter->hw;
  
 -      mask = (qmask & 0xFFFFFFFF);
 -      IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 -}
 -
 -static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
 -{
 -      return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
 -}
 -
 -static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
 -{
 -      return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 +      IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
  }
  
  static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *rx_ring,
 -                               int *work_done, int work_to_do)
 +                               int budget)
  {
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *skb;
        unsigned int i;
        u32 len, staterr;
 -      u16 hdr_info;
 -      bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  
        i = rx_ring->next_to_clean;
 -      rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 +      rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];
  
        while (staterr & IXGBE_RXD_STAT_DD) {
 -              u32 upper_len = 0;
 -              if (*work_done >= work_to_do)
 +              if (!budget)
                        break;
 -              (*work_done)++;
 +              budget--;
  
                rmb(); /* read descriptor and rx_buffer_info after status DD */
 -              if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
 -                      hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
 -                      len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 -                             IXGBE_RXDADV_HDRBUFLEN_SHIFT;
 -                      if (hdr_info & IXGBE_RXDADV_SPH)
 -                              adapter->rx_hdr_split++;
 -                      if (len > IXGBEVF_RX_HDR_SIZE)
 -                              len = IXGBEVF_RX_HDR_SIZE;
 -                      upper_len = le16_to_cpu(rx_desc->wb.upper.length);
 -              } else {
 -                      len = le16_to_cpu(rx_desc->wb.upper.length);
 -              }
 -              cleaned = true;
 +              len = le16_to_cpu(rx_desc->wb.upper.length);
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;
                        skb_put(skb, len);
                }
  
 -              if (upper_len) {
 -                      dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
 -                                     PAGE_SIZE / 2, DMA_FROM_DEVICE);
 -                      rx_buffer_info->page_dma = 0;
 -                      skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 -                                         rx_buffer_info->page,
 -                                         rx_buffer_info->page_offset,
 -                                         upper_len);
 -
 -                      if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
 -                          (page_count(rx_buffer_info->page) != 1))
 -                              rx_buffer_info->page = NULL;
 -                      else
 -                              get_page(rx_buffer_info->page);
 -
 -                      skb->len += upper_len;
 -                      skb->data_len += upper_len;
 -                      skb->truesize += upper_len;
 -              }
 -
                i++;
                if (i == rx_ring->count)
                        i = 0;
  
 -              next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
 +              next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;
  
                next_buffer = &rx_ring->rx_buffer_info[i];
  
                if (!(staterr & IXGBE_RXD_STAT_EOP)) {
 -                      if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
 -                              rx_buffer_info->skb = next_buffer->skb;
 -                              rx_buffer_info->dma = next_buffer->dma;
 -                              next_buffer->skb = skb;
 -                              next_buffer->dma = 0;
 -                      } else {
 -                              skb->next = next_buffer->skb;
 -                              skb->next->prev = skb;
 -                      }
 +                      skb->next = next_buffer->skb;
 +                      skb->next->prev = skb;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }
                        goto next_desc;
                }
  
 -              ixgbevf_rx_checksum(adapter, staterr, skb);
 +              ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
  
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                        if (header_fixup_len < 14)
                                skb_push(skb, header_fixup_len);
                }
 -              skb->protocol = eth_type_trans(skb, adapter->netdev);
 +              skb->protocol = eth_type_trans(skb, rx_ring->netdev);
  
                ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
  
@@@ -489,52 -608,92 +492,52 @@@ next_desc
        rx_ring->total_bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
  
 -      return cleaned;
 +      return !!budget;
  }
  
  /**
 - * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 + * ixgbevf_poll - NAPI polling callback
   * @napi: napi struct with our devices info in it
   * @budget: amount of work driver is allowed to do this pass, in packets
   *
 - * This function is optimized for cleaning one queue only on a single
 - * q_vector!!!
 - **/
 -static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
 -{
 -      struct ixgbevf_q_vector *q_vector =
 -              container_of(napi, struct ixgbevf_q_vector, napi);
 -      struct ixgbevf_adapter *adapter = q_vector->adapter;
 -      struct ixgbevf_ring *rx_ring = NULL;
 -      int work_done = 0;
 -      long r_idx;
 -
 -      r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 -      rx_ring = &(adapter->rx_ring[r_idx]);
 -
 -      ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
 -
 -      /* If all Rx work done, exit the polling mode */
 -      if (work_done < budget) {
 -              napi_complete(napi);
 -              if (adapter->itr_setting & 1)
 -                      ixgbevf_set_itr_msix(q_vector);
 -              if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
 -                      ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
 -      }
 -
 -      return work_done;
 -}
 -
 -/**
 - * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 - * @napi: napi struct with our devices info in it
 - * @budget: amount of work driver is allowed to do this pass, in packets
 - *
 - * This function will clean more than one rx queue associated with a
 + * This function will clean one or more rings associated with a
   * q_vector.
   **/
 -static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
 +static int ixgbevf_poll(struct napi_struct *napi, int budget)
  {
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
 -      struct ixgbevf_ring *rx_ring = NULL;
 -      int work_done = 0, i;
 -      long r_idx;
 -      u64 enable_mask = 0;
 +      struct ixgbevf_ring *ring;
 +      int per_ring_budget;
 +      bool clean_complete = true;
 +
 +      ixgbevf_for_each_ring(ring, q_vector->tx)
 +              clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
  
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
 -      budget /= (q_vector->rxr_count ?: 1);
 -      budget = max(budget, 1);
 -      r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 -      for (i = 0; i < q_vector->rxr_count; i++) {
 -              rx_ring = &(adapter->rx_ring[r_idx]);
 -              ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
 -              enable_mask |= rx_ring->v_idx;
 -              r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
 -                                    r_idx + 1);
 -      }
 -
 -#ifndef HAVE_NETDEV_NAPI_LIST
 -      if (!netif_running(adapter->netdev))
 -              work_done = 0;
 -
 -#endif
 -      r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 -      rx_ring = &(adapter->rx_ring[r_idx]);
 -
 -      /* If all Rx work done, exit the polling mode */
 -      if (work_done < budget) {
 -              napi_complete(napi);
 -              if (adapter->itr_setting & 1)
 -                      ixgbevf_set_itr_msix(q_vector);
 -              if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
 -                      ixgbevf_irq_enable_queues(adapter, enable_mask);
 -      }
 +      if (q_vector->rx.count > 1)
 +              per_ring_budget = max(budget/q_vector->rx.count, 1);
 +      else
 +              per_ring_budget = budget;
 +
 +      ixgbevf_for_each_ring(ring, q_vector->rx)
 +              clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
 +                                                     per_ring_budget);
 +
 +      /* If all work not completed, return budget and keep polling */
 +      if (!clean_complete)
 +              return budget;
 +      /* all work done, exit the polling mode */
 +      napi_complete(napi);
 +      if (adapter->rx_itr_setting & 1)
 +              ixgbevf_set_itr(q_vector);
 +      if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
 +              ixgbevf_irq_enable_queues(adapter,
 +                                        1 << q_vector->v_idx);
  
 -      return work_done;
 +      return 0;
  }
  
  
  static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
  {
        struct ixgbevf_q_vector *q_vector;
 -      struct ixgbe_hw *hw = &adapter->hw;
 -      int i, j, q_vectors, v_idx, r_idx;
 -      u32 mask;
 +      int q_vectors, v_idx;
  
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +      adapter->eims_enable_mask = 0;
  
        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
 +              struct ixgbevf_ring *ring;
                q_vector = adapter->q_vector[v_idx];
 -              /* XXX for_each_set_bit(...) */
 -              r_idx = find_first_bit(q_vector->rxr_idx,
 -                                     adapter->num_rx_queues);
 -
 -              for (i = 0; i < q_vector->rxr_count; i++) {
 -                      j = adapter->rx_ring[r_idx].reg_idx;
 -                      ixgbevf_set_ivar(adapter, 0, j, v_idx);
 -                      r_idx = find_next_bit(q_vector->rxr_idx,
 -                                            adapter->num_rx_queues,
 -                                            r_idx + 1);
 -              }
 -              r_idx = find_first_bit(q_vector->txr_idx,
 -                                     adapter->num_tx_queues);
 -
 -              for (i = 0; i < q_vector->txr_count; i++) {
 -                      j = adapter->tx_ring[r_idx].reg_idx;
 -                      ixgbevf_set_ivar(adapter, 1, j, v_idx);
 -                      r_idx = find_next_bit(q_vector->txr_idx,
 -                                            adapter->num_tx_queues,
 -                                            r_idx + 1);
 +
 +              ixgbevf_for_each_ring(ring, q_vector->rx)
 +                      ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
 +
 +              ixgbevf_for_each_ring(ring, q_vector->tx)
 +                      ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
 +
 +              if (q_vector->tx.ring && !q_vector->rx.ring) {
 +                      /* tx only vector */
 +                      if (adapter->tx_itr_setting == 1)
 +                              q_vector->itr = IXGBE_10K_ITR;
 +                      else
 +                              q_vector->itr = adapter->tx_itr_setting;
 +              } else {
 +                      /* rx or rx/tx vector */
 +                      if (adapter->rx_itr_setting == 1)
 +                              q_vector->itr = IXGBE_20K_ITR;
 +                      else
 +                              q_vector->itr = adapter->rx_itr_setting;
                }
  
 -              /* if this is a tx only vector halve the interrupt rate */
 -              if (q_vector->txr_count && !q_vector->rxr_count)
 -                      q_vector->eitr = (adapter->eitr_param >> 1);
 -              else if (q_vector->rxr_count)
 -                      /* rx only */
 -                      q_vector->eitr = adapter->eitr_param;
 +              /* add q_vector eims value to global eims_enable_mask */
 +              adapter->eims_enable_mask |= 1 << v_idx;
  
 -              ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
 +              ixgbevf_write_eitr(q_vector);
        }
  
        ixgbevf_set_ivar(adapter, -1, 1, v_idx);
 -
 -      /* set up to autoclear timer, and the vectors */
 -      mask = IXGBE_EIMS_ENABLE_MASK;
 -      mask &= ~IXGBE_EIMS_OTHER;
 -      IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
 +      /* setup eims_other and add value to global eims_enable_mask */
 +      adapter->eims_other = 1 << v_idx;
 +      adapter->eims_enable_mask |= adapter->eims_other;
  }
  
  enum latency_range {
  
  /**
   * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 - * @adapter: pointer to adapter
 - * @eitr: eitr setting (ints per sec) to give last timeslice
 - * @itr_setting: current throttle rate in ints/second
 - * @packets: the number of packets during this measurement interval
 - * @bytes: the number of bytes during this measurement interval
 + * @q_vector: structure containing interrupt and ring information
 + * @ring_container: structure containing ring performance data
   *
   *      Stores a new ITR value based on packets and byte
   *      counts during the last interrupt.  The advantage of per interrupt
   *      on testing data as well as attempting to minimize response time
   *      while increasing bulk throughput.
   **/
 -static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
 -                           u32 eitr, u8 itr_setting,
 -                           int packets, int bytes)
 +static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
 +                             struct ixgbevf_ring_container *ring_container)
  {
 -      unsigned int retval = itr_setting;
 +      int bytes = ring_container->total_bytes;
 +      int packets = ring_container->total_packets;
        u32 timepassed_us;
        u64 bytes_perint;
 +      u8 itr_setting = ring_container->itr;
  
        if (packets == 0)
 -              goto update_itr_done;
 -
 +              return;
  
        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
 -      timepassed_us = 1000000/eitr;
 +      timepassed_us = q_vector->itr >> 2;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */
  
        switch (itr_setting) {
        case lowest_latency:
 -              if (bytes_perint > adapter->eitr_low)
 -                      retval = low_latency;
 +              if (bytes_perint > 10)
 +                      itr_setting = low_latency;
                break;
        case low_latency:
 -              if (bytes_perint > adapter->eitr_high)
 -                      retval = bulk_latency;
 -              else if (bytes_perint <= adapter->eitr_low)
 -                      retval = lowest_latency;
 +              if (bytes_perint > 20)
 +                      itr_setting = bulk_latency;
 +              else if (bytes_perint <= 10)
 +                      itr_setting = lowest_latency;
                break;
        case bulk_latency:
 -              if (bytes_perint <= adapter->eitr_high)
 -                      retval = low_latency;
 +              if (bytes_perint <= 20)
 +                      itr_setting = low_latency;
                break;
        }
  
 -update_itr_done:
 -      return retval;
 +      /* clear work counters since we have the values we need */
 +      ring_container->total_bytes = 0;
 +      ring_container->total_packets = 0;
 +
 +      /* write updated itr to ring container */
 +      ring_container->itr = itr_setting;
  }
  
  /**
   * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 - * @adapter: pointer to adapter struct
 - * @v_idx: vector index into q_vector array
 - * @itr_reg: new value to be written in *register* format, not ints/s
 + * @q_vector: structure containing interrupt and ring information
   *
   * This function is made to be called by ethtool and by the driver
   * when it needs to update VTEITR registers at runtime.  Hardware
   * specific quirks/differences are taken care of here.
   */
 -static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
 -                             u32 itr_reg)
 +void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
  {
 +      struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
 -
 -      itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
 +      int v_idx = q_vector->v_idx;
 +      u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
  
        /*
         * set the WDIS bit to not clear the timer bits and cause an
        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
  }
  
 -static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
 +static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
  {
 -      struct ixgbevf_adapter *adapter = q_vector->adapter;
 -      u32 new_itr;
 -      u8 current_itr, ret_itr;
 -      int i, r_idx, v_idx = q_vector->v_idx;
 -      struct ixgbevf_ring *rx_ring, *tx_ring;
 -
 -      r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 -      for (i = 0; i < q_vector->txr_count; i++) {
 -              tx_ring = &(adapter->tx_ring[r_idx]);
 -              ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
 -                                           q_vector->tx_itr,
 -                                           tx_ring->total_packets,
 -                                           tx_ring->total_bytes);
 -              /* if the result for this queue would decrease interrupt
 -               * rate for this vector then use that result */
 -              q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
 -                                  q_vector->tx_itr - 1 : ret_itr);
 -              r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 -                                    r_idx + 1);
 -      }
 -
 -      r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 -      for (i = 0; i < q_vector->rxr_count; i++) {
 -              rx_ring = &(adapter->rx_ring[r_idx]);
 -              ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
 -                                           q_vector->rx_itr,
 -                                           rx_ring->total_packets,
 -                                           rx_ring->total_bytes);
 -              /* if the result for this queue would decrease interrupt
 -               * rate for this vector then use that result */
 -              q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
 -                                  q_vector->rx_itr - 1 : ret_itr);
 -              r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
 -                                    r_idx + 1);
 -      }
 -
 -      current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
 +      u32 new_itr = q_vector->itr;
 +      u8 current_itr;
 +
 +      ixgbevf_update_itr(q_vector, &q_vector->tx);
 +      ixgbevf_update_itr(q_vector, &q_vector->rx);
 +
 +      current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
  
        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
 -              new_itr = 100000;
 +              new_itr = IXGBE_100K_ITR;
                break;
        case low_latency:
 -              new_itr = 20000; /* aka hwitr = ~200 */
 +              new_itr = IXGBE_20K_ITR;
                break;
        case bulk_latency:
        default:
 -              new_itr = 8000;
 +              new_itr = IXGBE_8K_ITR;
                break;
        }
  
 -      if (new_itr != q_vector->eitr) {
 -              u32 itr_reg;
 -
 -              /* save the algorithm value here, not the smoothed one */
 -              q_vector->eitr = new_itr;
 +      if (new_itr != q_vector->itr) {
                /* do an exponential smoothing */
 -              new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 -              itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
 -              ixgbevf_write_eitr(adapter, v_idx, itr_reg);
 +              new_itr = (10 * new_itr * q_vector->itr) /
 +                        ((9 * new_itr) + q_vector->itr);
 +
 +              /* save the algorithm value here */
 +              q_vector->itr = new_itr;
 +
 +              ixgbevf_write_eitr(q_vector);
        }
  }
  
  static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
  {
 -      struct net_device *netdev = data;
 -      struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbevf_adapter *adapter = data;
        struct ixgbe_hw *hw = &adapter->hw;
 -      u32 eicr;
        u32 msg;
        bool got_ack = false;
  
 -      eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
 -      IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
 -
        if (!hw->mbx.ops.check_for_ack(hw))
                got_ack = true;
  
        if (got_ack)
                hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
  
 -      return IRQ_HANDLED;
 -}
 -
 -static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
 -{
 -      struct ixgbevf_q_vector *q_vector = data;
 -      struct ixgbevf_adapter  *adapter = q_vector->adapter;
 -      struct ixgbevf_ring     *tx_ring;
 -      int i, r_idx;
 -
 -      if (!q_vector->txr_count)
 -              return IRQ_HANDLED;
 -
 -      r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 -      for (i = 0; i < q_vector->txr_count; i++) {
 -              tx_ring = &(adapter->tx_ring[r_idx]);
 -              ixgbevf_clean_tx_irq(adapter, tx_ring);
 -              r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 -                                    r_idx + 1);
 -      }
 -
 -      if (adapter->itr_setting & 1)
 -              ixgbevf_set_itr_msix(q_vector);
 +      IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
  
        return IRQ_HANDLED;
  }
  
 +
  /**
 - * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 + * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
   * @irq: unused
   * @data: pointer to our q_vector struct for this interrupt vector
   **/
 -static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
 +static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
  {
        struct ixgbevf_q_vector *q_vector = data;
 -      struct ixgbevf_adapter  *adapter = q_vector->adapter;
 -      struct ixgbe_hw *hw = &adapter->hw;
 -      struct ixgbevf_ring  *rx_ring;
 -      int r_idx;
 -
 -      if (!q_vector->rxr_count)
 -              return IRQ_HANDLED;
 -
 -      r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 -      rx_ring = &(adapter->rx_ring[r_idx]);
 -      /* disable interrupts on this vector only */
 -      IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
 -      napi_schedule(&q_vector->napi);
 -
 -
 -      return IRQ_HANDLED;
 -}
  
 -static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
 -{
 -      ixgbevf_msix_clean_rx(irq, data);
 -      ixgbevf_msix_clean_tx(irq, data);
 +      /* EIAM disabled interrupts (on this vector) for us */
 +      if (q_vector->rx.ring || q_vector->tx.ring)
 +              napi_schedule(&q_vector->napi);
  
        return IRQ_HANDLED;
  }
@@@ -781,9 -1022,9 +784,9 @@@ static inline void map_vector_to_rxq(st
  {
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
  
 -      set_bit(r_idx, q_vector->rxr_idx);
 -      q_vector->rxr_count++;
 -      a->rx_ring[r_idx].v_idx = 1 << v_idx;
 +      a->rx_ring[r_idx].next = q_vector->rx.ring;
 +      q_vector->rx.ring = &a->rx_ring[r_idx];
 +      q_vector->rx.count++;
  }
  
  static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
  {
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
  
 -      set_bit(t_idx, q_vector->txr_idx);
 -      q_vector->txr_count++;
 -      a->tx_ring[t_idx].v_idx = 1 << v_idx;
 +      a->tx_ring[t_idx].next = q_vector->tx.ring;
 +      q_vector->tx.ring = &a->tx_ring[t_idx];
 +      q_vector->tx.count++;
  }
  
  /**
@@@ -869,30 -1110,37 +872,30 @@@ out
  static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
 -      irqreturn_t (*handler)(int, void *);
 -      int i, vector, q_vectors, err;
 +      int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +      int vector, err;
        int ri = 0, ti = 0;
  
 -      /* Decrement for Other and TCP Timer vectors */
 -      q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 -
 -#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
 -                                        ? &ixgbevf_msix_clean_many : \
 -                        (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
 -                        (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
 -                        NULL)
        for (vector = 0; vector < q_vectors; vector++) {
 -              handler = SET_HANDLER(adapter->q_vector[vector]);
 -
 -              if (handler == &ixgbevf_msix_clean_rx) {
 -                      sprintf(adapter->name[vector], "%s-%s-%d",
 -                              netdev->name, "rx", ri++);
 -              } else if (handler == &ixgbevf_msix_clean_tx) {
 -                      sprintf(adapter->name[vector], "%s-%s-%d",
 -                              netdev->name, "tx", ti++);
 -              } else if (handler == &ixgbevf_msix_clean_many) {
 -                      sprintf(adapter->name[vector], "%s-%s-%d",
 -                              netdev->name, "TxRx", vector);
 +              struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
 +              struct msix_entry *entry = &adapter->msix_entries[vector];
 +
 +              if (q_vector->tx.ring && q_vector->rx.ring) {
 +                      snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 +                               "%s-%s-%d", netdev->name, "TxRx", ri++);
 +                      ti++;
 +              } else if (q_vector->rx.ring) {
 +                      snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 +                               "%s-%s-%d", netdev->name, "rx", ri++);
 +              } else if (q_vector->tx.ring) {
 +                      snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 +                               "%s-%s-%d", netdev->name, "tx", ti++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
 -              err = request_irq(adapter->msix_entries[vector].vector,
 -                                handler, 0, adapter->name[vector],
 -                                adapter->q_vector[vector]);
 +              err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
 +                                q_vector->name, q_vector);
                if (err) {
                        hw_dbg(&adapter->hw,
                               "request_irq failed for MSIX interrupt "
                }
        }
  
 -      sprintf(adapter->name[vector], "%s:mbx", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
 -                        &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
 +                        &ixgbevf_msix_mbx, 0, netdev->name, adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "request_irq for msix_mbx failed: %d\n", err);
        return 0;
  
  free_queue_irqs:
 -      for (i = vector - 1; i >= 0; i--)
 -              free_irq(adapter->msix_entries[--vector].vector,
 -                       &(adapter->q_vector[i]));
 +      while (vector) {
 +              vector--;
 +              free_irq(adapter->msix_entries[vector].vector,
 +                       adapter->q_vector[vector]);
 +      }
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
@@@ -929,10 -1176,11 +932,10 @@@ static inline void ixgbevf_reset_q_vect
  
        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
 -              bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
 -              bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
 -              q_vector->rxr_count = 0;
 -              q_vector->txr_count = 0;
 -              q_vector->eitr = adapter->eitr_param;
 +              q_vector->rx.ring = NULL;
 +              q_vector->tx.ring = NULL;
 +              q_vector->rx.count = 0;
 +              q_vector->tx.count = 0;
        }
  }
  
@@@ -958,20 -1206,17 +961,20 @@@ static int ixgbevf_request_irq(struct i
  
  static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
  {
 -      struct net_device *netdev = adapter->netdev;
        int i, q_vectors;
  
        q_vectors = adapter->num_msix_vectors;
 -
        i = q_vectors - 1;
  
 -      free_irq(adapter->msix_entries[i].vector, netdev);
 +      free_irq(adapter->msix_entries[i].vector, adapter);
        i--;
  
        for (; i >= 0; i--) {
 +              /* free only the irqs that were actually requested */
 +              if (!adapter->q_vector[i]->rx.ring &&
 +                  !adapter->q_vector[i]->tx.ring)
 +                      continue;
 +
                free_irq(adapter->msix_entries[i].vector,
                         adapter->q_vector[i]);
        }
   **/
  static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
  {
 -      int i;
        struct ixgbe_hw *hw = &adapter->hw;
 +      int i;
  
 +      IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
 +      IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
  
        IXGBE_WRITE_FLUSH(hw);
  
   * ixgbevf_irq_enable - Enable default interrupt generation settings
   * @adapter: board private structure
   **/
 -static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
 -                                    bool queues, bool flush)
 +static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
  {
        struct ixgbe_hw *hw = &adapter->hw;
 -      u32 mask;
 -      u64 qmask;
 -
 -      mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
 -      qmask = ~0;
 -
 -      IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 -
 -      if (queues)
 -              ixgbevf_irq_enable_queues(adapter, qmask);
  
 -      if (flush)
 -              IXGBE_WRITE_FLUSH(hw);
 +      IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
 +      IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
 +      IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
  }
  
  /**
@@@ -1058,14 -1311,29 +1061,14 @@@ static void ixgbevf_configure_srrctl(st
  
        srrctl = IXGBE_SRRCTL_DROP_EN;
  
 -      if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
 -              u16 bufsz = IXGBEVF_RXBUFFER_2048;
 -              /* grow the amount we can receive on large page machines */
 -              if (bufsz < (PAGE_SIZE / 2))
 -                      bufsz = (PAGE_SIZE / 2);
 -              /* cap the bufsz at our largest descriptor size */
 -              bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
 -
 -              srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 -              srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
 -              srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
 -                         IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 -                         IXGBE_SRRCTL_BSIZEHDR_MASK);
 -      } else {
 -              srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 +      srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
  
 -              if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
 -                      srrctl |= IXGBEVF_RXBUFFER_2048 >>
 -                              IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 -              else
 -                      srrctl |= rx_ring->rx_buf_len >>
 -                              IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 -      }
 +      if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
 +              srrctl |= IXGBEVF_RXBUFFER_2048 >>
 +                      IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 +      else
 +              srrctl |= rx_ring->rx_buf_len >>
 +                      IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
  }
  
@@@ -1085,12 -1353,36 +1088,12 @@@ static void ixgbevf_configure_rx(struc
        u32 rdlen;
        int rx_buf_len;
  
 -      /* Decide whether to use packet split mode or not */
 -      if (netdev->mtu > ETH_DATA_LEN) {
 -              if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
 -                      adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 -              else
 -                      adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 -      } else {
 -              if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
 -                      adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 -              else
 -                      adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 -      }
 -
 -      /* Set the RX buffer length according to the mode */
 -      if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
 -              /* PSRTYPE must be initialized in 82599 */
 -              u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
 -                      IXGBE_PSRTYPE_UDPHDR |
 -                      IXGBE_PSRTYPE_IPV4HDR |
 -                      IXGBE_PSRTYPE_IPV6HDR |
 -                      IXGBE_PSRTYPE_L2HDR;
 -              IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 -              rx_buf_len = IXGBEVF_RX_HDR_SIZE;
 -      } else {
 -              IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
 -              if (netdev->mtu <= ETH_DATA_LEN)
 -                      rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 -              else
 -                      rx_buf_len = ALIGN(max_frame, 1024);
 -      }
 +      /* PSRTYPE must be initialized in 82599 */
 +      IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
 +      if (netdev->mtu <= ETH_DATA_LEN)
 +              rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 +      else
 +              rx_buf_len = ALIGN(max_frame, 1024);
  
        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
@@@ -1201,8 -1493,15 +1204,8 @@@ static void ixgbevf_napi_enable_all(str
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 -              struct napi_struct *napi;
                q_vector = adapter->q_vector[q_idx];
 -              if (!q_vector->rxr_count)
 -                      continue;
 -              napi = &q_vector->napi;
 -              if (q_vector->rxr_count > 1)
 -                      napi->poll = &ixgbevf_clean_rxonly_many;
 -
 -              napi_enable(napi);
 +              napi_enable(&q_vector->napi);
        }
  }
  
@@@ -1214,6 -1513,8 +1217,6 @@@ static void ixgbevf_napi_disable_all(st
  
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
 -              if (!q_vector->rxr_count)
 -                      continue;
                napi_disable(&q_vector->napi);
        }
  }
@@@ -1231,8 -1532,9 +1234,8 @@@ static void ixgbevf_configure(struct ix
        ixgbevf_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbevf_ring *ring = &adapter->rx_ring[i];
 -              ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
 -              ring->next_to_use = ring->count - 1;
 -              writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
 +              ixgbevf_alloc_rx_buffers(adapter, ring,
 +                                       IXGBE_DESC_UNUSED(ring));
        }
  }
  
@@@ -1356,6 -1658,10 +1359,6 @@@ static void ixgbevf_up_complete(struct 
        ixgbevf_save_reset_stats(adapter);
        ixgbevf_init_last_counter_stats(adapter);
  
 -      /* bring the link up in the watchdog, this could race with our first
 -       * link up interrupt but shouldn't be a problem */
 -      adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 -      adapter->link_check_timeout = jiffies;
        mod_timer(&adapter->watchdog_timer, jiffies);
  }
  
@@@ -1370,7 -1676,7 +1373,7 @@@ void ixgbevf_up(struct ixgbevf_adapter 
        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_VTEICR);
  
 -      ixgbevf_irq_enable(adapter, true, true);
 +      ixgbevf_irq_enable(adapter);
  }
  
  /**
@@@ -1408,6 -1714,14 +1411,6 @@@ static void ixgbevf_clean_rx_ring(struc
                                dev_kfree_skb(this);
                        } while (skb);
                }
 -              if (!rx_buffer_info->page)
 -                      continue;
 -              dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
 -                             PAGE_SIZE / 2, DMA_FROM_DEVICE);
 -              rx_buffer_info->page_dma = 0;
 -              put_page(rx_buffer_info->page);
 -              rx_buffer_info->page = NULL;
 -              rx_buffer_info->page_offset = 0;
        }
  
        size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@@ -1444,7 -1758,7 +1447,7 @@@ static void ixgbevf_clean_tx_ring(struc
  
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
 -              ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
 +              ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
  
        size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@@ -1577,9 -1891,10 +1580,9 @@@ static void ixgbevf_acquire_msix_vector
  {
        int err, vector_threshold;
  
 -      /* We'll want at least 3 (vector_threshold):
 -       * 1) TxQ[0] Cleanup
 -       * 2) RxQ[0] Cleanup
 -       * 3) Other (Link Status Change, etc.)
 +      /* We'll want at least 2 (vector_threshold):
 +       * 1) TxQ[0] + RxQ[0] handler
 +       * 2) Other (Link Status Change, etc.)
         */
        vector_threshold = MIN_MSIX_COUNT;
  
        }
  }
  
 -/*
 - * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
 +/**
 + * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
   * @adapter: board private structure to initialize
   *
   * This is the top level queue allocation routine.  The order here is very
@@@ -1634,6 -1949,8 +1637,6 @@@ static void ixgbevf_set_num_queues(stru
        /* Start with base case */
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
 -      adapter->num_rx_pools = adapter->num_rx_queues;
 -      adapter->num_rx_queues_per_pool = 1;
  }
  
  /**
@@@ -1662,16 -1979,12 +1665,16 @@@ static int ixgbevf_alloc_queues(struct 
                adapter->tx_ring[i].count = adapter->tx_ring_count;
                adapter->tx_ring[i].queue_index = i;
                adapter->tx_ring[i].reg_idx = i;
 +              adapter->tx_ring[i].dev = &adapter->pdev->dev;
 +              adapter->tx_ring[i].netdev = adapter->netdev;
        }
  
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].count = adapter->rx_ring_count;
                adapter->rx_ring[i].queue_index = i;
                adapter->rx_ring[i].reg_idx = i;
 +              adapter->rx_ring[i].dev = &adapter->pdev->dev;
 +              adapter->rx_ring[i].netdev = adapter->netdev;
        }
  
        return 0;
@@@ -1698,12 -2011,10 +1701,12 @@@ static int ixgbevf_set_interrupt_capabi
         * It's easy to be greedy for MSI-X vectors, but it really
         * doesn't do us much good if we have a lot more vectors
         * than CPU's.  So let's be conservative and only ask for
 -       * (roughly) twice the number of vectors as there are CPU's.
 +       * (roughly) the same number of vectors as there are CPU's.
 +       * The default is to use pairs of vectors.
         */
 -      v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
 -                     (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
 +      v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
 +      v_budget = min_t(int, v_budget, num_online_cpus());
 +      v_budget += NON_Q_VECTORS;
  
        /* A failure in MSI-X entry allocation isn't fatal, but it does
         * mean we disable MSI-X capabilities of the adapter. */
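
Worked through for the base single-queue case set up in ixgbevf_set_num_queues (num_rx_queues = num_tx_queues = 1), and assuming NON_Q_VECTORS is 1 (the lone "other" vector for mailbox/link events): v_budget = max(1, 1) = 1, then min_t(int, 1, num_online_cpus()) = 1, plus the non-queue vector gives 2, which lines up with the two-vector minimum (MIN_MSIX_COUNT) described in the ixgbevf_acquire_msix_vectors comment earlier in the patch.
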
@@@ -1734,8 -2045,12 +1737,8 @@@ static int ixgbevf_alloc_q_vectors(stru
  {
        int q_idx, num_q_vectors;
        struct ixgbevf_q_vector *q_vector;
 -      int napi_vectors;
 -      int (*poll)(struct napi_struct *, int);
  
        num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 -      napi_vectors = adapter->num_rx_queues;
 -      poll = &ixgbevf_clean_rxonly;
  
        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->v_idx = q_idx;
 -              q_vector->eitr = adapter->eitr_param;
 -              if (q_idx < napi_vectors)
 -                      netif_napi_add(adapter->netdev, &q_vector->napi,
 -                                     (*poll), 64);
 +              netif_napi_add(adapter->netdev, &q_vector->napi,
 +                             ixgbevf_poll, 64);
                adapter->q_vector[q_idx] = q_vector;
        }
  
@@@ -1891,13 -2208,20 +1894,13 @@@ static int __devinit ixgbevf_sw_init(st
        }
  
        /* Enable dynamic interrupt throttling rates */
 -      adapter->eitr_param = 20000;
 -      adapter->itr_setting = 1;
 -
 -      /* set defaults for eitr in MegaBytes */
 -      adapter->eitr_low = 10;
 -      adapter->eitr_high = 20;
 +      adapter->rx_itr_setting = 1;
 +      adapter->tx_itr_setting = 1;
  
        /* set default ring sizes */
        adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
        adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
  
 -      /* enable rx csum by default */
 -      adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
 -
        set_bit(__IXGBEVF_DOWN, &adapter->state);
        return 0;
  
@@@ -1957,7 -2281,7 +1960,7 @@@ static void ixgbevf_watchdog(unsigned l
  {
        struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
        struct ixgbe_hw *hw = &adapter->hw;
 -      u64 eics = 0;
 +      u32 eics = 0;
        int i;
  
        /*
        /* get one bit for every active tx/rx interrupt vector */
        for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                struct ixgbevf_q_vector *qv = adapter->q_vector[i];
 -              if (qv->rxr_count || qv->txr_count)
 -                      eics |= (1 << i);
 +              if (qv->rx.ring || qv->tx.ring)
 +                      eics |= 1 << i;
        }
  
 -      IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
 +      IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
  
  watchdog_short_circuit:
        schedule_work(&adapter->watchdog_task);
@@@ -2145,6 -2469,7 +2148,6 @@@ int ixgbevf_setup_tx_resources(struct i
  
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
 -      tx_ring->work_limit = tx_ring->count;
        return 0;
  
  err:
@@@ -2348,7 -2673,7 +2351,7 @@@ static int ixgbevf_open(struct net_devi
        if (err)
                goto err_req_irq;
  
 -      ixgbevf_irq_enable(adapter, true, true);
 +      ixgbevf_irq_enable(adapter);
  
        return 0;
  
@@@ -2390,153 -2715,172 +2393,153 @@@ static int ixgbevf_close(struct net_dev
        return 0;
  }
  
 -static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
 -                     struct ixgbevf_ring *tx_ring,
 -                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 +static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
 +                              u32 vlan_macip_lens, u32 type_tucmd,
 +                              u32 mss_l4len_idx)
  {
        struct ixgbe_adv_tx_context_desc *context_desc;
 -      unsigned int i;
 -      int err;
 -      struct ixgbevf_tx_buffer *tx_buffer_info;
 -      u32 vlan_macip_lens = 0, type_tucmd_mlhl;
 -      u32 mss_l4len_idx, l4len;
 +      u16 i = tx_ring->next_to_use;
  
 -      if (skb_is_gso(skb)) {
 -              if (skb_header_cloned(skb)) {
 -                      err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 -                      if (err)
 -                              return err;
 -              }
 -              l4len = tcp_hdrlen(skb);
 -              *hdr_len += l4len;
 -
 -              if (skb->protocol == htons(ETH_P_IP)) {
 -                      struct iphdr *iph = ip_hdr(skb);
 -                      iph->tot_len = 0;
 -                      iph->check = 0;
 -                      tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
 -                                                               iph->daddr, 0,
 -                                                               IPPROTO_TCP,
 -                                                               0);
 -                      adapter->hw_tso_ctxt++;
 -              } else if (skb_is_gso_v6(skb)) {
 -                      ipv6_hdr(skb)->payload_len = 0;
 -                      tcp_hdr(skb)->check =
 -                          ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 -                                           &ipv6_hdr(skb)->daddr,
 -                                           0, IPPROTO_TCP, 0);
 -                      adapter->hw_tso6_ctxt++;
 -              }
 +      context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
  
 -              i = tx_ring->next_to_use;
 +      i++;
 +      tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
  
 -              tx_buffer_info = &tx_ring->tx_buffer_info[i];
 -              context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
 -
 -              /* VLAN MACLEN IPLEN */
 -              if (tx_flags & IXGBE_TX_FLAGS_VLAN)
 -                      vlan_macip_lens |=
 -                              (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
 -              vlan_macip_lens |= ((skb_network_offset(skb)) <<
 -                                  IXGBE_ADVTXD_MACLEN_SHIFT);
 -              *hdr_len += skb_network_offset(skb);
 -              vlan_macip_lens |=
 -                      (skb_transport_header(skb) - skb_network_header(skb));
 -              *hdr_len +=
 -                      (skb_transport_header(skb) - skb_network_header(skb));
 -              context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
 -              context_desc->seqnum_seed = 0;
 -
 -              /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 -              type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
 -                                  IXGBE_ADVTXD_DTYP_CTXT);
 -
 -              if (skb->protocol == htons(ETH_P_IP))
 -                      type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 -              type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 -              context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 -
 -              /* MSS L4LEN IDX */
 -              mss_l4len_idx =
 -                      (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
 -              mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
 -              /* use index 1 for TSO */
 -              mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
 -              context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 -
 -              tx_buffer_info->time_stamp = jiffies;
 -              tx_buffer_info->next_to_watch = i;
 +      /* set bits to identify this as an advanced context descriptor */
 +      type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
  
 -              i++;
 -              if (i == tx_ring->count)
 -                      i = 0;
 -              tx_ring->next_to_use = i;
 +      context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
 +      context_desc->seqnum_seed       = 0;
 +      context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
 +      context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
 +}
  
 -              return true;
 +static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 +                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 +{
 +      u32 vlan_macip_lens, type_tucmd;
 +      u32 mss_l4len_idx, l4len;
 +
 +      if (!skb_is_gso(skb))
 +              return 0;
 +
 +      if (skb_header_cloned(skb)) {
 +              int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 +              if (err)
 +                      return err;
 +      }
 +
 +      /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 +      type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 +
 +      if (skb->protocol == htons(ETH_P_IP)) {
 +              struct iphdr *iph = ip_hdr(skb);
 +              iph->tot_len = 0;
 +              iph->check = 0;
 +              tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
 +                                                       iph->daddr, 0,
 +                                                       IPPROTO_TCP,
 +                                                       0);
 +              type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 +      } else if (skb_is_gso_v6(skb)) {
 +              ipv6_hdr(skb)->payload_len = 0;
 +              tcp_hdr(skb)->check =
 +                  ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 +                                   &ipv6_hdr(skb)->daddr,
 +                                   0, IPPROTO_TCP, 0);
        }
  
 -      return false;
 +      /* compute header lengths */
 +      l4len = tcp_hdrlen(skb);
 +      *hdr_len += l4len;
 +      *hdr_len = skb_transport_offset(skb) + l4len;
 +
 +      /* mss_l4len_id: use 1 as index for TSO */
 +      mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
 +      mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 +      mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
 +
 +      /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
 +      vlan_macip_lens = skb_network_header_len(skb);
 +      vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 +      vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 +
 +      ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
 +                          type_tucmd, mss_l4len_idx);
 +
 +      return 1;
  }
  
 -static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
 -                          struct ixgbevf_ring *tx_ring,
 +static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
                            struct sk_buff *skb, u32 tx_flags)
  {
 -      struct ixgbe_adv_tx_context_desc *context_desc;
 -      unsigned int i;
 -      struct ixgbevf_tx_buffer *tx_buffer_info;
 -      u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
  
 -      if (skb->ip_summed == CHECKSUM_PARTIAL ||
 -          (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
 -              i = tx_ring->next_to_use;
 -              tx_buffer_info = &tx_ring->tx_buffer_info[i];
 -              context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
 -
 -              if (tx_flags & IXGBE_TX_FLAGS_VLAN)
 -                      vlan_macip_lens |= (tx_flags &
 -                                          IXGBE_TX_FLAGS_VLAN_MASK);
 -              vlan_macip_lens |= (skb_network_offset(skb) <<
 -                                  IXGBE_ADVTXD_MACLEN_SHIFT);
 -              if (skb->ip_summed == CHECKSUM_PARTIAL)
 -                      vlan_macip_lens |= (skb_transport_header(skb) -
 -                                          skb_network_header(skb));
 -
 -              context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
 -              context_desc->seqnum_seed = 0;
 -
 -              type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
 -                                  IXGBE_ADVTXD_DTYP_CTXT);
 -
 -              if (skb->ip_summed == CHECKSUM_PARTIAL) {
 -                      switch (skb->protocol) {
 -                      case __constant_htons(ETH_P_IP):
 -                              type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 -                              if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 -                                      type_tucmd_mlhl |=
 -                                          IXGBE_ADVTXD_TUCMD_L4T_TCP;
 -                              break;
 -                      case __constant_htons(ETH_P_IPV6):
 -                              /* XXX what about other V6 headers?? */
 -                              if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 -                                      type_tucmd_mlhl |=
 -                                              IXGBE_ADVTXD_TUCMD_L4T_TCP;
 -                              break;
 -                      default:
 -                              if (unlikely(net_ratelimit())) {
 -                                      pr_warn("partial checksum but "
 -                                              "proto=%x!\n", skb->protocol);
 -                              }
 -                              break;
 -                      }
 -              }
  
 -              context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 -              /* use index zero for tx checksum offload */
 -              context_desc->mss_l4len_idx = 0;
  
 -              tx_buffer_info->time_stamp = jiffies;
 -              tx_buffer_info->next_to_watch = i;
 +      u32 vlan_macip_lens = 0;
 +      u32 mss_l4len_idx = 0;
 +      u32 type_tucmd = 0;
  
 -              adapter->hw_csum_tx_good++;
 -              i++;
 -              if (i == tx_ring->count)
 -                      i = 0;
 -              tx_ring->next_to_use = i;
 +      if (skb->ip_summed == CHECKSUM_PARTIAL) {
 +              u8 l4_hdr = 0;
 +              switch (skb->protocol) {
 +              case __constant_htons(ETH_P_IP):
 +                      vlan_macip_lens |= skb_network_header_len(skb);
 +                      type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 +                      l4_hdr = ip_hdr(skb)->protocol;
 +                      break;
 +              case __constant_htons(ETH_P_IPV6):
 +                      vlan_macip_lens |= skb_network_header_len(skb);
 +                      l4_hdr = ipv6_hdr(skb)->nexthdr;
 +                      break;
 +              default:
 +                      if (unlikely(net_ratelimit())) {
 +                              dev_warn(tx_ring->dev,
 +                               "partial checksum but proto=%x!\n",
 +                               skb->protocol);
 +                      }
 +                      break;
 +              }
  
 -              return true;
 +              switch (l4_hdr) {
 +              case IPPROTO_TCP:
 +                      type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 +                      mss_l4len_idx = tcp_hdrlen(skb) <<
 +                                      IXGBE_ADVTXD_L4LEN_SHIFT;
 +                      break;
 +              case IPPROTO_SCTP:
 +                      type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
 +                      mss_l4len_idx = sizeof(struct sctphdr) <<
 +                                      IXGBE_ADVTXD_L4LEN_SHIFT;
 +                      break;
 +              case IPPROTO_UDP:
 +                      mss_l4len_idx = sizeof(struct udphdr) <<
 +                                      IXGBE_ADVTXD_L4LEN_SHIFT;
 +                      break;
 +              default:
 +                      if (unlikely(net_ratelimit())) {
 +                              dev_warn(tx_ring->dev,
 +                               "partial checksum but l4 proto=%x!\n",
 +                               l4_hdr);
 +                      }
 +                      break;
 +              }
        }
  
 -      return false;
 +      /* vlan_macip_lens: MACLEN, VLAN tag */
 +      vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 +      vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 +
 +      ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
 +                          type_tucmd, mss_l4len_idx);
 +
 +      return (skb->ip_summed == CHECKSUM_PARTIAL);
  }
  
 -static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 -                        struct ixgbevf_ring *tx_ring,
 +static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
                          struct sk_buff *skb, u32 tx_flags,
                          unsigned int first)
  {
 -      struct pci_dev *pdev = adapter->pdev;
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned int len;
        unsigned int total = skb->len;
  
                tx_buffer_info->length = size;
                tx_buffer_info->mapped_as_page = false;
 -              tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
 +              tx_buffer_info->dma = dma_map_single(tx_ring->dev,
                                                     skb->data + offset,
                                                     size, DMA_TO_DEVICE);
 -              if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 +              if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
                        goto dma_error;
 -              tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
  
                len -= size;
  
                        tx_buffer_info->length = size;
                        tx_buffer_info->dma =
 -                              skb_frag_dma_map(&adapter->pdev->dev, frag,
 +                              skb_frag_dma_map(tx_ring->dev, frag,
                                                 offset, size, DMA_TO_DEVICE);
                        tx_buffer_info->mapped_as_page = true;
 -                      if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 +                      if (dma_mapping_error(tx_ring->dev,
 +                                            tx_buffer_info->dma))
                                goto dma_error;
 -                      tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
  
                        len -= size;
                i = i - 1;
        tx_ring->tx_buffer_info[i].skb = skb;
        tx_ring->tx_buffer_info[first].next_to_watch = i;
 +      tx_ring->tx_buffer_info[first].time_stamp = jiffies;
  
        return count;
  
  dma_error:
 -      dev_err(&pdev->dev, "TX DMA map failed\n");
 +      dev_err(tx_ring->dev, "TX DMA map failed\n");
  
        /* clear timestamp and dma mappings for failed tx_buffer_info map */
        tx_buffer_info->dma = 0;
 -      tx_buffer_info->time_stamp = 0;
        tx_buffer_info->next_to_watch = 0;
        count--;
  
                if (i < 0)
                        i += tx_ring->count;
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
 -              ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
 +              ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
  
        return count;
  }
  
 -static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
 -                           struct ixgbevf_ring *tx_ring, int tx_flags,
 +static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
                             int count, u32 paylen, u8 hdr_len)
  {
        union ixgbe_adv_tx_desc *tx_desc = NULL;
        if (tx_flags & IXGBE_TX_FLAGS_VLAN)
                cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
  
 +      if (tx_flags & IXGBE_TX_FLAGS_CSUM)
 +              olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
 +
        if (tx_flags & IXGBE_TX_FLAGS_TSO) {
                cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
  
 -              olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
 -                      IXGBE_ADVTXD_POPTS_SHIFT;
 -
                /* use index 1 context for tso */
                olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
                if (tx_flags & IXGBE_TX_FLAGS_IPV4)
 -                      olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
 -                              IXGBE_ADVTXD_POPTS_SHIFT;
 +                      olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
 +
 +      }
  
 -      } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
 -              olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
 -                      IXGBE_ADVTXD_POPTS_SHIFT;
 +      /*
 +       * Check Context must be set if Tx switch is enabled, which it
 +       * always is for case where virtual functions are running
 +       */
 +      olinfo_status |= IXGBE_ADVTXD_CC;
  
        olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
  
        i = tx_ring->next_to_use;
        while (count--) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
 -              tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
 +              tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
                tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
                tx_desc->read.cmd_type_len =
                        cpu_to_le32(cmd_type_len | tx_buffer_info->length);
  
        tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
  
 -      /*
 -       * Force memory writes to complete before letting h/w
 -       * know there are new descriptors to fetch.  (Only
 -       * applicable for weak-ordered memory model archs,
 -       * such as IA-64).
 -       */
 -      wmb();
 -
        tx_ring->next_to_use = i;
 -      writel(i, adapter->hw.hw_addr + tx_ring->tail);
  }
  
 -static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
 -                                 struct ixgbevf_ring *tx_ring, int size)
 +static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
  {
 -      struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
  
 -      netif_stop_subqueue(netdev, tx_ring->queue_index);
 +      netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
                return -EBUSY;
  
        /* A reprieve! - use start_queue because it doesn't call schedule */
 -      netif_start_subqueue(netdev, tx_ring->queue_index);
 +      netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++adapter->restart_queue;
        return 0;
  }
  
 -static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
 -                               struct ixgbevf_ring *tx_ring, int size)
 +static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
  {
        if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
                return 0;
 -      return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
 +      return __ixgbevf_maybe_stop_tx(tx_ring, size);
  }
  
  static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        unsigned int tx_flags = 0;
        u8 hdr_len = 0;
        int r_idx = 0, tso;
 -      int count = 0;
 -
 -      unsigned int f;
 +      u16 count = TXD_USE_COUNT(skb_headlen(skb));
 +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 +      unsigned short f;
 +#endif
  
        tx_ring = &adapter->tx_ring[r_idx];
  
 +      /*
 +       * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
 +       *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
 +       *       + 2 desc gap to keep tail from touching head,
 +       *       + 1 desc for context descriptor,
 +       * otherwise try next time
 +       */
 +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 +      for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 +              count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 +#else
 +      count += skb_shinfo(skb)->nr_frags;
 +#endif
 +      if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
 +              adapter->tx_busy++;
 +              return NETDEV_TX_BUSY;
 +      }
 +
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
  
 -      /* four things can cause us to need a context descriptor */
 -      if (skb_is_gso(skb) ||
 -          (skb->ip_summed == CHECKSUM_PARTIAL) ||
 -          (tx_flags & IXGBE_TX_FLAGS_VLAN))
 -              count++;
 -
 -      count += TXD_USE_COUNT(skb_headlen(skb));
 -      for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 -              count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
 -
 -      if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
 -              adapter->tx_busy++;
 -              return NETDEV_TX_BUSY;
 -      }
 -
        first = tx_ring->next_to_use;
  
        if (skb->protocol == htons(ETH_P_IP))
                tx_flags |= IXGBE_TX_FLAGS_IPV4;
 -      tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
 +      tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
  
        if (tso)
 -              tx_flags |= IXGBE_TX_FLAGS_TSO;
 -      else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
 -               (skb->ip_summed == CHECKSUM_PARTIAL))
 +              tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
 +      else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
                tx_flags |= IXGBE_TX_FLAGS_CSUM;
  
 -      ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
 -                       ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
 +      ixgbevf_tx_queue(tx_ring, tx_flags,
 +                       ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
                         skb->len, hdr_len);
 +      /*
 +       * Force memory writes to complete before letting h/w
 +       * know there are new descriptors to fetch.  (Only
 +       * applicable for weak-ordered memory model archs,
 +       * such as IA-64).
 +       */
 +      wmb();
 +
 +      writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
  
 -      ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 +      ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
  
        return NETDEV_TX_OK;
  }
@@@ -2869,7 -3211,9 +2872,7 @@@ static void ixgbevf_shutdown(struct pci
                ixgbevf_free_all_rx_resources(adapter);
        }
  
 -#ifdef CONFIG_PM
        pci_save_state(pdev);
 -#endif
  
        pci_disable_device(pdev);
  }
@@@ -2912,6 -3256,19 +2915,6 @@@ static struct rtnl_link_stats64 *ixgbev
        return stats;
  }
  
 -static int ixgbevf_set_features(struct net_device *netdev,
 -      netdev_features_t features)
 -{
 -      struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 -
 -      if (features & NETIF_F_RXCSUM)
 -              adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
 -      else
 -              adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 -
 -      return 0;
 -}
 -
  static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbevf_open,
        .ndo_stop               = ixgbevf_close,
        .ndo_tx_timeout         = ixgbevf_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
 -      .ndo_set_features       = ixgbevf_set_features,
  };
  
  static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@@ -2983,8 -3341,12 +2986,8 @@@ static int __devinit ixgbevf_probe(stru
  
        pci_set_master(pdev);
  
 -#ifdef HAVE_TX_MQ
        netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
                                   MAX_TX_QUEUES);
 -#else
 -      netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
 -#endif
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
               sizeof(struct ixgbe_mbx_operations));
  
 -      adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
 -      adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 -      adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
 -
        /* setup the private structure */
        err = ixgbevf_sw_init(adapter);
        if (err)
        if (err)
                goto err_register;
  
 -      adapter->netdev_registered = true;
 -
        netif_carrier_off(netdev);
  
        ixgbevf_init_last_counter_stats(adapter);
  
        hw_dbg(hw, "MAC: %d\n", hw->mac.type);
  
 -      hw_dbg(hw, "LRO is disabled\n");
 -
        hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
        cards_found++;
        return 0;
@@@ -3131,8 -3501,10 +3134,8 @@@ static void __devexit ixgbevf_remove(st
        cancel_work_sync(&adapter->reset_task);
        cancel_work_sync(&adapter->watchdog_task);
  
 -      if (adapter->netdev_registered) {
 +      if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 -              adapter->netdev_registered = false;
 -      }
  
        ixgbevf_reset_interrupt_capability(adapter);
  
@@@ -44,8 -44,6 +44,8 @@@
  #include "57xx_iscsi_hsi.h"
  #include "57xx_iscsi_constants.h"
  
 +#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
 +
  #define BNX2_ISCSI_DRIVER_NAME                "bnx2i"
  
  #define BNX2I_MAX_ADAPTERS            8
  #define REG_WR(__hba, offset, val)                    \
                writel(val, __hba->regview + offset)
  
 +#ifdef CONFIG_32BIT
 +#define GET_STATS_64(__hba, dst, field)                               \
 +      do {                                                    \
 +              spin_lock_bh(&__hba->stat_lock);                \
 +              dst->field##_lo = __hba->stats.field##_lo;      \
 +              dst->field##_hi = __hba->stats.field##_hi;      \
 +              spin_unlock_bh(&__hba->stat_lock);              \
 +      } while (0)
 +
 +#define ADD_STATS_64(__hba, field, len)                               \
 +      do {                                                    \
 +              if (spin_trylock(&__hba->stat_lock)) {          \
 +                      if (__hba->stats.field##_lo + len <     \
 +                          __hba->stats.field##_lo)            \
 +                              __hba->stats.field##_hi++;      \
 +                      __hba->stats.field##_lo += len;         \
 +                      spin_unlock(&__hba->stat_lock);         \
 +              }                                               \
 +      } while (0)
 +
 +#else
 +#define GET_STATS_64(__hba, dst, field)                               \
 +      do {                                                    \
 +              u64 val, *out;                                  \
 +                                                              \
 +              val = __hba->bnx2i_stats.field;                 \
 +              out = (u64 *)&__hba->stats.field##_lo;          \
 +              *out = cpu_to_le64(val);                        \
 +              out = (u64 *)&dst->field##_lo;                  \
 +              *out = cpu_to_le64(val);                        \
 +      } while (0)
 +
 +#define ADD_STATS_64(__hba, field, len)                               \
 +      do {                                                    \
 +              __hba->bnx2i_stats.field += len;                \
 +      } while (0)
 +#endif
  
  /**
   * struct generic_pdu_resc - login pdu resource structure
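
The 32-bit ADD_STATS_64 variant above detects carry out of the low word by checking for unsigned wraparound before bumping the high word. A minimal standalone sketch of the same trick, using hypothetical names and plain fixed-width types rather than the driver's statistics structures:

#include <stdint.h>

/* one logical 64-bit counter kept as two 32-bit halves, as on 32-bit hosts */
struct split_ctr {
        uint32_t lo;
        uint32_t hi;
};

static void split_ctr_add(struct split_ctr *c, uint32_t len)
{
        /* unsigned addition wraps, so lo + len < lo exactly when the
         * add carries out of the low 32 bits */
        if (c->lo + len < c->lo)
                c->hi++;
        c->lo += len;
}

static uint64_t split_ctr_read(const struct split_ctr *c)
{
        return ((uint64_t)c->hi << 32) | c->lo;
}
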
@@@ -327,15 -288,6 +327,15 @@@ struct iscsi_cid_queue 
        struct bnx2i_conn **conn_cid_tbl;
  };
  
 +
 +struct bnx2i_stats_info {
 +      u64 rx_pdus;
 +      u64 rx_bytes;
 +      u64 tx_pdus;
 +      u64 tx_bytes;
 +};
 +
 +
  /**
   * struct bnx2i_hba - bnx2i adapter structure
   *
   * @ctx_ccell_tasks:       captures number of ccells and tasks supported by
   *                         currently offloaded connection, used to decode
   *                         context memory
 + * @stat_lock:                   spin lock used by the statistic collector (32 bit)
 + * @stats:               local iSCSI statistic collection place holder
   *
   * Adapter Data Structure
   */
@@@ -400,6 -350,7 +400,7 @@@ struct bnx2i_hba 
        struct pci_dev *pcidev;
        struct net_device *netdev;
        void __iomem *regview;
+       resource_size_t reg_base;
  
        u32 age;
        unsigned long cnic_dev_type;
        u32 num_sess_opened;
        u32 num_conn_opened;
        unsigned int ctx_ccell_tasks;
 +
 +#ifdef CONFIG_32BIT
 +      spinlock_t stat_lock;
 +#endif
 +      struct bnx2i_stats_info bnx2i_stats;
 +      struct iscsi_stats_info stats;
  };
  
  
@@@ -805,8 -750,6 +806,8 @@@ extern void bnx2i_ulp_init(struct cnic_
  extern void bnx2i_ulp_exit(struct cnic_dev *dev);
  extern void bnx2i_start(void *handle);
  extern void bnx2i_stop(void *handle);
 +extern int bnx2i_get_stats(void *handle);
 +
  extern struct bnx2i_hba *get_adapter_list_head(void);
  
  struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
@@@ -1350,7 -1350,6 +1350,7 @@@ int bnx2i_process_scsi_cmd_resp(struct 
                                struct cqe *cqe)
  {
        struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
 +      struct bnx2i_hba *hba = bnx2i_conn->hba;
        struct bnx2i_cmd_response *resp_cqe;
        struct bnx2i_cmd *bnx2i_cmd;
        struct iscsi_task *task;
  
        if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
                conn->datain_pdus_cnt +=
 -                      resp_cqe->task_stat.read_stat.num_data_outs;
 +                      resp_cqe->task_stat.read_stat.num_data_ins;
                conn->rxdata_octets +=
                        bnx2i_cmd->req.total_data_transfer_length;
 +              ADD_STATS_64(hba, rx_pdus,
 +                           resp_cqe->task_stat.read_stat.num_data_ins);
 +              ADD_STATS_64(hba, rx_bytes,
 +                           bnx2i_cmd->req.total_data_transfer_length);
        } else {
                conn->dataout_pdus_cnt +=
 -                      resp_cqe->task_stat.read_stat.num_data_outs;
 +                      resp_cqe->task_stat.write_stat.num_data_outs;
                conn->r2t_pdus_cnt +=
 -                      resp_cqe->task_stat.read_stat.num_r2ts;
 +                      resp_cqe->task_stat.write_stat.num_r2ts;
                conn->txdata_octets +=
                        bnx2i_cmd->req.total_data_transfer_length;
 +              ADD_STATS_64(hba, tx_pdus,
 +                           resp_cqe->task_stat.write_stat.num_data_outs);
 +              ADD_STATS_64(hba, tx_bytes,
 +                           bnx2i_cmd->req.total_data_transfer_length);
 +              ADD_STATS_64(hba, rx_pdus,
 +                           resp_cqe->task_stat.write_stat.num_r2ts);
        }
        bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
  
@@@ -1972,7 -1961,6 +1972,7 @@@ static int bnx2i_process_new_cqes(struc
  {
        struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
 +      struct bnx2i_hba *hba = bnx2i_conn->hba;
        struct qp_info *qp;
        struct bnx2i_nop_in_msg *nopin;
        int tgt_async_msg;
  
        if (!qp->cq_virt) {
                printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
 -                      bnx2i_conn->hba->netdev->name);
 +                     hba->netdev->name);
                goto out;
        }
        while (1) {
                        if (nopin->op_code == ISCSI_OP_NOOP_IN &&
                            nopin->itt == (u16) RESERVED_ITT) {
                                printk(KERN_ALERT "bnx2i: Unsolicited "
 -                                      "NOP-In detected for suspended "
 -                                      "connection dev=%s!\n",
 -                                      bnx2i_conn->hba->netdev->name);
 +                                     "NOP-In detected for suspended "
 +                                     "connection dev=%s!\n",
 +                                     hba->netdev->name);
                                bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
                                goto cqe_out;
                        }
                        /* Run the kthread engine only for data cmds
                           All other cmds will be completed in this bh! */
                        bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
 -                      break;
 +                      goto done;
                case ISCSI_OP_LOGIN_RSP:
                        bnx2i_process_login_resp(session, bnx2i_conn,
                                                 qp->cq_cons_qe);
                        printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
                                          nopin->op_code);
                }
 +
 +              ADD_STATS_64(hba, rx_pdus, 1);
 +              ADD_STATS_64(hba, rx_bytes, nopin->data_length);
 +done:
                if (!tgt_async_msg) {
                        if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
                                printk(KERN_ALERT "bnx2i (%s): no active cmd! "
                                       "op 0x%x\n",
 -                                     bnx2i_conn->hba->netdev->name,
 +                                     hba->netdev->name,
                                       nopin->op_code);
                        else
                                atomic_dec(&bnx2i_conn->ep->num_active_cmds);
@@@ -2708,7 -2692,6 +2708,7 @@@ struct cnic_ulp_ops bnx2i_cnic_cb = 
        .cm_remote_close = bnx2i_cm_remote_close,
        .cm_remote_abort = bnx2i_cm_remote_abort,
        .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
 +      .cnic_get_stats = bnx2i_get_stats,
        .owner = THIS_MODULE
  };
  
@@@ -2741,7 -2724,6 +2741,6 @@@ int bnx2i_map_ep_dbell_regs(struct bnx2
                goto arm_cq;
        }
  
-       reg_base = ep->hba->netdev->base_addr;
        if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
            (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
                config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
                /* 5709 device in normal node and 5706/5708 devices */
                reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
  
-       ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+       ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
                                          MB_KERNEL_CTX_SIZE);
        if (!ep->qp.ctx_base)
                return -ENOMEM;
@@@ -811,13 -811,13 +811,13 @@@ struct bnx2i_hba *bnx2i_alloc_hba(struc
        bnx2i_identify_device(hba);
        bnx2i_setup_host_queue_size(hba, shost);
  
+       hba->reg_base = pci_resource_start(hba->pcidev, 0);
        if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr,
-                                              BNX2_MQ_CONFIG2);
+               hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
                if (!hba->regview)
                        goto ioreg_map_err;
        } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+               hba->regview = pci_iomap(hba->pcidev, 0, 4096);
                if (!hba->regview)
                        goto ioreg_map_err;
        }
                hba->conn_ctx_destroy_tmo = 2 * HZ;
        }
  
 +#ifdef CONFIG_32BIT
 +      spin_lock_init(&hba->stat_lock);
 +#endif
 +      memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
 +
        if (iscsi_host_add(shost, &hba->pcidev->dev))
                goto free_dump_mem;
        return hba;
@@@ -889,7 -884,7 +889,7 @@@ cid_que_err
        bnx2i_free_mp_bdt(hba);
  mp_bdt_mem_err:
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
  ioreg_map_err:
@@@ -915,7 -910,7 +915,7 @@@ void bnx2i_free_hba(struct bnx2i_hba *h
        pci_dev_put(hba->pcidev);
  
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
        bnx2i_free_mp_bdt(hba);
@@@ -1186,18 -1181,12 +1186,18 @@@ static in
  bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
  {
        struct bnx2i_conn *bnx2i_conn = conn->dd_data;
 +      struct bnx2i_hba *hba = bnx2i_conn->hba;
        struct bnx2i_cmd *cmd = task->dd_data;
  
        memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
  
        bnx2i_setup_cmd_wqe_template(cmd);
        bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
 +
 +      /* Tx PDU/data length count */
 +      ADD_STATS_64(hba, tx_pdus, 1);
 +      ADD_STATS_64(hba, tx_bytes, task->data_count);
 +
        if (task->data_count) {
                memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
                       task->data_count);
diff --combined net/caif/caif_dev.c
@@@ -90,8 -90,11 +90,8 @@@ static int caifd_refcnt_read(struct cai
  /* Allocate new CAIF device. */
  static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
  {
 -      struct caif_device_entry_list *caifdevs;
        struct caif_device_entry *caifd;
  
 -      caifdevs = caif_device_list(dev_net(dev));
 -
        caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
                return NULL;
@@@ -128,11 -131,6 +128,11 @@@ void caif_flow_cb(struct sk_buff *skb
  
        rcu_read_lock();
        caifd = caif_get(skb->dev);
 +
 +      WARN_ON(caifd == NULL);
 +      if (caifd == NULL)
 +              return;
 +
        caifd_hold(caifd);
        rcu_read_unlock();
  
@@@ -563,9 -561,9 +563,9 @@@ static int __init caif_device_init(void
  
  static void __exit caif_device_exit(void)
  {
-       unregister_pernet_subsys(&caif_net_ops);
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
+       unregister_pernet_subsys(&caif_net_ops);
  }
  
  module_init(caif_device_init);
diff --combined net/core/dev.c
@@@ -1691,8 -1691,7 +1691,8 @@@ static void dev_queue_xmit_nit(struct s
        rcu_read_unlock();
  }
  
 -/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 +/**
 + * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
   * @dev: Network device
   * @txq: number of queues available
   *
@@@ -1794,18 -1793,6 +1794,18 @@@ int netif_set_real_num_rx_queues(struc
  EXPORT_SYMBOL(netif_set_real_num_rx_queues);
  #endif
  
 +/**
 + * netif_get_num_default_rss_queues - default number of RSS queues
 + *
 + * This routine should set an upper limit on the number of RSS queues
 + * used by default by multiqueue devices.
 + */
 +int netif_get_num_default_rss_queues(void)
 +{
 +      return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
 +}
 +EXPORT_SYMBOL(netif_get_num_default_rss_queues);
 +
  static inline void __netif_reschedule(struct Qdisc *q)
  {
        struct softnet_data *sd;
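
netif_get_num_default_rss_queues(), added above, simply returns the smaller of DEFAULT_MAX_NUM_RSS_QUEUES and the online CPU count. A hedged sketch of how a multiqueue driver might consume it when sizing its RSS queues; MY_HW_MAX_RSS_QUEUES is a made-up placeholder for the device's own limit:

        /* illustrative only: cap the default RSS queue count by what the NIC supports */
        int num_rss = min_t(int, MY_HW_MAX_RSS_QUEUES,
                            netif_get_num_default_rss_queues());
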
@@@ -2472,23 -2459,6 +2472,23 @@@ static DEFINE_PER_CPU(int, xmit_recursi
  #define RECURSION_LIMIT 10
  
  /**
 + *    dev_loopback_xmit - loop back @skb
 + *    @skb: buffer to transmit
 + */
 +int dev_loopback_xmit(struct sk_buff *skb)
 +{
 +      skb_reset_mac_header(skb);
 +      __skb_pull(skb, skb_network_offset(skb));
 +      skb->pkt_type = PACKET_LOOPBACK;
 +      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +      WARN_ON(!skb_dst(skb));
 +      skb_dst_force(skb);
 +      netif_rx_ni(skb);
 +      return 0;
 +}
 +EXPORT_SYMBOL(dev_loopback_xmit);
 +
 +/**
   *    dev_queue_xmit - transmit a buffer
   *    @skb: buffer to transmit
   *
@@@ -5676,7 -5646,7 +5676,7 @@@ int netdev_refcnt_read(const struct net
  }
  EXPORT_SYMBOL(netdev_refcnt_read);
  
 -/*
 +/**
   * netdev_wait_allrefs - wait until all references are gone.
   *
   * This is called when unregistering network devices.
@@@ -6313,7 -6283,8 +6313,8 @@@ static struct hlist_head *netdev_create
  /* Initialize per network namespace state */
  static int __net_init netdev_init(struct net *net)
  {
-       INIT_LIST_HEAD(&net->dev_base_head);
+       if (net != &init_net)
+               INIT_LIST_HEAD(&net->dev_base_head);
  
        net->dev_name_head = netdev_create_hash();
        if (net->dev_name_head == NULL)
diff --combined net/core/skbuff.c
@@@ -160,8 -160,8 +160,8 @@@ static void skb_under_panic(struct sk_b
   *    @node: numa node to allocate memory on
   *
   *    Allocate a new &sk_buff. The returned buffer has no headroom and a
 - *    tail room of size bytes. The object has a reference count of one.
 - *    The return is the buffer. On a failure the return is %NULL.
 + *    tail room of at least size bytes. The object has a reference count
 + *    of one. The return is the buffer. On a failure the return is %NULL.
   *
   *    Buffers may only be allocated from interrupts using a @gfp_mask of
   *    %GFP_ATOMIC.
@@@ -296,12 -296,9 +296,12 @@@ EXPORT_SYMBOL(build_skb)
  struct netdev_alloc_cache {
        struct page *page;
        unsigned int offset;
 +      unsigned int pagecnt_bias;
  };
  static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
  
 +#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
 +
  /**
   * netdev_alloc_frag - allocate a page fragment
   * @fragsz: fragment size
@@@ -320,26 -317,17 +320,26 @@@ void *netdev_alloc_frag(unsigned int fr
        if (unlikely(!nc->page)) {
  refill:
                nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
 +              if (unlikely(!nc->page))
 +                      goto end;
 +recycle:
 +              atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS);
 +              nc->pagecnt_bias = NETDEV_PAGECNT_BIAS;
                nc->offset = 0;
        }
 -      if (likely(nc->page)) {
 -              if (nc->offset + fragsz > PAGE_SIZE) {
 -                      put_page(nc->page);
 -                      goto refill;
 -              }
 -              data = page_address(nc->page) + nc->offset;
 -              nc->offset += fragsz;
 -              get_page(nc->page);
 +
 +      if (nc->offset + fragsz > PAGE_SIZE) {
 +              /* avoid unnecessary locked operations if possible */
 +              if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) ||
 +                  atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count))
 +                      goto recycle;
 +              goto refill;
        }
 +
 +      data = page_address(nc->page) + nc->offset;
 +      nc->offset += fragsz;
 +      nc->pagecnt_bias--;
 +end:
        local_irq_restore(flags);
        return data;
  }
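
The refill/recycle logic above pre-charges page->_count with NETDEV_PAGECNT_BIAS so that handing out a fragment only decrements the per-CPU pagecnt_bias instead of taking an atomic reference each time; once the page is exhausted, comparing the remaining count with the unconsumed bias tells whether every fragment has already been freed and the page can be reused. A rough userspace model of that bookkeeping, with hypothetical names and C11 atomics standing in for page->_count (a sketch of the idea, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

#define BIAS 32u        /* stands in for NETDEV_PAGECNT_BIAS */

struct frag_page {
        atomic_uint refs;       /* stands in for page->_count */
        unsigned int bias;      /* hand-outs left before refs must be consulted */
};

static void frag_page_charge(struct frag_page *p)
{
        /* one atomic store buys BIAS reference-free allocations */
        atomic_store(&p->refs, BIAS);
        p->bias = BIAS;
}

static void frag_page_alloc(struct frag_page *p)
{
        p->bias--;      /* consumers drop refs (like put_page) when they free */
}

static bool frag_page_try_recycle(struct frag_page *p)
{
        unsigned int bias = p->bias;

        /* all handed-out fragments were freed iff refs == unconsumed bias;
         * the fetch_sub handles frees that raced in after the first read */
        if (atomic_load(&p->refs) == bias ||
            atomic_fetch_sub(&p->refs, bias) == bias) {
                frag_page_charge(p);    /* keep using the same page */
                return true;
        }
        return false;   /* still referenced elsewhere: caller gets a new page */
}
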
@@@ -365,7 -353,7 +365,7 @@@ struct sk_buff *__netdev_alloc_skb(stru
        unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  
-       if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+       if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
                void *data = netdev_alloc_frag(fragsz);
  
                if (likely(data)) {
@@@ -725,8 -713,7 +725,8 @@@ struct sk_buff *skb_morph(struct sk_buf
  }
  EXPORT_SYMBOL_GPL(skb_morph);
  
 -/*    skb_copy_ubufs  -       copy userspace skb frags buffers to kernel
 +/**
 + *    skb_copy_ubufs  -       copy userspace skb frags buffers to kernel
   *    @skb: the skb to modify
   *    @gfp_mask: allocation priority
   *
@@@ -751,7 -738,7 +751,7 @@@ int skb_copy_ubufs(struct sk_buff *skb
                u8 *vaddr;
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
  
 -              page = alloc_page(GFP_ATOMIC);
 +              page = alloc_page(gfp_mask);
                if (!page) {
                        while (head) {
                                struct page *next = (struct page *)head->private;
        }
  
        /* skb frags release userspace buffers */
 -      for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 +      for (i = 0; i < num_frags; i++)
                skb_frag_unref(skb, i);
  
        uarg->callback(uarg);
  
        /* skb frags point to kernel buffers */
 -      for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
 -              __skb_fill_page_desc(skb, i-1, head, 0,
 -                                   skb_shinfo(skb)->frags[i - 1].size);
 +      for (i = num_frags - 1; i >= 0; i--) {
 +              __skb_fill_page_desc(skb, i, head, 0,
 +                                   skb_shinfo(skb)->frags[i].size);
                head = (struct page *)head->private;
        }
  
@@@ -2627,7 -2614,7 +2627,7 @@@ unsigned int skb_find_text(struct sk_bu
  EXPORT_SYMBOL(skb_find_text);
  
  /**
 - * skb_append_datato_frags: - append the user data to a skb
 + * skb_append_datato_frags - append the user data to a skb
   * @sk: sock  structure
   * @skb: skb structure to be appened with user data.
   * @getfrag: call back function to be used for getting the user data
diff --combined net/sctp/input.c
@@@ -408,10 -408,10 +408,10 @@@ void sctp_icmp_frag_needed(struct sock 
  
        if (t->param_flags & SPP_PMTUD_ENABLE) {
                /* Update transports view of the MTU */
 -              sctp_transport_update_pmtu(t, pmtu);
 +              sctp_transport_update_pmtu(sk, t, pmtu);
  
                /* Update association pmtu. */
 -              sctp_assoc_sync_pmtu(asoc);
 +              sctp_assoc_sync_pmtu(sk, asoc);
        }
  
        /* Retransmit with the new pmtu setting.
        sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
  }
  
 +void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
 +                      struct sk_buff *skb)
 +{
 +      struct dst_entry *dst;
 +
 +      if (!t)
 +              return;
 +      dst = sctp_transport_dst_check(t);
 +      if (dst)
 +              dst->ops->redirect(dst, sk, skb);
 +}
 +
  /*
   * SCTP Implementer's Guide, 2.37 ICMP handling procedures
   *
@@@ -640,10 -628,6 +640,10 @@@ void sctp_v4_err(struct sk_buff *skb, _
  
                err = EHOSTUNREACH;
                break;
 +      case ICMP_REDIRECT:
 +              sctp_icmp_redirect(sk, transport, skb);
 +              err = 0;
 +              break;
        default:
                goto out_unlock;
        }
@@@ -752,15 -736,12 +752,12 @@@ static void __sctp_unhash_endpoint(stru
  
        epb = &ep->base;
  
-       if (hlist_unhashed(&epb->node))
-               return;
        epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
  
        head = &sctp_ep_hashtable[epb->hashent];
  
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
  }
  
@@@ -841,7 -822,7 +838,7 @@@ static void __sctp_unhash_established(s
        head = &sctp_assoc_hashtable[epb->hashent];
  
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
  }
  
diff --combined net/sctp/socket.c
@@@ -1231,8 -1231,14 +1231,14 @@@ out_free
        SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
                          " kaddrs: %p err: %d\n",
                          asoc, kaddrs, err);
-       if (asoc)
+       if (asoc) {
+               /* sctp_primitive_ASSOCIATE may have added this association
+                * to the hash table; try to unhash it, just in case.  It's a
+                * no-op if it wasn't hashed, so we're safe.
+                */
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
        return err;
  }
  
@@@ -1853,7 -1859,7 +1859,7 @@@ SCTP_STATIC int sctp_sendmsg(struct kio
        }
  
        if (asoc->pmtu_pending)
 -              sctp_assoc_pending_pmtu(asoc);
 +              sctp_assoc_pending_pmtu(sk, asoc);
  
        /* If fragmentation is disabled and the message length exceeds the
         * association fragmentation point, return EMSGSIZE.  The I-D
        goto out_unlock;
  
  out_free:
-       if (new_asoc)
+       if (new_asoc) {
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
  out_unlock:
        sctp_release_sock(sk);
  
@@@ -2365,7 -2373,7 +2373,7 @@@ static int sctp_apply_peer_addr_params(
        if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
                if (trans) {
                        trans->pathmtu = params->spp_pathmtu;
 -                      sctp_assoc_sync_pmtu(asoc);
 +                      sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
                } else if (asoc) {
                        asoc->pathmtu = params->spp_pathmtu;
                        sctp_frag_point(asoc, params->spp_pathmtu);
                                (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
                        if (update) {
                                sctp_transport_pmtu(trans, sctp_opt2sk(sp));
 -                              sctp_assoc_sync_pmtu(asoc);
 +                              sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
                        }
                } else if (asoc) {
                        asoc->param_flags =
diff --combined security/selinux/hooks.c
@@@ -2717,7 -2717,7 +2717,7 @@@ static int selinux_inode_setattr(struc
                        ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
                return dentry_has_perm(cred, dentry, FILE__SETATTR);
  
-       if (ia_valid & ATTR_SIZE)
+       if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
                av |= FILE__OPEN;
  
        return dentry_has_perm(cred, dentry, av);
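
For reference, the added selinux_policycap_openperm test above means the
FILE__OPEN permission is only demanded for size-changing setattr (truncate)
when the loaded policy declares the openperm capability, so policies that
predate the "open" permission are not affected.  A minimal sketch of the
access-vector computation, with a made-up helper name and assuming the usual
hooks.c includes:

/* Sketch only: gate FILE__OPEN for truncation on the openperm capability. */
static u32 setattr_av_sketch(unsigned int ia_valid)
{
	u32 av = FILE__SETATTR;

	if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
		av |= FILE__OPEN;
	return av;
}
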
@@@ -5763,21 -5763,21 +5763,21 @@@ static struct nf_hook_ops selinux_ipv4_
        {
                .hook =         selinux_ipv4_postroute,
                .owner =        THIS_MODULE,
 -              .pf =           PF_INET,
 +              .pf =           NFPROTO_IPV4,
                .hooknum =      NF_INET_POST_ROUTING,
                .priority =     NF_IP_PRI_SELINUX_LAST,
        },
        {
                .hook =         selinux_ipv4_forward,
                .owner =        THIS_MODULE,
 -              .pf =           PF_INET,
 +              .pf =           NFPROTO_IPV4,
                .hooknum =      NF_INET_FORWARD,
                .priority =     NF_IP_PRI_SELINUX_FIRST,
        },
        {
                .hook =         selinux_ipv4_output,
                .owner =        THIS_MODULE,
 -              .pf =           PF_INET,
 +              .pf =           NFPROTO_IPV4,
                .hooknum =      NF_INET_LOCAL_OUT,
                .priority =     NF_IP_PRI_SELINUX_FIRST,
        }
@@@ -5789,14 -5789,14 +5789,14 @@@ static struct nf_hook_ops selinux_ipv6_
        {
                .hook =         selinux_ipv6_postroute,
                .owner =        THIS_MODULE,
 -              .pf =           PF_INET6,
 +              .pf =           NFPROTO_IPV6,
                .hooknum =      NF_INET_POST_ROUTING,
                .priority =     NF_IP6_PRI_SELINUX_LAST,
        },
        {
                .hook =         selinux_ipv6_forward,
                .owner =        THIS_MODULE,
 -              .pf =           PF_INET6,
 +              .pf =           NFPROTO_IPV6,
                .hooknum =      NF_INET_FORWARD,
                .priority =     NF_IP6_PRI_SELINUX_FIRST,
        }
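
For reference, the .pf changes in these nf_hook_ops tables swap the socket
address-family constants for the netfilter-namespace ones; NFPROTO_IPV4 and
NFPROTO_IPV6 share the numeric values of PF_INET and PF_INET6, so this is a
cleanup with no behavioural effect.  A minimal sketch checking that
assumption at build time (made-up function name):

#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/socket.h>

/* Sketch only: the NFPROTO_* values used above equal their PF_* twins. */
static void __maybe_unused nfproto_matches_pf(void)
{
	BUILD_BUG_ON(NFPROTO_IPV4 != PF_INET);	/* both 2  */
	BUILD_BUG_ON(NFPROTO_IPV6 != PF_INET6);	/* both 10 */
}
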