ixgbevf: Reduce size of maximum rx buffer
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 33444b5..9d88153 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.6.0-k"
+#define DRV_VERSION "2.7.12-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -288,7 +288,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
        if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
                __vlan_hwaccel_put_tag(skb, tag);
 
-       napi_gro_receive(&q_vector->napi, skb);
+       if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+               napi_gro_receive(&q_vector->napi, skb);
+       else
+               netif_rx(skb);
 }
 
 /**
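With netpoll in progress (IXGBE_FLAG_IN_NETPOLL set), completed frames are handed to netif_rx(), which only queues them on the per-CPU backlog and is safe from any context; napi_gro_receive() stays the normal path, so GRO still applies to ordinary NAPI cleanup. A minimal sketch of the dispatch pattern, with demo_ names standing in for driver code:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Sketch: deliver an rx frame through GRO normally, or through the
     * legacy backlog when running under netpoll.  "in_netpoll" stands
     * in for the adapter flag test above.
     */
    static void demo_rx_deliver(struct napi_struct *napi,
                                struct sk_buff *skb, bool in_netpoll)
    {
            if (in_netpoll)
                    netif_rx(skb);                  /* no GRO, any context */
            else
                    napi_gro_receive(napi, skb);    /* normal NAPI path */
    }
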
@@ -359,6 +362,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                        bi->dma = dma_map_single(&pdev->dev, skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&pdev->dev, bi->dma)) {
+                               dev_kfree_skb(skb);
+                               bi->skb = NULL;
+                               dev_err(&pdev->dev, "RX DMA map failed\n");
+                               break;
+                       }
                }
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 
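dma_map_single() can fail, for instance when an IOMMU runs out of mapping space, and a failed handle must never be written into a receive descriptor. The added check frees the just-allocated skb, clears the buffer_info slot, and stops refilling the ring. The same map-then-verify pattern as a self-contained sketch (device and demo_ names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/skbuff.h>

    /* Sketch: map an skb's data for device rx, unwinding on failure.
     * Returns 0 on success, -ENOMEM if no mapping could be made; the
     * caller must also drop its own reference to the skb (as the hunk
     * above does with bi->skb = NULL).
     */
    static int demo_map_rx_buffer(struct device *dev, struct sk_buff *skb,
                                  unsigned int len, dma_addr_t *dma)
    {
            *dma = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *dma)) {
                    dev_kfree_skb(skb);
                    return -ENOMEM;
            }
            return 0;
    }
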
@@ -472,6 +481,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                }
                skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
+               /* Workaround hardware that can't do proper VEPA multicast
+                * source pruning.
+                */
+               if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+                   !(compare_ether_addr(adapter->netdev->dev_addr,
+                                       eth_hdr(skb)->h_source))) {
+                       dev_kfree_skb_irq(skb);
+                       goto next_desc;
+               }
+
                ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
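In VEPA mode the adjacent bridge reflects multicast and broadcast frames back toward their source port, so hardware that cannot prune those reflections hands the VF copies of its own transmissions; this test drops any broadcast/multicast frame whose Ethernet source address is the VF's own. One subtlety: skb->pkt_type is an enumerated value (PACKET_BROADCAST is 1, PACKET_MULTICAST is 2), not a bit mask, so the & test above also fires for PACKET_OTHERHOST (3); the source-address comparison is what actually gates the drop. The predicate with explicit comparisons, as a sketch (demo_ name is a placeholder):

    #include <linux/etherdevice.h>
    #include <linux/if_packet.h>
    #include <linux/skbuff.h>

    /* Sketch: should this rx frame be dropped as our own reflected
     * multicast/broadcast?  "dev" is the receiving net_device.
     */
    static bool demo_is_reflected_frame(const struct net_device *dev,
                                        const struct sk_buff *skb)
    {
            if (skb->pkt_type != PACKET_BROADCAST &&
                skb->pkt_type != PACKET_MULTICAST)
                    return false;
            /* compare_ether_addr() returns 0 on a match */
            return !compare_ether_addr(dev->dev_addr, eth_hdr(skb)->h_source);
    }
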
@@ -534,9 +553,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
        else
                per_ring_budget = budget;
 
+       adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
        ixgbevf_for_each_ring(ring, q_vector->rx)
                clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
                                                       per_ring_budget);
+       adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
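The receive-cleanup loop is now bracketed by IXGBE_FLAG_IN_NETPOLL, apparently so that ixgbevf_clean_rx_irq() can also be driven by a netpoll-style caller without a separate flag site. Note the side effect: with the flag set around every NAPI receive pass, ixgbevf_receive_skb() above takes the netif_rx() branch for ordinary traffic as well, bypassing GRO. For comparison, a dedicated poll-controller hook would normally own the flag itself; a hypothetical sketch, loosely modeled on the PF driver's ixgbe_netpoll() (this tree wires up no such hook for the VF):

    #include <linux/netdevice.h>
    #include "ixgbevf.h"    /* adapter and q_vector types; the sketch
                             * assumes it lives in ixgbevf_main.c, where
                             * ixgbevf_msix_clean_rings() is defined */

    static void demo_netpoll(struct net_device *netdev)
    {
            struct ixgbevf_adapter *adapter = netdev_priv(netdev);
            int i;

            /* if the interface is down, do nothing */
            if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                    return;

            adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
            for (i = 0; i < adapter->num_rx_queues; i++)
                    ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
            adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
    }
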
@@ -1066,20 +1087,20 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
        max_frame += VLAN_HLEN;
 
        /*
-        * Make best use of allocation by using all but 1K of a
-        * power of 2 allocation that will be used for skb->head.
+        * Allocate buffer sizes that fit well into 32K and
+        * take into account max frame size of 9.5K
         */
        if ((hw->mac.type == ixgbe_mac_X540_vf) &&
            (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
                rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-       else if (max_frame <= IXGBEVF_RXBUFFER_3K)
-               rx_buf_len = IXGBEVF_RXBUFFER_3K;
-       else if (max_frame <= IXGBEVF_RXBUFFER_7K)
-               rx_buf_len = IXGBEVF_RXBUFFER_7K;
-       else if (max_frame <= IXGBEVF_RXBUFFER_15K)
-               rx_buf_len = IXGBEVF_RXBUFFER_15K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_2K)
+               rx_buf_len = IXGBEVF_RXBUFFER_2K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_4K)
+               rx_buf_len = IXGBEVF_RXBUFFER_4K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_8K)
+               rx_buf_len = IXGBEVF_RXBUFFER_8K;
        else
-               rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+               rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
                adapter->rx_ring[i].rx_buf_len = rx_buf_len;
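The old ladder (3K/7K/15K) used all but roughly 1K of a power-of-two allocation for skb->head, which let a single buffer grow to 15K. The new ladder tops out at 10K (10240 bytes), just above the hardware's 9728-byte (9.5K) jumbo maximum, so even the largest buffers pack three to a 32K region, which appears to be what the new comment means by fitting well into 32K. The frame-size arithmetic feeding the ladder, as a standalone sketch (constants copied from ixgbevf.h; the X540 small-packet special case is omitted and the demo_ name is a placeholder):

    #include <linux/if_ether.h>
    #include <linux/if_vlan.h>

    #define IXGBEVF_RXBUFFER_2K     2048    /* values as in ixgbevf.h */
    #define IXGBEVF_RXBUFFER_4K     4096
    #define IXGBEVF_RXBUFFER_8K     8192
    #define IXGBEVF_RXBUFFER_10K    10240

    /* Sketch: pick an rx buffer length for a given MTU.  max_frame is
     * MTU plus Ethernet header, FCS and one VLAN tag, mirroring the
     * computation at the top of ixgbevf_set_rx_buffer_len().
     */
    static unsigned int demo_rx_buf_len(unsigned int mtu)
    {
            unsigned int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

            if (max_frame <= IXGBEVF_RXBUFFER_2K)
                    return IXGBEVF_RXBUFFER_2K;
            if (max_frame <= IXGBEVF_RXBUFFER_4K)
                    return IXGBEVF_RXBUFFER_4K;
            if (max_frame <= IXGBEVF_RXBUFFER_8K)
                    return IXGBEVF_RXBUFFER_8K;
            return IXGBEVF_RXBUFFER_10K;
    }
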
@@ -1132,12 +1153,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if (!hw->mac.ops.set_vfta)
                return -EOPNOTSUPP;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        /* add VID to filter table */
        err = hw->mac.ops.set_vfta(hw, vid, 0, true);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        /* translate error return types so error makes sense */
        if (err == IXGBE_ERR_MBX)
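This and the matching hunks below convert every mbx_lock site from spin_lock() to spin_lock_bh(). The mailbox lock is taken both from plain process context and from paths that run with bottom halves in the picture (the VLAN and rx-mode hooks are entered with BHs disabled, and the watchdog can contend with them), so every acquisition must disable bottom halves; mixing spin_lock() with softirq-side users invites a same-CPU deadlock. The idiom in isolation:

    #include <linux/spinlock.h>

    /* Sketch: a lock shared between process context and a BH/softirq
     * path must disable bottom halves while held, otherwise a softirq
     * arriving on the same CPU can spin forever on a lock that CPU
     * already holds.
     */
    static DEFINE_SPINLOCK(demo_mbx_lock);

    static void demo_mailbox_op(void)
    {
            spin_lock_bh(&demo_mbx_lock);   /* BHs off while held */
            /* ... exchange a message with the PF ... */
            spin_unlock_bh(&demo_mbx_lock);
    }
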
@@ -1157,13 +1178,13 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        struct ixgbe_hw *hw = &adapter->hw;
        int err = -EOPNOTSUPP;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        /* remove VID from filter table */
        if (hw->mac.ops.set_vfta)
                err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        clear_bit(vid, adapter->active_vlans);
 
@@ -1219,7 +1240,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        /* reprogram multicast list */
        if (hw->mac.ops.update_mc_addr_list)
@@ -1227,7 +1248,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 
        ixgbevf_write_uc_addr_list(netdev);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1341,7 +1362,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
                      ixgbe_mbox_api_unknown };
        int err = 0, idx = 0;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        while (api[idx] != ixgbe_mbox_api_unknown) {
                err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1350,7 +1371,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
                idx++;
        }
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
@@ -1391,7 +1412,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
        ixgbevf_configure_msix(adapter);
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        if (hw->mac.ops.set_rar) {
                if (is_valid_ether_addr(hw->mac.addr))
@@ -1400,7 +1421,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
                        hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
        }
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);
@@ -1424,12 +1445,12 @@ static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
        unsigned int num_rx_queues = 1;
        int err, i;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        /* fetch queue configuration from the PF */
        err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        if (err)
                return err;
@@ -1688,14 +1709,14 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        if (hw->mac.ops.reset_hw(hw))
                hw_dbg(hw, "PF still resetting\n");
        else
                hw->mac.ops.init_hw(hw);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1705,10 +1726,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
        }
 }
 
-static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
-                                        int vectors)
+static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+                                       int vectors)
 {
-       int err, vector_threshold;
+       int err = 0;
+       int vector_threshold;
 
        /* We'll want at least 2 (vector_threshold):
         * 1) TxQ[0] + RxQ[0] handler
@@ -1724,21 +1746,18 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
        while (vectors >= vector_threshold) {
                err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                      vectors);
-               if (!err) /* Success in acquiring all requested vectors. */
+               if (!err || err < 0) /* Success or a nasty failure. */
                        break;
-               else if (err < 0)
-                       vectors = 0; /* Nasty failure, quit now */
                else /* err == number of vectors we should try again with */
                        vectors = err;
        }
 
-       if (vectors < vector_threshold) {
-               /* Can't allocate enough MSI-X interrupts?  Oh well.
-                * This just means we'll go with either a single MSI
-                * vector or fall back to legacy interrupts.
-                */
-               hw_dbg(&adapter->hw,
-                      "Unable to allocate MSI-X interrupts\n");
+       if (vectors < vector_threshold)
+               err = -ENOMEM;
+
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else {
@@ -1749,6 +1768,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
                 */
                adapter->num_msix_vectors = vectors;
        }
+       return err;
 }
 
 /**
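ixgbevf_acquire_msix_vectors() used to fail silently, and the comment it deletes even promised a fallback to MSI or legacy interrupts that the VF hardware does not have. It now returns -ENOMEM when the two-vector minimum cannot be met, and the caller (next hunk) propagates the error rather than continuing with a freed msix_entries table. The retry loop relies on pci_enable_msix()'s convention: zero means success, a negative value is a hard error, and a positive value is the number of vectors that could have been allocated. That idiom as a standalone sketch (demo_ name is a placeholder):

    #include <linux/errno.h>
    #include <linux/pci.h>

    /* Sketch: the classic pci_enable_msix() retry idiom.  Returns the
     * vector count actually enabled, or a negative errno.
     */
    static int demo_enable_msix(struct pci_dev *pdev,
                                struct msix_entry *entries,
                                int want, int min)
    {
            int err;

            while (want >= min) {
                    err = pci_enable_msix(pdev, entries, want);
                    if (err == 0)
                            return want;    /* got all we asked for */
                    if (err < 0)
                            return err;     /* hard failure */
                    want = err;             /* retry with what's left */
            }
            return -ENOSPC;
    }
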
@@ -1852,7 +1872,9 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
        for (vector = 0; vector < v_budget; vector++)
                adapter->msix_entries[vector].entry = vector;
 
-       ixgbevf_acquire_msix_vectors(adapter, v_budget);
+       err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
+       if (err)
+               goto out;
 
        err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
        if (err)
@@ -1912,18 +1934,13 @@ err_out:
  **/
 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
 {
-       int q_idx, num_q_vectors;
-       int napi_vectors;
-
-       num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       napi_vectors = adapter->num_rx_queues;
+       int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
                adapter->q_vector[q_idx] = NULL;
-               if (q_idx < napi_vectors)
-                       netif_napi_del(&q_vector->napi);
+               netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
 }
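Every q_vector gets netif_napi_add() when it is allocated, so teardown must call netif_napi_del() on all of them; the old q_idx < napi_vectors guard (with napi_vectors = num_rx_queues) could skip vectors and leave stale entries on the device's NAPI list. The symmetric lifecycle in miniature (demo_ names are placeholders; the weight argument follows the netif_napi_add() signature of this kernel generation):

    #include <linux/netdevice.h>

    /* Sketch: a NAPI instance's poll callback.  A real handler would
     * clean its rings and call napi_complete() when under budget.
     */
    static int demo_poll(struct napi_struct *napi, int budget)
    {
            return 0;
    }

    /* Every netif_napi_add() needs a matching netif_napi_del(). */
    static void demo_napi_lifecycle(struct net_device *dev,
                                    struct napi_struct *napi)
    {
            netif_napi_add(dev, napi, demo_poll, 64);       /* 64 = weight */
            /* ... device runs ... */
            netif_napi_del(napi);
    }
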
@@ -2194,12 +2211,12 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
        if (hw->mac.ops.check_link) {
                s32 need_reset;
 
-               spin_lock(&adapter->mbx_lock);
+               spin_lock_bh(&adapter->mbx_lock);
 
                need_reset = hw->mac.ops.check_link(hw, &link_speed,
                                                    &link_up, false);
 
-               spin_unlock(&adapter->mbx_lock);
+               spin_unlock_bh(&adapter->mbx_lock);
 
                if (need_reset) {
                        adapter->link_up = link_up;
@@ -2467,12 +2484,12 @@ static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
        unsigned int num_rx_queues = 1;
        int err, i;
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        /* fetch queue configuration from the PF */
        err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        if (err)
                return err;
@@ -2822,10 +2839,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
                        tx_buffer_info->dma =
                                skb_frag_dma_map(tx_ring->dev, frag,
                                                 offset, size, DMA_TO_DEVICE);
-                       tx_buffer_info->mapped_as_page = true;
                        if (dma_mapping_error(tx_ring->dev,
                                              tx_buffer_info->dma))
                                goto dma_error;
+                       tx_buffer_info->mapped_as_page = true;
                        tx_buffer_info->next_to_watch = i;
 
                        len -= size;
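A one-line reorder with real consequences: mapped_as_page was set before dma_mapping_error() was consulted, so after a failed frag mapping the dma_error unwind would try to dma_unmap_page() a mapping that never existed. Recording how a buffer was mapped only once the mapping succeeds keeps the bookkeeping truthful. The pattern in isolation (demo_ types and names are placeholders):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Sketch: stand-in for the driver's tx_buffer_info bookkeeping. */
    struct demo_tx_buffer {
            dma_addr_t dma;
            bool mapped_as_page;
    };

    /* Mark the buffer as page-mapped only after the mapping is known
     * good, so the error unwind unmaps exactly what exists.
     */
    static int demo_map_frag(struct device *dev, struct page *page,
                             unsigned int off, unsigned int len,
                             struct demo_tx_buffer *tb)
    {
            tb->dma = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, tb->dma))
                    return -ENOMEM; /* mapped_as_page stays false */
            tb->mapped_as_page = true;
            return 0;
    }
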
@@ -2967,6 +2984,11 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
        unsigned short f;
 #endif
+       u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+       if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
 
        tx_ring = &adapter->tx_ring[r_idx];
 
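Frames addressed to the 01:80:C2:00:00:0x link-local block (STP BPDUs, pause frames, LLDP and the like) must not be sourced by a VF, so the transmit path now consumes them up front and reports NETDEV_TX_OK; skb_header_pointer(skb, 0, 0, NULL) is just a cheap, safe way to get a pointer to the destination MAC at the start of the frame. The guard as a standalone helper, a sketch (demo_ name is a placeholder):

    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    /* Sketch: drop link-local destinations at the top of xmit.
     * Returns true if the skb was consumed; the caller then returns
     * NETDEV_TX_OK.  dev_kfree_skb_any() is the context-safe variant.
     */
    static bool demo_drop_link_local(struct sk_buff *skb)
    {
            const u8 *dst = skb_header_pointer(skb, 0, 0, NULL);

            if (!dst || is_link_local_ether_addr(dst)) {
                    dev_kfree_skb_any(skb);
                    return true;
            }
            return false;
    }
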
@@ -3046,12 +3068,12 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       spin_lock(&adapter->mbx_lock);
+       spin_lock_bh(&adapter->mbx_lock);
 
        if (hw->mac.ops.set_rar)
                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
 
-       spin_unlock(&adapter->mbx_lock);
+       spin_unlock_bh(&adapter->mbx_lock);
 
        return 0;
 }