#ifdef SKY2_VLAN_TAG_USED
static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port;
- spin_lock_bh(&sky2->tx_lock);
+ netif_tx_lock_bh(dev);
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
sky2->vlgrp = grp;
- spin_unlock_bh(&sky2->tx_lock);
+ netif_tx_unlock_bh(dev);
}
static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port;
- spin_lock_bh(&sky2->tx_lock);
+ netif_tx_lock_bh(dev);
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
if (sky2->vlgrp)
sky2->vlgrp->vlan_devices[vid] = NULL;
- spin_unlock_bh(&sky2->tx_lock);
+ netif_tx_unlock_bh(dev);
}
#endif
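Both VLAN hooks run in process context, so they take the BH-disabling variant of the new lock. For reference (not part of this patch), the helpers in include/linux/netdevice.h of this era are approximately:

	static inline void netif_tx_lock_bh(struct net_device *dev)
	{
		spin_lock_bh(&dev->_xmit_lock);
		dev->xmit_lock_owner = smp_processor_id();
	}

	static inline void netif_tx_unlock_bh(struct net_device *dev)
	{
		dev->xmit_lock_owner = -1;
		spin_unlock_bh(&dev->_xmit_lock);
	}

So the conversion is mechanical: the driver-private spinlock is replaced by the device's own transmit lock, which the core also uses.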
/*
* Put one packet in ring for transmit.
* A single packet can generate multiple list elements, and
* the number of ring elements will probably be less than the number
* of list elements used.
- *
- * No BH disabling for tx_lock here (like tg3)
*/
static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
u16 mss;
u8 ctrl;
- /* No BH disabling for tx_lock here. We are running in BH disabled
- * context and TX reclaim runs via poll inside of a software
- * interrupt, and no related locks in IRQ processing.
- */
- if (!spin_trylock(&sky2->tx_lock))
- return NETDEV_TX_LOCKED;
-
- if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
- /* There is a known but harmless race with lockless tx
- * and netif_stop_queue.
- */
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
- if (net_ratelimit())
- printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
- dev->name);
- }
- spin_unlock(&sky2->tx_lock);
-
- return NETDEV_TX_BUSY;
- }
+ if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
+ return NETDEV_TX_BUSY;
if (unlikely(netif_msg_tx_queued(sky2)))
printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
dev->name, sky2->tx_prod, skb->len);
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
- spin_unlock(&sky2->tx_lock);
-
dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
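The trylock/NETDEV_TX_LOCKED dance above is only needed by LLTX drivers, which serialize their own hard_start_xmit(). Once NETIF_F_LLTX is cleared (see the probe hunk at the end of this patch), the core takes netif_tx_lock for us and re-checks netif_queue_stopped() under that lock, so a full ring can simply return NETDEV_TX_BUSY, and the "ring full when queue awake" warning, which guarded a race specific to lockless tx, can go. The relevant core path in net/core/dev.c looks approximately like this (abridged sketch, not part of this patch; HARD_TX_UNLOCK is symmetric):

	#define HARD_TX_LOCK(dev, cpu) {			\
		if ((dev->features & NETIF_F_LLTX) == 0) {	\
			netif_tx_lock(dev);			\
		}						\
	}

	/* inside dev_queue_xmit(), once the skb reaches the device: */
	if (dev->xmit_lock_owner != cpu) {	/* guard against tx recursion */
		HARD_TX_LOCK(dev, cpu);
		if (!netif_queue_stopped(dev) &&
		    !dev->hard_start_xmit(skb, dev)) {
			HARD_TX_UNLOCK(dev);
			goto out;	/* transmitted */
		}
		HARD_TX_UNLOCK(dev);
	}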
/* Cleanup all untransmitted buffers, assume transmitter not running */
-static void sky2_tx_clean(struct sky2_port *sky2)
+static void sky2_tx_clean(struct net_device *dev)
{
- spin_lock_bh(&sky2->tx_lock);
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ netif_tx_lock_bh(dev);
sky2_tx_complete(sky2, sky2->tx_prod);
- spin_unlock_bh(&sky2->tx_lock);
+ netif_tx_unlock_bh(dev);
}
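sky2_tx_clean() switches from taking the port to taking the net_device because the lock now lives in the device, not in the driver's private state. The old signature could have been kept by going through the port's backpointer; a hypothetical variant, shown only for contrast:

	/* Hypothetical alternative: keep the old signature and reach
	 * the net_device through the port's backpointer. */
	static void sky2_tx_clean(struct sky2_port *sky2)
	{
		struct net_device *dev = sky2->netdev;

		netif_tx_lock_bh(dev);
		sky2_tx_complete(sky2, sky2->tx_prod);
		netif_tx_unlock_bh(dev);
	}

Passing the net_device, as the patch does, matches the converted call sites below, which already have dev in hand.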
/* Network shutdown */
synchronize_irq(hw->pdev->irq);
- sky2_tx_clean(sky2);
+ sky2_tx_clean(dev);
sky2_rx_clean(sky2);
pci_free_consistent(hw->pdev, RX_LE_BYTES,
sky2->rx_le, sky2->rx_le_map);
} else if (report != sky2->tx_cons) {
printk(KERN_INFO PFX "status report lost?\n");
- spin_lock_bh(&sky2->tx_lock);
+ netif_tx_lock_bh(dev);
sky2_tx_complete(sky2, report);
- spin_unlock_bh(&sky2->tx_lock);
+ netif_tx_unlock_bh(dev);
} else {
printk(KERN_INFO PFX "hardware hung? flushing\n");
sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
- sky2_tx_clean(sky2);
+ sky2_tx_clean(dev);
sky2_qset(hw, txq);
sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
static inline void sky2_tx_done(struct net_device *dev, u16 last)
{
struct sky2_port *sky2 = netdev_priv(dev);
if (netif_running(dev)) {
- spin_lock(&sky2->tx_lock);
+ netif_tx_lock(dev);
sky2_tx_complete(sky2, last);
- spin_unlock(&sky2->tx_lock);
+ netif_tx_unlock(dev);
}
}
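Note which variant is used where: sky2_tx_done() is reached from the NAPI poll handler, i.e. softirq context with bottom halves already disabled, so it takes the plain netif_tx_lock(), just as the old code used spin_lock() rather than spin_lock_bh() here. A rough rule for this driver after the patch (illustrative fragment, not in the diff):

	/* Process context (VLAN ops, sky2_down, sky2_tx_timeout): must
	 * also keep the tx softirq out while holding the lock. */
	netif_tx_lock_bh(dev);
	/* ... touch tx ring state ... */
	netif_tx_unlock_bh(dev);

	/* Softirq context (sky2_poll -> sky2_tx_done): BHs are already
	 * off, so the plain variant is correct and cheaper. */
	netif_tx_lock(dev);
	/* ... reclaim completed tx buffers ... */
	netif_tx_unlock(dev);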
sky2->hw = hw;
sky2->msg_enable = netif_msg_init(debug, default_msg);
- spin_lock_init(&sky2->tx_lock);
/* Auto speed and flow control */
sky2->autoneg = AUTONEG_ENABLE;
sky2->tx_pause = 1;
sky2->port = port;
- dev->features |= NETIF_F_LLTX;
if (hw->chip_id != CHIP_ID_YUKON_EC_U)
dev->features |= NETIF_F_TSO;
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
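Dropping NETIF_F_LLTX is what makes the core's HARD_TX_LOCK path (sketched after the xmit hunk above) take netif_tx_lock around sky2_xmit_frame(). With spin_lock_init() gone as well, the private tx_lock in struct sky2_port is left with no users and can be deleted in the companion sky2.h hunk, approximately:

- spinlock_t tx_lock ____cacheline_aligned_in_smp;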