#define MV643XX_TX_FAST_REFILL
#undef MV643XX_COAL
-/*
- * Number of RX / TX descriptors on RX / TX rings.
- * Note that allocating RX descriptors is done by allocating the RX
- * ring AND a preallocated RX buffers (skb's) for each descriptor.
- * The TX descriptors only allocates the TX descriptors ring,
- * with no pre allocated TX buffers (skb's are allocated by higher layers.
- */
-
-/* Default TX ring size is 1000 descriptors */
-#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
-
-/* Default RX ring size is 400 descriptors */
-#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
-
#define MV643XX_TX_COAL 100
#ifdef MV643XX_COAL
#define MV643XX_RX_COAL 100
ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
} ETH_FUNC_RET_STATUS;
-typedef enum _eth_target {
- ETH_TARGET_DRAM,
- ETH_TARGET_DEVICE,
- ETH_TARGET_CBS,
- ETH_TARGET_PCI0,
- ETH_TARGET_PCI1
-} ETH_TARGET;
-
/* These are for big-endian machines. Little endian needs different
* definitions.
*/
static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num);
static int mv643xx_eth_open(struct net_device *);
static int mv643xx_eth_stop(struct net_device *);
-static int mv643xx_eth_change_mtu(struct net_device *, int);
static void eth_port_init_mac_tables(unsigned int eth_port_num);
#ifdef MV643XX_NAPI
static int mv643xx_poll(struct napi_struct *napi, int budget);
return -EINVAL;
dev->mtu = new_mtu;
+ if (!netif_running(dev))
+ return 0;
+
/*
- * Stop then re-open the interface. This will allocate RX skb's with
- * the new MTU.
- * There is a possible danger that the open will not successed, due
- * to memory is full, which might fail the open function.
+ * Stop and then re-open the interface. This will allocate RX
+ * skbs sized for the new MTU.
+ * There is a danger that the re-open may fail if memory is
+ * exhausted.
*/
- if (netif_running(dev)) {
- mv643xx_eth_stop(dev);
- if (mv643xx_eth_open(dev))
- printk(KERN_ERR
- "%s: Fatal error on opening device\n",
- dev->name);
+ mv643xx_eth_stop(dev);
+ if (mv643xx_eth_open(dev)) {
+ printk(KERN_ERR "%s: Fatal error on opening device\n",
+ dev->name);
}
return 0;
{
struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
tx_timeout_task);
- struct net_device *dev = mp->mii.dev; /* yuck */
+ struct net_device *dev = mp->dev;
if (!netif_running(dev))
return;
*
* If force is non-zero, frees uncompleted descriptors as well
*/
-int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
+static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
{
struct mv643xx_private *mp = netdev_priv(dev);
struct eth_tx_desc *desc;
unsigned long flags;
BUG_ON(netif_queue_stopped(dev));
- BUG_ON(skb == NULL);
if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
stats->tx_dropped++;
printk(KERN_DEBUG "%s: failed to linearize tiny "
"unaligned fragment\n", dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
spin_lock_irqsave(&mp->lock, flags);
printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
netif_stop_queue(dev);
spin_unlock_irqrestore(&mp->lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
eth_tx_submit_descs_for_skb(mp, skb);
spin_unlock_irqrestore(&mp->lock, flags);
- return 0; /* success */
+ return NETDEV_TX_OK;
}
#ifdef CONFIG_NET_POLL_CONTROLLER