*/
rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
writel(
- ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
- &rx_dma->fbr1_min_des);
+ ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
+ &rx_dma->fbr1_min_des);
#ifdef USE_FBR0
/* Now's the best time to initialize FBR0 contents */
*/
rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
writel(
- ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
- &rx_dma->fbr0_min_des);
+ ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
+ &rx_dma->fbr0_min_des);
#endif
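
The two writel() calls above program each free buffer ring's low-watermark
register as a fixed percentage of the ring size. A standalone sketch of the
arithmetic; the ring size and percentage below are assumed example values,
not necessarily the driver's constants:

#include <stdio.h>

/* Assumed example values for illustration only. */
#define LO_MARK_PERCENT_FOR_RX	15
#define FBR1_NUM_ENTRIES	1024

int main(void)
{
	/* Same integer arithmetic as the writel() argument: a percentage
	 * of the ring size, minus one to turn the count into an index.
	 */
	unsigned int min_des =
		((FBR1_NUM_ENTRIES * LO_MARK_PERCENT_FOR_RX) / 100) - 1;

	printf("fbr1_min_des = %u\n", min_des);	/* 152 for these values */
	return 0;
}
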
/* Program the number of packets we will receive before generating an
}
#ifdef USE_FBR0
- adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[1]->num_entries +
- adapter->rx_ring.fbr[0]->num_entries;
+ adapter->rx_ring.psr_num_entries =
+ adapter->rx_ring.fbr[1]->num_entries +
+ adapter->rx_ring.fbr[0]->num_entries;
#else
adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
#endif
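
The bufsize computations below oversize each ring allocation by 0xfff bytes
so the returned block can be rounded up to a 4 KiB boundary, and the
distance moved is remembered so the free path can undo it. A minimal
userspace sketch of that pattern, with malloc() standing in for
dma_alloc_coherent() and all names illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t payload = 4096;			/* assumed descriptor area size */
	size_t bufsize = payload + 0xfff;	/* headroom for 4 KiB alignment */
	uint8_t *raw = malloc(bufsize);

	if (!raw)
		return 1;

	/* Round up to the next 4 KiB boundary and remember how far we
	 * moved, mirroring the driver's "offset" bookkeeping.
	 */
	uintptr_t aligned = ((uintptr_t)raw + 0xfff) & ~(uintptr_t)0xfff;
	size_t offset = aligned - (uintptr_t)raw;

	printf("raw=%p aligned=%p offset=%zu\n",
	       (void *)raw, (void *)aligned, offset);

	free((void *)(aligned - offset));	/* identical to raw */
	return 0;
}
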
/* Allocate an area of memory for Free Buffer Ring 1 */
- bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 0xfff;
+ bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
+ 0xfff;
rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
bufsize,
&rx_ring->fbr[0]->ring_physaddr,
#ifdef USE_FBR0
/* Allocate an area of memory for Free Buffer Ring 0 */
- bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 0xfff;
+ bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
+ 0xfff;
rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
bufsize,
&rx_ring->fbr[1]->ring_physaddr,
(FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
rx_ring->fbr[0]->mem_virtaddrs[i] =
dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
- &rx_ring->fbr[0]->mem_physaddrs[i], GFP_KERNEL);
+ &rx_ring->fbr[0]->mem_physaddrs[i],
+ GFP_KERNEL);
if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
dev_err(&adapter->pdev->dev,
((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
rx_ring->fbr[1]->mem_virtaddrs[i] =
dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
- &rx_ring->fbr[1]->mem_physaddrs[i], GFP_KERNEL);
+ &rx_ring->fbr[1]->mem_physaddrs[i],
+ GFP_KERNEL);
if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
dev_err(&adapter->pdev->dev,
/* Now the FIFO itself */
rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
- rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
+ rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
- bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries)
- + 0xfff;
+ bufsize =
+ (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
+ 0xfff;
dma_free_coherent(&adapter->pdev->dev, bufsize,
rx_ring->fbr[0]->ring_virtaddr,
/* Now the FIFO itself */
rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
- rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
+ rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
- bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries)
- + 0xfff;
+ bufsize =
+ (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
+ 0xfff;
dma_free_coherent(&adapter->pdev->dev,
- bufsize,
- rx_ring->fbr[1]->ring_virtaddr,
- rx_ring->fbr[1]->ring_physaddr);
+ bufsize,
+ rx_ring->fbr[1]->ring_virtaddr,
+ rx_ring->fbr[1]->ring_physaddr);
rx_ring->fbr[1]->ring_virtaddr = NULL;
}
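
Both free paths above bias ring_virtaddr back down by the stored offset and
recompute the original oversized length before calling dma_free_coherent(),
so the CPU address and size match what dma_alloc_coherent() handed out. A
hedged sketch of that sequence; the struct and its fields are illustrative
stand-ins, not the driver's types:

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct fbr_sketch {
	void *ring_virtaddr;		/* aligned pointer used at runtime */
	dma_addr_t ring_physaddr;	/* DMA handle from the allocation */
	u32 offset;			/* bytes added while aligning */
	u32 num_entries;
};

static void fbr_ring_free(struct device *dev, struct fbr_sketch *fbr,
			  size_t desc_size)
{
	size_t bufsize = (desc_size * fbr->num_entries) + 0xfff;

	/* Undo the alignment bias, then free the original block. */
	fbr->ring_virtaddr = (u8 *)fbr->ring_virtaddr - fbr->offset;
	dma_free_coherent(dev, bufsize, fbr->ring_virtaddr,
			  fbr->ring_physaddr);
	fbr->ring_virtaddr = NULL;
}
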
spin_lock_irqsave(&adapter->fbr_lock, flags);
if (ring_index == 1) {
- struct fbr_desc *next =
- (struct fbr_desc *) (rx_local->fbr[0]->ring_virtaddr) +
- INDEX10(rx_local->fbr[0]->local_full);
+ struct fbr_desc *next = (struct fbr_desc *)
+ (rx_local->fbr[0]->ring_virtaddr) +
+ INDEX10(rx_local->fbr[0]->local_full);
/* Handle the Free Buffer Ring advancement here. Write
* the PA / Buffer Index for the returned buffer into
next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
next->word2 = buff_index;
- writel(bump_free_buff_ring(&rx_local->fbr[0]->local_full,
- rx_local->fbr[0]->num_entries - 1),
- &rx_dma->fbr1_full_offset);
+ writel(bump_free_buff_ring(
+ &rx_local->fbr[0]->local_full,
+ rx_local->fbr[0]->num_entries - 1),
+ &rx_dma->fbr1_full_offset);
}
#ifdef USE_FBR0
else {
* 1 for FBR0 etc
*/
memcpy(skb_put(skb, rfd->len),
- rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
- rfd->len);
+ rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
+ rfd->len);
skb->dev = adapter->netdev;
skb->protocol = eth_type_trans(skb, adapter->netdev);
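
The memcpy() above is a copy-based receive: the frame is copied out of the
free buffer ring chunk into a freshly allocated skb before being passed up
the stack. A minimal sketch of the same pattern; rx_copy_to_stack() and its
parameters are hypothetical:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int rx_copy_to_stack(struct net_device *ndev, const void *buf,
			    unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, 2);			/* align the IP header */
	memcpy(skb_put(skb, len), buf, len);	/* copy out of the FBR chunk */

	skb->dev = ndev;
	skb->protocol = eth_type_trans(skb, ndev);
	netif_rx(skb);				/* hand the frame to the stack */
	return 0;
}
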
*/
desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
tx_ring->tx_desc_ring =
- (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, desc_size,
- &tx_ring->tx_desc_ring_pa, GFP_KERNEL);
+ (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
+ desc_size,
+ &tx_ring->tx_desc_ring_pa,
+ GFP_KERNEL);
if (!adapter->tx_ring.tx_desc_ring) {
dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Tx Ring\n");
+ "Cannot alloc memory for Tx Ring\n");
return -ENOMEM;
}
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
+static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
+ int cmd)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
struct et131x_adapter *adapter = netdev_priv(netdev);
/* stop the queue if it's getting full */
- if(adapter->tx_ring.used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
+ if (adapter->tx_ring.used >= NUM_TCB - 1 &&
+ !netif_queue_stopped(netdev))
netif_stop_queue(netdev);
/* Save the timestamp for the TX timeout watchdog */
/* Check status and manage the netif queue if necessary */
if (status != 0) {
- if (status == -ENOMEM) {
+ if (status == -ENOMEM)
status = NETDEV_TX_BUSY;
- } else {
+ else
status = NETDEV_TX_OK;
- }
}
return status;
}
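
The last two hunks implement the usual ndo_start_xmit flow-control
convention: stop the queue while a TCB still remains so the packet being
queued always fits, and map -ENOMEM to NETDEV_TX_BUSY so the core requeues
the skb instead of dropping it. A compact sketch of that convention;
tx_slots_used() and send_packet() are hypothetical helpers:

#include <linux/netdevice.h>

#define NUM_TCB 64	/* assumed transmit control block pool size */

static int tx_slots_used(struct net_device *ndev);	/* hypothetical */
static int send_packet(struct sk_buff *skb,
		       struct net_device *ndev);	/* hypothetical */

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	/* Stop the queue one slot early so this packet always fits. */
	if (tx_slots_used(ndev) >= NUM_TCB - 1 &&
	    !netif_queue_stopped(ndev))
		netif_stop_queue(ndev);

	if (send_packet(skb, ndev) == -ENOMEM)
		return NETDEV_TX_BUSY;	/* core will retry this skb */

	return NETDEV_TX_OK;
}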