/*
        drivers/net/tulip/interrupt.c

        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.

        This software may be used and distributed according to the terms
        of the GNU General Public License, incorporated herein by reference.

        Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
        for more information on this driver.
        Please submit bugs to http://bugzilla.kernel.org/ .

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

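/*
 * Tunables shared with the rest of the driver; they are given their
 * values elsewhere (tulip_core.c sets them from module parameters).
 */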
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11 21143 hardware Mitigation Control Interrupt.
            We use only RX mitigation; other techniques are used for
            TX intr. mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continuous Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//      0x80FF0000       /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000       /* RX time = 16, RX pkts = 0, CM = 1 */
};
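
/*
 * Illustration only, not part of the original driver: given the CSR11
 * layout described above, a table entry such as 0x80150000 decomposes
 * into cycle size = 1 (bit 31), RX timer = 1 (bits 23:20), RX pkts = 2
 * (bits 19:17) and CM = 1 (bit 16).  A sketch of composing such a word
 * (field names are our own shorthand, not from the 21143 datasheet):
 */
static inline u32 tulip_mit_word(u32 cycle_size, u32 tx_timer, u32 tx_pkts,
                                 u32 rx_timer, u32 rx_pkts, u32 cm)
{
        return (cycle_size << 31) | (tx_timer << 27) | (tx_pkts << 24) |
               (rx_timer << 20) | (rx_pkts << 17) | (cm << 16);
}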
#endif


int tulip_refill_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry;
        int refilled = 0;

        /* Refill the Rx ring buffers. */
        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
                        if (skb == NULL)
                                break;

                        mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;

                        skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if (tp->chip_id == LC82C168) {
                if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
                        /* Rx stopped due to out of buffers,
                         * restart it
                         */
                        iowrite32(0x01, tp->base_addr + CSR2);
                }
        }
        return refilled;
}

#ifdef CONFIG_TULIP_NAPI

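/*
 * Timer callback armed from the oom: path of tulip_poll() below; when
 * it fires we simply reschedule NAPI polling, giving tulip_refill_rx()
 * another chance to allocate receive buffers.
 */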
void oom_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
        napi_schedule(&tp->napi);
}

int tulip_poll(struct napi_struct *napi, int budget)
{
        struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
        struct net_device *dev = tp->dev;
        int entry = tp->cur_rx % RX_RING_SIZE;
        int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
        int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* One buffer is needed for mitigation activation; or it might be a
   bug in the ring buffer code; check later -- JHS */

        if (budget >= RX_RING_SIZE)
                budget--;
#endif

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
                       entry, tp->rx_ring[entry].status);

        do {
                if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
                        printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
                        break;
                }
                /* Acknowledge current RX interrupt sources. */
                iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


                /* If we own the next entry, it is a new packet. Send it up. */
                while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                        short pkt_len;

                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;

                        if (tulip_debug > 5)
                                printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
                                       dev->name, entry, status);

                        if (++work_done >= budget)
                                goto not_done;

                        /*
                         * Omit the four octet CRC from the length.
                         * (May not be considered valid until we have
                         * checked status for RxLengthOver2047 bits)
                         */
                        pkt_len = ((status >> 16) & 0x7ff) - 4;

                        /*
                         * Maximum pkt_len is 1518 (1514 + vlan header)
                         * Anything higher than this is always invalid
                         * regardless of RxLengthOver2047 bits
                         */

                        if ((status & (RxLengthOver2047 |
                                       RxDescCRCError |
                                       RxDescCollisionSeen |
                                       RxDescRunt |
                                       RxDescDescErr |
                                       RxWholePkt)) != RxWholePkt ||
                            pkt_len > 1518) {
                                if ((status & (RxLengthOver2047 |
                                               RxWholePkt)) != RxWholePkt) {
                                        /* Ignore earlier buffers. */
                                        if ((status & 0xffff) != 0x7fff) {
                                                if (tulip_debug > 1)
                                                        dev_warn(&dev->dev,
                                                                 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                                                                 status);
                                                tp->stats.rx_length_errors++;
                                        }
                                } else {
                                        /* There was a fatal error. */
                                        if (tulip_debug > 2)
                                                printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
                                                       dev->name, status);
                                        tp->stats.rx_errors++; /* end of a packet.*/
                                        if (pkt_len > 1518 ||
                                            (status & RxDescRunt))
                                                tp->stats.rx_length_errors++;

                                        if (status & 0x0004) tp->stats.rx_frame_errors++;
                                        if (status & 0x0002) tp->stats.rx_crc_errors++;
                                        if (status & 0x0001) tp->stats.rx_fifo_errors++;
                                }
                        } else {
                                struct sk_buff *skb;

                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
                                if (pkt_len < tulip_rx_copybreak &&
                                    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                    tp->rx_buffers[entry].mapping,
                                                                    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                                        skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                                pkt_len);
                                        skb_put(skb, pkt_len);
#else
                                        memcpy(skb_put(skb, pkt_len),
                                               tp->rx_buffers[entry].skb->data,
                                               pkt_len);
#endif
                                        pci_dma_sync_single_for_device(tp->pdev,
                                                                       tp->rx_buffers[entry].mapping,
                                                                       pkt_len, PCI_DMA_FROMDEVICE);
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);

#ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                                dev_err(&dev->dev,
                                                        "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
                                                        le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                        (unsigned long long)tp->rx_buffers[entry].mapping,
                                                        skb->head, temp);
                                        }
#endif

                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);

                                netif_receive_skb(skb);

                                tp->stats.rx_packets++;
                                tp->stats.rx_bytes += pkt_len;
                        }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
                        received++;
#endif

                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);

                }

                /* New ack strategy... the irq handler no longer acks Rx;
                   hopefully this helps */

                /* Really bad things can happen here... If a new packet arrives
                 * and an irq arrives (tx or just due to an occasionally unset
                 * mask), it will be acked by the irq handler, but a new poll is
                 * not scheduled. It is a major hole in the design.
                 * No idea how to fix this if "playing with fire" fails
                 * tomorrow (night 011029). If it does not fail, we won
                 * finally: the amount of IO did not increase at all. */
        } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We could have IM enabled
           continuously, but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           That would turn on IM for devices that are not contributing
           to backlog congestion, with unnecessary latency.

           We monitor the device RX ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note: we only use the min and max (0, 15) settings from mit_table */


        if (tp->flags & HAS_INTR_MITIGATION) {
                if (received > 1) {
                        if (!tp->mit_on) {
                                tp->mit_on = 1;
                                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                        }
                } else {
                        if (tp->mit_on) {
                                tp->mit_on = 0;
                                iowrite32(0, tp->base_addr + CSR11);
                        }
                }
        }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

        tulip_refill_rx(dev);

        /* If the RX ring is not full, we are out of memory. */
        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        /* Remove us from the polling list and enable RX intr. */

        napi_complete(napi);
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);

        /* The last op happens after poll completion. Which means the following:
         * 1. it can race with disabling irqs in the irq handler
         * 2. it can race with disabling/enabling irqs in other poll threads
         * 3. if an irq is raised after the beginning of the loop, it will be
         *    immediately triggered here.
         *
         * Summarizing: the logic results in some redundant irqs both
         * due to races in masking and due to too late acking of already
         * processed irqs. But it must not result in losing events.
         */

        return work_done;

not_done:
        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
            tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                tulip_refill_rx(dev);

        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        return work_done;

oom:    /* Executed with RX ints disabled */

        /* Start timer, stop polling, but do not enable rx interrupts. */
        mod_timer(&tp->oom_timer, jiffies + 1);

        /* Think: timer_pending() would be an explicit signature of a bug.
         * The timer can be pending now, but have fired and completed
         * before we did napi_complete(). See? We would lose it. */

        /* remove ourselves from the polling list */
        napi_complete(napi);

        return work_done;
}

#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
        int received = 0;

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
                       entry, tp->rx_ring[entry].status);
        /* If we own the next entry, it is a new packet. Send it up. */
        while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                short pkt_len;

                if (tulip_debug > 5)
                        printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
                               dev->name, entry, status);
                if (--rx_work_limit < 0)
                        break;

                /*
                  Omit the four octet CRC from the length.
                  (May not be considered valid until we have
                  checked status for RxLengthOver2047 bits)
                */
                pkt_len = ((status >> 16) & 0x7ff) - 4;
                /*
                  Maximum pkt_len is 1518 (1514 + vlan header)
                  Anything higher than this is always invalid
                  regardless of RxLengthOver2047 bits
                */

                if ((status & (RxLengthOver2047 |
                               RxDescCRCError |
                               RxDescCollisionSeen |
                               RxDescRunt |
                               RxDescDescErr |
                               RxWholePkt)) != RxWholePkt ||
                    pkt_len > 1518) {
                        if ((status & (RxLengthOver2047 |
                                       RxWholePkt)) != RxWholePkt) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        if (tulip_debug > 1)
                                                dev_warn(&dev->dev,
                                                         "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                                                         status);
                                        tp->stats.rx_length_errors++;
                                }
                        } else {
                                /* There was a fatal error. */
                                if (tulip_debug > 2)
                                        printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
                                               dev->name, status);
                                tp->stats.rx_errors++; /* end of a packet.*/
                                if (pkt_len > 1518 ||
                                    (status & RxDescRunt))
                                        tp->stats.rx_length_errors++;
                                if (status & 0x0004) tp->stats.rx_frame_errors++;
                                if (status & 0x0002) tp->stats.rx_crc_errors++;
                                if (status & 0x0001) tp->stats.rx_fifo_errors++;
                        }
                } else {
                        struct sk_buff *skb;

                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak &&
                            (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                       tp->rx_buffers[entry].skb->data,
                                       pkt_len);
#endif
                                pci_dma_sync_single_for_device(tp->pdev,
                                                               tp->rx_buffers[entry].mapping,
                                                               pkt_len, PCI_DMA_FROMDEVICE);
                        } else {        /* Pass up the skb already on the Rx ring. */
                                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                     pkt_len);

#ifndef final_version
                                if (tp->rx_buffers[entry].mapping !=
                                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                        dev_err(&dev->dev,
                                                "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
                                                le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                (unsigned long long)tp->rx_buffers[entry].mapping,
                                                skb->head, temp);
                                }
#endif

                                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                tp->rx_buffers[entry].skb = NULL;
                                tp->rx_buffers[entry].mapping = 0;
                        }
                        skb->protocol = eth_type_trans(skb, dev);

                        netif_rx(skb);

                        tp->stats.rx_packets++;
                        tp->stats.rx_bytes += pkt_len;
                }
                received++;
                entry = (++tp->cur_rx) % RX_RING_SIZE;
        }
        return received;
}
#endif  /* CONFIG_TULIP_NAPI */

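/*
 * On HP PA-RISC (hppa) boards the PHY interrupt is visible in CSR12:
 * detect a change against the shadow copy, ack it, and re-check the
 * duplex setting.  Returns 1 if a PHY interrupt was serviced, 0
 * otherwise (and always 0 on other architectures).
 */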
static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
        struct tulip_private *tp = netdev_priv(dev);
        int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
                iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
                iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

                return 1;
        }
#endif

        return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        int csr5;
        int missed;
        int rx = 0;
        int tx = 0;
        int oi = 0;
        int maxrx = RX_RING_SIZE;
        int maxtx = TX_RING_SIZE;
        int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
        int rxd = 0;
#else
        int entry;
#endif
        unsigned int work_count = tulip_max_interrupt_work;
        unsigned int handled = 0;

        /* Let's see whether the interrupt really is for us */
        csr5 = ioread32(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt(dev);

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);

        tp->nir++;

        do {

#ifdef CONFIG_TULIP_NAPI

                if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
                        rxd++;
                        /* Mask RX intrs and add the device to the poll list. */
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
                                  ioaddr + CSR7);
                        napi_schedule(&tp->napi);

                        if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
                }

                /* Acknowledge the interrupt sources we handle here ASAP;
                   the poll function does the Rx and RxNoBuf acking */

                iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


                if (csr5 & (RxIntr | RxNoBuf)) {
                        rx += tulip_rx(dev);
                        tulip_refill_rx(dev);
                }

#endif /*  CONFIG_TULIP_NAPI */

                if (tulip_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x\n",
                               dev->name, csr5, ioread32(ioaddr + CSR5));


                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;

                        spin_lock(&tp->lock);

                        for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                             dirty_tx++) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = le32_to_cpu(tp->tx_ring[entry].status);

                                if (status < 0)
                                        break;                  /* It still has not been Txed */

                                /* Check for Rx filter setup frames. */
                                if (tp->tx_buffers[entry].skb == NULL) {
                                        /* test because dummy frames are not mapped */
                                        if (tp->tx_buffers[entry].mapping)
                                                pci_unmap_single(tp->pdev,
                                                                 tp->tx_buffers[entry].mapping,
                                                                 sizeof(tp->setup_frame),
                                                                 PCI_DMA_TODEVICE);
                                        continue;
                                }

                                if (status & 0x8000) {
                                        /* There was a major error, log it. */
#ifndef final_version
                                        if (tulip_debug > 1)
                                                printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
                                                       dev->name, status);
#endif
                                        tp->stats.tx_errors++;
                                        if (status & 0x4104) tp->stats.tx_aborted_errors++;
                                        if (status & 0x0C00) tp->stats.tx_carrier_errors++;
                                        if (status & 0x0200) tp->stats.tx_window_errors++;
                                        if (status & 0x0002) tp->stats.tx_fifo_errors++;
                                        if ((status & 0x0080) && tp->full_duplex == 0)
                                                tp->stats.tx_heartbeat_errors++;
                                } else {
                                        tp->stats.tx_bytes +=
                                                tp->tx_buffers[entry].skb->len;
                                        tp->stats.collisions += (status >> 3) & 15;
                                        tp->stats.tx_packets++;
                                }

                                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                                 tp->tx_buffers[entry].skb->len,
                                                 PCI_DMA_TODEVICE);

                                /* Free the original skb. */
                                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                                tp->tx_buffers[entry].skb = NULL;
                                tp->tx_buffers[entry].mapping = 0;
                                tx++;
                        }

#ifndef final_version
                        if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                dev_err(&dev->dev,
                                        "Out-of-sync dirty pointer, %d vs. %d\n",
                                        dirty_tx, tp->cur_tx);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                                netif_wake_queue(dev);

                        tp->dirty_tx = dirty_tx;
                        if (csr5 & TxDied) {
                                if (tulip_debug > 2)
                                        dev_warn(&dev->dev,
                                                 "The transmitter stopped.  CSR5 is %x, CSR6 %x, new CSR6 %x\n",
                                                 csr5, ioread32(ioaddr + CSR6),
                                                 tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
                }

                /* Log errors. */
                if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
                        if (csr5 == 0xffffffff)
                                break;
                        if (csr5 & TxJabber) tp->stats.tx_errors++;
                        if (csr5 & TxFIFOUnderflow) {
                                if ((tp->csr6 & 0xC000) != 0xC000)
                                        tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                                else
                                        tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
                                iowrite32(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
                                        iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                                        iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                                tp->stats.rx_errors++;
                                tulip_start_rxtx(tp);
                        }
                        /*
                         * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
                         * call is ever done under the spinlock
                         */
                        if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                                if (tp->link_change)
                                        (tp->link_change)(dev, csr5);
                        }
                        if (csr5 & SystemError) {
                                int error = (csr5 >> 23) & 7;
                                /* Oops, we hit a PCI error.  The code produced corresponds
                                 * to the reason:
                                 *  0 - parity error
                                 *  1 - master abort
                                 *  2 - target abort
                                 * Note that on parity error, we should do a software reset
                                 * of the chip to get it back into a sane state (according
                                 * to the 21142/3 docs, that is).
                                 *   -- rmk
                                 */
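                                /*
                                 * For illustration only (ours, not part of
                                 * the driver), the code could be mapped to
                                 * a name along these lines:
                                 *
                                 *      static const char * const name[] = {
                                 *              "parity error",
                                 *              "master abort",
                                 *              "target abort",
                                 *      };
                                 *
                                 * (error values 3-7 are reserved)
                                 */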
                                dev_err(&dev->dev,
                                        "(%lu) System Error occurred (%d)\n",
                                        tp->nir, error);
                        }
                        /* Clear all error sources, including undocumented ones! */
                        iowrite32(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {

                        if (tulip_debug > 2)
                                dev_err(&dev->dev,
                                        "Re-enabling interrupts, %08x\n",
                                        csr5);
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
                if (tx > maxtx || rx > maxrx || oi > maxoi) {
                        if (tulip_debug > 1)
                                dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
                                         csr5, tp->nir, tx, rx, oi);

                        /* Acknowledge all interrupt sources. */
                        iowrite32(0x8001ffff, ioaddr + CSR5);
                        if (tp->flags & HAS_INTR_MITIGATION) {
                                /* Josip Loncaric at ICASE did extensive experimentation
                                   to develop a good interrupt mitigation setting. */
                                iowrite32(0x8b240000, ioaddr + CSR11);
                        } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer. */
                                iowrite32(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                                /* Mask all interrupting sources, set timer to
                                   re-enable. */
                                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(0x0012, ioaddr + CSR11);
                        }
                        break;
                }

                work_count--;
                if (work_count == 0)
                        break;

                csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
        } while ((csr5 & (TxNoBuf |
                          TxDied |
                          TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
                          RxDied |
                          TxFIFOUnderflow |
                          TxJabber |
                          TPLnkFail |
                          SystemError)) != 0);
#else
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

        tulip_refill_rx(dev);

        /* check if the card is in suspend mode */
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
                if (tulip_debug > 1)
                        dev_warn(&dev->dev,
                                 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
                                 tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
                        iowrite32(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
                        if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        dev_warn(&dev->dev,
                                                 "in rx suspend mode: (%lu) set timer\n",
                                                 tp->nir);
                                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(TimerInt, ioaddr + CSR5);
                                iowrite32(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }
#endif /* CONFIG_TULIP_NAPI */

        if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
                tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }

        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
                       dev->name, ioread32(ioaddr + CSR5));

        return IRQ_HANDLED;
}