drivers/net: Remove unnecessary returns from void function()s
/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"

#define DRV_NAME        "qla3xxx"
#define DRV_STRING      "QLogic ISP3XXX Network Driver"
#define DRV_VERSION     "v2.03.00-k5"
#define PFX             DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;          /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used
 */
typedef enum {
        PHY_TYPE_UNKNOWN = 0,
        PHY_VITESSE_VSC8211,
        PHY_AGERE_ET1011C,
        MAX_PHY_DEV_TYPES
} PHY_DEVICE_et;

typedef struct {
        PHY_DEVICE_et   phyDevice;
        u32             phyIdOUI;
        u16             phyIdModel;
        char            *name;
} PHY_DEVICE_INFO_t;

static const PHY_DEVICE_INFO_t PHY_DEVICES[] = {
        {PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
        {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
        {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};

/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
                           u32 sem_mask, u32 sem_bits)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;
        unsigned int seconds = 3;

        do {
                writel((sem_mask | sem_bits),
                       &port_regs->CommonRegs.semaphoreReg);
                value = readl(&port_regs->CommonRegs.semaphoreReg);
                if ((value & (sem_mask >> 16)) == sem_bits)
                        return 0;
                ssleep(1);
        } while (--seconds);
        return -1;
}
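/*
 * ql_sem_unlock() drops a hardware semaphore; the readback flushes the
 * posted write.  ql_sem_lock() is the non-blocking counterpart of
 * ql_sem_spinlock() above and returns nonzero on success.
 */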
static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
        readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
        value = readl(&port_regs->CommonRegs.semaphoreReg);
        return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
        int i = 0;

        while (1) {
                if (!ql_sem_lock(qdev,
                                 QL_DRVR_SEM_MASK,
                                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
                                  * 2) << 1)) {
                        if (i < 10) {
                                ssleep(1);
                                i++;
                        } else {
                                printk(KERN_ERR PFX "%s: Timed out waiting for "
                                       "driver lock...\n",
                                       qdev->ndev->name);
                                return 0;
                        }
                } else {
                        printk(KERN_DEBUG PFX
                               "%s: driver lock acquired.\n",
                               qdev->ndev->name);
                        return 1;
                }
        }
}
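/*
 * The ISP registers are paged; select the given page and cache the
 * selection so the page0/1/2 accessors below can skip redundant switches.
 */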
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        writel(((ISP_CONTROL_NP_MASK << 16) | page),
               &port_regs->CommonRegs.ispControlStatus);
        readl(&port_regs->CommonRegs.ispControlStatus);
        qdev->current_page = page;
}
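/*
 * Register accessors with an _l suffix take hw_lock around the access;
 * the plain variants leave locking to the caller.
 */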
static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
                                u32 __iomem *reg)
{
        u32 value;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        value = readl(reg);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

        return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
                              u32 __iomem *reg)
{
        return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
        u32 value;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);

        if (qdev->current_page != 0)
                ql_set_register_page(qdev, 0);
        value = readl(reg);

        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
        if (qdev->current_page != 0)
                ql_set_register_page(qdev, 0);
        return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
                                  u32 __iomem *reg, u32 value)
{
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        writel(value, reg);
        readl(reg);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
                                u32 __iomem *reg, u32 value)
{
        writel(value, reg);
        readl(reg);
}
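/*
 * Like ql_write_common_reg(), but with a short delay so the bit-banged
 * serial EEPROM sees stable clock and data edges.
 */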
static void ql_write_nvram_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        writel(value, reg);
        readl(reg);
        udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        if (qdev->current_page != 0)
                ql_set_register_page(qdev, 0);
        writel(value, reg);
        readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        if (qdev->current_page != 1)
                ql_set_register_page(qdev, 1);
        writel(value, reg);
        readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        if (qdev->current_page != 2)
                ql_set_register_page(qdev, 2);
        writel(value, reg);
        readl(reg);
}
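/*
 * The upper 16 bits of ispInterruptMaskReg select which mask bits a write
 * actually changes (the same convention the MAC config writers below use):
 * disabling clears ISP_IMR_ENABLE_INT, enabling sets it.
 */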
static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
                              (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
                              ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
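/*
 * Chain a large receive buffer control block back onto the free list.  If
 * its skb was handed up the stack, allocate and DMA-map a replacement here;
 * on failure, lrg_buf_skb_check is bumped so the buffer can be retried.
 */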
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                            struct ql_rcv_buf_cb *lrg_buf_cb)
{
        dma_addr_t map;
        int err;

        lrg_buf_cb->next = NULL;

        if (qdev->lrg_buf_free_tail == NULL) {  /* The list is empty */
                qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
        } else {
                qdev->lrg_buf_free_tail->next = lrg_buf_cb;
                qdev->lrg_buf_free_tail = lrg_buf_cb;
        }

        if (!lrg_buf_cb->skb) {
                lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
                                                   qdev->lrg_buffer_len);
                if (unlikely(!lrg_buf_cb->skb)) {
                        printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
                               qdev->ndev->name);
                        qdev->lrg_buf_skb_check++;
                } else {
                        /*
                         * We save some space to copy the ethhdr from the
                         * first buffer
                         */
                        skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
                        map = pci_map_single(qdev->pdev,
                                             lrg_buf_cb->skb->data,
                                             qdev->lrg_buffer_len -
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
                        err = pci_dma_mapping_error(qdev->pdev, map);
                        if (err) {
                                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                       qdev->ndev->name, err);
                                dev_kfree_skb(lrg_buf_cb->skb);
                                lrg_buf_cb->skb = NULL;

                                qdev->lrg_buf_skb_check++;
                                return;
                        }

                        lrg_buf_cb->buf_phy_addr_low =
                            cpu_to_le32(LS_64BITS(map));
                        lrg_buf_cb->buf_phy_addr_high =
                            cpu_to_le32(MS_64BITS(map));
                        dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
                        dma_unmap_len_set(lrg_buf_cb, maplen,
                                          qdev->lrg_buffer_len -
                                          QL_HEADER_SPACE);
                }
        }

        qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
                                                           *qdev)
{
        struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

        if (lrg_buf_cb != NULL) {
                qdev->lrg_buf_free_head = lrg_buf_cb->next;
                if (qdev->lrg_buf_free_head == NULL)
                        qdev->lrg_buf_free_tail = NULL;
                qdev->lrg_buf_free_count--;
        }

        return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
                            unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
        int i;
        u32 mask;
        u32 dataBit;
        u32 previousBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        /* Clock in a zero, then do the start bit */
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                           AUBURN_EEPROM_DO_1);
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                           AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                           AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

        mask = 1 << (FM93C56A_CMD_BITS - 1);
        /* Force the previous data bit to be different */
        previousBit = 0xffff;
        for (i = 0; i < FM93C56A_CMD_BITS; i++) {
                dataBit =
                    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
                if (previousBit != dataBit) {
                        /*
                         * If the bit changed, then change the DO state to
                         * match
                         */
                        ql_write_nvram_reg(qdev,
                                           &port_regs->CommonRegs.serialPortInterfaceReg,
                                           ISP_NVRAM_MASK |
                                           qdev->eeprom_cmd_data | dataBit);
                        previousBit = dataBit;
                }
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.serialPortInterfaceReg,
                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                   dataBit | AUBURN_EEPROM_CLK_RISE);
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.serialPortInterfaceReg,
                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                   dataBit | AUBURN_EEPROM_CLK_FALL);
                cmd = cmd << 1;
        }

        mask = 1 << (addrBits - 1);
        /* Force the previous data bit to be different */
        previousBit = 0xffff;
        for (i = 0; i < addrBits; i++) {
                dataBit =
                    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
                    AUBURN_EEPROM_DO_0;
                if (previousBit != dataBit) {
                        /*
                         * If the bit changed, then change the DO state to
                         * match
                         */
                        ql_write_nvram_reg(qdev,
                                           &port_regs->CommonRegs.serialPortInterfaceReg,
                                           ISP_NVRAM_MASK |
                                           qdev->eeprom_cmd_data | dataBit);
                        previousBit = dataBit;
                }
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.serialPortInterfaceReg,
                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                   dataBit | AUBURN_EEPROM_CLK_RISE);
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.serialPortInterfaceReg,
                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                   dataBit | AUBURN_EEPROM_CLK_FALL);
                eepromAddr = eepromAddr << 1;
        }
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
        int i;
        u32 data = 0;
        u32 dataBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        /* Read the data bits */
        /* The first bit is a dummy.  Clock right over it. */
        for (i = 0; i < dataBits; i++) {
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.serialPortInterfaceReg,
                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                   AUBURN_EEPROM_CLK_RISE);
                ql_write_nvram_reg(qdev,
                                   &port_regs->CommonRegs.serialPortInterfaceReg,
                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                   AUBURN_EEPROM_CLK_FALL);
                dataBit =
                    (ql_read_common_reg(qdev,
                                        &port_regs->CommonRegs.serialPortInterfaceReg) &
                     AUBURN_EEPROM_DI_1) ? 1 : 0;
                data = (data << 1) | dataBit;
        }
        *value = (u16) data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
                            u32 eepromAddr, unsigned short *value)
{
        fm93c56a_select(qdev);
        fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
        fm93c56a_datain(qdev, value);
        fm93c56a_deselect(qdev);
}
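/* Populate ndev->dev_addr from the three 16-bit words read out of NVRAM. */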
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
        __le16 *p = (__le16 *)ndev->dev_addr;

        p[0] = cpu_to_le16(addr[0]);
        p[1] = cpu_to_le16(addr[1]);
        p[2] = cpu_to_le16(addr[2]);
}
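/*
 * Read the whole serial EEPROM into qdev->nvram_data and verify its 16-bit
 * additive checksum; all words must sum to zero.  Returns 0 on success.
 */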
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
        u16 *pEEPROMData;
        u16 checksum = 0;
        u32 index;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);

        pEEPROMData = (u16 *)&qdev->nvram_data;
        qdev->eeprom_cmd_data = 0;
        if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
                            (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                             2) << 10)) {
                printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
                       __func__);
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return -1;
        }

        for (index = 0; index < EEPROM_SIZE; index++) {
                eeprom_readword(qdev, index, pEEPROMData);
                checksum += *pEEPROMData;
                pEEPROMData++;
        }
        ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

        if (checksum != 0) {
                printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
                       qdev->ndev->name, checksum);
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return -1;
        }

        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return checksum;
}

static const u32 PHYAddr[2] = {
        PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};
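/*
 * Poll for the MII management interface to go idle; gives up after
 * roughly 10 ms.
 */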
static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 temp;
        int count = 1000;

        while (count) {
                temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
                if (!(temp & MAC_MII_STATUS_BSY))
                        return 0;
                udelay(10);
                count--;
        }
        return -1;
}
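/*
 * Scan mode lets the MAC poll the PHY link status register on its own;
 * it has to be turned off around manual MII reads and writes.
 */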
static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 scanControl;

        if (qdev->numPorts > 1) {
                /* Auto scan will cycle through multiple ports */
                scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
        } else {
                scanControl = MAC_MII_CONTROL_SC;
        }

        /*
         * Scan register 1 of the PHY/PETBI and set up to scan both
         * devices.  The autoscan starts from the first register and
         * completes the last one before rolling over to the first.
         */
        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           PHYAddr[0] | MII_SCAN_REGISTER);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (scanControl) |
                           ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
        u8 ret;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        /* See if scan mode is enabled before we turn it off */
        if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
            (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
                /* Scan is enabled */
                ret = 1;
        } else {
                /* Scan is disabled */
                ret = 0;
        }

        /*
         * When disabling scan mode you must first change the MII register
         * address
         */
        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           PHYAddr[0] | MII_SCAN_REGISTER);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
                             MAC_MII_CONTROL_RC) << 16));

        return ret;
}
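/*
 * The _ex MII accessors take an explicit PHY address instead of the
 * cached qdev->PHYAddr; they are needed while the PHY is still being
 * probed or readdressed.
 */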
static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
                               u16 regAddr, u16 value, u32 phyAddr)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u8 scanWasEnabled;

        scanWasEnabled = ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           phyAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

        /* Wait for write to complete 9/10/04 SJP */
        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        if (scanWasEnabled)
                ql_mii_enable_scan_mode(qdev);

        return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
                              u16 *value, u32 phyAddr)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u8 scanWasEnabled;
        u32 temp;

        scanWasEnabled = ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           phyAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16));

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

        /* Wait for the read to complete */
        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free after issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
        *value = (u16) temp;

        if (scanWasEnabled)
                ql_mii_enable_scan_mode(qdev);

        return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           qdev->PHYAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

        /* Wait for write to complete. */
        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_mii_enable_scan_mode(qdev);

        return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
        u32 temp;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           qdev->PHYAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16));

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

        /* Wait for the read to complete */
        if (ql_wait_for_mii_ready(qdev)) {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Timed out waiting for management port to "
                               "get free before issuing command.\n",
                               qdev->ndev->name);
                return -1;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
        *value = (u16) temp;

        ql_mii_enable_scan_mode(qdev);

        return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
        ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
        u16 reg;

        /* Enable Auto-negotiation sense */
        ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
        reg |= PETBI_TBI_AUTO_SENSE;
        ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

        ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
                         PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

        ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
                         PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
                         PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
        ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
                            PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
        u16 reg;

        /* Enable Auto-negotiation sense */
        ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
                           PHYAddr[qdev->mac_index]);
        reg |= PETBI_TBI_AUTO_SENSE;
        ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
                            PHYAddr[qdev->mac_index]);

        ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
                            PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
                            PHYAddr[qdev->mac_index]);

        ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
                            PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
                            PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
                            PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
        ql_petbi_reset(qdev);
        ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
        ql_petbi_reset_ex(qdev);
        ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
        u16 reg;

        if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
                return 0;

        return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}
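/*
 * One-time setup for the Agere ET1011C: amplitude tweaks per the Agere
 * errata and a move of the PHY to its new MII address.
 */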
static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
        printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
        /* power down device bit 11 = 1 */
        ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
        /* enable diagnostic mode bit 2 = 1 */
        ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
        /* 1000MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
        /* 1000MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
        /* 100MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
        /* 100MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
        /* 10MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
        /* 10MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
        /* point to hidden reg 0x2806 */
        ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
        /* Write new PHYAD w/bit 5 set */
        ql_mii_write_reg_ex(qdev, 0x11,
                            0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
        /*
         * Disable diagnostic mode bit 2 = 0
         * Power up device bit 11 = 0
         * Link up (on) and activity (blink)
         */
        ql_mii_write_reg(qdev, 0x12, 0x840a);
        ql_mii_write_reg(qdev, 0x00, 0x1140);
        ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}
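/*
 * Match the OUI and model number extracted from the PHY ID registers
 * against the table of known PHYs.
 */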
static PHY_DEVICE_et getPhyType(struct ql3_adapter *qdev,
                                u16 phyIdReg0, u16 phyIdReg1)
{
        PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
        u32   oui;
        u16   model;
        int i;

        if (phyIdReg0 == 0xffff)
                return result;

        if (phyIdReg1 == 0xffff)
                return result;

        /* oui is split between two registers */
        oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

        model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

        /* Scan table for this PHY */
        for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
                if ((oui == PHY_DEVICES[i].phyIdOUI) &&
                    (model == PHY_DEVICES[i].phyIdModel)) {
                        result = PHY_DEVICES[i].phyDevice;

                        printk(KERN_INFO "%s: Phy: %s\n",
                               qdev->ndev->name, PHY_DEVICES[i].name);

                        break;
                }
        }

        return result;
}
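/*
 * Decode the negotiated link speed from the PHY's status bits; the Agere
 * part keeps them in a different register than the Vitesse default.
 */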
static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
        u16 reg;

        switch (qdev->phyType) {
        case PHY_AGERE_ET1011C:
                if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
                        return 0;

                reg = (reg >> 8) & 3;
                break;
        default:
                if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
                        return 0;

                reg = (((reg & 0x18) >> 3) & 3);
        }

        switch (reg) {
        case 2:
                return SPEED_1000;
        case 1:
                return SPEED_100;
        case 0:
                return SPEED_10;
        default:
                return -1;
        }
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
        u16 reg;

        switch (qdev->phyType) {
        case PHY_AGERE_ET1011C:
                if (ql_mii_read_reg(qdev, 0x1A, &reg))
                        return 0;

                return ((reg & 0x0080) && (reg & 0x1000)) != 0;
        case PHY_VITESSE_VSC8211:
        default:
                if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
                        return 0;
                return (reg & PHY_AUX_DUPLEX_STAT) != 0;
        }
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
        u16 reg;

        if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
                return 0;

        return (reg & PHY_NEG_PAUSE) != 0;
}
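/*
 * Identify the attached PHY.  ID registers reading all ones mean the PHY
 * is not at the default MII address, which is how the Agere ET1011C is
 * detected; it is then probed at the per-port Agere address instead.
 */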
static int PHY_Setup(struct ql3_adapter *qdev)
{
        u16   reg1;
        u16   reg2;
        bool  agereAddrChangeNeeded = false;
        u32 miiAddr = 0;
        int err;

        /*  Determine the PHY we are using by reading the ID's */
        err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
        if (err != 0) {
                printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
                       qdev->ndev->name);
                return err;
        }

        err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
        if (err != 0) {
                printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG\n",
                       qdev->ndev->name);
                return err;
        }

        /*  Check if we have an Agere PHY */
        if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

                /* Determine which MII address we should be using,
                   based on the index of the card */
                if (qdev->mac_index == 0) {
                        miiAddr = MII_AGERE_ADDR_1;
                } else {
                        miiAddr = MII_AGERE_ADDR_2;
                }

                err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
                if (err != 0) {
                        printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
                               qdev->ndev->name);
                        return err;
                }

                err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
                if (err != 0) {
                        printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG after Agere detected\n",
                               qdev->ndev->name);
                        return err;
                }

                /*  We need to remember to initialize the Agere PHY */
                agereAddrChangeNeeded = true;
        }

        /*  Determine the particular PHY we have on board to apply
            PHY specific initializations */
        qdev->phyType = getPhyType(qdev, reg1, reg2);

        if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
                /* need this here so address gets changed */
                phyAgereSpecificInit(qdev, miiAddr);
        } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
                printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
                return -EIO;
        }

        return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
        else
                value = (MAC_CONFIG_REG_PE << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
        else
                value = (MAC_CONFIG_REG_SR << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
        else
                value = (MAC_CONFIG_REG_GM << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
        else
                value = (MAC_CONFIG_REG_FD << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value =
                    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
                     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
        else
                value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_SM0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_SM1;
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
        u16 reg;

        ql_mii_read_reg(qdev, 0x00, &reg);
        return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_AC0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_AC1;
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck) {
                if (netif_msg_link(qdev))
                        printk(KERN_INFO PFX
                               "%s: Auto-Negotiate complete.\n",
                               qdev->ndev->name);
                return 1;
        } else {
                if (netif_msg_link(qdev))
                        printk(KERN_WARNING PFX
                               "%s: Auto-Negotiate incomplete.\n",
                               qdev->ndev->name);
                return 0;
        }
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
        if (ql_is_fiber(qdev))
                return ql_is_petbi_neg_pause(qdev);
        else
                return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_AE0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_AE1;
                break;
        }
        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
        if (ql_is_fiber(qdev))
                return SPEED_1000;
        else
                return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
        if (ql_is_fiber(qdev))
                return 1;
        else
                return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = ISP_CONTROL_LINK_DN_0;
                break;
        case 1:
                bitToCheck = ISP_CONTROL_LINK_DN_1;
                break;
        }

        temp = ql_read_common_reg(qdev,
                                  &port_regs->CommonRegs.ispControlStatus);
        return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        switch (qdev->mac_index) {
        case 0:
                ql_write_common_reg(qdev,
                                    &port_regs->CommonRegs.ispControlStatus,
                                    (ISP_CONTROL_LINK_DN_0) |
                                    (ISP_CONTROL_LINK_DN_0 << 16));
                break;

        case 1:
                ql_write_common_reg(qdev,
                                    &port_regs->CommonRegs.ispControlStatus,
                                    (ISP_CONTROL_LINK_DN_1) |
                                    (ISP_CONTROL_LINK_DN_1 << 16));
                break;

        default:
                return 1;
        }

        return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_F1_ENABLED;
                break;
        case 1:
                bitToCheck = PORT_STATUS_F3_ENABLED;
                break;
        default:
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck) {
                if (netif_msg_link(qdev))
                        printk(KERN_DEBUG PFX
                               "%s: is not link master.\n", qdev->ndev->name);
                return 0;
        } else {
                if (netif_msg_link(qdev))
                        printk(KERN_DEBUG PFX
                               "%s: is link master.\n", qdev->ndev->name);
                return 1;
        }
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
        ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
                            PHYAddr[qdev->mac_index]);
}
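/*
 * Build the PHY advertisement registers from the NVRAM port configuration
 * and (re)start auto-negotiation.
 */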
static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
        u16 reg;
        u16 portConfiguration;

        if (qdev->phyType == PHY_AGERE_ET1011C) {
                /* turn off external loopback */
                ql_mii_write_reg(qdev, 0x13, 0x0000);
        }

        if (qdev->mac_index == 0)
                portConfiguration =
                        qdev->nvram_data.macCfg_port0.portConfiguration;
        else
                portConfiguration =
                        qdev->nvram_data.macCfg_port1.portConfiguration;

        /*  Some HBAs in the field are set to 0, and they need to
            be reinterpreted with a default value */
        if (portConfiguration == 0)
                portConfiguration = PORT_CONFIG_DEFAULT;

        /* Set the 1000 advertisements */
        ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
                           PHYAddr[qdev->mac_index]);
        reg &= ~PHY_GIG_ALL_PARAMS;

        if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
                if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
                        reg |= PHY_GIG_ADV_1000F;
                else
                        reg |= PHY_GIG_ADV_1000H;
        }

        ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
                            PHYAddr[qdev->mac_index]);

        /* Set the 10/100 & pause negotiation advertisements */
        ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
                           PHYAddr[qdev->mac_index]);
        reg &= ~PHY_NEG_ALL_PARAMS;

        if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
                reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

        if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
                if (portConfiguration & PORT_CONFIG_100MB_SPEED)
                        reg |= PHY_NEG_ADV_100F;

                if (portConfiguration & PORT_CONFIG_10MB_SPEED)
                        reg |= PHY_NEG_ADV_10F;
        }

        if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
                if (portConfiguration & PORT_CONFIG_100MB_SPEED)
                        reg |= PHY_NEG_ADV_100H;

                if (portConfiguration & PORT_CONFIG_10MB_SPEED)
                        reg |= PHY_NEG_ADV_10H;
        }

        if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
                reg |= 1;

        ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
                            PHYAddr[qdev->mac_index]);

        ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

        ql_mii_write_reg_ex(qdev, CONTROL_REG,
                            reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
                            PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
        ql_phy_reset_ex(qdev);
        PHY_Setup(qdev);
        ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp, linkState;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_UP0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_UP1;
                break;
        }
        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck) {
                linkState = LS_UP;
        } else {
                linkState = LS_DOWN;
        }
        return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                            (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                             2) << 7)) {
                printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
                       qdev->ndev->name);
                return -1;
        }

        if (ql_is_fiber(qdev)) {
                ql_petbi_init(qdev);
        } else {
                /* Copper port */
                ql_phy_init_ex(qdev);
        }

        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
        return 0;
}
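/*
 * Runs once auto-negotiation has finished: if negotiation succeeded and we
 * are link master, program the MAC for the negotiated speed, duplex and
 * pause settings and bring the interface up; on a remote fault, restart
 * the port.
 */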
1537 static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1538 {
1539
1540         if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1541                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1542                          2) << 7))
1543                 return -1;
1544
1545         if (!ql_auto_neg_error(qdev)) {
1546                 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1547                         /* configure the MAC */
1548                         if (netif_msg_link(qdev))
1549                                 printk(KERN_DEBUG PFX
1550                                        "%s: Configuring link.\n",
1551                                        qdev->ndev->name);
1552                         ql_mac_cfg_soft_reset(qdev, 1);
1553                         ql_mac_cfg_gig(qdev,
1554                                        (ql_get_link_speed(qdev) ==
1555                                         SPEED_1000));
1556                         ql_mac_cfg_full_dup(qdev,
1557                                             ql_is_link_full_dup(qdev));
1558                         ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
1559                         ql_mac_cfg_soft_reset(qdev, 0);
1560
1561                         /* enable the MAC */
1562                         if (netif_msg_link(qdev))
1563                                 printk(KERN_DEBUG PFX
1564                                        "%s: Enabling mac.\n",
1565                                        qdev->ndev->name);
1566                         ql_mac_enable(qdev, 1);
1573                 }
1574
1575                 qdev->port_link_state = LS_UP;
1576                 netif_start_queue(qdev->ndev);
1577                 netif_carrier_on(qdev->ndev);
1578                 if (netif_msg_link(qdev))
1579                         printk(KERN_INFO PFX
1580                                "%s: Link is up at %d Mbps, %s duplex.\n",
1581                                qdev->ndev->name,
1582                                ql_get_link_speed(qdev),
1583                                ql_is_link_full_dup(qdev)
1584                                ? "full" : "half");
1585
1586         } else {        /* Remote error detected */
1587
1588                 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1589                         if (netif_msg_link(qdev))
1590                                 printk(KERN_DEBUG PFX
1591                                        "%s: Remote error detected. "
1592                                        "Calling ql_port_start().\n",
1593                                        qdev->ndev->name);
1594                         /*
1595                          * ql_port_start() is shared code and needs
1596                          * to lock the PHY on its own.
1597                          */
1599                         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1600                         if (ql_port_start(qdev))        /* Restart port */
1601                                 return -1;
1602                         else
1603                                 return 0;
1604                 }
1605         }
1606         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1607         return 0;
1608 }
1609
1610 static void ql_link_state_machine_work(struct work_struct *work)
1611 {
1612         struct ql3_adapter *qdev =
1613                 container_of(work, struct ql3_adapter, link_state_work.work);
1614
1615         u32 curr_link_state;
1616         unsigned long hw_flags;
1617
1618         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1619
1620         curr_link_state = ql_get_link_state(qdev);
1621
1622         if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1623                 if (netif_msg_link(qdev))
1624                         printk(KERN_INFO PFX
1625                                "%s: Reset in progress, skip processing link "
1626                                "state.\n", qdev->ndev->name);
1627
1628                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1629
1630                 /* Restart the timer on a 1 second interval. */
1631                 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1632
1633                 return;
1634         }
1635
1636         switch (qdev->port_link_state) {
1637         default:
1638                 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1639                         ql_port_start(qdev);
1640                 }
1641                 qdev->port_link_state = LS_DOWN;
1642                 /* Fall Through */
1643
1644         case LS_DOWN:
1645                 if (curr_link_state == LS_UP) {
1646                         if (netif_msg_link(qdev))
1647                                 printk(KERN_INFO PFX "%s: Link is up.\n",
1648                                        qdev->ndev->name);
1649                         if (ql_is_auto_neg_complete(qdev))
1650                                 ql_finish_auto_neg(qdev);
1651
1652                         if (qdev->port_link_state == LS_UP)
1653                                 ql_link_down_detect_clear(qdev);
1654
1655                         qdev->port_link_state = LS_UP;
1656                 }
1657                 break;
1658
1659         case LS_UP:
1660                 /*
1661                  * See if the link is currently down or went down and came
1662                  * back up
1663                  */
1664                 if (curr_link_state == LS_DOWN) {
1665                         if (netif_msg_link(qdev))
1666                                 printk(KERN_INFO PFX "%s: Link is down.\n",
1667                                        qdev->ndev->name);
1668                         qdev->port_link_state = LS_DOWN;
1669                 }
1670                 if (ql_link_down_detect(qdev))
1671                         qdev->port_link_state = LS_DOWN;
1672                 break;
1673         }
1674         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1675
1676         /* Restart the timer on a 1 second interval. */
1677         mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1678 }
1679
1680 /*
1681  * Caller must take hw_lock and QL_PHY_GIO_SEM.
1682  */
1683 static void ql_get_phy_owner(struct ql3_adapter *qdev)
1684 {
1685         if (ql_this_adapter_controls_port(qdev))
1686                 set_bit(QL_LINK_MASTER, &qdev->flags);
1687         else
1688                 clear_bit(QL_LINK_MASTER, &qdev->flags);
1689 }
1690
1691 /*
1692  * Caller must take hw_lock and QL_PHY_GIO_SEM.
1693  */
1694 static void ql_init_scan_mode(struct ql3_adapter *qdev)
1695 {
1696         ql_mii_enable_scan_mode(qdev);
1697
1698         if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1699                 if (ql_this_adapter_controls_port(qdev))
1700                         ql_petbi_init_ex(qdev);
1701         } else {
1702                 if (ql_this_adapter_controls_port(qdev))
1703                         ql_phy_init_ex(qdev);
1704         }
1705 }
1706
1707 /*
1708  * MII_Setup needs to be called before taking the PHY out of reset so that the
1709  * management interface clock speed can be set properly.  It would be better if
1710  * we had a way to disable MDC until after the PHY is out of reset, but we
1711  * don't have that capability.
1712  */
1713 static int ql_mii_setup(struct ql3_adapter *qdev)
1714 {
1715         u32 reg;
1716         struct ql3xxx_port_registers __iomem *port_regs =
1717                         qdev->mem_map_registers;
1718
1719         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1720                         (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1721                          2) << 7))
1722                 return -1;
1723
1724         if (qdev->device_id == QL3032_DEVICE_ID)
1725                 ql_write_page0_reg(qdev,
1726                         &port_regs->macMIIMgmtControlReg, 0x0f00000);
1727
1728         /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1729         reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1730
1731         ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1732                            reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1733
1734         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1735         return 0;
1736 }
1737
1738 static u32 ql_supported_modes(struct ql3_adapter *qdev)
1739 {
1740         u32 supported;
1741
1742         if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1743                 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1744                     | SUPPORTED_Autoneg;
1745         } else {
1746                 supported = SUPPORTED_10baseT_Half
1747                     | SUPPORTED_10baseT_Full
1748                     | SUPPORTED_100baseT_Half
1749                     | SUPPORTED_100baseT_Full
1750                     | SUPPORTED_1000baseT_Half
1751                     | SUPPORTED_1000baseT_Full
1752                     | SUPPORTED_Autoneg | SUPPORTED_TP;
1753         }
1754
1755         return supported;
1756 }
1757
1758 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1759 {
1760         int status;
1761         unsigned long hw_flags;
1762         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1763         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1764                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1765                          2) << 7)) {
1766                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1767                 return 0;
1768         }
1769         status = ql_is_auto_cfg(qdev);
1770         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1771         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1772         return status;
1773 }
1774
1775 static u32 ql_get_speed(struct ql3_adapter *qdev)
1776 {
1777         u32 status;
1778         unsigned long hw_flags;
1779         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1780         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1781                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1782                          2) << 7)) {
1783                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1784                 return 0;
1785         }
1786         status = ql_get_link_speed(qdev);
1787         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1788         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1789         return status;
1790 }
1791
1792 static int ql_get_full_dup(struct ql3_adapter *qdev)
1793 {
1794         int status;
1795         unsigned long hw_flags;
1796         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1797         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1798                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1799                          2) << 7)) {
1800                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1801                 return 0;
1802         }
1803         status = ql_is_link_full_dup(qdev);
1804         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1805         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1806         return status;
1807 }
1808
1809
1810 static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1811 {
1812         struct ql3_adapter *qdev = netdev_priv(ndev);
1813
1814         ecmd->transceiver = XCVR_INTERNAL;
1815         ecmd->supported = ql_supported_modes(qdev);
1816
1817         if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1818                 ecmd->port = PORT_FIBRE;
1819         } else {
1820                 ecmd->port = PORT_TP;
1821                 ecmd->phy_address = qdev->PHYAddr;
1822         }
1823         ecmd->advertising = ql_supported_modes(qdev);
1824         ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1825         ecmd->speed = ql_get_speed(qdev);
1826         ecmd->duplex = ql_get_full_dup(qdev);
1827         return 0;
1828 }
1829
1830 static void ql_get_drvinfo(struct net_device *ndev,
1831                            struct ethtool_drvinfo *drvinfo)
1832 {
1833         struct ql3_adapter *qdev = netdev_priv(ndev);
1834         strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
1835         strlcpy(drvinfo->version, ql3xxx_driver_version, sizeof(drvinfo->version));
1836         strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1837         strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), sizeof(drvinfo->bus_info));
1838         drvinfo->regdump_len = 0;
1839         drvinfo->eedump_len = 0;
1840 }
1841
1842 static u32 ql_get_msglevel(struct net_device *ndev)
1843 {
1844         struct ql3_adapter *qdev = netdev_priv(ndev);
1845         return qdev->msg_enable;
1846 }
1847
1848 static void ql_set_msglevel(struct net_device *ndev, u32 value)
1849 {
1850         struct ql3_adapter *qdev = netdev_priv(ndev);
1851         qdev->msg_enable = value;
1852 }
1853
1854 static void ql_get_pauseparam(struct net_device *ndev,
1855                               struct ethtool_pauseparam *pause)
1856 {
1857         struct ql3_adapter *qdev = netdev_priv(ndev);
1858         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1859
1860         u32 reg;
1861         if (qdev->mac_index == 0)
1862                 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1863         else
1864                 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1865
1866         pause->autoneg  = ql_get_auto_cfg_status(qdev);
1867         pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
1868         pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
1869 }
1870
1871 static const struct ethtool_ops ql3xxx_ethtool_ops = {
1872         .get_settings = ql_get_settings,
1873         .get_drvinfo = ql_get_drvinfo,
1874         .get_link = ethtool_op_get_link,
1875         .get_msglevel = ql_get_msglevel,
1876         .set_msglevel = ql_set_msglevel,
1877         .get_pauseparam = ql_get_pauseparam,
1878 };
1879
1880 static int ql_populate_free_queue(struct ql3_adapter *qdev)
1881 {
1882         struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1883         dma_addr_t map;
1884         int err;
1885
1886         while (lrg_buf_cb) {
1887                 if (!lrg_buf_cb->skb) {
1888                         lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
1889                                                            qdev->lrg_buffer_len);
1890                         if (unlikely(!lrg_buf_cb->skb)) {
1891                                 printk(KERN_DEBUG PFX
1892                                        "%s: Failed netdev_alloc_skb().\n",
1893                                        qdev->ndev->name);
1894                                 break;
1895                         } else {
1896                                 /*
1897                                  * We save some space to copy the ethhdr from
1898                                  * first buffer
1899                                  */
1900                                 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1901                                 map = pci_map_single(qdev->pdev,
1902                                                      lrg_buf_cb->skb->data,
1903                                                      qdev->lrg_buffer_len -
1904                                                      QL_HEADER_SPACE,
1905                                                      PCI_DMA_FROMDEVICE);
1906
1907                                 err = pci_dma_mapping_error(qdev->pdev, map);
1908                                 if (err) {
1909                                         printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
1910                                                qdev->ndev->name, err);
1911                                         dev_kfree_skb(lrg_buf_cb->skb);
1912                                         lrg_buf_cb->skb = NULL;
1913                                         break;
1914                                 }
1915
1917                                 lrg_buf_cb->buf_phy_addr_low =
1918                                     cpu_to_le32(LS_64BITS(map));
1919                                 lrg_buf_cb->buf_phy_addr_high =
1920                                     cpu_to_le32(MS_64BITS(map));
1921                                 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1922                                 dma_unmap_len_set(lrg_buf_cb, maplen,
1923                                                   qdev->lrg_buffer_len -
1924                                                   QL_HEADER_SPACE);
1925                                 --qdev->lrg_buf_skb_check;
1926                                 if (!qdev->lrg_buf_skb_check)
1927                                         return 1;
1928                         }
1929                 }
1930                 lrg_buf_cb = lrg_buf_cb->next;
1931         }
1932         return 0;
1933 }
1934
1935 /*
1936  * Caller holds hw_lock.
1937  */
1938 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1939 {
1940         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1941         if (qdev->small_buf_release_cnt >= 16) {
1942                 while (qdev->small_buf_release_cnt >= 16) {
1943                         qdev->small_buf_q_producer_index++;
1944
1945                         if (qdev->small_buf_q_producer_index ==
1946                             NUM_SBUFQ_ENTRIES)
1947                                 qdev->small_buf_q_producer_index = 0;
1948                         qdev->small_buf_release_cnt -= 8;
1949                 }
1950                 wmb();
1951                 writel(qdev->small_buf_q_producer_index,
1952                         &port_regs->CommonRegs.rxSmallQProducerIndex);
1953         }
1954 }
1955
1956 /*
1957  * Caller holds hw_lock.
1958  */
1959 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1960 {
1961         struct bufq_addr_element *lrg_buf_q_ele;
1962         int i;
1963         struct ql_rcv_buf_cb *lrg_buf_cb;
1964         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1965
1966         if ((qdev->lrg_buf_free_count >= 8) &&
1967             (qdev->lrg_buf_release_cnt >= 16)) {
1968
1969                 if (qdev->lrg_buf_skb_check)
1970                         if (!ql_populate_free_queue(qdev))
1971                                 return;
1972
1973                 lrg_buf_q_ele = qdev->lrg_buf_next_free;
1974
1975                 while ((qdev->lrg_buf_release_cnt >= 16) &&
1976                        (qdev->lrg_buf_free_count >= 8)) {
1977
1978                         for (i = 0; i < 8; i++) {
1979                                 lrg_buf_cb =
1980                                     ql_get_from_lrg_buf_free_list(qdev);
1981                                 lrg_buf_q_ele->addr_high =
1982                                     lrg_buf_cb->buf_phy_addr_high;
1983                                 lrg_buf_q_ele->addr_low =
1984                                     lrg_buf_cb->buf_phy_addr_low;
1985                                 lrg_buf_q_ele++;
1986
1987                                 qdev->lrg_buf_release_cnt--;
1988                         }
1989
1990                         qdev->lrg_buf_q_producer_index++;
1991
1992                         if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
1993                                 qdev->lrg_buf_q_producer_index = 0;
1994
1995                         if (qdev->lrg_buf_q_producer_index ==
1996                             (qdev->num_lbufq_entries - 1)) {
1997                                 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1998                         }
1999                 }
2000                 wmb();
2001                 qdev->lrg_buf_next_free = lrg_buf_q_ele;
2002                 writel(qdev->lrg_buf_q_producer_index,
2003                         &port_regs->CommonRegs.rxLargeQProducerIndex);
2004         }
2005 }
2006
2007 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
2008                                    struct ob_mac_iocb_rsp *mac_rsp)
2009 {
2010         struct ql_tx_buf_cb *tx_cb;
2011         int i;
2012         int retval = 0;
2013
2014         if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2015                 printk(KERN_WARNING "Frame was short; it was padded and sent.\n");
2016         }
2017
2018         tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
2019
2020         /*  Check the transmit response flags for any errors */
2021         if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2022                 printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
2023
2024                 qdev->ndev->stats.tx_errors++;
2025                 retval = -EIO;
2026                 goto frame_not_sent;
2027         }
2028
2029         if (tx_cb->seg_count == 0) {
2030                 printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
2031
2032                 qdev->ndev->stats.tx_errors++;
2033                 retval = -EIO;
2034                 goto invalid_seg_count;
2035         }
2036
2037         pci_unmap_single(qdev->pdev,
2038                          dma_unmap_addr(&tx_cb->map[0], mapaddr),
2039                          dma_unmap_len(&tx_cb->map[0], maplen),
2040                          PCI_DMA_TODEVICE);
2041         tx_cb->seg_count--;
2042         if (tx_cb->seg_count) {
2043                 for (i = 1; i < tx_cb->seg_count; i++) {
2044                         pci_unmap_page(qdev->pdev,
2045                                        dma_unmap_addr(&tx_cb->map[i],
2046                                                       mapaddr),
2047                                        dma_unmap_len(&tx_cb->map[i], maplen),
2048                                        PCI_DMA_TODEVICE);
2049                 }
2050         }
2051         qdev->ndev->stats.tx_packets++;
2052         qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
2053
2054 frame_not_sent:
2055         dev_kfree_skb_irq(tx_cb->skb);
2056         tx_cb->skb = NULL;
2057
2058 invalid_seg_count:
2059         atomic_inc(&qdev->tx_count);
2060 }
2061
2062 static void ql_get_sbuf(struct ql3_adapter *qdev)
2063 {
2064         if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
2065                 qdev->small_buf_index = 0;
2066         qdev->small_buf_release_cnt++;
2067 }
2068
2069 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
2070 {
2071         struct ql_rcv_buf_cb *lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
2072
2073         qdev->lrg_buf_release_cnt++;
2074         if (++qdev->lrg_buf_index == qdev->num_large_buffers)
2075                 qdev->lrg_buf_index = 0;
2076         return lrg_buf_cb;
2077 }
2078
2079 /*
2080  * The difference between 3022 and 3032 for inbound completions:
2081  * 3022 uses two buffers per completion.  The first buffer contains
2082  * (some) header info, the second the remainder of the headers plus
2083  * the data.  For this chip we reserve some space at the top of the
2084  * receive buffer so that the header info in buffer one can be
2085  * prepended to buffer two.  Buffer two is then sent up while
2086  * buffer one is returned to the hardware to be reused.
2087  * 3032 receives all of its data and headers in one buffer for a
2088  * simpler process.  3032 also supports checksum verification as
2089  * can be seen in ql_process_macip_rx_intr().
2090  */
2091 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2092                                    struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
2093 {
2094         struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2095         struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2096         struct sk_buff *skb;
2097         u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2098
2099         /*
2100          * Get the inbound address list (small buffer).
2101          */
2102         ql_get_sbuf(qdev);
2103
2104         if (qdev->device_id == QL3022_DEVICE_ID)
2105                 lrg_buf_cb1 = ql_get_lbuf(qdev);
2106
2107         /* start of second buffer */
2108         lrg_buf_cb2 = ql_get_lbuf(qdev);
2109         skb = lrg_buf_cb2->skb;
2110
2111         qdev->ndev->stats.rx_packets++;
2112         qdev->ndev->stats.rx_bytes += length;
2113
2114         skb_put(skb, length);
2115         pci_unmap_single(qdev->pdev,
2116                          dma_unmap_addr(lrg_buf_cb2, mapaddr),
2117                          dma_unmap_len(lrg_buf_cb2, maplen),
2118                          PCI_DMA_FROMDEVICE);
2119         prefetch(skb->data);
2120         skb->ip_summed = CHECKSUM_NONE;
2121         skb->protocol = eth_type_trans(skb, qdev->ndev);
2122
2123         netif_receive_skb(skb);
2124         lrg_buf_cb2->skb = NULL;
2125
2126         if (qdev->device_id == QL3022_DEVICE_ID)
2127                 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2128         ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2129 }
2130
2131 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2132                                      struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2133 {
2134         struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2135         struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2136         struct sk_buff *skb1 = NULL, *skb2;
2137         struct net_device *ndev = qdev->ndev;
2138         u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2139         u16 size = 0;
2140
2141         /*
2142          * Get the inbound address list (small buffer).
2143          */
2144
2145         ql_get_sbuf(qdev);
2146
2147         if (qdev->device_id == QL3022_DEVICE_ID) {
2148                 /* start of first buffer on 3022 */
2149                 lrg_buf_cb1 = ql_get_lbuf(qdev);
2150                 skb1 = lrg_buf_cb1->skb;
2151                 size = ETH_HLEN;
2152                 if (*((u16 *) skb1->data) != 0xFFFF)
2153                         size += VLAN_ETH_HLEN - ETH_HLEN;
2154         }
2155
2156         /* start of second buffer */
2157         lrg_buf_cb2 = ql_get_lbuf(qdev);
2158         skb2 = lrg_buf_cb2->skb;
2159
2160         skb_put(skb2, length);  /* Just the second buffer length here. */
2161         pci_unmap_single(qdev->pdev,
2162                          dma_unmap_addr(lrg_buf_cb2, mapaddr),
2163                          dma_unmap_len(lrg_buf_cb2, maplen),
2164                          PCI_DMA_FROMDEVICE);
2165         prefetch(skb2->data);
2166
2167         skb2->ip_summed = CHECKSUM_NONE;
2168         if (qdev->device_id == QL3022_DEVICE_ID) {
2169                 /*
2170                  * Copy the ethhdr from first buffer to second. This
2171                  * is necessary for 3022 IP completions.
2172                  */
2173                 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2174                                                  skb_push(skb2, size), size);
2175         } else {
2176                 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2177                 if (checksum &
2178                         (IB_IP_IOCB_RSP_3032_ICE |
2179                          IB_IP_IOCB_RSP_3032_CE)) {
2180                         printk(KERN_ERR
2181                                "%s: Bad checksum for this %s packet, checksum = %x.\n",
2182                                __func__,
2183                                ((checksum &
2184                                 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
2185                                 "UDP"), checksum);
2186                 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2187                                 (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2188                                 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
2189                         skb2->ip_summed = CHECKSUM_UNNECESSARY;
2190                 }
2191         }
2192         skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2193
2194         netif_receive_skb(skb2);
2195         ndev->stats.rx_packets++;
2196         ndev->stats.rx_bytes += length;
2197         lrg_buf_cb2->skb = NULL;
2198
2199         if (qdev->device_id == QL3022_DEVICE_ID)
2200                 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2201         ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2202 }
2203
2204 static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2205                           int *tx_cleaned, int *rx_cleaned, int work_to_do)
2206 {
2207         struct net_rsp_iocb *net_rsp;
2208         struct net_device *ndev = qdev->ndev;
2209         int work_done = 0;
2210
2211         /* While there are entries in the completion queue. */
2212         while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2213                 qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2214
2215                 net_rsp = qdev->rsp_current;
2216                 rmb();
2217                 /*
2218                  * Work around the 3032 chip's undocumented "feature" where bit 8 is set if the
2219                  * inbound completion is for a VLAN.
2220                  */
2221                 if (qdev->device_id == QL3032_DEVICE_ID)
2222                         net_rsp->opcode &= 0x7f;
2223                 switch (net_rsp->opcode) {
2224
2225                 case OPCODE_OB_MAC_IOCB_FN0:
2226                 case OPCODE_OB_MAC_IOCB_FN2:
2227                         ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2228                                                net_rsp);
2229                         (*tx_cleaned)++;
2230                         break;
2231
2232                 case OPCODE_IB_MAC_IOCB:
2233                 case OPCODE_IB_3032_MAC_IOCB:
2234                         ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2235                                                net_rsp);
2236                         (*rx_cleaned)++;
2237                         break;
2238
2239                 case OPCODE_IB_IP_IOCB:
2240                 case OPCODE_IB_3032_IP_IOCB:
2241                         ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2242                                                  net_rsp);
2243                         (*rx_cleaned)++;
2244                         break;
2245                 default:
2246                         {
2247                                 u32 *tmp = (u32 *) net_rsp;
2248                                 printk(KERN_ERR PFX
2249                                        "%s: Hit default case, not "
2250                                        "handled!\n"
2251                                        "        dropping the packet, opcode = "
2252                                        "%x.\n",
2253                                        ndev->name, net_rsp->opcode);
2254                                 printk(KERN_ERR PFX
2255                                        "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2256                                        (unsigned long int)tmp[0],
2257                                        (unsigned long int)tmp[1],
2258                                        (unsigned long int)tmp[2],
2259                                        (unsigned long int)tmp[3]);
2260                         }
2261                 }
2262
2263                 qdev->rsp_consumer_index++;
2264
2265                 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2266                         qdev->rsp_consumer_index = 0;
2267                         qdev->rsp_current = qdev->rsp_q_virt_addr;
2268                 } else {
2269                         qdev->rsp_current++;
2270                 }
2271
2272                 work_done = *tx_cleaned + *rx_cleaned;
2273         }
2274
2275         return work_done;
2276 }
2277
2278 static int ql_poll(struct napi_struct *napi, int budget)
2279 {
2280         struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2281         int rx_cleaned = 0, tx_cleaned = 0;
2282         unsigned long hw_flags;
2283         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2284
2285         ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2286
2287         if (tx_cleaned + rx_cleaned != budget) {
2288                 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2289                 __napi_complete(napi);
2290                 ql_update_small_bufq_prod_index(qdev);
2291                 ql_update_lrg_bufq_prod_index(qdev);
2292                 writel(qdev->rsp_consumer_index,
2293                             &port_regs->CommonRegs.rspQConsumerIndex);
2294                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2295
2296                 ql_enable_interrupts(qdev);
2297         }
2298         return tx_cleaned + rx_cleaned;
2299 }
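
/*
 * Hedged aside (editor's sketch, not part of this driver): ql3xxx_isr()
 * below and ql_poll() above follow the standard NAPI contract -- the ISR
 * masks chip interrupts and schedules the poller, and the poller re-enables
 * them only once it has consumed less than its budget.  A minimal generic
 * form of that contract; my_poll, my_clean_rings and my_enable_irqs are
 * illustrative names, not functions in this file.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
        /* my_clean_rings() is assumed to process at most 'budget'
         * completions and return how many it handled. */
        int done = my_clean_rings(napi, budget);

        if (done < budget) {
                /* No work left: leave polled mode and unmask the IRQ. */
                napi_complete(napi);
                my_enable_irqs(napi);
        }
        return done;
}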
2300
2301 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2302 {
2304         struct net_device *ndev = dev_id;
2305         struct ql3_adapter *qdev = netdev_priv(ndev);
2306         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2307         u32 value;
2308         int handled = 1;
2309         u32 var;
2310
2313         value =
2314             ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2315
2316         if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2317                 spin_lock(&qdev->adapter_lock);
2318                 netif_stop_queue(qdev->ndev);
2319                 netif_carrier_off(qdev->ndev);
2320                 ql_disable_interrupts(qdev);
2321                 qdev->port_link_state = LS_DOWN;
2322                 set_bit(QL_RESET_ACTIVE, &qdev->flags);
2323
2324                 if (value & ISP_CONTROL_FE) {
2325                         /*
2326                          * Chip Fatal Error.
2327                          */
2328                         var =
2329                             ql_read_page0_reg_l(qdev,
2330                                               &port_regs->PortFatalErrStatus);
2331                         printk(KERN_WARNING PFX
2332                                "%s: Resetting chip. PortFatalErrStatus "
2333                                "register = 0x%x\n", ndev->name, var);
2334                         set_bit(QL_RESET_START, &qdev->flags);
2335                 } else {
2336                         /*
2337                          * Soft Reset Requested.
2338                          */
2339                         set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2340                         printk(KERN_ERR PFX
2341                                "%s: Another function issued a reset to the "
2342                                "chip. ISR value = %x.\n", ndev->name, value);
2343                 }
2344                 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2345                 spin_unlock(&qdev->adapter_lock);
2346         } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2347                 ql_disable_interrupts(qdev);
2348                 if (likely(napi_schedule_prep(&qdev->napi))) {
2349                         __napi_schedule(&qdev->napi);
2350                 }
2351         } else {
2352                 return IRQ_NONE;
2353         }
2354
2355         return IRQ_RETVAL(handled);
2356 }
2357
2358 /*
2359  * Get the total number of segments needed for the
2360  * given number of fragments.  This is necessary because
2361  * outbound address lists (OAL) will be used when more than
2362  * two frags are given.  Each address list has 5 addr/len
2363  * pairs.  The 5th pair in each OAL is used to point to
2364  * the next OAL if more frags are coming.
2365  * That is why the frags:segment count ratio is not linear.
2366  */
2367 static int ql_get_seg_count(struct ql3_adapter *qdev,
2368                             unsigned short frags)
2369 {
2370         if (qdev->device_id == QL3022_DEVICE_ID)
2371                 return 1;
2372
2373         switch(frags) {
2374         case 0: return 1;       /* just the skb->data seg */
2375         case 1: return 2;       /* skb->data + 1 frag */
2376         case 2: return 3;       /* skb->data + 2 frags */
2377         case 3: return 5;       /* skb->data + 1 frag + 1 OAL containing 2 frags */
2378         case 4: return 6;
2379         case 5: return 7;
2380         case 6: return 8;
2381         case 7: return 10;
2382         case 8: return 11;
2383         case 9: return 12;
2384         case 10: return 13;
2385         case 11: return 15;
2386         case 12: return 16;
2387         case 13: return 17;
2388         case 14: return 18;
2389         case 15: return 20;
2390         case 16: return 21;
2391         case 17: return 22;
2392         case 18: return 23;
2393         }
2394         return -1;
2395 }
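
/*
 * Hedged illustration (editor's sketch, not part of this driver): the
 * table above follows from two capacity rules -- the IOCB holds 3 addr/len
 * pairs and each OAL holds 5, and the last pair of the IOCB or of a full
 * OAL is spent on a continuation pointer whenever more entries still
 * follow.  This hypothetical closed form reproduces the table for 0..18
 * frags on the 3032; ql_seg_count_formula() is an illustrative name.
 */
static int ql_seg_count_formula(unsigned short frags)
{
        int entries = frags + 1;        /* skb->data plus one per frag */
        int segs;

        if (entries <= 3)
                return entries;         /* everything fits in the IOCB */

        segs = 3;                       /* IOCB: 2 data pairs + 1 continuation */
        entries -= 2;
        while (entries > 5) {           /* full OALs: 4 data + 1 continuation */
                segs += 5;
                entries -= 4;
        }
        return segs + entries;          /* last OAL uses all 5 pairs for data */
}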
2396
2397 static void ql_hw_csum_setup(const struct sk_buff *skb,
2398                              struct ob_mac_iocb_req *mac_iocb_ptr)
2399 {
2400         const struct iphdr *ip = ip_hdr(skb);
2401
2402         mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2403         mac_iocb_ptr->ip_hdr_len = ip->ihl;
2404
2405         if (ip->protocol == IPPROTO_TCP) {
2406                 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2407                         OB_3032MAC_IOCB_REQ_IC;
2408         } else {
2409                 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2410                         OB_3032MAC_IOCB_REQ_IC;
2411         }
2412
2413 }
2414
2415 /*
2416  * Map the buffers for this transmit.  This will return
2417  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2418  */
2419 static int ql_send_map(struct ql3_adapter *qdev,
2420                                 struct ob_mac_iocb_req *mac_iocb_ptr,
2421                                 struct ql_tx_buf_cb *tx_cb,
2422                                 struct sk_buff *skb)
2423 {
2424         struct oal *oal;
2425         struct oal_entry *oal_entry;
2426         int len = skb_headlen(skb);
2427         dma_addr_t map;
2428         int err;
2429         int completed_segs, i;
2430         int seg_cnt, seg = 0;
2431         int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2432
2433         seg_cnt = tx_cb->seg_count;
2434         /*
2435          * Map the skb buffer first.
2436          */
2437         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2438
2439         err = pci_dma_mapping_error(qdev->pdev, map);
2440         if (err) {
2441                 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2442                        qdev->ndev->name, err);
2444                 return NETDEV_TX_BUSY;
2445         }
2446
2447         oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2448         oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2449         oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2450         oal_entry->len = cpu_to_le32(len);
2451         dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2452         dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
2453         seg++;
2454
2455         if (seg_cnt == 1) {
2456                 /* Terminate the last segment. */
2457                 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2458         } else {
2459                 oal = tx_cb->oal;
2460                 for (completed_segs = 0; completed_segs < frag_cnt; completed_segs++, seg++) {
2461                         skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2462                         oal_entry++;
2463                         if ((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
2464                             (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
2465                             (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
2466                             (seg == 17 && seg_cnt > 18)) {
2467                                 /* Continuation entry points to outbound address list. */
2468                                 map = pci_map_single(qdev->pdev, oal,
2469                                                      sizeof(struct oal),
2470                                                      PCI_DMA_TODEVICE);
2471
2472                                 err = pci_dma_mapping_error(qdev->pdev, map);
2473                                 if (err) {
2475                                         printk(KERN_ERR "%s: PCI mapping of outbound address list failed with error: %d\n",
2476                                                qdev->ndev->name, err);
2477                                         goto map_error;
2478                                 }
2479
2480                                 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2481                                 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2482                                 oal_entry->len =
2483                                     cpu_to_le32(sizeof(struct oal) |
2484                                                 OAL_CONT_ENTRY);
2485                                 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2486                                                    map);
2487                                 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2488                                                   sizeof(struct oal));
2489                                 oal_entry = (struct oal_entry *)oal;
2490                                 oal++;
2491                                 seg++;
2492                         }
2493
2494                         map =
2495                             pci_map_page(qdev->pdev, frag->page,
2496                                          frag->page_offset, frag->size,
2497                                          PCI_DMA_TODEVICE);
2498
2499                         err = pci_dma_mapping_error(qdev->pdev, map);
2500                         if (err) {
2501                                 printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
2502                                        qdev->ndev->name, err);
2503                                 goto map_error;
2504                         }
2505
2506                         oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2507                         oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2508                         oal_entry->len = cpu_to_le32(frag->size);
2509                         dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2510                         dma_unmap_len_set(&tx_cb->map[seg], maplen,
2511                                           frag->size);
2512                 }
2513                 /* Terminate the last segment. */
2514                 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2515         }
2516
2517         return NETDEV_TX_OK;
2518
2519 map_error:
2520         /* A PCI mapping failed, so back out: walk the OALs and the
2521          * pages that have already been mapped and unmap them to clean
2522          * up properly.
2523          */
2524
2525         seg = 1;
2526         oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2527         oal = tx_cb->oal;
2528         for (i = 0; i < completed_segs; i++, seg++) {
2529                 oal_entry++;
2530
2531                 if ((seg == 2 && seg_cnt > 3) ||       /* Check for continuation */
2532                    (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
2533                    (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
2534                    (seg == 17 && seg_cnt > 18)) {
2535                         pci_unmap_single(qdev->pdev,
2536                                 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2537                                 dma_unmap_len(&tx_cb->map[seg], maplen),
2538                                  PCI_DMA_TODEVICE);
2539                         oal++;
2540                         seg++;
2541                 }
2542
2543                 pci_unmap_page(qdev->pdev,
2544                                dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2545                                dma_unmap_len(&tx_cb->map[seg], maplen),
2546                                PCI_DMA_TODEVICE);
2547         }
2548
2549         pci_unmap_single(qdev->pdev,
2550                          dma_unmap_addr(&tx_cb->map[0], mapaddr),
2551                          dma_unmap_len(&tx_cb->map[0], maplen),
2552                          PCI_DMA_TODEVICE);
2553
2554         return NETDEV_TX_BUSY;
2556 }
2557
2558 /*
2559  * The difference between 3022 and 3032 sends:
2560  * 3022 only supports a simple single segment transmission.
2561  * 3032 supports checksumming and scatter/gather lists (fragments).
2562  * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2563  * in the IOCB plus a chain of outbound address lists (OAL) that
2564  * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
2565  * will used to point to an OAL when more ALP entries are required.
2566  * will be used to point to an OAL when more ALP entries are required.
2567  * OALs (when necessary).
2568  */
2569 static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2570                                struct net_device *ndev)
2571 {
2572         struct ql3_adapter *qdev = netdev_priv(ndev);
2573         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2574         struct ql_tx_buf_cb *tx_cb;
2575         u32 tot_len = skb->len;
2576         struct ob_mac_iocb_req *mac_iocb_ptr;
2577
2578         if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2579                 return NETDEV_TX_BUSY;
2580         }
2581
2582         tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2583         tx_cb->seg_count = ql_get_seg_count(qdev, skb_shinfo(skb)->nr_frags);
2584         if (tx_cb->seg_count == -1) {
2585                 printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
2586                 return NETDEV_TX_OK;
2587         }
2588
2589         mac_iocb_ptr = tx_cb->queue_entry;
2590         memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2591         mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2592         mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2593         mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2594         mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2595         mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2596         tx_cb->skb = skb;
2597         if (qdev->device_id == QL3032_DEVICE_ID &&
2598             skb->ip_summed == CHECKSUM_PARTIAL)
2599                 ql_hw_csum_setup(skb, mac_iocb_ptr);
2600
2601         if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2602                 printk(KERN_ERR PFX "%s: Could not map the segments!\n", __func__);
2603                 return NETDEV_TX_BUSY;
2604         }
2605
2606         wmb();
2607         qdev->req_producer_index++;
2608         if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2609                 qdev->req_producer_index = 0;
2610         wmb();
2611         ql_write_common_reg_l(qdev,
2612                             &port_regs->CommonRegs.reqQProducerIndex,
2613                             qdev->req_producer_index);
2614
2615         if (netif_msg_tx_queued(qdev))
2616                 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2617                        ndev->name, qdev->req_producer_index, skb->len);
2618
2619         atomic_dec(&qdev->tx_count);
2620         return NETDEV_TX_OK;
2621 }
2622
2623 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2624 {
2625         qdev->req_q_size =
2626             (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2627
2628         qdev->req_q_virt_addr =
2629             pci_alloc_consistent(qdev->pdev,
2630                                  (size_t) qdev->req_q_size,
2631                                  &qdev->req_q_phy_addr);
2632
2633         if ((qdev->req_q_virt_addr == NULL) ||
2634             (LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1))) {
2635                 printk(KERN_ERR PFX "%s: reqQ failed.\n",
2636                        qdev->ndev->name);
2637                 return -ENOMEM;
2638         }
2639
2640         qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2641
2642         qdev->rsp_q_virt_addr =
2643             pci_alloc_consistent(qdev->pdev,
2644                                  (size_t) qdev->rsp_q_size,
2645                                  &qdev->rsp_q_phy_addr);
2646
2647         if ((qdev->rsp_q_virt_addr == NULL) ||
2648             (LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1))) {
2649                 printk(KERN_ERR PFX
2650                        "%s: rspQ allocation failed\n",
2651                        qdev->ndev->name);
2652                 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2653                                     qdev->req_q_virt_addr,
2654                                     qdev->req_q_phy_addr);
2655                 return -ENOMEM;
2656         }
2657
2658         set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2659
2660         return 0;
2661 }
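
/*
 * Hedged aside (editor's sketch, not part of this driver): the
 * "addr & (size - 1)" tests above are the usual natural-alignment check
 * and are only meaningful when size is a power of two.  Stated generically
 * below; is_naturally_aligned() is an illustrative name.
 */
static inline int is_naturally_aligned(u32 addr, u32 size)
{
        /* size - 1 is a mask of the low-order bits that must be clear,
         * assuming size is a power of two. */
        return (addr & (size - 1)) == 0;
}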
2662
2663 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2664 {
2665         if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2666                 printk(KERN_INFO PFX
2667                        "%s: Already done.\n", qdev->ndev->name);
2668                 return;
2669         }
2670
2671         pci_free_consistent(qdev->pdev,
2672                             qdev->req_q_size,
2673                             qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2674
2675         qdev->req_q_virt_addr = NULL;
2676
2677         pci_free_consistent(qdev->pdev,
2678                             qdev->rsp_q_size,
2679                             qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2680
2681         qdev->rsp_q_virt_addr = NULL;
2682
2683         clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2684 }
2685
2686 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2687 {
2688         /* Create Large Buffer Queue */
2689         qdev->lrg_buf_q_size =
2690             qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2691         if (qdev->lrg_buf_q_size < PAGE_SIZE)
2692                 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2693         else
2694                 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2695
2696         qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), GFP_KERNEL);
2697         if (qdev->lrg_buf == NULL) {
2698                 printk(KERN_ERR PFX
2699                        "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2700                 return -ENOMEM;
2701         }
2702
2703         qdev->lrg_buf_q_alloc_virt_addr =
2704             pci_alloc_consistent(qdev->pdev,
2705                                  qdev->lrg_buf_q_alloc_size,
2706                                  &qdev->lrg_buf_q_alloc_phy_addr);
2707
2708         if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2709                 printk(KERN_ERR PFX
2710                        "%s: lBufQ failed\n", qdev->ndev->name);
2711                 return -ENOMEM;
2712         }
2713         qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2714         qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2715
2716         /* Create Small Buffer Queue */
2717         qdev->small_buf_q_size =
2718             NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2719         if (qdev->small_buf_q_size < PAGE_SIZE)
2720                 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2721         else
2722                 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2723
2724         qdev->small_buf_q_alloc_virt_addr =
2725             pci_alloc_consistent(qdev->pdev,
2726                                  qdev->small_buf_q_alloc_size,
2727                                  &qdev->small_buf_q_alloc_phy_addr);
2728
2729         if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2730                 printk(KERN_ERR PFX
2731                        "%s: Small Buffer Queue allocation failed.\n",
2732                        qdev->ndev->name);
2733                 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2734                                     qdev->lrg_buf_q_alloc_virt_addr,
2735                                     qdev->lrg_buf_q_alloc_phy_addr);
2736                 return -ENOMEM;
2737         }
2738
2739         qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2740         qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2741         set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2742         return 0;
2743 }
2744
2745 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2746 {
2747         if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2748                 printk(KERN_INFO PFX
2749                        "%s: Already done.\n", qdev->ndev->name);
2750                 return;
2751         }
2752         kfree(qdev->lrg_buf);
2753         pci_free_consistent(qdev->pdev,
2754                             qdev->lrg_buf_q_alloc_size,
2755                             qdev->lrg_buf_q_alloc_virt_addr,
2756                             qdev->lrg_buf_q_alloc_phy_addr);
2757
2758         qdev->lrg_buf_q_virt_addr = NULL;
2759
2760         pci_free_consistent(qdev->pdev,
2761                             qdev->small_buf_q_alloc_size,
2762                             qdev->small_buf_q_alloc_virt_addr,
2763                             qdev->small_buf_q_alloc_phy_addr);
2764
2765         qdev->small_buf_q_virt_addr = NULL;
2766
2767         clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2768 }
2769
2770 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2771 {
2772         int i;
2773         struct bufq_addr_element *small_buf_q_entry;
2774
2775         /* Currently we allocate one chunk of memory and use it for small buffers */
2776         qdev->small_buf_total_size =
2777             (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2778              QL_SMALL_BUFFER_SIZE);
2779
2780         qdev->small_buf_virt_addr =
2781             pci_alloc_consistent(qdev->pdev,
2782                                  qdev->small_buf_total_size,
2783                                  &qdev->small_buf_phy_addr);
2784
2785         if (qdev->small_buf_virt_addr == NULL) {
2786                 printk(KERN_ERR PFX
2787                        "%s: Failed to get small buffer memory.\n",
2788                        qdev->ndev->name);
2789                 return -ENOMEM;
2790         }
2791
2792         qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2793         qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2794
2795         small_buf_q_entry = qdev->small_buf_q_virt_addr;
2796
2797         /* Initialize the small buffer queue. */
2798         for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2799                 small_buf_q_entry->addr_high =
2800                     cpu_to_le32(qdev->small_buf_phy_addr_high);
2801                 small_buf_q_entry->addr_low =
2802                     cpu_to_le32(qdev->small_buf_phy_addr_low +
2803                                 (i * QL_SMALL_BUFFER_SIZE));
2804                 small_buf_q_entry++;
2805         }
2806         qdev->small_buf_index = 0;
2807         set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2808         return 0;
2809 }
2810
2811 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2812 {
2813         if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2814                 printk(KERN_INFO PFX
2815                        "%s: Already done.\n", qdev->ndev->name);
2816                 return;
2817         }
2818         if (qdev->small_buf_virt_addr != NULL) {
2819                 pci_free_consistent(qdev->pdev,
2820                                     qdev->small_buf_total_size,
2821                                     qdev->small_buf_virt_addr,
2822                                     qdev->small_buf_phy_addr);
2823
2824                 qdev->small_buf_virt_addr = NULL;
2825         }
2826 }
2827
2828 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2829 {
2830         int i = 0;
2831         struct ql_rcv_buf_cb *lrg_buf_cb;
2832
2833         for (i = 0; i < qdev->num_large_buffers; i++) {
2834                 lrg_buf_cb = &qdev->lrg_buf[i];
2835                 if (lrg_buf_cb->skb) {
2836                         dev_kfree_skb(lrg_buf_cb->skb);
2837                         pci_unmap_single(qdev->pdev,
2838                                          dma_unmap_addr(lrg_buf_cb, mapaddr),
2839                                          dma_unmap_len(lrg_buf_cb, maplen),
2840                                          PCI_DMA_FROMDEVICE);
2841                         memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2842                 } else {
2843                         break;
2844                 }
2845         }
2846 }
2847
2848 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2849 {
2850         int i;
2851         struct ql_rcv_buf_cb *lrg_buf_cb;
2852         struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2853
2854         for (i = 0; i < qdev->num_large_buffers; i++) {
2855                 lrg_buf_cb = &qdev->lrg_buf[i];
2856                 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2857                 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2858                 buf_addr_ele++;
2859         }
2860         qdev->lrg_buf_index = 0;
2861         qdev->lrg_buf_skb_check = 0;
2862 }
2863
2864 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2865 {
2866         int i;
2867         struct ql_rcv_buf_cb *lrg_buf_cb;
2868         struct sk_buff *skb;
2869         dma_addr_t map;
2870         int err;
2871
2872         for (i = 0; i < qdev->num_large_buffers; i++) {
2873                 skb = netdev_alloc_skb(qdev->ndev,
2874                                        qdev->lrg_buffer_len);
2875                 if (unlikely(!skb)) {
2876                         /* Allocation failed; unwind and fail. */
2877                         printk(KERN_ERR PFX
2878                                "%s: large buffer alloc failed "
2879                                "for %d bytes at index %d.\n",
2880                                qdev->ndev->name,
2881                                qdev->lrg_buffer_len, i);
2882                         ql_free_large_buffers(qdev);
2883                         return -ENOMEM;
2884                 } else {
2885
2886                         lrg_buf_cb = &qdev->lrg_buf[i];
2887                         memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2888                         lrg_buf_cb->index = i;
2889                         lrg_buf_cb->skb = skb;
2890                         /*
2891                          * We save some space to copy the ethhdr from first
2892                          * buffer
2893                          */
2894                         skb_reserve(skb, QL_HEADER_SPACE);
2895                         map = pci_map_single(qdev->pdev,
2896                                              skb->data,
2897                                              qdev->lrg_buffer_len -
2898                                              QL_HEADER_SPACE,
2899                                              PCI_DMA_FROMDEVICE);
2900
2901                         err = pci_dma_mapping_error(qdev->pdev, map);
2902                         if (err) {
2903                                 printk(KERN_ERR PFX "%s: PCI mapping failed with error: %d\n",
2904                                        qdev->ndev->name, err);
                                        /* This skb was never mapped; drop it
                                         * before unwinding the buffers that
                                         * were. */
                                        dev_kfree_skb(skb);
                                        lrg_buf_cb->skb = NULL;
2905                                 ql_free_large_buffers(qdev);
2906                                 return -ENOMEM;
2907                         }
2908
2909                         dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2910                         dma_unmap_len_set(lrg_buf_cb, maplen,
2911                                           qdev->lrg_buffer_len -
2912                                           QL_HEADER_SPACE);
2913                         lrg_buf_cb->buf_phy_addr_low =
2914                             cpu_to_le32(LS_64BITS(map));
2915                         lrg_buf_cb->buf_phy_addr_high =
2916                             cpu_to_le32(MS_64BITS(map));
2917                 }
2918         }
2919         return 0;
2920 }
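/*
 * Each large receive buffer is its own skb: QL_HEADER_SPACE is reserved
 * at the head (room to copy the Ethernet header from a frame's first
 * buffer), the remainder is DMA-mapped for the chip, and the bus address
 * is stored as the little-endian high/low halves the bufq entries expect.
 */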
2921
2922 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2923 {
2924         struct ql_tx_buf_cb *tx_cb;
2925         int i;
2926
2927         tx_cb = &qdev->tx_buf[0];
2928         for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2929                 if (tx_cb->oal) {
2930                         kfree(tx_cb->oal);
2931                         tx_cb->oal = NULL;
2932                 }
2933                 tx_cb++;
2934         }
2935 }
2936
2937 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2938 {
2939         struct ql_tx_buf_cb *tx_cb;
2940         int i;
2941         struct ob_mac_iocb_req *req_q_curr =
2942                                         qdev->req_q_virt_addr;
2943
2944         /* Create free list of transmit buffers */
2945         for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2946
2947                 tx_cb = &qdev->tx_buf[i];
2948                 tx_cb->skb = NULL;
2949                 tx_cb->queue_entry = req_q_curr;
2950                 req_q_curr++;
2951                 tx_cb->oal = kmalloc(512, GFP_KERNEL);
2952                 if (tx_cb->oal == NULL)
2953                         return -ENOMEM;
2954         }
2955         return 0;
2956 }
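/*
 * tx_cb->oal is the outbound address list used when a transmit has more
 * scatter/gather segments than the MAC IOCB itself can describe; the
 * 512 bytes are presumably sized for the worst-case segment count.
 */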
2957
2958 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2959 {
2960         if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2961                 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2962                 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2963         } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2965                 /*
2966                  * Bigger buffers, so less of them.
2967                  */
2968                 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2969                 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2970         } else {
2971                 printk(KERN_ERR PFX
2972                        "%s: Invalid mtu size.  Only 1500 and 9000 are accepted.\n",
2973                        qdev->ndev->name);
2974                 return -ENOMEM;
2975         }
2976         qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2977         qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2978         qdev->max_frame_size =
2979             (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
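         /*
          * So each receive buffer holds the MTU plus the VLAN-tagged
          * Ethernet header and the driver's reserved header space, and
          * max_frame_size works out to MTU + VLAN header + CRC: the
          * largest frame the wire can actually deliver into a buffer.
          */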
2980
2981         /*
2982          * First allocate a page of shared memory and use it for shadow
2983          * locations of Network Request Queue Consumer Address Register and
2984          * Network Completion Queue Producer Index Register
2985          */
2986         qdev->shadow_reg_virt_addr =
2987             pci_alloc_consistent(qdev->pdev,
2988                                  PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2989
2990         if (qdev->shadow_reg_virt_addr != NULL) {
2991                 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2992                 qdev->req_consumer_index_phy_addr_high =
2993                     MS_64BITS(qdev->shadow_reg_phy_addr);
2994                 qdev->req_consumer_index_phy_addr_low =
2995                     LS_64BITS(qdev->shadow_reg_phy_addr);
2996
2997                 qdev->prsp_producer_index =
2998                     (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2999                 qdev->rsp_producer_index_phy_addr_high =
3000                     qdev->req_consumer_index_phy_addr_high;
3001                 qdev->rsp_producer_index_phy_addr_low =
3002                     qdev->req_consumer_index_phy_addr_low + 8;
3003         } else {
3004                 printk(KERN_ERR PFX
3005                        "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
3006                 return -ENOMEM;
3007         }
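         /*
          * Shadow layout within that page: the request-queue consumer
          * index sits at offset 0 and the response-queue producer index
          * at offset 8; the chip DMA-writes both so the driver can poll
          * plain memory instead of doing MMIO reads.
          */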
3008
3009         if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
3010                 printk(KERN_ERR PFX
3011                        "%s: ql_alloc_net_req_rsp_queues failed.\n",
3012                        qdev->ndev->name);
3013                 goto err_req_rsp;
3014         }
3015
3016         if (ql_alloc_buffer_queues(qdev) != 0) {
3017                 printk(KERN_ERR PFX
3018                        "%s: ql_alloc_buffer_queues failed.\n",
3019                        qdev->ndev->name);
3020                 goto err_buffer_queues;
3021         }
3022
3023         if (ql_alloc_small_buffers(qdev) != 0) {
3024                 printk(KERN_ERR PFX
3025                        "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
3026                 goto err_small_buffers;
3027         }
3028
3029         if (ql_alloc_large_buffers(qdev) != 0) {
3030                 printk(KERN_ERR PFX
3031                        "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
3032                 goto err_large_buffers;
3033         }
3034
3035         /* Initialize the large buffer queue. */
3036         ql_init_large_buffers(qdev);
3037         if (ql_create_send_free_list(qdev))
3038                 goto err_free_list;
3039
3040         qdev->rsp_current = qdev->rsp_q_virt_addr;
3041
3042         return 0;
3043 err_free_list:
3044         ql_free_send_free_list(qdev);
             ql_free_large_buffers(qdev);
     err_large_buffers:
             ql_free_small_buffers(qdev);
3045 err_small_buffers:
3046         ql_free_buffer_queues(qdev);
3047 err_buffer_queues:
3048         ql_free_net_req_rsp_queues(qdev);
3049 err_req_rsp:
3050         pci_free_consistent(qdev->pdev,
3051                             PAGE_SIZE,
3052                             qdev->shadow_reg_virt_addr,
3053                             qdev->shadow_reg_phy_addr);
3054
3055         return -ENOMEM;
3056 }
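/*
 * The error labels unwind in reverse order of allocation, the usual
 * kernel goto-cleanup pattern; ql_free_mem_resources() below performs
 * the same teardown on the normal shutdown path.
 */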
3057
3058 static void ql_free_mem_resources(struct ql3_adapter *qdev)
3059 {
3060         ql_free_send_free_list(qdev);
3061         ql_free_large_buffers(qdev);
3062         ql_free_small_buffers(qdev);
3063         ql_free_buffer_queues(qdev);
3064         ql_free_net_req_rsp_queues(qdev);
3065         if (qdev->shadow_reg_virt_addr != NULL) {
3066                 pci_free_consistent(qdev->pdev,
3067                                     PAGE_SIZE,
3068                                     qdev->shadow_reg_virt_addr,
3069                                     qdev->shadow_reg_phy_addr);
3070                 qdev->shadow_reg_virt_addr = NULL;
3071         }
3072 }
3073
3074 static int ql_init_misc_registers(struct ql3_adapter *qdev)
3075 {
3076         struct ql3xxx_local_ram_registers __iomem *local_ram =
3077             (void __iomem *)qdev->mem_map_registers;
3078
3079         if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
3080                             (QL_RESOURCE_BITS_BASE_CODE |
3081                              (qdev->mac_index) * 2) << 4))
3082                 return -1;
3083
3084         ql_write_page2_reg(qdev,
3085                            &local_ram->bufletSize, qdev->nvram_data.bufletSize);
3086
3087         ql_write_page2_reg(qdev,
3088                            &local_ram->maxBufletCount,
3089                            qdev->nvram_data.bufletCount);
3090
3091         ql_write_page2_reg(qdev,
3092                            &local_ram->freeBufletThresholdLow,
3093                            (qdev->nvram_data.tcpWindowThreshold25 << 16) |
3094                            (qdev->nvram_data.tcpWindowThreshold0));
3095
3096         ql_write_page2_reg(qdev,
3097                            &local_ram->freeBufletThresholdHigh,
3098                            qdev->nvram_data.tcpWindowThreshold50);
3099
3100         ql_write_page2_reg(qdev,
3101                            &local_ram->ipHashTableBase,
3102                            (qdev->nvram_data.ipHashTableBaseHi << 16) |
3103                            qdev->nvram_data.ipHashTableBaseLo);
3104         ql_write_page2_reg(qdev,
3105                            &local_ram->ipHashTableCount,
3106                            qdev->nvram_data.ipHashTableSize);
3107         ql_write_page2_reg(qdev,
3108                            &local_ram->tcpHashTableBase,
3109                            (qdev->nvram_data.tcpHashTableBaseHi << 16) |
3110                            qdev->nvram_data.tcpHashTableBaseLo);
3111         ql_write_page2_reg(qdev,
3112                            &local_ram->tcpHashTableCount,
3113                            qdev->nvram_data.tcpHashTableSize);
3114         ql_write_page2_reg(qdev,
3115                            &local_ram->ncbBase,
3116                            (qdev->nvram_data.ncbTableBaseHi << 16) |
3117                            qdev->nvram_data.ncbTableBaseLo);
3118         ql_write_page2_reg(qdev,
3119                            &local_ram->maxNcbCount,
3120                            qdev->nvram_data.ncbTableSize);
3121         ql_write_page2_reg(qdev,
3122                            &local_ram->drbBase,
3123                            (qdev->nvram_data.drbTableBaseHi << 16) |
3124                            qdev->nvram_data.drbTableBaseLo);
3125         ql_write_page2_reg(qdev,
3126                            &local_ram->maxDrbCount,
3127                            qdev->nvram_data.drbTableSize);
3128         ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3129         return 0;
3130 }
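/*
 * The page-2 writes above seed the chip's local RAM layout (buflet pool,
 * IP/TCP hash tables, NCB/DRB tables) from values stored in NVRAM; the
 * DDR RAM semaphore serializes this against the other network function
 * on the same chip.
 */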
3131
3132 static int ql_adapter_initialize(struct ql3_adapter *qdev)
3133 {
3134         u32 value;
3135         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3136         struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3137                                                 (void __iomem *)port_regs;
3138         u32 delay = 10;
3139         int status = 0;
3140         unsigned long hw_flags = 0;
3141
3142         if (ql_mii_setup(qdev))
3143                 return -1;
3144
3145         /* Bring the PHY out of reset */
3146         ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3147                             (ISP_SERIAL_PORT_IF_WE |
3148                              (ISP_SERIAL_PORT_IF_WE << 16)));
3149         /* Give the PHY time to come out of reset. */
3150         mdelay(100);
3151         qdev->port_link_state = LS_DOWN;
3152         netif_carrier_off(qdev->ndev);
3153
3154         /* V2 chip fix for ARS-39168. */
3155         ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3156                             (ISP_SERIAL_PORT_IF_SDE |
3157                              (ISP_SERIAL_PORT_IF_SDE << 16)));
3158
3159         /* Request Queue Registers */
3160         *((u32 *) (qdev->preq_consumer_index)) = 0;
3161         atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
3162         qdev->req_producer_index = 0;
3163
3164         ql_write_page1_reg(qdev,
3165                            &hmem_regs->reqConsumerIndexAddrHigh,
3166                            qdev->req_consumer_index_phy_addr_high);
3167         ql_write_page1_reg(qdev,
3168                            &hmem_regs->reqConsumerIndexAddrLow,
3169                            qdev->req_consumer_index_phy_addr_low);
3170
3171         ql_write_page1_reg(qdev,
3172                            &hmem_regs->reqBaseAddrHigh,
3173                            MS_64BITS(qdev->req_q_phy_addr));
3174         ql_write_page1_reg(qdev,
3175                            &hmem_regs->reqBaseAddrLow,
3176                            LS_64BITS(qdev->req_q_phy_addr));
3177         ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3178
3179         /* Response Queue Registers */
3180         *((__le16 *) (qdev->prsp_producer_index)) = 0;
3181         qdev->rsp_consumer_index = 0;
3182         qdev->rsp_current = qdev->rsp_q_virt_addr;
3183
3184         ql_write_page1_reg(qdev,
3185                            &hmem_regs->rspProducerIndexAddrHigh,
3186                            qdev->rsp_producer_index_phy_addr_high);
3187
3188         ql_write_page1_reg(qdev,
3189                            &hmem_regs->rspProducerIndexAddrLow,
3190                            qdev->rsp_producer_index_phy_addr_low);
3191
3192         ql_write_page1_reg(qdev,
3193                            &hmem_regs->rspBaseAddrHigh,
3194                            MS_64BITS(qdev->rsp_q_phy_addr));
3195
3196         ql_write_page1_reg(qdev,
3197                            &hmem_regs->rspBaseAddrLow,
3198                            LS_64BITS(qdev->rsp_q_phy_addr));
3199
3200         ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3201
3202         /* Large Buffer Queue */
3203         ql_write_page1_reg(qdev,
3204                            &hmem_regs->rxLargeQBaseAddrHigh,
3205                            MS_64BITS(qdev->lrg_buf_q_phy_addr));
3206
3207         ql_write_page1_reg(qdev,
3208                            &hmem_regs->rxLargeQBaseAddrLow,
3209                            LS_64BITS(qdev->lrg_buf_q_phy_addr));
3210
3211         ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
3212
3213         ql_write_page1_reg(qdev,
3214                            &hmem_regs->rxLargeBufferLength,
3215                            qdev->lrg_buffer_len);
3216
3217         /* Small Buffer Queue */
3218         ql_write_page1_reg(qdev,
3219                            &hmem_regs->rxSmallQBaseAddrHigh,
3220                            MS_64BITS(qdev->small_buf_q_phy_addr));
3221
3222         ql_write_page1_reg(qdev,
3223                            &hmem_regs->rxSmallQBaseAddrLow,
3224                            LS_64BITS(qdev->small_buf_q_phy_addr));
3225
3226         ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3227         ql_write_page1_reg(qdev,
3228                            &hmem_regs->rxSmallBufferLength,
3229                            QL_SMALL_BUFFER_SIZE);
3230
3231         qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3232         qdev->small_buf_release_cnt = 8;
3233         qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3234         qdev->lrg_buf_release_cnt = 8;
3235         qdev->lrg_buf_next_free =
3236             (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
3237         qdev->small_buf_index = 0;
3238         qdev->lrg_buf_index = 0;
3239         qdev->lrg_buf_free_count = 0;
3240         qdev->lrg_buf_free_head = NULL;
3241         qdev->lrg_buf_free_tail = NULL;
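         /*
          * Seeding the producer indices at N - 1 hands the freshly filled
          * buffer queues to the chip up front; the release counts of 8
          * presumably let the receive path batch producer-index updates
          * rather than writing the registers for every returned buffer.
          */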
3242
3243         ql_write_common_reg(qdev,
3244                             &port_regs->CommonRegs.
3245                             rxSmallQProducerIndex,
3246                             qdev->small_buf_q_producer_index);
3247         ql_write_common_reg(qdev,
3248                             &port_regs->CommonRegs.
3249                             rxLargeQProducerIndex,
3250                             qdev->lrg_buf_q_producer_index);
3251
3252         /*
3253          * Find out if the chip has already been initialized.  If it has, then
3254          * we skip some of the initialization.
3255          */
3256         clear_bit(QL_LINK_MASTER, &qdev->flags);
3257         value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3258         if ((value & PORT_STATUS_IC) == 0) {
3259
3260                 /* Chip has not been configured yet, so let it rip. */
3261                 if (ql_init_misc_registers(qdev)) {
3262                         status = -1;
3263                         goto out;
3264                 }
3265
3266                 value = qdev->nvram_data.tcpMaxWindowSize;
3267                 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3268
3269                 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3270
3271                 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3272                                     (QL_RESOURCE_BITS_BASE_CODE |
3273                                      (qdev->mac_index) * 2) << 13)) {
3274                         status = -1;
3275                         goto out;
3276                 }
3277                 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3278                 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3279                                    (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3280                                      16) | (INTERNAL_CHIP_SD |
3281                                             INTERNAL_CHIP_WE)));
3282                 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3283         }
3284
3285         if (qdev->mac_index)
3286                 ql_write_page0_reg(qdev,
3287                                    &port_regs->mac1MaxFrameLengthReg,
3288                                    qdev->max_frame_size);
3289         else
3290                 ql_write_page0_reg(qdev,
3291                                    &port_regs->mac0MaxFrameLengthReg,
3292                                    qdev->max_frame_size);
3293
3294         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3295                             (QL_RESOURCE_BITS_BASE_CODE |
3296                              (qdev->mac_index) * 2) << 7)) {
3297                 status = -1;
3298                 goto out;
3299         }
3300
3301         PHY_Setup(qdev);
3302         ql_init_scan_mode(qdev);
3303         ql_get_phy_owner(qdev);
3304
3305         /* Load the MAC Configuration */
3306
3307         /* Program lower 32 bits of the MAC address */
3308         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3309                            (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3310         ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3311                            ((qdev->ndev->dev_addr[2] << 24)
3312                             | (qdev->ndev->dev_addr[3] << 16)
3313                             | (qdev->ndev->dev_addr[4] << 8)
3314                             | qdev->ndev->dev_addr[5]));
3315
3316         /* Program top 16 bits of the MAC address */
3317         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3318                            ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3319         ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3320                            ((qdev->ndev->dev_addr[0] << 8)
3321                             | qdev->ndev->dev_addr[1]));
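         /*
          * MAC addresses are programmed through an indirect register
          * pair: write the target index (0 = lower dword, 1 = upper
          * word) to macAddrIndirectPtrReg, then the data to
          * macAddrDataReg.  For aa:bb:cc:dd:ee:ff the lower dword is
          * 0xccddeeff and the upper word 0xaabb, as built above.
          */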
3322
3323         /* Enable Primary MAC */
3324         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3325                            ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3326                             MAC_ADDR_INDIRECT_PTR_REG_PE));
3327
3328         /* Clear Primary and Secondary IP addresses */
3329         ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3330                            ((IP_ADDR_INDEX_REG_MASK << 16) |
3331                             (qdev->mac_index << 2)));
3332         ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3333
3334         ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3335                            ((IP_ADDR_INDEX_REG_MASK << 16) |
3336                             ((qdev->mac_index << 2) + 1)));
3337         ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3338
3339         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3340
3341         /* Indicate Configuration Complete */
3342         ql_write_page0_reg(qdev,
3343                            &port_regs->portControl,
3344                            ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3345
3346         do {
3347                 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3348                 if (value & PORT_STATUS_IC)
3349                         break;
3350                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3351                 msleep(500);
3352                 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3353         } while (--delay);
3354
3355         if (delay == 0) {
3356                 printk(KERN_ERR PFX
3357                        "%s: Hw Initialization timeout.\n", qdev->ndev->name);
3358                 status = -1;
3359                 goto out;
3360         }
3361
3362         /* Enable Ethernet Function */
3363         if (qdev->device_id == QL3032_DEVICE_ID) {
3364                 value =
3365                     (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3366                      QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3367                         QL3032_PORT_CONTROL_ET);
3368                 ql_write_page0_reg(qdev, &port_regs->functionControl,
3369                                    ((value << 16) | value));
3370         } else {
3371                 value =
3372                     (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3373                      PORT_CONTROL_HH);
3374                 ql_write_page0_reg(qdev, &port_regs->portControl,
3375                                    ((value << 16) | value));
3376         }
3377
3378
3379 out:
3380         return status;
3381 }
3382
3383 /*
3384  * Caller holds hw_lock.
3385  */
3386 static int ql_adapter_reset(struct ql3_adapter *qdev)
3387 {
3388         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3389         int status = 0;
3390         u16 value;
3391         int max_wait_time;
3392
3393         set_bit(QL_RESET_ACTIVE, &qdev->flags);
3394         clear_bit(QL_RESET_DONE, &qdev->flags);
3395
3396         /*
3397          * Issue soft reset to chip.
3398          */
3399         printk(KERN_DEBUG PFX
3400                "%s: Issue soft reset to chip.\n",
3401                qdev->ndev->name);
3402         ql_write_common_reg(qdev,
3403                             &port_regs->CommonRegs.ispControlStatus,
3404                             ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3405
3406         /* Wait up to 5 seconds for the reset to complete. */
3407         printk(KERN_DEBUG PFX
3408                "%s: Waiting for reset to complete.\n",
3409                qdev->ndev->name);
3410
3411         /* Wait until the firmware tells us the Soft Reset is done */
3412         max_wait_time = 5;
3413         do {
3414                 value =
3415                     ql_read_common_reg(qdev,
3416                                        &port_regs->CommonRegs.ispControlStatus);
3417                 if ((value & ISP_CONTROL_SR) == 0)
3418                         break;
3419
3420                 ssleep(1);
3421         } while (--max_wait_time);
3422
3423         /*
3424          * Also, make sure that the Network Reset Interrupt bit has been
3425          * cleared after the soft reset has taken place.
3426          */
3427         value =
3428             ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3429         if (value & ISP_CONTROL_RI) {
3430                 printk(KERN_DEBUG PFX
3431                        "ql_adapter_reset: clearing RI after reset.\n");
3432                 ql_write_common_reg(qdev,
3433                                     &port_regs->CommonRegs.
3434                                     ispControlStatus,
3435                                     ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3436         }
3437
3438         if (max_wait_time == 0) {
3439                 /* Issue Force Soft Reset */
3440                 ql_write_common_reg(qdev,
3441                                     &port_regs->CommonRegs.
3442                                     ispControlStatus,
3443                                     ((ISP_CONTROL_FSR << 16) |
3444                                      ISP_CONTROL_FSR));
3445                 /*
3446                  * Wait until the firmware tells us the Force Soft Reset is
3447                  * done
3448                  */
3449                 max_wait_time = 5;
3450                 do {
3451                         value =
3452                             ql_read_common_reg(qdev,
3453                                                &port_regs->CommonRegs.
3454                                                ispControlStatus);
3455                         if ((value & ISP_CONTROL_FSR) == 0) {
3456                                 break;
3457                         }
3458                         ssleep(1);
3459                 } while (--max_wait_time);
3460         }
3461         if (max_wait_time == 0)
3462                 status = 1;
3463
3464         clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3465         set_bit(QL_RESET_DONE, &qdev->flags);
3466         return status;
3467 }
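/*
 * Reset protocol as implemented above: set ISP_CONTROL_SR and poll for
 * the firmware to clear it (up to 5 seconds); clear the Network Reset
 * Interrupt bit if it latched; if the soft reset never completes,
 * escalate to a Force Soft Reset and poll again.  A nonzero return
 * means even the forced reset timed out.
 */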
3468
3469 static void ql_set_mac_info(struct ql3_adapter *qdev)
3470 {
3471         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3472         u32 value, port_status;
3473         u8 func_number;
3474
3475         /* Get the function number */
3476         value =
3477             ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3478         func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3479         port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3480         switch (value & ISP_CONTROL_FN_MASK) {
3481         case ISP_CONTROL_FN0_NET:
3482                 qdev->mac_index = 0;
3483                 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3484                 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3485                 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3486                 if (port_status & PORT_STATUS_SM0)
3487                         set_bit(QL_LINK_OPTICAL,&qdev->flags);
3488                 else
3489                         clear_bit(QL_LINK_OPTICAL,&qdev->flags);
3490                 break;
3491
3492         case ISP_CONTROL_FN1_NET:
3493                 qdev->mac_index = 1;
3494                 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3495                 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3496                 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3497                 if (port_status & PORT_STATUS_SM1)
3498                         set_bit(QL_LINK_OPTICAL,&qdev->flags);
3499                 else
3500                         clear_bit(QL_LINK_OPTICAL,&qdev->flags);
3501                 break;
3502
3503         case ISP_CONTROL_FN0_SCSI:
3504         case ISP_CONTROL_FN1_SCSI:
3505         default:
3506                 printk(KERN_DEBUG PFX
3507                        "%s: Invalid function number, ispControlStatus = 0x%x\n",
3508                        qdev->ndev->name, value);
3509                 break;
3510         }
3511         qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3512 }
3513
3514 static void ql_display_dev_info(struct net_device *ndev)
3515 {
3516         struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3517         struct pci_dev *pdev = qdev->pdev;
3518
3519         printk(KERN_INFO PFX
3520                "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
3521                DRV_NAME, qdev->index, qdev->chip_rev_id,
3522                (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3523                qdev->pci_slot);
3524         printk(KERN_INFO PFX
3525                "%s Interface.\n",
3526                test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
3527
3528         /*
3529          * Print PCI bus width/type.
3530          */
3531         printk(KERN_INFO PFX
3532                "Bus interface is %s %s.\n",
3533                ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3534                ((qdev->pci_x) ? "PCI-X" : "PCI"));
3535
3536         printk(KERN_INFO PFX
3537                "mem IO base address adjusted = 0x%p\n",
3538                qdev->mem_map_registers);
3539         printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
3540
3541         if (netif_msg_probe(qdev))
3542                 printk(KERN_INFO PFX
3543                        "%s: MAC address %pM\n",
3544                        ndev->name, ndev->dev_addr);
3545 }
3546
3547 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3548 {
3549         struct net_device *ndev = qdev->ndev;
3550         int retval = 0;
3551
3552         netif_stop_queue(ndev);
3553         netif_carrier_off(ndev);
3554
3555         clear_bit(QL_ADAPTER_UP,&qdev->flags);
3556         clear_bit(QL_LINK_MASTER,&qdev->flags);
3557
3558         ql_disable_interrupts(qdev);
3559
3560         free_irq(qdev->pdev->irq, ndev);
3561
3562         if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3563                 printk(KERN_INFO PFX
3564                        "%s: calling pci_disable_msi().\n", qdev->ndev->name);
3565                 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3566                 pci_disable_msi(qdev->pdev);
3567         }
3568
3569         del_timer_sync(&qdev->adapter_timer);
3570
3571         napi_disable(&qdev->napi);
3572
3573         if (do_reset) {
3574                 int soft_reset;
3575                 unsigned long hw_flags;
3576
3577                 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3578                 if (ql_wait_for_drvr_lock(qdev)) {
3579                         if ((soft_reset = ql_adapter_reset(qdev))) {
3580                                 printk(KERN_ERR PFX
3581                                        "%s: ql_adapter_reset(%d) FAILED!\n",
3582                                        ndev->name, qdev->index);
3583                         }
3584                         printk(KERN_ERR PFX
3585                                "%s: Releasing driver lock via chip reset.\n", ndev->name);
3586                 } else {
3587                         printk(KERN_ERR PFX
3588                                "%s: Could not acquire driver lock to do "
3589                                "reset!\n", ndev->name);
3590                         retval = -1;
3591                 }
3592                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3593         }
3594         ql_free_mem_resources(qdev);
3595         return retval;
3596 }
3597
3598 static int ql_adapter_up(struct ql3_adapter *qdev)
3599 {
3600         struct net_device *ndev = qdev->ndev;
3601         int err;
3602         unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
3603         unsigned long hw_flags;
3604
3605         if (ql_alloc_mem_resources(qdev)) {
3606                 printk(KERN_ERR PFX
3607                        "%s: Unable to allocate buffers.\n", ndev->name);
3608                 return -ENOMEM;
3609         }
3610
3611         if (qdev->msi) {
3612                 if (pci_enable_msi(qdev->pdev)) {
3613                         printk(KERN_ERR PFX
3614                                "%s: User requested MSI, but MSI failed to "
3615                                "initialize.  Continuing without MSI.\n",
3616                                qdev->ndev->name);
3617                         qdev->msi = 0;
3618                 } else {
3619                         printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
3620                         set_bit(QL_MSI_ENABLED,&qdev->flags);
3621                         irq_flags &= ~IRQF_SHARED;
3622                 }
3623         }
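         /*
          * With MSI the vector belongs to this device alone, so
          * IRQF_SHARED is dropped; if pci_enable_msi() fails the driver
          * quietly falls back to a shared INTx line.
          */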
3624
3625         if ((err = request_irq(qdev->pdev->irq,
3626                                ql3xxx_isr,
3627                                irq_flags, ndev->name, ndev))) {
3628                 printk(KERN_ERR PFX
3629                        "%s: Failed to reserve interrupt %d, already in use.\n",
3630                        ndev->name, qdev->pdev->irq);
3631                 goto err_irq;
3632         }
3633
3634         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3635
3636         if ((err = ql_wait_for_drvr_lock(qdev))) {
3637                 if ((err = ql_adapter_initialize(qdev))) {
3638                         printk(KERN_ERR PFX
3639                                "%s: Unable to initialize adapter.\n",
3640                                ndev->name);
3641                         goto err_init;
3642                 }
3643                 printk(KERN_ERR PFX
3644                        "%s: Releasing driver lock.\n", ndev->name);
3645                 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3646         } else {
3647                 printk(KERN_ERR PFX
3648                        "%s: Could not acquire driver lock.\n",
3649                        ndev->name);
                     /* ql_wait_for_drvr_lock() returned 0; report a real
                      * error instead of falling through with err == 0. */
                     err = -ENODEV;
3650                 goto err_lock;
3651         }
3652
3653         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3654
3655         set_bit(QL_ADAPTER_UP,&qdev->flags);
3656
3657         mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3658
3659         napi_enable(&qdev->napi);
3660         ql_enable_interrupts(qdev);
3661         return 0;
3662
3663 err_init:
3664         ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3665 err_lock:
3666         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3667         free_irq(qdev->pdev->irq, ndev);
3668 err_irq:
3669         if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3670                 printk(KERN_INFO PFX
3671                        "%s: calling pci_disable_msi().\n",
3672                        qdev->ndev->name);
3673                 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3674                 pci_disable_msi(qdev->pdev);
3675         }
3676         return err;
3677 }
3678
3679 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3680 {
3681         if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3682                 printk(KERN_ERR PFX
3683                        "%s: Driver up/down cycle failed, "
3684                        "closing device\n", qdev->ndev->name);
3685                 rtnl_lock();
3686                 dev_close(qdev->ndev);
3687                 rtnl_unlock();
3688                 return -1;
3689         }
3690         return 0;
3691 }
3692
3693 static int ql3xxx_close(struct net_device *ndev)
3694 {
3695         struct ql3_adapter *qdev = netdev_priv(ndev);
3696
3697         /*
3698          * Wait for device to recover from a reset.
3699          * (Rarely happens, but possible.)
3700          */
3701         while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
3702                 msleep(50);
3703
3704         ql_adapter_down(qdev,QL_DO_RESET);
3705         return 0;
3706 }
3707
3708 static int ql3xxx_open(struct net_device *ndev)
3709 {
3710         struct ql3_adapter *qdev = netdev_priv(ndev);
3711         return ql_adapter_up(qdev);
3712 }
3713
3714 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3715 {
3716         struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3717         struct ql3xxx_port_registers __iomem *port_regs =
3718                         qdev->mem_map_registers;
3719         struct sockaddr *addr = p;
3720         unsigned long hw_flags;
3721
3722         if (netif_running(ndev))
3723                 return -EBUSY;
3724
3725         if (!is_valid_ether_addr(addr->sa_data))
3726                 return -EADDRNOTAVAIL;
3727
3728         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3729
3730         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3731         /* Program lower 32 bits of the MAC address */
3732         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3733                            (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3734         ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3735                            ((ndev->dev_addr[2] << 24) | (ndev->
3736                                                          dev_addr[3] << 16) |
3737                             (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3738
3739         /* Program top 16 bits of the MAC address */
3740         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3741                            ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3742         ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3743                            ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3744         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3745
3746         return 0;
3747 }
3748
3749 static void ql3xxx_tx_timeout(struct net_device *ndev)
3750 {
3751         struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3752
3753         printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
3754         /*
3755          * Stop the queues, we've got a problem.
3756          */
3757         netif_stop_queue(ndev);
3758
3759         /*
3760          * Wake up the worker to process this event.
3761          */
3762         queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3763 }
3764
3765 static void ql_reset_work(struct work_struct *work)
3766 {
3767         struct ql3_adapter *qdev =
3768                 container_of(work, struct ql3_adapter, reset_work.work);
3769         struct net_device *ndev = qdev->ndev;
3770         u32 value;
3771         struct ql_tx_buf_cb *tx_cb;
3772         int max_wait_time, i;
3773         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3774         unsigned long hw_flags;
3775
3776         if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
                 test_bit(QL_RESET_START, &qdev->flags)) {
3777                 clear_bit(QL_LINK_MASTER, &qdev->flags);
3778
3779                 /*
3780                  * Loop through the active list and return the skb.
3781                  */
3782                 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3783                         int j;
3784                         tx_cb = &qdev->tx_buf[i];
3785                         if (tx_cb->skb) {
3786                                 printk(KERN_DEBUG PFX
3787                                        "%s: Freeing lost SKB.\n",
3788                                        qdev->ndev->name);
3789                                 pci_unmap_single(qdev->pdev,
3790                                          dma_unmap_addr(&tx_cb->map[0], mapaddr),
3791                                          dma_unmap_len(&tx_cb->map[0], maplen),
3792                                          PCI_DMA_TODEVICE);
3793                                 for (j = 1; j < tx_cb->seg_count; j++) {
3794                                         pci_unmap_page(qdev->pdev,
3795                                                dma_unmap_addr(&tx_cb->map[j],mapaddr),
3796                                                dma_unmap_len(&tx_cb->map[j],maplen),
3797                                                PCI_DMA_TODEVICE);
3798                                 }
3799                                 dev_kfree_skb(tx_cb->skb);
3800                                 tx_cb->skb = NULL;
3801                         }
3802                 }
3803
3804                 printk(KERN_ERR PFX
3805                        "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3806                 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3807                 ql_write_common_reg(qdev,
3808                                     &port_regs->CommonRegs.
3809                                     ispControlStatus,
3810                                     ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3811                 /*
3812                  * Wait for the Soft Reset to complete.
3813                  */
3814                 max_wait_time = 10;
3815                 do {
3816                         value = ql_read_common_reg(qdev,
3817                                                    &port_regs->CommonRegs.
3819                                                    ispControlStatus);
3820                         if ((value & ISP_CONTROL_SR) == 0) {
3821                                 printk(KERN_DEBUG PFX
3822                                        "%s: reset completed.\n",
3823                                        qdev->ndev->name);
3824                                 break;
3825                         }
3826
3827                         if (value & ISP_CONTROL_RI) {
3828                                 printk(KERN_DEBUG PFX
3829                                        "%s: clearing NRI after reset.\n",
3830                                        qdev->ndev->name);
3831                                 ql_write_common_reg(qdev,
3832                                                     &port_regs->
3833                                                     CommonRegs.
3834                                                     ispControlStatus,
3835                                                     ((ISP_CONTROL_RI <<
3836                                                       16) | ISP_CONTROL_RI));
3837                         }
3838
3839                         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3840                         ssleep(1);
3841                         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3842                 } while (--max_wait_time);
3843                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3844
3845                 if (value & ISP_CONTROL_SR) {
3846
3847                         /*
3848                          * The soft reset never completed; clear the reset
3849                          * flags and cycle the adapter with a full reset.
3850                          */
3851                         printk(KERN_ERR PFX
3852                                "%s: Timed out waiting for reset to "
3853                                "complete.\n", ndev->name);
3854                         printk(KERN_ERR PFX
3855                                "%s: Do a reset.\n", ndev->name);
3856                         clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3857                         clear_bit(QL_RESET_START,&qdev->flags);
3858                         ql_cycle_adapter(qdev,QL_DO_RESET);
3859                         return;
3860                 }
3861
3862                 clear_bit(QL_RESET_ACTIVE,&qdev->flags);
3863                 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3864                 clear_bit(QL_RESET_START,&qdev->flags);
3865                 ql_cycle_adapter(qdev,QL_NO_RESET);
3866         }
3867 }
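/*
 * This runs from the adapter's private workqueue, so it may ssleep()
 * while polling ispControlStatus; the QL_RESET_* flags it tests are
 * presumably set from the interrupt path before the work is queued.
 */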
3868
3869 static void ql_tx_timeout_work(struct work_struct *work)
3870 {
3871         struct ql3_adapter *qdev =
3872                 container_of(work, struct ql3_adapter, tx_timeout_work.work);
3873
3874         ql_cycle_adapter(qdev, QL_DO_RESET);
3875 }
3876
3877 static void ql_get_board_info(struct ql3_adapter *qdev)
3878 {
3879         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3880         u32 value;
3881
3882         value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3883
3884         qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3885         if (value & PORT_STATUS_64)
3886                 qdev->pci_width = 64;
3887         else
3888                 qdev->pci_width = 32;
3889         if (value & PORT_STATUS_X)
3890                 qdev->pci_x = 1;
3891         else
3892                 qdev->pci_x = 0;
3893         qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3894 }
3895
3896 static void ql3xxx_timer(unsigned long ptr)
3897 {
3898         struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3899         queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3900 }
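/*
 * The periodic timer itself never touches hardware; it only defers to
 * the link-state work item, which runs in process context where it is
 * free to take locks and sleep.
 */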
3901
3902 static const struct net_device_ops ql3xxx_netdev_ops = {
3903         .ndo_open               = ql3xxx_open,
3904         .ndo_start_xmit         = ql3xxx_send,
3905         .ndo_stop               = ql3xxx_close,
3906         .ndo_set_multicast_list = NULL, /* not allowed on NIC side */
3907         .ndo_change_mtu         = eth_change_mtu,
3908         .ndo_validate_addr      = eth_validate_addr,
3909         .ndo_set_mac_address    = ql3xxx_set_mac_address,
3910         .ndo_tx_timeout         = ql3xxx_tx_timeout,
3911 };
3912
3913 static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3914                                   const struct pci_device_id *pci_entry)
3915 {
3916         struct net_device *ndev = NULL;
3917         struct ql3_adapter *qdev = NULL;
3918         static int cards_found = 0;
3919         int uninitialized_var(pci_using_dac), err;
3920
3921         err = pci_enable_device(pdev);
3922         if (err) {
3923                 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3924                        pci_name(pdev));
3925                 goto err_out;
3926         }
3927
3928         err = pci_request_regions(pdev, DRV_NAME);
3929         if (err) {
3930                 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3931                        pci_name(pdev));
3932                 goto err_out_disable_pdev;
3933         }
3934
3935         pci_set_master(pdev);
3936
3937         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3938                 pci_using_dac = 1;
3939                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3940         } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
3941                 pci_using_dac = 0;
3942                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3943         }
3944
3945         if (err) {
3946                 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3947                        pci_name(pdev));
3948                 goto err_out_free_regions;
3949         }
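        /*
         * Prefer a 64-bit DMA mask and fall back to 32-bit; pci_using_dac
         * is remembered so that NETIF_F_HIGHDMA is only advertised when
         * the device can actually reach highmem buffers.
         */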
3950
3951         ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3952         if (!ndev) {
3953                 printk(KERN_ERR PFX "%s could not alloc etherdev\n",
3954                        pci_name(pdev));
3955                 err = -ENOMEM;
3956                 goto err_out_free_regions;
3957         }
3958
3959         SET_NETDEV_DEV(ndev, &pdev->dev);
3960
3961         pci_set_drvdata(pdev, ndev);
3962
3963         qdev = netdev_priv(ndev);
3964         qdev->index = cards_found;
3965         qdev->ndev = ndev;
3966         qdev->pdev = pdev;
3967         qdev->device_id = pci_entry->device;
3968         qdev->port_link_state = LS_DOWN;
3969         if (msi)
3970                 qdev->msi = 1;
3971
3972         qdev->msg_enable = netif_msg_init(debug, default_msg);
3973
3974         if (pci_using_dac)
3975                 ndev->features |= NETIF_F_HIGHDMA;
3976         if (qdev->device_id == QL3032_DEVICE_ID)
3977                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3978
3979         qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3980         if (!qdev->mem_map_registers) {
3981                 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3982                        pci_name(pdev));
3983                 err = -EIO;
3984                 goto err_out_free_ndev;
3985         }
3986
3987         spin_lock_init(&qdev->adapter_lock);
3988         spin_lock_init(&qdev->hw_lock);
3989
3990         /* Set driver entry points */
3991         ndev->netdev_ops = &ql3xxx_netdev_ops;
3992         SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3993         ndev->watchdog_timeo = 5 * HZ;
3994
3995         netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
3996
3997         ndev->irq = pdev->irq;
3998
3999         /* make sure the EEPROM is good */
4000         if (ql_get_nvram_params(qdev)) {
4001                 printk(KERN_ALERT PFX
4002                        "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
4003                        qdev->index);
4004                 err = -EIO;
4005                 goto err_out_iounmap;
4006         }
4007
4008         ql_set_mac_info(qdev);
4009
4010         /* Validate and set parameters */
4011         if (qdev->mac_index) {
4012                 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
4013                 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
4014         } else {
4015                 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
4016                 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
4017         }
4018         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4019
4020         ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
4021
4022         /* Record PCI bus information. */
4023         ql_get_board_info(qdev);
4024
4025         /*
4026          * Set the Maximum Memory Read Byte Count value. We do this to handle
4027          * jumbo frames.
4028          */
4029         if (qdev->pci_x) {
4030                 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
4031         }
4032
4033         err = register_netdev(ndev);
4034         if (err) {
4035                 printk(KERN_ERR PFX "%s: cannot register net device\n",
4036                        pci_name(pdev));
4037                 goto err_out_iounmap;
4038         }
4039
4040         /* we're going to reset, so assume we have no link for now */
4041
4042         netif_carrier_off(ndev);
4043         netif_stop_queue(ndev);
4044
4045         qdev->workqueue = create_singlethread_workqueue(ndev->name);
         if (!qdev->workqueue) {
                 unregister_netdev(ndev);
                 err = -ENOMEM;
                 goto err_out_iounmap;
         }
4046         INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
4047         INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
4048         INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
4049
4050         init_timer(&qdev->adapter_timer);
4051         qdev->adapter_timer.function = ql3xxx_timer;
4052         qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
4053         qdev->adapter_timer.data = (unsigned long)qdev;
4054
4055         if (!cards_found) {
4056                 printk(KERN_ALERT PFX "%s\n", DRV_STRING);
4057                 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
4058                    DRV_NAME, DRV_VERSION);
4059         }
4060         ql_display_dev_info(ndev);
4061
4062         cards_found++;
4063         return 0;
4064
4065 err_out_iounmap:
4066         iounmap(qdev->mem_map_registers);
4067 err_out_free_ndev:
4068         free_netdev(ndev);
4069 err_out_free_regions:
4070         pci_release_regions(pdev);
4071 err_out_disable_pdev:
4072         pci_disable_device(pdev);
4073         pci_set_drvdata(pdev, NULL);
4074 err_out:
4075         return err;
4076 }
4077
4078 static void __devexit ql3xxx_remove(struct pci_dev *pdev)
4079 {
4080         struct net_device *ndev = pci_get_drvdata(pdev);
4081         struct ql3_adapter *qdev = netdev_priv(ndev);
4082
4083         unregister_netdev(ndev);
4084
4085         ql_disable_interrupts(qdev);
4086
4087         if (qdev->workqueue) {
4088                 cancel_delayed_work(&qdev->reset_work);
4089                 cancel_delayed_work(&qdev->tx_timeout_work);
4090                 destroy_workqueue(qdev->workqueue);
4091                 qdev->workqueue = NULL;
4092         }
4093
4094         iounmap(qdev->mem_map_registers);
4095         pci_release_regions(pdev);
4096         pci_set_drvdata(pdev, NULL);
4097         free_netdev(ndev);
4098 }
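/*
 * Teardown order matters here: unregister_netdev() first so the stack
 * can no longer open the device, then quiesce interrupts and the
 * workqueue before the register mapping goes away.
 */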
4099
4100 static struct pci_driver ql3xxx_driver = {
4102         .name = DRV_NAME,
4103         .id_table = ql3xxx_pci_tbl,
4104         .probe = ql3xxx_probe,
4105         .remove = __devexit_p(ql3xxx_remove),
4106 };
4107
4108 static int __init ql3xxx_init_module(void)
4109 {
4110         return pci_register_driver(&ql3xxx_driver);
4111 }
4112
4113 static void __exit ql3xxx_exit(void)
4114 {
4115         pci_unregister_driver(&ql3xxx_driver);
4116 }
4117
4118 module_init(ql3xxx_init_module);
4119 module_exit(ql3xxx_exit);