tizen 2.3.1 release
[kernel/linux-3.0.git] / drivers / net / pch_gbe / pch_gbe_main.c
1 /*
2  * Copyright (C) 1999 - 2010 Intel Corporation.
3  * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
4  *
5  * This code was derived from the Intel e1000e Linux driver.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; version 2 of the License.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
19  */
20
21 #include "pch_gbe.h"
22 #include "pch_gbe_api.h"
23 #include <linux/prefetch.h>
24
25 #define DRV_VERSION     "1.00"
26 const char pch_driver_version[] = DRV_VERSION;
27
28 #define PCI_DEVICE_ID_INTEL_IOH1_GBE    0x8802          /* Pci device ID */
29 #define PCH_GBE_MAR_ENTRIES             16
30 #define PCH_GBE_SHORT_PKT               64
31 #define DSC_INIT16                      0xC000
32 #define PCH_GBE_DMA_ALIGN               0
33 #define PCH_GBE_DMA_PADDING             2
34 #define PCH_GBE_WATCHDOG_PERIOD         (1 * HZ)        /* watchdog time */
35 #define PCH_GBE_COPYBREAK_DEFAULT       256
36 #define PCH_GBE_PCI_BAR                 1
37
38 /* Macros for ML7223 */
39 #define PCI_VENDOR_ID_ROHM                      0x10db
40 #define PCI_DEVICE_ID_ROHM_ML7223_GBE           0x8013
41
42 /* Macros for ML7831 */
43 #define PCI_DEVICE_ID_ROHM_ML7831_GBE           0x8802
44
45 #define PCH_GBE_TX_WEIGHT         64
46 #define PCH_GBE_RX_WEIGHT         64
47 #define PCH_GBE_RX_BUFFER_WRITE   16
48
49 /* Initialize the wake-on-LAN settings */
50 #define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
51
52 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
53         PCH_GBE_CHIP_TYPE_INTERNAL | \
54         PCH_GBE_RGMII_MODE_RGMII     \
55         )
56
57 /* Ethertype field values */
58 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
59 #define PCH_GBE_FRAME_SIZE_2048         2048
60 #define PCH_GBE_FRAME_SIZE_4096         4096
61 #define PCH_GBE_FRAME_SIZE_8192         8192
62
63 #define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
64 #define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
65 #define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
66 #define PCH_GBE_DESC_UNUSED(R) \
67         ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
68         (R)->next_to_clean - (R)->next_to_use - 1)
69
70 /* Pause packet value */
71 #define PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
72 #define PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
73 #define PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
74 #define PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
75
76 #define PCH_GBE_ETH_ALEN            6
77
78 /* This defines the bits that are set in the Interrupt Mask
79  * Set/Read Register.  Each bit is documented below:
80  *   o RXT0   = Receiver Timer Interrupt (ring 0)
81  *   o TXDW   = Transmit Descriptor Written Back
82  *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
83  *   o RXSEQ  = Receive Sequence Error
84  *   o LSC    = Link Status Change
85  */
86 #define PCH_GBE_INT_ENABLE_MASK ( \
87         PCH_GBE_INT_RX_DMA_CMPLT |    \
88         PCH_GBE_INT_RX_DSC_EMP   |    \
89         PCH_GBE_INT_WOL_DET      |    \
90         PCH_GBE_INT_TX_CMPLT          \
91         )
92
93
94 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
95
96 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
97 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
98                                int data);
99
/**
 * pch_gbe_mac_load_mac_addr - Trigger the HW MAC-address load
 * @hw: Pointer to the HW structure
 *
 * Writes the load strobe to the MAC_ADDR_LOAD register; presumably this
 * makes the MAC latch the address from non-volatile storage — confirm
 * against the EG20T/ML7223 datasheet.
 */
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
        iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}
104
105 /**
106  * pch_gbe_mac_read_mac_addr - Read MAC address
107  * @hw:             Pointer to the HW structure
108  * Returns
109  *      0:                      Successful.
110  */
111 s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
112 {
113         u32  adr1a, adr1b;
114
115         adr1a = ioread32(&hw->reg->mac_adr[0].high);
116         adr1b = ioread32(&hw->reg->mac_adr[0].low);
117
118         hw->mac.addr[0] = (u8)(adr1a & 0xFF);
119         hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
120         hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
121         hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
122         hw->mac.addr[4] = (u8)(adr1b & 0xFF);
123         hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
124
125         pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
126         return 0;
127 }
128
129 /**
130  * pch_gbe_wait_clr_bit - Wait to clear a bit
131  * @reg:        Pointer of register
132  * @busy:       Busy bit
133  */
134 static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
135 {
136         u32 tmp;
137         /* wait busy */
138         tmp = 1000;
139         while ((ioread32(reg) & bit) && --tmp)
140                 cpu_relax();
141         if (!tmp)
142                 pr_err("Error: busy bit is not cleared\n");
143 }
144 /**
145  * pch_gbe_mac_mar_set - Set MAC address register
146  * @hw:     Pointer to the HW structure
147  * @addr:   Pointer to the MAC address
148  * @index:  MAC address array register
149  */
150 static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
151 {
152         u32 mar_low, mar_high, adrmask;
153
154         pr_debug("index : 0x%x\n", index);
155
156         /*
157          * HW expects these in little endian so we reverse the byte order
158          * from network order (big endian) to little endian
159          */
160         mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
161                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
162         mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
163         /* Stop the MAC Address of index. */
164         adrmask = ioread32(&hw->reg->ADDR_MASK);
165         iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
166         /* wait busy */
167         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
168         /* Set the MAC address to the MAC address 1A/1B register */
169         iowrite32(mar_high, &hw->reg->mac_adr[index].high);
170         iowrite32(mar_low, &hw->reg->mac_adr[index].low);
171         /* Start the MAC address of index */
172         iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
173         /* wait busy */
174         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
175 }
176
177 /**
178  * pch_gbe_mac_reset_hw - Reset hardware
179  * @hw: Pointer to the HW structure
180  */
181 static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
182 {
183         /* Read the MAC address. and store to the private data */
184         pch_gbe_mac_read_mac_addr(hw);
185         iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
186 #ifdef PCH_GBE_MAC_IFOP_RGMII
187         iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
188 #endif
189         pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
190         /* Setup the receive address */
191         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
192         return;
193 }
194
195 /**
196  * pch_gbe_mac_init_rx_addrs - Initialize receive address's
197  * @hw: Pointer to the HW structure
198  * @mar_count: Receive address registers
199  */
200 static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
201 {
202         u32 i;
203
204         /* Setup the receive address */
205         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
206
207         /* Zero out the other receive addresses */
208         for (i = 1; i < mar_count; i++) {
209                 iowrite32(0, &hw->reg->mac_adr[i].high);
210                 iowrite32(0, &hw->reg->mac_adr[i].low);
211         }
212         iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
213         /* wait busy */
214         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
215 }
216
217
218 /**
219  * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
220  * @hw:             Pointer to the HW structure
221  * @mc_addr_list:   Array of multicast addresses to program
222  * @mc_addr_count:  Number of multicast addresses to program
223  * @mar_used_count: The first MAC Address register free to program
224  * @mar_total_num:  Total number of supported MAC Address Registers
225  */
226 static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
227                                             u8 *mc_addr_list, u32 mc_addr_count,
228                                             u32 mar_used_count, u32 mar_total_num)
229 {
230         u32 i, adrmask;
231
232         /* Load the first set of multicast addresses into the exact
233          * filters (RAR).  If there are not enough to fill the RAR
234          * array, clear the filters.
235          */
236         for (i = mar_used_count; i < mar_total_num; i++) {
237                 if (mc_addr_count) {
238                         pch_gbe_mac_mar_set(hw, mc_addr_list, i);
239                         mc_addr_count--;
240                         mc_addr_list += PCH_GBE_ETH_ALEN;
241                 } else {
242                         /* Clear MAC address mask */
243                         adrmask = ioread32(&hw->reg->ADDR_MASK);
244                         iowrite32((adrmask | (0x0001 << i)),
245                                         &hw->reg->ADDR_MASK);
246                         /* wait busy */
247                         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
248                         /* Clear MAC address */
249                         iowrite32(0, &hw->reg->mac_adr[i].high);
250                         iowrite32(0, &hw->reg->mac_adr[i].low);
251                 }
252         }
253 }
254
255 /**
256  * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
257  * @hw:             Pointer to the HW structure
258  * Returns
259  *      0:                      Successful.
260  *      Negative value:         Failed.
261  */
262 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
263 {
264         struct pch_gbe_mac_info *mac = &hw->mac;
265         u32 rx_fctrl;
266
267         pr_debug("mac->fc = %u\n", mac->fc);
268
269         rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
270
271         switch (mac->fc) {
272         case PCH_GBE_FC_NONE:
273                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
274                 mac->tx_fc_enable = false;
275                 break;
276         case PCH_GBE_FC_RX_PAUSE:
277                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
278                 mac->tx_fc_enable = false;
279                 break;
280         case PCH_GBE_FC_TX_PAUSE:
281                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
282                 mac->tx_fc_enable = true;
283                 break;
284         case PCH_GBE_FC_FULL:
285                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
286                 mac->tx_fc_enable = true;
287                 break;
288         default:
289                 pr_err("Flow control param set incorrectly\n");
290                 return -EINVAL;
291         }
292         if (mac->link_duplex == DUPLEX_HALF)
293                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
294         iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
295         pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
296                  ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
297         return 0;
298 }
299
300 /**
301  * pch_gbe_mac_set_wol_event - Set wake-on-lan event
302  * @hw:     Pointer to the HW structure
303  * @wu_evt: Wake up event
304  */
305 static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
306 {
307         u32 addr_mask;
308
309         pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
310                  wu_evt, ioread32(&hw->reg->ADDR_MASK));
311
312         if (wu_evt) {
313                 /* Set Wake-On-Lan address mask */
314                 addr_mask = ioread32(&hw->reg->ADDR_MASK);
315                 iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
316                 /* wait busy */
317                 pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
318                 iowrite32(0, &hw->reg->WOL_ST);
319                 iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
320                 iowrite32(0x02, &hw->reg->TCPIP_ACC);
321                 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
322         } else {
323                 iowrite32(0, &hw->reg->WOL_CTRL);
324                 iowrite32(0, &hw->reg->WOL_ST);
325         }
326         return;
327 }
328
329 /**
330  * pch_gbe_mac_ctrl_miim - Control MIIM interface
331  * @hw:   Pointer to the HW structure
332  * @addr: Address of PHY
333  * @dir:  Operetion. (Write or Read)
334  * @reg:  Access register of PHY
335  * @data: Write data.
336  *
337  * Returns: Read date.
338  */
339 u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
340                         u16 data)
341 {
342         u32 data_out = 0;
343         unsigned int i;
344         unsigned long flags;
345
346         spin_lock_irqsave(&hw->miim_lock, flags);
347
348         for (i = 100; i; --i) {
349                 if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
350                         break;
351                 udelay(20);
352         }
353         if (i == 0) {
354                 pr_err("pch-gbe.miim won't go Ready\n");
355                 spin_unlock_irqrestore(&hw->miim_lock, flags);
356                 return 0;       /* No way to indicate timeout error */
357         }
358         iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
359                   (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
360                   dir | data), &hw->reg->MIIM);
361         for (i = 0; i < 100; i++) {
362                 udelay(20);
363                 data_out = ioread32(&hw->reg->MIIM);
364                 if ((data_out & PCH_GBE_MIIM_OPER_READY))
365                         break;
366         }
367         spin_unlock_irqrestore(&hw->miim_lock, flags);
368
369         pr_debug("PHY %s: reg=%d, data=0x%04X\n",
370                  dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
371                  dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
372         return (u16) data_out;
373 }
374
375 /**
376  * pch_gbe_mac_set_pause_packet - Set pause packet
377  * @hw:   Pointer to the HW structure
378  */
379 static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
380 {
381         unsigned long tmp2, tmp3;
382
383         /* Set Pause packet */
384         tmp2 = hw->mac.addr[1];
385         tmp2 = (tmp2 << 8) | hw->mac.addr[0];
386         tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
387
388         tmp3 = hw->mac.addr[5];
389         tmp3 = (tmp3 << 8) | hw->mac.addr[4];
390         tmp3 = (tmp3 << 8) | hw->mac.addr[3];
391         tmp3 = (tmp3 << 8) | hw->mac.addr[2];
392
393         iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
394         iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
395         iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
396         iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
397         iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
398
399         /* Transmit Pause Packet */
400         iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
401
402         pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
403                  ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
404                  ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
405                  ioread32(&hw->reg->PAUSE_PKT5));
406
407         return;
408 }
409
410
411 /**
412  * pch_gbe_alloc_queues - Allocate memory for all rings
413  * @adapter:  Board private structure to initialize
414  * Returns
415  *      0:      Successfully
416  *      Negative value: Failed
417  */
418 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
419 {
420         int size;
421
422         size = (int)sizeof(struct pch_gbe_tx_ring);
423         adapter->tx_ring = kzalloc(size, GFP_KERNEL);
424         if (!adapter->tx_ring)
425                 return -ENOMEM;
426         size = (int)sizeof(struct pch_gbe_rx_ring);
427         adapter->rx_ring = kzalloc(size, GFP_KERNEL);
428         if (!adapter->rx_ring) {
429                 kfree(adapter->tx_ring);
430                 return -ENOMEM;
431         }
432         return 0;
433 }
434
435 /**
436  * pch_gbe_init_stats - Initialize status
437  * @adapter:  Board private structure to initialize
438  */
439 static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
440 {
441         memset(&adapter->stats, 0, sizeof(adapter->stats));
442         return;
443 }
444
445 /**
446  * pch_gbe_init_phy - Initialize PHY
447  * @adapter:  Board private structure to initialize
448  * Returns
449  *      0:      Successfully
450  *      Negative value: Failed
451  */
452 static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
453 {
454         struct net_device *netdev = adapter->netdev;
455         u32 addr;
456         u16 bmcr, stat;
457
458         /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
459         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
460                 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
461                 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
462                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
463                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
464                 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
465                         break;
466         }
467         adapter->hw.phy.addr = adapter->mii.phy_id;
468         pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
469         if (addr == 32)
470                 return -EAGAIN;
471         /* Selected the phy and isolate the rest */
472         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
473                 if (addr != adapter->mii.phy_id) {
474                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
475                                            BMCR_ISOLATE);
476                 } else {
477                         bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
478                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
479                                            bmcr & ~BMCR_ISOLATE);
480                 }
481         }
482
483         /* MII setup */
484         adapter->mii.phy_id_mask = 0x1F;
485         adapter->mii.reg_num_mask = 0x1F;
486         adapter->mii.dev = adapter->netdev;
487         adapter->mii.mdio_read = pch_gbe_mdio_read;
488         adapter->mii.mdio_write = pch_gbe_mdio_write;
489         adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
490         return 0;
491 }
492
493 /**
494  * pch_gbe_mdio_read - The read function for mii
495  * @netdev: Network interface device structure
496  * @addr:   Phy ID
497  * @reg:    Access location
498  * Returns
499  *      0:      Successfully
500  *      Negative value: Failed
501  */
502 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
503 {
504         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
505         struct pch_gbe_hw *hw = &adapter->hw;
506
507         return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
508                                      (u16) 0);
509 }
510
511 /**
512  * pch_gbe_mdio_write - The write function for mii
513  * @netdev: Network interface device structure
514  * @addr:   Phy ID (not used)
515  * @reg:    Access location
516  * @data:   Write data
517  */
518 static void pch_gbe_mdio_write(struct net_device *netdev,
519                                int addr, int reg, int data)
520 {
521         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
522         struct pch_gbe_hw *hw = &adapter->hw;
523
524         pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
525 }
526
527 /**
528  * pch_gbe_reset_task - Reset processing at the time of transmission timeout
529  * @work:  Pointer of board private structure
530  */
531 static void pch_gbe_reset_task(struct work_struct *work)
532 {
533         struct pch_gbe_adapter *adapter;
534         adapter = container_of(work, struct pch_gbe_adapter, reset_task);
535
536         rtnl_lock();
537         pch_gbe_reinit_locked(adapter);
538         rtnl_unlock();
539 }
540
541 /**
542  * pch_gbe_reinit_locked- Re-initialization
543  * @adapter:  Board private structure
544  */
545 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
546 {
547         pch_gbe_down(adapter);
548         pch_gbe_up(adapter);
549 }
550
551 /**
552  * pch_gbe_reset - Reset GbE
553  * @adapter:  Board private structure
554  */
555 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
556 {
557         pch_gbe_mac_reset_hw(&adapter->hw);
558         /* Setup the receive address. */
559         pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
560         if (pch_gbe_hal_init_hw(&adapter->hw))
561                 pr_err("Hardware Error\n");
562 }
563
564 /**
565  * pch_gbe_free_irq - Free an interrupt
566  * @adapter:  Board private structure
567  */
568 static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
569 {
570         struct net_device *netdev = adapter->netdev;
571
572         free_irq(adapter->pdev->irq, netdev);
573         if (adapter->have_msi) {
574                 pci_disable_msi(adapter->pdev);
575                 pr_debug("call pci_disable_msi\n");
576         }
577 }
578
579 /**
580  * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
581  * @adapter:  Board private structure
582  */
583 static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
584 {
585         struct pch_gbe_hw *hw = &adapter->hw;
586
587         atomic_inc(&adapter->irq_sem);
588         iowrite32(0, &hw->reg->INT_EN);
589         ioread32(&hw->reg->INT_ST);
590         synchronize_irq(adapter->pdev->irq);
591
592         pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
593 }
594
595 /**
596  * pch_gbe_irq_enable - Enable default interrupt generation settings
597  * @adapter:  Board private structure
598  */
599 static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
600 {
601         struct pch_gbe_hw *hw = &adapter->hw;
602
603         if (likely(atomic_dec_and_test(&adapter->irq_sem)))
604                 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
605         ioread32(&hw->reg->INT_ST);
606         pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
607 }
608
609
610
611 /**
612  * pch_gbe_setup_tctl - configure the Transmit control registers
613  * @adapter:  Board private structure
614  */
615 static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
616 {
617         struct pch_gbe_hw *hw = &adapter->hw;
618         u32 tx_mode, tcpip;
619
620         tx_mode = PCH_GBE_TM_LONG_PKT |
621                 PCH_GBE_TM_ST_AND_FD |
622                 PCH_GBE_TM_SHORT_PKT |
623                 PCH_GBE_TM_TH_TX_STRT_8 |
624                 PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
625
626         iowrite32(tx_mode, &hw->reg->TX_MODE);
627
628         tcpip = ioread32(&hw->reg->TCPIP_ACC);
629         tcpip |= PCH_GBE_TX_TCPIPACC_EN;
630         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
631         return;
632 }
633
634 /**
635  * pch_gbe_configure_tx - Configure Transmit Unit after Reset
636  * @adapter:  Board private structure
637  */
638 static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
639 {
640         struct pch_gbe_hw *hw = &adapter->hw;
641         u32 tdba, tdlen, dctrl;
642
643         pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
644                  (unsigned long long)adapter->tx_ring->dma,
645                  adapter->tx_ring->size);
646
647         /* Setup the HW Tx Head and Tail descriptor pointers */
648         tdba = adapter->tx_ring->dma;
649         tdlen = adapter->tx_ring->size - 0x10;
650         iowrite32(tdba, &hw->reg->TX_DSC_BASE);
651         iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
652         iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
653
654         /* Enables Transmission DMA */
655         dctrl = ioread32(&hw->reg->DMA_CTRL);
656         dctrl |= PCH_GBE_TX_DMA_EN;
657         iowrite32(dctrl, &hw->reg->DMA_CTRL);
658 }
659
660 /**
661  * pch_gbe_setup_rctl - Configure the receive control registers
662  * @adapter:  Board private structure
663  */
664 static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
665 {
666         struct net_device *netdev = adapter->netdev;
667         struct pch_gbe_hw *hw = &adapter->hw;
668         u32 rx_mode, tcpip;
669
670         rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
671         PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
672
673         iowrite32(rx_mode, &hw->reg->RX_MODE);
674
675         tcpip = ioread32(&hw->reg->TCPIP_ACC);
676
677         if (netdev->features & NETIF_F_RXCSUM) {
678                 tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
679                 tcpip |= PCH_GBE_RX_TCPIPACC_EN;
680         } else {
681                 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
682                 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
683         }
684         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
685         return;
686 }
687
688 /**
689  * pch_gbe_configure_rx - Configure Receive Unit after Reset
690  * @adapter:  Board private structure
691  */
692 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
693 {
694         struct pch_gbe_hw *hw = &adapter->hw;
695         u32 rdba, rdlen, rctl, rxdma;
696
697         pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
698                  (unsigned long long)adapter->rx_ring->dma,
699                  adapter->rx_ring->size);
700
701         pch_gbe_mac_force_mac_fc(hw);
702
703         /* Disables Receive MAC */
704         rctl = ioread32(&hw->reg->MAC_RX_EN);
705         iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
706
707         /* Disables Receive DMA */
708         rxdma = ioread32(&hw->reg->DMA_CTRL);
709         rxdma &= ~PCH_GBE_RX_DMA_EN;
710         iowrite32(rxdma, &hw->reg->DMA_CTRL);
711
712         pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
713                  ioread32(&hw->reg->MAC_RX_EN),
714                  ioread32(&hw->reg->DMA_CTRL));
715
716         /* Setup the HW Rx Head and Tail Descriptor Pointers and
717          * the Base and Length of the Rx Descriptor Ring */
718         rdba = adapter->rx_ring->dma;
719         rdlen = adapter->rx_ring->size - 0x10;
720         iowrite32(rdba, &hw->reg->RX_DSC_BASE);
721         iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
722         iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
723 }
724
725 /**
726  * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
727  * @adapter:     Board private structure
728  * @buffer_info: Buffer information structure
729  */
730 static void pch_gbe_unmap_and_free_tx_resource(
731         struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
732 {
733         if (buffer_info->mapped) {
734                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
735                                  buffer_info->length, DMA_TO_DEVICE);
736                 buffer_info->mapped = false;
737         }
738         if (buffer_info->skb) {
739                 dev_kfree_skb_any(buffer_info->skb);
740                 buffer_info->skb = NULL;
741         }
742 }
743
744 /**
745  * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
746  * @adapter:      Board private structure
747  * @buffer_info:  Buffer information structure
748  */
749 static void pch_gbe_unmap_and_free_rx_resource(
750                                         struct pch_gbe_adapter *adapter,
751                                         struct pch_gbe_buffer *buffer_info)
752 {
753         if (buffer_info->mapped) {
754                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
755                                  buffer_info->length, DMA_FROM_DEVICE);
756                 buffer_info->mapped = false;
757         }
758         if (buffer_info->skb) {
759                 dev_kfree_skb_any(buffer_info->skb);
760                 buffer_info->skb = NULL;
761         }
762 }
763
764 /**
765  * pch_gbe_clean_tx_ring - Free Tx Buffers
766  * @adapter:  Board private structure
767  * @tx_ring:  Ring to be cleaned
768  */
769 static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
770                                    struct pch_gbe_tx_ring *tx_ring)
771 {
772         struct pch_gbe_hw *hw = &adapter->hw;
773         struct pch_gbe_buffer *buffer_info;
774         unsigned long size;
775         unsigned int i;
776
777         /* Free all the Tx ring sk_buffs */
778         for (i = 0; i < tx_ring->count; i++) {
779                 buffer_info = &tx_ring->buffer_info[i];
780                 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
781         }
782         pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
783
784         size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
785         memset(tx_ring->buffer_info, 0, size);
786
787         /* Zero out the descriptor ring */
788         memset(tx_ring->desc, 0, tx_ring->size);
789         tx_ring->next_to_use = 0;
790         tx_ring->next_to_clean = 0;
791         iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
792         iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
793 }
794
795 /**
796  * pch_gbe_clean_rx_ring - Free Rx Buffers
797  * @adapter:  Board private structure
798  * @rx_ring:  Ring to free buffers from
799  */
800 static void
801 pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
802                       struct pch_gbe_rx_ring *rx_ring)
803 {
804         struct pch_gbe_hw *hw = &adapter->hw;
805         struct pch_gbe_buffer *buffer_info;
806         unsigned long size;
807         unsigned int i;
808
809         /* Free all the Rx ring sk_buffs */
810         for (i = 0; i < rx_ring->count; i++) {
811                 buffer_info = &rx_ring->buffer_info[i];
812                 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
813         }
814         pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
815         size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
816         memset(rx_ring->buffer_info, 0, size);
817
818         /* Zero out the descriptor ring */
819         memset(rx_ring->desc, 0, rx_ring->size);
820         rx_ring->next_to_clean = 0;
821         rx_ring->next_to_use = 0;
822         iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
823         iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
824 }
825
826 static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
827                                     u16 duplex)
828 {
829         struct pch_gbe_hw *hw = &adapter->hw;
830         unsigned long rgmii = 0;
831
832         /* Set the RGMII control. */
833 #ifdef PCH_GBE_MAC_IFOP_RGMII
834         switch (speed) {
835         case SPEED_10:
836                 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
837                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
838                 break;
839         case SPEED_100:
840                 rgmii = (PCH_GBE_RGMII_RATE_25M |
841                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
842                 break;
843         case SPEED_1000:
844                 rgmii = (PCH_GBE_RGMII_RATE_125M |
845                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
846                 break;
847         }
848         iowrite32(rgmii, &hw->reg->RGMII_CTRL);
849 #else   /* GMII */
850         rgmii = 0;
851         iowrite32(rgmii, &hw->reg->RGMII_CTRL);
852 #endif
853 }
854 static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
855                               u16 duplex)
856 {
857         struct net_device *netdev = adapter->netdev;
858         struct pch_gbe_hw *hw = &adapter->hw;
859         unsigned long mode = 0;
860
861         /* Set the communication mode */
862         switch (speed) {
863         case SPEED_10:
864                 mode = PCH_GBE_MODE_MII_ETHER;
865                 netdev->tx_queue_len = 10;
866                 break;
867         case SPEED_100:
868                 mode = PCH_GBE_MODE_MII_ETHER;
869                 netdev->tx_queue_len = 100;
870                 break;
871         case SPEED_1000:
872                 mode = PCH_GBE_MODE_GMII_ETHER;
873                 break;
874         }
875         if (duplex == DUPLEX_FULL)
876                 mode |= PCH_GBE_MODE_FULL_DUPLEX;
877         else
878                 mode |= PCH_GBE_MODE_HALF_DUPLEX;
879         iowrite32(mode, &hw->reg->MODE);
880 }
881
882 /**
883  * pch_gbe_watchdog - Watchdog process
884  * @data:  Board private structure
885  */
886 static void pch_gbe_watchdog(unsigned long data)
887 {
888         struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
889         struct net_device *netdev = adapter->netdev;
890         struct pch_gbe_hw *hw = &adapter->hw;
891
892         pr_debug("right now = %ld\n", jiffies);
893
894         pch_gbe_update_stats(adapter);
895         if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
896                 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
897                 netdev->tx_queue_len = adapter->tx_queue_len;
898                 /* mii library handles link maintenance tasks */
899                 if (mii_ethtool_gset(&adapter->mii, &cmd)) {
900                         pr_err("ethtool get setting Error\n");
901                         mod_timer(&adapter->watchdog_timer,
902                                   round_jiffies(jiffies +
903                                                 PCH_GBE_WATCHDOG_PERIOD));
904                         return;
905                 }
906                 hw->mac.link_speed = ethtool_cmd_speed(&cmd);
907                 hw->mac.link_duplex = cmd.duplex;
908                 /* Set the RGMII control. */
909                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
910                                                 hw->mac.link_duplex);
911                 /* Set the communication mode */
912                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
913                                  hw->mac.link_duplex);
914                 netdev_dbg(netdev,
915                            "Link is Up %d Mbps %s-Duplex\n",
916                            hw->mac.link_speed,
917                            cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
918                 netif_carrier_on(netdev);
919                 netif_wake_queue(netdev);
920         } else if ((!mii_link_ok(&adapter->mii)) &&
921                    (netif_carrier_ok(netdev))) {
922                 netdev_dbg(netdev, "NIC Link is Down\n");
923                 hw->mac.link_speed = SPEED_10;
924                 hw->mac.link_duplex = DUPLEX_HALF;
925                 netif_carrier_off(netdev);
926                 netif_stop_queue(netdev);
927         }
928         mod_timer(&adapter->watchdog_timer,
929                   round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
930 }
931
932 /**
933  * pch_gbe_tx_queue - Carry out queuing of the transmission data
934  * @adapter:  Board private structure
935  * @tx_ring:  Tx descriptor ring structure
936  * @skb:      Sockt buffer structure
937  */
938 static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
939                               struct pch_gbe_tx_ring *tx_ring,
940                               struct sk_buff *skb)
941 {
942         struct pch_gbe_hw *hw = &adapter->hw;
943         struct pch_gbe_tx_desc *tx_desc;
944         struct pch_gbe_buffer *buffer_info;
945         struct sk_buff *tmp_skb;
946         unsigned int frame_ctrl;
947         unsigned int ring_num;
948         unsigned long flags;
949
950         /*-- Set frame control --*/
951         frame_ctrl = 0;
952         if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
953                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
954         if (skb->ip_summed == CHECKSUM_NONE)
955                 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
956
957         /* Performs checksum processing */
958         /*
959          * It is because the hardware accelerator does not support a checksum,
960          * when the received data size is less than 64 bytes.
961          */
962         if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
963                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
964                               PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
965                 if (skb->protocol == htons(ETH_P_IP)) {
966                         struct iphdr *iph = ip_hdr(skb);
967                         unsigned int offset;
968                         iph->check = 0;
969                         iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
970                         offset = skb_transport_offset(skb);
971                         if (iph->protocol == IPPROTO_TCP) {
972                                 skb->csum = 0;
973                                 tcp_hdr(skb)->check = 0;
974                                 skb->csum = skb_checksum(skb, offset,
975                                                          skb->len - offset, 0);
976                                 tcp_hdr(skb)->check =
977                                         csum_tcpudp_magic(iph->saddr,
978                                                           iph->daddr,
979                                                           skb->len - offset,
980                                                           IPPROTO_TCP,
981                                                           skb->csum);
982                         } else if (iph->protocol == IPPROTO_UDP) {
983                                 skb->csum = 0;
984                                 udp_hdr(skb)->check = 0;
985                                 skb->csum =
986                                         skb_checksum(skb, offset,
987                                                      skb->len - offset, 0);
988                                 udp_hdr(skb)->check =
989                                         csum_tcpudp_magic(iph->saddr,
990                                                           iph->daddr,
991                                                           skb->len - offset,
992                                                           IPPROTO_UDP,
993                                                           skb->csum);
994                         }
995                 }
996         }
997         spin_lock_irqsave(&tx_ring->tx_lock, flags);
998         ring_num = tx_ring->next_to_use;
999         if (unlikely((ring_num + 1) == tx_ring->count))
1000                 tx_ring->next_to_use = 0;
1001         else
1002                 tx_ring->next_to_use = ring_num + 1;
1003
1004         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1005         buffer_info = &tx_ring->buffer_info[ring_num];
1006         tmp_skb = buffer_info->skb;
1007
1008         /* [Header:14][payload] ---> [Header:14][paddong:2][payload]    */
1009         memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1010         tmp_skb->data[ETH_HLEN] = 0x00;
1011         tmp_skb->data[ETH_HLEN + 1] = 0x00;
1012         tmp_skb->len = skb->len;
1013         memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1014                (skb->len - ETH_HLEN));
1015         /*-- Set Buffer information --*/
1016         buffer_info->length = tmp_skb->len;
1017         buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1018                                           buffer_info->length,
1019                                           DMA_TO_DEVICE);
1020         if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1021                 pr_err("TX DMA map failed\n");
1022                 buffer_info->dma = 0;
1023                 buffer_info->time_stamp = 0;
1024                 tx_ring->next_to_use = ring_num;
1025                 return;
1026         }
1027         buffer_info->mapped = true;
1028         buffer_info->time_stamp = jiffies;
1029
1030         /*-- Set Tx descriptor --*/
1031         tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1032         tx_desc->buffer_addr = (buffer_info->dma);
1033         tx_desc->length = (tmp_skb->len);
1034         tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1035         tx_desc->tx_frame_ctrl = (frame_ctrl);
1036         tx_desc->gbec_status = (DSC_INIT16);
1037
1038         if (unlikely(++ring_num == tx_ring->count))
1039                 ring_num = 0;
1040
1041         /* Update software pointer of TX descriptor */
1042         iowrite32(tx_ring->dma +
1043                   (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1044                   &hw->reg->TX_DSC_SW_P);
1045         dev_kfree_skb_any(skb);
1046 }
1047
1048 /**
1049  * pch_gbe_update_stats - Update the board statistics counters
1050  * @adapter:  Board private structure
1051  */
1052 void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1053 {
1054         struct net_device *netdev = adapter->netdev;
1055         struct pci_dev *pdev = adapter->pdev;
1056         struct pch_gbe_hw_stats *stats = &adapter->stats;
1057         unsigned long flags;
1058
1059         /*
1060          * Prevent stats update while adapter is being reset, or if the pci
1061          * connection is down.
1062          */
1063         if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1064                 return;
1065
1066         spin_lock_irqsave(&adapter->stats_lock, flags);
1067
1068         /* Update device status "adapter->stats" */
1069         stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1070         stats->tx_errors = stats->tx_length_errors +
1071             stats->tx_aborted_errors +
1072             stats->tx_carrier_errors + stats->tx_timeout_count;
1073
1074         /* Update network device status "adapter->net_stats" */
1075         netdev->stats.rx_packets = stats->rx_packets;
1076         netdev->stats.rx_bytes = stats->rx_bytes;
1077         netdev->stats.rx_dropped = stats->rx_dropped;
1078         netdev->stats.tx_packets = stats->tx_packets;
1079         netdev->stats.tx_bytes = stats->tx_bytes;
1080         netdev->stats.tx_dropped = stats->tx_dropped;
1081         /* Fill out the OS statistics structure */
1082         netdev->stats.multicast = stats->multicast;
1083         netdev->stats.collisions = stats->collisions;
1084         /* Rx Errors */
1085         netdev->stats.rx_errors = stats->rx_errors;
1086         netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1087         netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1088         /* Tx Errors */
1089         netdev->stats.tx_errors = stats->tx_errors;
1090         netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1091         netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1092
1093         spin_unlock_irqrestore(&adapter->stats_lock, flags);
1094 }
1095
1096 static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1097 {
1098         u32 rxdma;
1099
1100         /* Enables Receive DMA */
1101         rxdma = ioread32(&hw->reg->DMA_CTRL);
1102         rxdma |= PCH_GBE_RX_DMA_EN;
1103         iowrite32(rxdma, &hw->reg->DMA_CTRL);
1104         /* Enables Receive */
1105         iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
1106         return;
1107 }
1108
1109 /**
1110  * pch_gbe_intr - Interrupt Handler
1111  * @irq:   Interrupt number
1112  * @data:  Pointer to a network interface device structure
1113  * Returns
1114  *      - IRQ_HANDLED:  Our interrupt
1115  *      - IRQ_NONE:     Not our interrupt
1116  */
1117 static irqreturn_t pch_gbe_intr(int irq, void *data)
1118 {
1119         struct net_device *netdev = data;
1120         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1121         struct pch_gbe_hw *hw = &adapter->hw;
1122         u32 int_st;
1123         u32 int_en;
1124
1125         /* Check request status */
1126         int_st = ioread32(&hw->reg->INT_ST);
1127         int_st = int_st & ioread32(&hw->reg->INT_EN);
1128         /* When request status is no interruption factor */
1129         if (unlikely(!int_st))
1130                 return IRQ_NONE;        /* Not our interrupt. End processing. */
1131         pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
1132         if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1133                 adapter->stats.intr_rx_frame_err_count++;
1134         if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1135                 adapter->stats.intr_rx_fifo_err_count++;
1136         if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1137                 adapter->stats.intr_rx_dma_err_count++;
1138         if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1139                 adapter->stats.intr_tx_fifo_err_count++;
1140         if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1141                 adapter->stats.intr_tx_dma_err_count++;
1142         if (int_st & PCH_GBE_INT_TCPIP_ERR)
1143                 adapter->stats.intr_tcpip_err_count++;
1144         /* When Rx descriptor is empty  */
1145         if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1146                 adapter->stats.intr_rx_dsc_empty_count++;
1147                 pr_err("Rx descriptor is empty\n");
1148                 int_en = ioread32(&hw->reg->INT_EN);
1149                 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1150                 if (hw->mac.tx_fc_enable) {
1151                         /* Set Pause packet */
1152                         pch_gbe_mac_set_pause_packet(hw);
1153                 }
1154                 if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
1155                     == 0) {
1156                         return IRQ_HANDLED;
1157                 }
1158         }
1159
1160         /* When request status is Receive interruption */
1161         if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
1162                 if (likely(napi_schedule_prep(&adapter->napi))) {
1163                         /* Enable only Rx Descriptor empty */
1164                         atomic_inc(&adapter->irq_sem);
1165                         int_en = ioread32(&hw->reg->INT_EN);
1166                         int_en &=
1167                             ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1168                         iowrite32(int_en, &hw->reg->INT_EN);
1169                         /* Start polling for NAPI */
1170                         __napi_schedule(&adapter->napi);
1171                 }
1172         }
1173         pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
1174                  IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1175         return IRQ_HANDLED;
1176 }
1177
1178 /**
1179  * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1180  * @adapter:       Board private structure
1181  * @rx_ring:       Rx descriptor ring
1182  * @cleaned_count: Cleaned count
1183  */
1184 static void
1185 pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1186                          struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1187 {
1188         struct net_device *netdev = adapter->netdev;
1189         struct pci_dev *pdev = adapter->pdev;
1190         struct pch_gbe_hw *hw = &adapter->hw;
1191         struct pch_gbe_rx_desc *rx_desc;
1192         struct pch_gbe_buffer *buffer_info;
1193         struct sk_buff *skb;
1194         unsigned int i;
1195         unsigned int bufsz;
1196
1197         bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
1198         i = rx_ring->next_to_use;
1199
1200         while ((cleaned_count--)) {
1201                 buffer_info = &rx_ring->buffer_info[i];
1202                 skb = buffer_info->skb;
1203                 if (skb) {
1204                         skb_trim(skb, 0);
1205                 } else {
1206                         skb = netdev_alloc_skb(netdev, bufsz);
1207                         if (unlikely(!skb)) {
1208                                 /* Better luck next round */
1209                                 adapter->stats.rx_alloc_buff_failed++;
1210                                 break;
1211                         }
1212                         /* 64byte align */
1213                         skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1214
1215                         buffer_info->skb = skb;
1216                         buffer_info->length = adapter->rx_buffer_len;
1217                 }
1218                 buffer_info->dma = dma_map_single(&pdev->dev,
1219                                                   skb->data,
1220                                                   buffer_info->length,
1221                                                   DMA_FROM_DEVICE);
1222                 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1223                         dev_kfree_skb(skb);
1224                         buffer_info->skb = NULL;
1225                         buffer_info->dma = 0;
1226                         adapter->stats.rx_alloc_buff_failed++;
1227                         break; /* while !buffer_info->skb */
1228                 }
1229                 buffer_info->mapped = true;
1230                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1231                 rx_desc->buffer_addr = (buffer_info->dma);
1232                 rx_desc->gbec_status = DSC_INIT16;
1233
1234                 pr_debug("i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
1235                          i, (unsigned long long)buffer_info->dma,
1236                          buffer_info->length);
1237
1238                 if (unlikely(++i == rx_ring->count))
1239                         i = 0;
1240         }
1241         if (likely(rx_ring->next_to_use != i)) {
1242                 rx_ring->next_to_use = i;
1243                 if (unlikely(i-- == 0))
1244                         i = (rx_ring->count - 1);
1245                 iowrite32(rx_ring->dma +
1246                           (int)sizeof(struct pch_gbe_rx_desc) * i,
1247                           &hw->reg->RX_DSC_SW_P);
1248         }
1249         return;
1250 }
1251
1252 /**
1253  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1254  * @adapter:   Board private structure
1255  * @tx_ring:   Tx descriptor ring
1256  */
1257 static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1258                                         struct pch_gbe_tx_ring *tx_ring)
1259 {
1260         struct pch_gbe_buffer *buffer_info;
1261         struct sk_buff *skb;
1262         unsigned int i;
1263         unsigned int bufsz;
1264         struct pch_gbe_tx_desc *tx_desc;
1265
1266         bufsz =
1267             adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1268
1269         for (i = 0; i < tx_ring->count; i++) {
1270                 buffer_info = &tx_ring->buffer_info[i];
1271                 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1272                 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1273                 buffer_info->skb = skb;
1274                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1275                 tx_desc->gbec_status = (DSC_INIT16);
1276         }
1277         return;
1278 }
1279
1280 /**
1281  * pch_gbe_clean_tx - Reclaim resources after transmit completes
1282  * @adapter:   Board private structure
1283  * @tx_ring:   Tx descriptor ring
1284  * Returns
1285  *      true:  Cleaned the descriptor
1286  *      false: Not cleaned the descriptor
1287  */
1288 static bool
1289 pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1290                  struct pch_gbe_tx_ring *tx_ring)
1291 {
1292         struct pch_gbe_tx_desc *tx_desc;
1293         struct pch_gbe_buffer *buffer_info;
1294         struct sk_buff *skb;
1295         unsigned int i;
1296         unsigned int cleaned_count = 0;
1297         bool cleaned = false;
1298
1299         pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1300
1301         i = tx_ring->next_to_clean;
1302         tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1303         pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
1304                  tx_desc->gbec_status, tx_desc->dma_status);
1305
1306         while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1307                 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
1308                 cleaned = true;
1309                 buffer_info = &tx_ring->buffer_info[i];
1310                 skb = buffer_info->skb;
1311
1312                 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1313                         adapter->stats.tx_aborted_errors++;
1314                         pr_err("Transfer Abort Error\n");
1315                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1316                           ) {
1317                         adapter->stats.tx_carrier_errors++;
1318                         pr_err("Transfer Carrier Sense Error\n");
1319                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1320                           ) {
1321                         adapter->stats.tx_aborted_errors++;
1322                         pr_err("Transfer Collision Abort Error\n");
1323                 } else if ((tx_desc->gbec_status &
1324                             (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1325                              PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1326                         adapter->stats.collisions++;
1327                         adapter->stats.tx_packets++;
1328                         adapter->stats.tx_bytes += skb->len;
1329                         pr_debug("Transfer Collision\n");
1330                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1331                           ) {
1332                         adapter->stats.tx_packets++;
1333                         adapter->stats.tx_bytes += skb->len;
1334                 }
1335                 if (buffer_info->mapped) {
1336                         pr_debug("unmap buffer_info->dma : %d\n", i);
1337                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1338                                          buffer_info->length, DMA_TO_DEVICE);
1339                         buffer_info->mapped = false;
1340                 }
1341                 if (buffer_info->skb) {
1342                         pr_debug("trim buffer_info->skb : %d\n", i);
1343                         skb_trim(buffer_info->skb, 0);
1344                 }
1345                 tx_desc->gbec_status = DSC_INIT16;
1346                 if (unlikely(++i == tx_ring->count))
1347                         i = 0;
1348                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1349
1350                 /* weight of a sort for tx, to avoid endless transmit cleanup */
1351                 if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
1352                         break;
1353         }
1354         pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1355                  cleaned_count);
1356         /* Recover from running out of Tx resources in xmit_frame */
1357         if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
1358                 netif_wake_queue(adapter->netdev);
1359                 adapter->stats.tx_restart_count++;
1360                 pr_debug("Tx wake queue\n");
1361         }
1362         spin_lock(&adapter->tx_queue_lock);
1363         tx_ring->next_to_clean = i;
1364         spin_unlock(&adapter->tx_queue_lock);
1365         pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1366         return cleaned;
1367 }
1368
1369 /**
1370  * pch_gbe_clean_rx - Send received data up the network stack; legacy
1371  * @adapter:     Board private structure
1372  * @rx_ring:     Rx descriptor ring
1373  * @work_done:   Completed count
1374  * @work_to_do:  Request count
1375  * Returns
1376  *      true:  Cleaned the descriptor
1377  *      false: Not cleaned the descriptor
1378  */
1379 static bool
1380 pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1381                  struct pch_gbe_rx_ring *rx_ring,
1382                  int *work_done, int work_to_do)
1383 {
1384         struct net_device *netdev = adapter->netdev;
1385         struct pci_dev *pdev = adapter->pdev;
1386         struct pch_gbe_buffer *buffer_info;
1387         struct pch_gbe_rx_desc *rx_desc;
1388         u32 length;
1389         unsigned int i;
1390         unsigned int cleaned_count = 0;
1391         bool cleaned = false;
1392         struct sk_buff *skb, *new_skb;
1393         u8 dma_status;
1394         u16 gbec_status;
1395         u32 tcp_ip_status;
1396
1397         i = rx_ring->next_to_clean;
1398
1399         while (*work_done < work_to_do) {
1400                 /* Check Rx descriptor status */
1401                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1402                 if (rx_desc->gbec_status == DSC_INIT16)
1403                         break;
1404                 cleaned = true;
1405                 cleaned_count++;
1406
1407                 dma_status = rx_desc->dma_status;
1408                 gbec_status = rx_desc->gbec_status;
1409                 tcp_ip_status = rx_desc->tcp_ip_status;
1410                 rx_desc->gbec_status = DSC_INIT16;
1411                 buffer_info = &rx_ring->buffer_info[i];
1412                 skb = buffer_info->skb;
1413
1414                 /* unmap dma */
1415                 dma_unmap_single(&pdev->dev, buffer_info->dma,
1416                                    buffer_info->length, DMA_FROM_DEVICE);
1417                 buffer_info->mapped = false;
1418                 /* Prefetch the packet */
1419                 prefetch(skb->data);
1420
1421                 pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
1422                          "TCP:0x%08x]  BufInf = 0x%p\n",
1423                          i, dma_status, gbec_status, tcp_ip_status,
1424                          buffer_info);
1425                 /* Error check */
1426                 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1427                         adapter->stats.rx_frame_errors++;
1428                         pr_err("Receive Not Octal Error\n");
1429                 } else if (unlikely(gbec_status &
1430                                 PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1431                         adapter->stats.rx_frame_errors++;
1432                         pr_err("Receive Nibble Error\n");
1433                 } else if (unlikely(gbec_status &
1434                                 PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1435                         adapter->stats.rx_crc_errors++;
1436                         pr_err("Receive CRC Error\n");
1437                 } else {
1438                         /* get receive length */
1439                         /* length convert[-3] */
1440                         length = (rx_desc->rx_words_eob) - 3;
1441
1442                         /* Decide the data conversion method */
1443                         if (!(netdev->features & NETIF_F_RXCSUM)) {
1444                                 /* [Header:14][payload] */
1445                                 if (NET_IP_ALIGN) {
1446                                         /* Because alignment differs,
1447                                          * the new_skb is newly allocated,
1448                                          * and data is copied to new_skb.*/
1449                                         new_skb = netdev_alloc_skb(netdev,
1450                                                          length + NET_IP_ALIGN);
1451                                         if (!new_skb) {
1452                                                 /* dorrop error */
1453                                                 pr_err("New skb allocation "
1454                                                         "Error\n");
1455                                                 goto dorrop;
1456                                         }
1457                                         skb_reserve(new_skb, NET_IP_ALIGN);
1458                                         memcpy(new_skb->data, skb->data,
1459                                                length);
1460                                         skb = new_skb;
1461                                 } else {
1462                                         /* DMA buffer is used as SKB as it is.*/
1463                                         buffer_info->skb = NULL;
1464                                 }
1465                         } else {
1466                                 /* [Header:14][padding:2][payload] */
1467                                 /* The length includes padding length */
1468                                 length = length - PCH_GBE_DMA_PADDING;
1469                                 if ((length < copybreak) ||
1470                                     (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
1471                                         /* Because alignment differs,
1472                                          * the new_skb is newly allocated,
1473                                          * and data is copied to new_skb.
1474                                          * Padding data is deleted
1475                                          * at the time of a copy.*/
1476                                         new_skb = netdev_alloc_skb(netdev,
1477                                                          length + NET_IP_ALIGN);
1478                                         if (!new_skb) {
1479                                                 /* dorrop error */
1480                                                 pr_err("New skb allocation "
1481                                                         "Error\n");
1482                                                 goto dorrop;
1483                                         }
1484                                         skb_reserve(new_skb, NET_IP_ALIGN);
1485                                         memcpy(new_skb->data, skb->data,
1486                                                ETH_HLEN);
1487                                         memcpy(&new_skb->data[ETH_HLEN],
1488                                                &skb->data[ETH_HLEN +
1489                                                PCH_GBE_DMA_PADDING],
1490                                                length - ETH_HLEN);
1491                                         skb = new_skb;
1492                                 } else {
1493                                         /* Padding data is deleted
1494                                          * by moving header data.*/
1495                                         memmove(&skb->data[PCH_GBE_DMA_PADDING],
1496                                                 &skb->data[0], ETH_HLEN);
1497                                         skb_reserve(skb, NET_IP_ALIGN);
1498                                         buffer_info->skb = NULL;
1499                                 }
1500                         }
1501                         /* The length includes FCS length */
1502                         length = length - ETH_FCS_LEN;
1503                         /* update status of driver */
1504                         adapter->stats.rx_bytes += length;
1505                         adapter->stats.rx_packets++;
1506                         if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1507                                 adapter->stats.multicast++;
1508                         /* Write meta date of skb */
1509                         skb_put(skb, length);
1510                         skb->protocol = eth_type_trans(skb, netdev);
1511                         if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1512                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1513                         else
1514                                 skb->ip_summed = CHECKSUM_NONE;
1515
1516                         napi_gro_receive(&adapter->napi, skb);
1517                         (*work_done)++;
1518                         pr_debug("Receive skb->ip_summed: %d length: %d\n",
1519                                  skb->ip_summed, length);
1520                 }
1521 dorrop:
1522                 /* return some buffers to hardware, one at a time is too slow */
1523                 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1524                         pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1525                                                  cleaned_count);
1526                         cleaned_count = 0;
1527                 }
1528                 if (++i == rx_ring->count)
1529                         i = 0;
1530         }
1531         rx_ring->next_to_clean = i;
1532         if (cleaned_count)
1533                 pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1534         return cleaned;
1535 }
1536
1537 /**
1538  * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1539  * @adapter:  Board private structure
1540  * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
1541  * Returns
1542  *      0:              Successfully
1543  *      Negative value: Failed
1544  */
1545 int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1546                                 struct pch_gbe_tx_ring *tx_ring)
1547 {
1548         struct pci_dev *pdev = adapter->pdev;
1549         struct pch_gbe_tx_desc *tx_desc;
1550         int size;
1551         int desNo;
1552
1553         size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1554         tx_ring->buffer_info = vzalloc(size);
1555         if (!tx_ring->buffer_info) {
1556                 pr_err("Unable to allocate memory for the buffer information\n");
1557                 return -ENOMEM;
1558         }
1559
1560         tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1561
1562         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1563                                            &tx_ring->dma, GFP_KERNEL);
1564         if (!tx_ring->desc) {
1565                 vfree(tx_ring->buffer_info);
1566                 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1567                 return -ENOMEM;
1568         }
1569         memset(tx_ring->desc, 0, tx_ring->size);
1570
1571         tx_ring->next_to_use = 0;
1572         tx_ring->next_to_clean = 0;
1573         spin_lock_init(&tx_ring->tx_lock);
1574
1575         for (desNo = 0; desNo < tx_ring->count; desNo++) {
1576                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1577                 tx_desc->gbec_status = DSC_INIT16;
1578         }
1579         pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
1580                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1581                  tx_ring->desc, (unsigned long long)tx_ring->dma,
1582                  tx_ring->next_to_clean, tx_ring->next_to_use);
1583         return 0;
1584 }
1585
1586 /**
1587  * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1588  * @adapter:  Board private structure
1589  * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
1590  * Returns
1591  *      0:              Successfully
1592  *      Negative value: Failed
1593  */
1594 int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1595                                 struct pch_gbe_rx_ring *rx_ring)
1596 {
1597         struct pci_dev *pdev = adapter->pdev;
1598         struct pch_gbe_rx_desc *rx_desc;
1599         int size;
1600         int desNo;
1601
1602         size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1603         rx_ring->buffer_info = vzalloc(size);
1604         if (!rx_ring->buffer_info) {
1605                 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1606                 return -ENOMEM;
1607         }
1608         rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1609         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1610                                            &rx_ring->dma, GFP_KERNEL);
1611
1612         if (!rx_ring->desc) {
1613                 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1614                 vfree(rx_ring->buffer_info);
1615                 return -ENOMEM;
1616         }
1617         memset(rx_ring->desc, 0, rx_ring->size);
1618         rx_ring->next_to_clean = 0;
1619         rx_ring->next_to_use = 0;
1620         for (desNo = 0; desNo < rx_ring->count; desNo++) {
1621                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1622                 rx_desc->gbec_status = DSC_INIT16;
1623         }
1624         pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
1625                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1626                  rx_ring->desc, (unsigned long long)rx_ring->dma,
1627                  rx_ring->next_to_clean, rx_ring->next_to_use);
1628         return 0;
1629 }
1630
1631 /**
1632  * pch_gbe_free_tx_resources - Free Tx Resources
1633  * @adapter:  Board private structure
1634  * @tx_ring:  Tx descriptor ring for a specific queue
1635  */
1636 void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1637                                 struct pch_gbe_tx_ring *tx_ring)
1638 {
1639         struct pci_dev *pdev = adapter->pdev;
1640
1641         pch_gbe_clean_tx_ring(adapter, tx_ring);
1642         vfree(tx_ring->buffer_info);
1643         tx_ring->buffer_info = NULL;
1644         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1645         tx_ring->desc = NULL;
1646 }
1647
1648 /**
1649  * pch_gbe_free_rx_resources - Free Rx Resources
1650  * @adapter:  Board private structure
1651  * @rx_ring:  Ring to clean the resources from
1652  */
1653 void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1654                                 struct pch_gbe_rx_ring *rx_ring)
1655 {
1656         struct pci_dev *pdev = adapter->pdev;
1657
1658         pch_gbe_clean_rx_ring(adapter, rx_ring);
1659         vfree(rx_ring->buffer_info);
1660         rx_ring->buffer_info = NULL;
1661         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1662         rx_ring->desc = NULL;
1663 }
1664
1665 /**
1666  * pch_gbe_request_irq - Allocate an interrupt line
1667  * @adapter:  Board private structure
1668  * Returns
1669  *      0:              Successfully
1670  *      Negative value: Failed
1671  */
1672 static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1673 {
1674         struct net_device *netdev = adapter->netdev;
1675         int err;
1676         int flags;
1677
1678         flags = IRQF_SHARED;
1679         adapter->have_msi = false;
1680         err = pci_enable_msi(adapter->pdev);
1681         pr_debug("call pci_enable_msi\n");
1682         if (err) {
1683                 pr_debug("call pci_enable_msi - Error: %d\n", err);
1684         } else {
1685                 flags = 0;
1686                 adapter->have_msi = true;
1687         }
1688         err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1689                           flags, netdev->name, netdev);
1690         if (err)
1691                 pr_err("Unable to allocate interrupt Error: %d\n", err);
1692         pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
1693                  adapter->have_msi, flags, err);
1694         return err;
1695 }
1696
1697
1698 static void pch_gbe_set_multi(struct net_device *netdev);
/**
 * pch_gbe_up - Up GbE network device
 * @adapter:  Board private structure
 * Returns
 *	0:		Successfully
 *	Negative value:	Failed
 *
 * Brings the interface fully up after a reset: reprograms the RX filter,
 * configures the TX/RX engines and rings, installs the IRQ handler,
 * pre-fills both rings, then enables NAPI, interrupts and the TX queue.
 * The step ordering is deliberate; do not reorder.
 */
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err;

	/* Ensure we have a valid MAC */
	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		pr_err("Error: Invalid MAC address\n");
		return -EINVAL;
	}

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		pr_err("Error: can't bring device up\n");
		return err;
	}
	/* Pre-populate the rings; the RX ring is filled completely. */
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	/* Remember the stack's queue length so pch_gbe_down() can restore it. */
	adapter->tx_queue_len = netdev->tx_queue_len;
	pch_gbe_start_receive(&adapter->hw);

	/* Fire the watchdog immediately to pick up the current link state. */
	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;
}
1745
/**
 * pch_gbe_down - Down GbE network device
 * @adapter:  Board private structure
 *
 * Reverse of pch_gbe_up(): stops NAPI and interrupts, releases the IRQ,
 * kills the watchdog, detaches the queue from the stack, resets the MAC
 * and finally drops every buffer still queued in the rings.
 */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	/* Must run after the IRQ is gone so the timer cannot be re-armed. */
	del_timer_sync(&adapter->watchdog_timer);

	/* Restore the queue length saved in pch_gbe_up(). */
	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* Reset the MAC before tearing down the rings so DMA is quiesced. */
	pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
}
1772
/**
 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
 * @adapter:  Board private structure to initialize
 * Returns
 *	0:		Successfully
 *	Negative value:	Failed
 *
 * One-time software setup at probe time: frame-size limits, HAL function
 * pointers, queue allocation, the adapter's spinlocks and statistics.
 */
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Default RX buffer size; pch_gbe_change_mtu() may enlarge it. */
	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Initialize the hardware-specific values */
	if (pch_gbe_hal_setup_init_funcs(hw)) {
		pr_err("Hardware Initialization Failure\n");
		return -EIO;
	}
	if (pch_gbe_alloc_queues(adapter)) {
		pr_err("Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->tx_queue_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	atomic_set(&adapter->irq_sem, 0);
	/* Start with interrupts masked; pch_gbe_up() enables them. */
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
		 (u32) adapter->rx_buffer_len,
		 hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}
1812
/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev:     Network interface device structure
 * Returns
 *	0:		Successfully
 *	Negative value:	Failed
 *
 * ndo_open handler: allocates both descriptor rings, powers up the PHY
 * and brings the device up. On failure the goto ladder unwinds exactly
 * the resources acquired so far, in reverse order.
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	pr_debug("Success End\n");
	return 0;

err_up:
	/* Keep the PHY powered if a wake-on-LAN event is armed. */
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	pr_err("Error End\n");
	return err;
}
1852
1853 /**
1854  * pch_gbe_stop - Disables a network interface
1855  * @netdev:  Network interface device structure
1856  * Returns
1857  *      0: Successfully
1858  */
1859 static int pch_gbe_stop(struct net_device *netdev)
1860 {
1861         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1862         struct pch_gbe_hw *hw = &adapter->hw;
1863
1864         pch_gbe_down(adapter);
1865         if (!adapter->wake_up_evt)
1866                 pch_gbe_hal_power_down_phy(hw);
1867         pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
1868         pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
1869         return 0;
1870 }
1871
/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:     Socket buffer structure
 * @netdev:  Network interface device structure
 * Returns
 *	- NETDEV_TX_OK:   Normal end
 *	- NETDEV_TX_BUSY: Error end
 *
 * ndo_start_xmit handler. Oversized frames are dropped (counted as
 * length errors) rather than rejected, so the stack does not requeue
 * them forever.
 */
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	unsigned long flags;

	/* NOTE(review): the "- 4" presumably reserves room for the FCS —
	 * confirm against the descriptor format. */
	if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
		pr_err("Transfer length Error: skb len: %d > max: %d\n",
		       skb->len, adapter->hw.mac.max_frame_size);
		dev_kfree_skb_any(skb);
		adapter->stats.tx_length_errors++;
		return NETDEV_TX_OK;
	}
	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	/* No free descriptors: stop the queue and let TX-clean restart it. */
	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
			 tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}
1910
1911 /**
1912  * pch_gbe_get_stats - Get System Network Statistics
1913  * @netdev:  Network interface device structure
1914  * Returns:  The current stats
1915  */
1916 static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
1917 {
1918         /* only return the current stats */
1919         return &netdev->stats;
1920 }
1921
/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev:   Network interface device structure
 *
 * Programs the RX_MODE filter bits from netdev->flags and, when the
 * multicast list fits in the PCH_GBE_MAR_ENTRIES hardware table, writes
 * the packed address list to the MAC. Uses GFP_ATOMIC for the temporary
 * list — NOTE(review): presumably because this can run with locks held;
 * confirm against callers.
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		/* Disable both unicast and multicast filtering. */
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	/* The list does not fit in the MAR table; filtering is already
	 * disabled above, nothing more to program. */
	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}
1979
1980 /**
1981  * pch_gbe_set_mac - Change the Ethernet Address of the NIC
1982  * @netdev: Network interface device structure
1983  * @addr:   Pointer to an address structure
1984  * Returns
1985  *      0:              Successfully
1986  *      -EADDRNOTAVAIL: Failed
1987  */
1988 static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
1989 {
1990         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1991         struct sockaddr *skaddr = addr;
1992         int ret_val;
1993
1994         if (!is_valid_ether_addr(skaddr->sa_data)) {
1995                 ret_val = -EADDRNOTAVAIL;
1996         } else {
1997                 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
1998                 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
1999                 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2000                 ret_val = 0;
2001         }
2002         pr_debug("ret_val : 0x%08x\n", ret_val);
2003         pr_debug("dev_addr : %pM\n", netdev->dev_addr);
2004         pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
2005         pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2006                  ioread32(&adapter->hw.reg->mac_adr[0].high),
2007                  ioread32(&adapter->hw.reg->mac_adr[0].low));
2008         return ret_val;
2009 }
2010
2011 /**
2012  * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2013  * @netdev:   Network interface device structure
2014  * @new_mtu:  New value for maximum frame size
2015  * Returns
2016  *      0:              Successfully
2017  *      -EINVAL:        Failed
2018  */
2019 static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2020 {
2021         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2022         int max_frame;
2023
2024         max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2025         if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2026                 (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
2027                 pr_err("Invalid MTU setting\n");
2028                 return -EINVAL;
2029         }
2030         if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2031                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2032         else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2033                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2034         else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2035                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2036         else
2037                 adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
2038         netdev->mtu = new_mtu;
2039         adapter->hw.mac.max_frame_size = max_frame;
2040
2041         if (netif_running(netdev))
2042                 pch_gbe_reinit_locked(adapter);
2043         else
2044                 pch_gbe_reset(adapter);
2045
2046         pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
2047                  max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2048                  adapter->hw.mac.max_frame_size);
2049         return 0;
2050 }
2051
2052 /**
2053  * pch_gbe_set_features - Reset device after features changed
2054  * @netdev:   Network interface device structure
2055  * @features:  New features
2056  * Returns
2057  *      0:              HW state updated successfully
2058  */
2059 static int pch_gbe_set_features(struct net_device *netdev, u32 features)
2060 {
2061         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2062         u32 changed = features ^ netdev->features;
2063
2064         if (!(changed & NETIF_F_RXCSUM))
2065                 return 0;
2066
2067         if (netif_running(netdev))
2068                 pch_gbe_reinit_locked(adapter);
2069         else
2070                 pch_gbe_reset(adapter);
2071
2072         return 0;
2073 }
2074
2075 /**
2076  * pch_gbe_ioctl - Controls register through a MII interface
2077  * @netdev:   Network interface device structure
2078  * @ifr:      Pointer to ifr structure
2079  * @cmd:      Control command
2080  * Returns
2081  *      0:      Successfully
2082  *      Negative value: Failed
2083  */
2084 static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2085 {
2086         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2087
2088         pr_debug("cmd : 0x%04x\n", cmd);
2089
2090         return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2091 }
2092
2093 /**
2094  * pch_gbe_tx_timeout - Respond to a Tx Hang
2095  * @netdev:   Network interface device structure
2096  */
2097 static void pch_gbe_tx_timeout(struct net_device *netdev)
2098 {
2099         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2100
2101         /* Do the reset outside of interrupt context */
2102         adapter->stats.tx_timeout_count++;
2103         schedule_work(&adapter->reset_task);
2104 }
2105
/**
 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
 * @napi:    Pointer of polling device struct
 * @budget:  The maximum number of a packet
 * Returns
 *	false:  Exit the polling mode
 *	true:   Continue the polling mode
 *
 * Cleans completed TX descriptors and received packets. Polling ends
 * (napi_complete + IRQ re-enable) when the link is down or less than a
 * full budget of RX work was done.
 */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
	    container_of(napi, struct pch_gbe_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	pr_debug("budget : %d\n", budget);

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(netdev)) {
		poll_end_flag = true;
	} else {
		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);

		/* NOTE(review): any TX completion forces a full-budget
		 * return, keeping NAPI in polling mode even when RX was
		 * quiet — confirm this is intended. */
		if (cleaned)
			work_done = budget;
		/* If no Tx and not enough Rx work done,
		 * exit the polling mode
		 */
		if (work_done < budget)
			poll_end_flag = true;
	}

	if (poll_end_flag) {
		napi_complete(napi);
		pch_gbe_irq_enable(adapter);
	}

	pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}
2151
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev:  Network interface device structure
 *
 * Runs the interrupt handler directly with the IRQ line masked so it
 * cannot re-enter.
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int irq = adapter->pdev->irq;

	disable_irq(irq);
	pch_gbe_intr(irq, netdev);
	enable_irq(irq);
}
#endif
2166
2167 static const struct net_device_ops pch_gbe_netdev_ops = {
2168         .ndo_open = pch_gbe_open,
2169         .ndo_stop = pch_gbe_stop,
2170         .ndo_start_xmit = pch_gbe_xmit_frame,
2171         .ndo_get_stats = pch_gbe_get_stats,
2172         .ndo_set_mac_address = pch_gbe_set_mac,
2173         .ndo_tx_timeout = pch_gbe_tx_timeout,
2174         .ndo_change_mtu = pch_gbe_change_mtu,
2175         .ndo_set_features = pch_gbe_set_features,
2176         .ndo_do_ioctl = pch_gbe_ioctl,
2177         .ndo_set_multicast_list = &pch_gbe_set_multi,
2178 #ifdef CONFIG_NET_POLL_CONTROLLER
2179         .ndo_poll_controller = pch_gbe_netpoll,
2180 #endif
2181 };
2182
/* PCI error-recovery: a fatal bus error was detected. Quiesce the
 * device and ask the PCI core for a slot reset. */
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);
	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
2196
/* PCI error-recovery: the slot has been reset. Re-enable the device,
 * restore bus mastering, power the PHY back up and reset the MAC. */
static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}
2216
/* PCI error-recovery: recovery is complete. Bring a running interface
 * back up and reattach it to the stack. */
static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev) && pch_gbe_up(adapter)) {
		pr_debug("can't bring device back up after reset\n");
		return;
	}
	netif_device_attach(netdev);
}
2230
2231 static int __pch_gbe_suspend(struct pci_dev *pdev)
2232 {
2233         struct net_device *netdev = pci_get_drvdata(pdev);
2234         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2235         struct pch_gbe_hw *hw = &adapter->hw;
2236         u32 wufc = adapter->wake_up_evt;
2237         int retval = 0;
2238
2239         netif_device_detach(netdev);
2240         if (netif_running(netdev))
2241                 pch_gbe_down(adapter);
2242         if (wufc) {
2243                 pch_gbe_set_multi(netdev);
2244                 pch_gbe_setup_rctl(adapter);
2245                 pch_gbe_configure_rx(adapter);
2246                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2247                                         hw->mac.link_duplex);
2248                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2249                                         hw->mac.link_duplex);
2250                 pch_gbe_mac_set_wol_event(hw, wufc);
2251                 pci_disable_device(pdev);
2252         } else {
2253                 pch_gbe_hal_power_down_phy(hw);
2254                 pch_gbe_mac_set_wol_event(hw, wufc);
2255                 pci_disable_device(pdev);
2256         }
2257         return retval;
2258 }
2259
2260 #ifdef CONFIG_PM
/* dev_pm_ops suspend hook: thin wrapper over the shared suspend path. */
static int pch_gbe_suspend(struct device *device)
{
	return __pch_gbe_suspend(to_pci_dev(device));
}
2267
2268 static int pch_gbe_resume(struct device *device)
2269 {
2270         struct pci_dev *pdev = to_pci_dev(device);
2271         struct net_device *netdev = pci_get_drvdata(pdev);
2272         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2273         struct pch_gbe_hw *hw = &adapter->hw;
2274         u32 err;
2275
2276         err = pci_enable_device(pdev);
2277         if (err) {
2278                 pr_err("Cannot enable PCI device from suspend\n");
2279                 return err;
2280         }
2281         pci_set_master(pdev);
2282         pch_gbe_hal_power_up_phy(hw);
2283         pch_gbe_reset(adapter);
2284         /* Clear wake on lan control and status */
2285         pch_gbe_mac_set_wol_event(hw, 0);
2286
2287         if (netif_running(netdev))
2288                 pch_gbe_up(adapter);
2289         netif_device_attach(netdev);
2290
2291         return 0;
2292 }
2293 #endif /* CONFIG_PM */
2294
2295 static void pch_gbe_shutdown(struct pci_dev *pdev)
2296 {
2297         __pch_gbe_suspend(pdev);
2298         if (system_state == SYSTEM_POWER_OFF) {
2299                 pci_wake_from_d3(pdev, true);
2300                 pci_set_power_state(pdev, PCI_D3hot);
2301         }
2302 }
2303
/* PCI remove hook: tear down everything pch_gbe_probe() set up, in
 * reverse order. The reset_task must be cancelled before the netdev is
 * unregistered so it cannot race with teardown. */
static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.reg);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
2322
2323 static int pch_gbe_probe(struct pci_dev *pdev,
2324                           const struct pci_device_id *pci_id)
2325 {
2326         struct net_device *netdev;
2327         struct pch_gbe_adapter *adapter;
2328         int ret;
2329
2330         ret = pci_enable_device(pdev);
2331         if (ret)
2332                 return ret;
2333
2334         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2335                 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2336                 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2337                 if (ret) {
2338                         ret = pci_set_consistent_dma_mask(pdev,
2339                                                           DMA_BIT_MASK(32));
2340                         if (ret) {
2341                                 dev_err(&pdev->dev, "ERR: No usable DMA "
2342                                         "configuration, aborting\n");
2343                                 goto err_disable_device;
2344                         }
2345                 }
2346         }
2347
2348         ret = pci_request_regions(pdev, KBUILD_MODNAME);
2349         if (ret) {
2350                 dev_err(&pdev->dev,
2351                         "ERR: Can't reserve PCI I/O and memory resources\n");
2352                 goto err_disable_device;
2353         }
2354         pci_set_master(pdev);
2355
2356         netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2357         if (!netdev) {
2358                 ret = -ENOMEM;
2359                 dev_err(&pdev->dev,
2360                         "ERR: Can't allocate and set up an Ethernet device\n");
2361                 goto err_release_pci;
2362         }
2363         SET_NETDEV_DEV(netdev, &pdev->dev);
2364
2365         pci_set_drvdata(pdev, netdev);
2366         adapter = netdev_priv(netdev);
2367         adapter->netdev = netdev;
2368         adapter->pdev = pdev;
2369         adapter->hw.back = adapter;
2370         adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2371         if (!adapter->hw.reg) {
2372                 ret = -EIO;
2373                 dev_err(&pdev->dev, "Can't ioremap\n");
2374                 goto err_free_netdev;
2375         }
2376
2377         netdev->netdev_ops = &pch_gbe_netdev_ops;
2378         netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2379         netif_napi_add(netdev, &adapter->napi,
2380                        pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2381         netdev->hw_features = NETIF_F_RXCSUM |
2382                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2383         netdev->features = netdev->hw_features;
2384         pch_gbe_set_ethtool_ops(netdev);
2385
2386         pch_gbe_mac_load_mac_addr(&adapter->hw);
2387         pch_gbe_mac_reset_hw(&adapter->hw);
2388
2389         /* setup the private structure */
2390         ret = pch_gbe_sw_init(adapter);
2391         if (ret)
2392                 goto err_iounmap;
2393
2394         /* Initialize PHY */
2395         ret = pch_gbe_init_phy(adapter);
2396         if (ret) {
2397                 dev_err(&pdev->dev, "PHY initialize error\n");
2398                 goto err_free_adapter;
2399         }
2400         pch_gbe_hal_get_bus_info(&adapter->hw);
2401
2402         /* Read the MAC address. and store to the private data */
2403         ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2404         if (ret) {
2405                 dev_err(&pdev->dev, "MAC address Read Error\n");
2406                 goto err_free_adapter;
2407         }
2408
2409         memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2410         if (!is_valid_ether_addr(netdev->dev_addr)) {
2411                 /*
2412                  * If the MAC is invalid (or just missing), display a warning
2413                  * but do not abort setting up the device. pch_gbe_up will
2414                  * prevent the interface from being brought up until a valid MAC
2415                  * is set.
2416                  */
2417                 dev_err(&pdev->dev, "Invalid MAC address, "
2418                                     "interface disabled.\n");
2419         }
2420         setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2421                     (unsigned long)adapter);
2422
2423         INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2424
2425         pch_gbe_check_options(adapter);
2426
2427         /* initialize the wol settings based on the eeprom settings */
2428         adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2429         dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2430
2431         /* reset the hardware with the new settings */
2432         pch_gbe_reset(adapter);
2433
2434         ret = register_netdev(netdev);
2435         if (ret)
2436                 goto err_free_adapter;
2437         /* tell the stack to leave us alone until pch_gbe_open() is called */
2438         netif_carrier_off(netdev);
2439         netif_stop_queue(netdev);
2440
2441         dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
2442
2443         device_set_wakeup_enable(&pdev->dev, 1);
2444         return 0;
2445
2446 err_free_adapter:
2447         pch_gbe_hal_phy_hw_reset(&adapter->hw);
2448         kfree(adapter->tx_ring);
2449         kfree(adapter->rx_ring);
2450 err_iounmap:
2451         iounmap(adapter->hw.reg);
2452 err_free_netdev:
2453         free_netdev(netdev);
2454 err_release_pci:
2455         pci_release_regions(pdev);
2456 err_disable_device:
2457         pci_disable_device(pdev);
2458         return ret;
2459 }
2460
/*
 * PCI ID table: Intel EG20T (IOH1) GbE plus the ROHM/LAPIS ML7223 and
 * ML7831 derivatives.  Class matching is restricted to network/ethernet
 * because ML7831 reuses the Intel device ID 0x8802 on a different vendor.
 */
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
        /* Intel EG20T PCH on-chip GbE */
        {.vendor = PCI_VENDOR_ID_INTEL,
         .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
         .subvendor = PCI_ANY_ID,
         .subdevice = PCI_ANY_ID,
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
        /* ROHM ML7223 IOH GbE */
        {.vendor = PCI_VENDOR_ID_ROHM,
         .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
         .subvendor = PCI_ANY_ID,
         .subdevice = PCI_ANY_ID,
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
        /* ROHM ML7831 IOH GbE */
        {.vendor = PCI_VENDOR_ID_ROHM,
         .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
         .subvendor = PCI_ANY_ID,
         .subdevice = PCI_ANY_ID,
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
        /* required last entry */
        {0}
};
2486
#ifdef CONFIG_PM
/*
 * Map every system sleep transition onto the same suspend/resume pair:
 * hibernation's freeze/thaw and poweroff/restore need nothing beyond a
 * normal suspend/resume cycle for this device.
 */
static const struct dev_pm_ops pch_gbe_pm_ops = {
        .suspend = pch_gbe_suspend,
        .resume = pch_gbe_resume,
        .freeze = pch_gbe_suspend,
        .thaw = pch_gbe_resume,
        .poweroff = pch_gbe_suspend,
        .restore = pch_gbe_resume,
};
#endif /* CONFIG_PM */
2497
/* PCI AER (Advanced Error Reporting) recovery callbacks. */
static struct pci_error_handlers pch_gbe_err_handler = {
        .error_detected = pch_gbe_io_error_detected,
        .slot_reset = pch_gbe_io_slot_reset,
        .resume = pch_gbe_io_resume
};
2503
/* Driver registration structure tying all callbacks together. */
static struct pci_driver pch_gbe_driver = {
        .name = KBUILD_MODNAME,
        .id_table = pch_gbe_pcidev_id,
        .probe = pch_gbe_probe,
        .remove = pch_gbe_remove,
#ifdef CONFIG_PM
        /* Sleep-state handling only built when power management is on. */
        .driver.pm = &pch_gbe_pm_ops,
#endif
        .shutdown = pch_gbe_shutdown,
        .err_handler = &pch_gbe_err_handler
};
2515
2516
2517 static int __init pch_gbe_init_module(void)
2518 {
2519         int ret;
2520
2521         ret = pci_register_driver(&pch_gbe_driver);
2522         if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2523                 if (copybreak == 0) {
2524                         pr_info("copybreak disabled\n");
2525                 } else {
2526                         pr_info("copybreak enabled for packets <= %u bytes\n",
2527                                 copybreak);
2528                 }
2529         }
2530         return ret;
2531 }
2532
/* pch_gbe_exit_module - module exit point; unregisters the PCI driver. */
static void __exit pch_gbe_exit_module(void)
{
        pci_unregister_driver(&pch_gbe_driver);
}
2537
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

/* Module metadata and device table export for module autoloading. */
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

/* Rx copybreak threshold, tunable at runtime via sysfs (mode 0644). */
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* pch_gbe_main.c */