1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23
24 MODULE_VERSION(DRV_VER);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29
30 static ushort rx_frag_size = 2048;
31 static unsigned int num_vfs;
32 module_param(rx_frag_size, ushort, S_IRUGO);
33 module_param(num_vfs, uint, S_IRUGO);
34 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
35 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36
37 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
38         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
39         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
40         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
42         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
43         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
44         { 0 }
45 };
46 MODULE_DEVICE_TABLE(pci, be_dev_ids);
47 /* UE Status Low CSR */
48 static const char * const ue_status_low_desc[] = {
49         "CEV",
50         "CTX",
51         "DBUF",
52         "ERX",
53         "Host",
54         "MPU",
55         "NDMA",
56         "PTC",
57         "RDMA",
58         "RXF",
59         "RXIPS",
60         "RXULP0",
61         "RXULP1",
62         "RXULP2",
63         "TIM",
64         "TPOST",
65         "TPRE",
66         "TXIPS",
67         "TXULP0",
68         "TXULP1",
69         "UC",
70         "WDMA",
71         "TXULP2",
72         "HOST1",
73         "P0_OB_LINK",
74         "P1_OB_LINK",
75         "HOST_GPIO",
76         "MBOX",
77         "AXGMAC0",
78         "AXGMAC1",
79         "JTAG",
80         "MPU_INTPEND"
81 };
82 /* UE Status High CSR */
83 static const char * const ue_status_hi_desc[] = {
84         "LPCMEMHOST",
85         "MGMT_MAC",
86         "PCS0ONLINE",
87         "MPU_IRAM",
88         "PCS1ONLINE",
89         "PCTL0",
90         "PCTL1",
91         "PMEM",
92         "RR",
93         "TXPB",
94         "RXPP",
95         "XAUI",
96         "TXP",
97         "ARM",
98         "IPC",
99         "HOST2",
100         "HOST3",
101         "HOST4",
102         "HOST5",
103         "HOST6",
104         "HOST7",
105         "HOST8",
106         "HOST9",
107         "NETC",
108         "Unknown",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown"
116 };
117
118 /* Is BE in a multi-channel mode */
119 static inline bool be_is_mc(struct be_adapter *adapter) {
120         return (adapter->function_mode & FLEX10_MODE ||
121                 adapter->function_mode & VNIC_MODE ||
122                 adapter->function_mode & UMC_ENABLED);
123 }
124
125 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126 {
127         struct be_dma_mem *mem = &q->dma_mem;
128         if (mem->va)
129                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130                                   mem->dma);
131 }
132
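/*
 * Allocate DMA-coherent, zeroed backing memory for a queue of "len"
 * entries of "entry_size" bytes each; be_queue_free() is the matching
 * teardown. Returns 0 on success or -ENOMEM on allocation failure.
 */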
133 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
134                 u16 len, u16 entry_size)
135 {
136         struct be_dma_mem *mem = &q->dma_mem;
137
138         memset(q, 0, sizeof(*q));
139         q->len = len;
140         q->entry_size = entry_size;
141         mem->size = len * entry_size;
142         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
143                                      GFP_KERNEL);
144         if (!mem->va)
145                 return -ENOMEM;
146         memset(mem->va, 0, mem->size);
147         return 0;
148 }
149
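/*
 * Mask or unmask all interrupts for this function by flipping the
 * hostintr bit of the MEMBAR control register through PCI config
 * space. Skipped after an EEH error, when config space may no longer
 * be safely accessible.
 */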
150 static void be_intr_set(struct be_adapter *adapter, bool enable)
151 {
152         u32 reg, enabled;
153
154         if (adapter->eeh_err)
155                 return;
156
157         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
158                                 &reg);
159         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
161         if (!enabled && enable)
162                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163         else if (enabled && !enable)
164                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165         else
166                 return;
167
168         pci_write_config_dword(adapter->pdev,
169                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
170 }
171
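/*
 * RX/TX doorbell helpers: post a (ring id, count) pair to the
 * BAR-mapped doorbell area. The wmb() makes sure descriptor updates in
 * host memory are visible to the device before the doorbell is rung.
 * Typical use after posting RX buffers (a sketch; see be_post_rx_frags):
 *
 *	be_rxq_notify(adapter, rxq->id, posted);
 */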
172 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
173 {
174         u32 val = 0;
175         val |= qid & DB_RQ_RING_ID_MASK;
176         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
177
178         wmb();
179         iowrite32(val, adapter->db + DB_RQ_OFFSET);
180 }
181
182 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
183 {
184         u32 val = 0;
185         val |= qid & DB_TXULP_RING_ID_MASK;
186         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
187
188         wmb();
189         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
190 }
191
192 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
193                 bool arm, bool clear_int, u16 num_popped)
194 {
195         u32 val = 0;
196         val |= qid & DB_EQ_RING_ID_MASK;
197         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
198                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
199
200         if (adapter->eeh_err)
201                 return;
202
203         if (arm)
204                 val |= 1 << DB_EQ_REARM_SHIFT;
205         if (clear_int)
206                 val |= 1 << DB_EQ_CLR_SHIFT;
207         val |= 1 << DB_EQ_EVNT_SHIFT;
208         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
209         iowrite32(val, adapter->db + DB_EQ_OFFSET);
210 }
211
212 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
213 {
214         u32 val = 0;
215         val |= qid & DB_CQ_RING_ID_MASK;
216         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
217                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
218
219         if (adapter->eeh_err)
220                 return;
221
222         if (arm)
223                 val |= 1 << DB_CQ_REARM_SHIFT;
224         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
225         iowrite32(val, adapter->db + DB_CQ_OFFSET);
226 }
227
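/*
 * Change the primary MAC. The new address is added (pmac_add) before
 * the old one is deleted, so a failure part-way leaves the previously
 * working address in place.
 */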
228 static int be_mac_addr_set(struct net_device *netdev, void *p)
229 {
230         struct be_adapter *adapter = netdev_priv(netdev);
231         struct sockaddr *addr = p;
232         int status = 0;
233         u8 current_mac[ETH_ALEN];
234         u32 pmac_id = adapter->pmac_id;
235
236         if (!is_valid_ether_addr(addr->sa_data))
237                 return -EADDRNOTAVAIL;
238
239         status = be_cmd_mac_addr_query(adapter, current_mac,
240                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
241         if (status)
242                 goto err;
243
244         if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
245                 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
246                                 adapter->if_handle, &adapter->pmac_id, 0);
247                 if (status)
248                         goto err;
249
250                 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
251         }
252         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
253         return 0;
254 err:
255         dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
256         return status;
257 }
258
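/*
 * The populate_be2/be3/lancer_stats() helpers below copy counters from
 * the chip-specific stats-command layouts (v0, v1 and the Lancer pport
 * format) into the common adapter->drv_stats fields reported through
 * be_get_stats64() and ethtool.
 */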
259 static void populate_be2_stats(struct be_adapter *adapter)
260 {
261         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
262         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
263         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
264         struct be_port_rxf_stats_v0 *port_stats =
265                                         &rxf_stats->port[adapter->port_num];
266         struct be_drv_stats *drvs = &adapter->drv_stats;
267
268         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
269         drvs->rx_pause_frames = port_stats->rx_pause_frames;
270         drvs->rx_crc_errors = port_stats->rx_crc_errors;
271         drvs->rx_control_frames = port_stats->rx_control_frames;
272         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
273         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
274         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
275         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
276         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
277         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
278         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
279         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
280         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
281         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
282         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
283         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
284         drvs->rx_dropped_header_too_small =
285                 port_stats->rx_dropped_header_too_small;
286         drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
287         drvs->rx_alignment_symbol_errors =
288                 port_stats->rx_alignment_symbol_errors;
289
290         drvs->tx_pauseframes = port_stats->tx_pauseframes;
291         drvs->tx_controlframes = port_stats->tx_controlframes;
292
293         if (adapter->port_num)
294                 drvs->jabber_events = rxf_stats->port1_jabber_events;
295         else
296                 drvs->jabber_events = rxf_stats->port0_jabber_events;
297         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
298         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
299         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
300         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
301         drvs->forwarded_packets = rxf_stats->forwarded_packets;
302         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
303         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
304         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
305         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
306 }
307
308 static void populate_be3_stats(struct be_adapter *adapter)
309 {
310         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
312         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
313         struct be_port_rxf_stats_v1 *port_stats =
314                                         &rxf_stats->port[adapter->port_num];
315         struct be_drv_stats *drvs = &adapter->drv_stats;
316
317         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
318         drvs->rx_pause_frames = port_stats->rx_pause_frames;
319         drvs->rx_crc_errors = port_stats->rx_crc_errors;
320         drvs->rx_control_frames = port_stats->rx_control_frames;
321         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
322         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
323         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
324         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
325         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
326         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
327         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
328         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
329         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
330         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
331         drvs->rx_dropped_header_too_small =
332                 port_stats->rx_dropped_header_too_small;
333         drvs->rx_input_fifo_overflow_drop =
334                 port_stats->rx_input_fifo_overflow_drop;
335         drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
336         drvs->rx_alignment_symbol_errors =
337                 port_stats->rx_alignment_symbol_errors;
338         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
339         drvs->tx_pauseframes = port_stats->tx_pauseframes;
340         drvs->tx_controlframes = port_stats->tx_controlframes;
341         drvs->jabber_events = port_stats->jabber_events;
342         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
343         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
344         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
345         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
346         drvs->forwarded_packets = rxf_stats->forwarded_packets;
347         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
348         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
349         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
350         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
351 }
352
353 static void populate_lancer_stats(struct be_adapter *adapter)
354 {
356         struct be_drv_stats *drvs = &adapter->drv_stats;
357         struct lancer_pport_stats *pport_stats =
358                                         pport_stats_from_cmd(adapter);
359
360         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
361         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
362         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
363         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
364         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
365         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
366         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
367         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
368         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
369         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
370         drvs->rx_dropped_tcp_length =
371                                 pport_stats->rx_dropped_invalid_tcp_length;
372         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
373         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
374         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
375         drvs->rx_dropped_header_too_small =
376                                 pport_stats->rx_dropped_header_too_small;
377         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
378         drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
379         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
380         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
381         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
382         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
383         drvs->jabber_events = pport_stats->rx_jabbers;
384         drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
385         drvs->forwarded_packets = pport_stats->num_forwards_lo;
386         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
387         drvs->rx_drops_too_many_frags =
388                                 pport_stats->rx_drops_too_many_frags_lo;
389 }
390
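/*
 * Fold a 16-bit HW counter that wraps at 65535 into a 32-bit software
 * accumulator whose low 16 bits mirror the current HW value. Worked
 * example: acc = 0x0001FFFE and the HW counter wraps to val = 2; since
 * 2 < 0xFFFE a wrap is detected, so acc becomes hi(acc) + 2 + 65536 =
 * 0x00020002.
 */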
391 static void accumulate_16bit_val(u32 *acc, u16 val)
392 {
393 #define lo(x)                   ((x) & 0xFFFF)
394 #define hi(x)                   ((x) & 0xFFFF0000)
395         bool wrapped = val < lo(*acc);
396         u32 newacc = hi(*acc) + val;
397
398         if (wrapped)
399                 newacc += 65536;
400         ACCESS_ONCE(*acc) = newacc;
401 }
402
403 void be_parse_stats(struct be_adapter *adapter)
404 {
405         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
406         struct be_rx_obj *rxo;
407         int i;
408
409         if (adapter->generation == BE_GEN3) {
410                 if (lancer_chip(adapter))
411                         populate_lancer_stats(adapter);
412                 else
413                         populate_be3_stats(adapter);
414         } else {
415                 populate_be2_stats(adapter);
416         }
417
418         /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
419         for_all_rx_queues(adapter, rxo, i) {
420                 /* below erx HW counter can actually wrap around after
421                  * 65535. Driver accumulates a 32-bit value
422                  */
423                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
424                                 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
425         }
426 }
427
428 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
429                                         struct rtnl_link_stats64 *stats)
430 {
431         struct be_adapter *adapter = netdev_priv(netdev);
432         struct be_drv_stats *drvs = &adapter->drv_stats;
433         struct be_rx_obj *rxo;
434         struct be_tx_obj *txo;
435         u64 pkts, bytes;
436         unsigned int start;
437         int i;
438
439         for_all_rx_queues(adapter, rxo, i) {
440                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
441                 do {
442                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
443                         pkts = rx_stats(rxo)->rx_pkts;
444                         bytes = rx_stats(rxo)->rx_bytes;
445                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
446                 stats->rx_packets += pkts;
447                 stats->rx_bytes += bytes;
448                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
449                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
450                                         rx_stats(rxo)->rx_drops_no_frags;
451         }
452
453         for_all_tx_queues(adapter, txo, i) {
454                 const struct be_tx_stats *tx_stats = tx_stats(txo);
455                 do {
456                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
457                         pkts = tx_stats(txo)->tx_pkts;
458                         bytes = tx_stats(txo)->tx_bytes;
459                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
460                 stats->tx_packets += pkts;
461                 stats->tx_bytes += bytes;
462         }
463
464         /* bad pkts received */
465         stats->rx_errors = drvs->rx_crc_errors +
466                 drvs->rx_alignment_symbol_errors +
467                 drvs->rx_in_range_errors +
468                 drvs->rx_out_range_errors +
469                 drvs->rx_frame_too_long +
470                 drvs->rx_dropped_too_small +
471                 drvs->rx_dropped_too_short +
472                 drvs->rx_dropped_header_too_small +
473                 drvs->rx_dropped_tcp_length +
474                 drvs->rx_dropped_runt;
475
476         /* detailed rx errors */
477         stats->rx_length_errors = drvs->rx_in_range_errors +
478                 drvs->rx_out_range_errors +
479                 drvs->rx_frame_too_long;
480
481         stats->rx_crc_errors = drvs->rx_crc_errors;
482
483         /* frame alignment errors */
484         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
485
486         /* receiver fifo overrun */
487         /* drops_no_pbuf is not per i/f, it's per BE card */
488         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
489                                 drvs->rx_input_fifo_overflow_drop +
490                                 drvs->rx_drops_no_pbuf;
491         return stats;
492 }
493
494 void be_link_status_update(struct be_adapter *adapter, u32 link_status)
495 {
496         struct net_device *netdev = adapter->netdev;
497
498         /* when link status changes, link speed must be re-queried from card */
499         adapter->link_speed = -1;
500         if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
501                 netif_carrier_on(netdev);
502                 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
503         } else {
504                 netif_carrier_off(netdev);
505                 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
506         }
507 }
508
509 static void be_tx_stats_update(struct be_tx_obj *txo,
510                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
511 {
512         struct be_tx_stats *stats = tx_stats(txo);
513
514         u64_stats_update_begin(&stats->sync);
515         stats->tx_reqs++;
516         stats->tx_wrbs += wrb_cnt;
517         stats->tx_bytes += copied;
518         stats->tx_pkts += (gso_segs ? gso_segs : 1);
519         if (stopped)
520                 stats->tx_stops++;
521         u64_stats_update_end(&stats->sync);
522 }
523
524 /* Determine number of WRB entries needed to xmit data in an skb */
525 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
526                                                                 bool *dummy)
527 {
528         int cnt = (skb->len > skb->data_len);
529
530         cnt += skb_shinfo(skb)->nr_frags;
531
532         /* to account for hdr wrb */
533         cnt++;
534         if (lancer_chip(adapter) || !(cnt & 1)) {
535                 *dummy = false;
536         } else {
537                 /* add a dummy to make it an even num */
538                 cnt++;
539                 *dummy = true;
540         }
541         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
542         return cnt;
543 }
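/* Example for wrb_cnt_for_skb(): an skb with linear data and 2 frags
 * needs 1 + 2 + 1(hdr) = 4 WRBs; 4 is even, so no dummy WRB is added.
 * A 1-frag skb would need 3 and get a dummy WRB on non-Lancer chips to
 * keep the count even.
 */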
544
545 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
546 {
547         wrb->frag_pa_hi = upper_32_bits(addr);
548         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
549         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
550 }
551
552 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
553                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
554 {
555         u8 vlan_prio = 0;
556         u16 vlan_tag = 0;
557
558         memset(hdr, 0, sizeof(*hdr));
559
560         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
561
562         if (skb_is_gso(skb)) {
563                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
564                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
565                         hdr, skb_shinfo(skb)->gso_size);
566                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
567                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
568                 if (lancer_chip(adapter) && adapter->sli_family ==
569                                                         LANCER_A0_SLI_FAMILY) {
570                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
571                         if (is_tcp_pkt(skb))
572                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
573                                                                 tcpcs, hdr, 1);
574                         else if (is_udp_pkt(skb))
575                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
576                                                                 udpcs, hdr, 1);
577                 }
578         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
579                 if (is_tcp_pkt(skb))
580                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
581                 else if (is_udp_pkt(skb))
582                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
583         }
584
585         if (vlan_tx_tag_present(skb)) {
586                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
587                 vlan_tag = vlan_tx_tag_get(skb);
588                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
589                 /* If vlan priority provided by OS is NOT in available bmap */
590                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
591                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
592                                         adapter->recommended_prio;
593                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
594         }
595
596         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
597         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
598         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
599         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
600 }
601
602 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
603                 bool unmap_single)
604 {
605         dma_addr_t dma;
606
607         be_dws_le_to_cpu(wrb, sizeof(*wrb));
608
609         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
610         if (wrb->frag_len) {
611                 if (unmap_single)
612                         dma_unmap_single(dev, dma, wrb->frag_len,
613                                          DMA_TO_DEVICE);
614                 else
615                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
616         }
617 }
618
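/*
 * Map the skb header and frags and fill one WRB per mapped piece, plus
 * the header WRB and an optional dummy. Returns the number of bytes
 * mapped, or 0 on a DMA mapping error, in which case everything mapped
 * so far is unmapped and txq->head is rewound.
 */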
619 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
620                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
621 {
622         dma_addr_t busaddr;
623         int i, copied = 0;
624         struct device *dev = &adapter->pdev->dev;
625         struct sk_buff *first_skb = skb;
626         struct be_eth_wrb *wrb;
627         struct be_eth_hdr_wrb *hdr;
628         bool map_single = false;
629         u16 map_head;
630
631         hdr = queue_head_node(txq);
632         queue_head_inc(txq);
633         map_head = txq->head;
634
635         if (skb->len > skb->data_len) {
636                 int len = skb_headlen(skb);
637                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
638                 if (dma_mapping_error(dev, busaddr))
639                         goto dma_err;
640                 map_single = true;
641                 wrb = queue_head_node(txq);
642                 wrb_fill(wrb, busaddr, len);
643                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
644                 queue_head_inc(txq);
645                 copied += len;
646         }
647
648         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
649                 const struct skb_frag_struct *frag =
650                         &skb_shinfo(skb)->frags[i];
651                 busaddr = skb_frag_dma_map(dev, frag, 0,
652                                            skb_frag_size(frag), DMA_TO_DEVICE);
653                 if (dma_mapping_error(dev, busaddr))
654                         goto dma_err;
655                 wrb = queue_head_node(txq);
656                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
657                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
658                 queue_head_inc(txq);
659                 copied += skb_frag_size(frag);
660         }
661
662         if (dummy_wrb) {
663                 wrb = queue_head_node(txq);
664                 wrb_fill(wrb, 0, 0);
665                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
666                 queue_head_inc(txq);
667         }
668
669         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
670         be_dws_cpu_to_le(hdr, sizeof(*hdr));
671
672         return copied;
673 dma_err:
674         txq->head = map_head;
675         while (copied) {
676                 wrb = queue_head_node(txq);
677                 unmap_tx_frag(dev, wrb, map_single);
678                 map_single = false;
679                 copied -= wrb->frag_len;
680                 queue_head_inc(txq);
681         }
682         return 0;
683 }
684
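/*
 * ndo_start_xmit handler: builds WRBs for the skb, stops the subqueue
 * *before* ringing the doorbell when fewer than BE_MAX_TX_FRAG_COUNT
 * free entries would remain, then rings the TX doorbell. On a DMA
 * mapping failure the queue head is rewound and the skb is dropped.
 */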
685 static netdev_tx_t be_xmit(struct sk_buff *skb,
686                         struct net_device *netdev)
687 {
688         struct be_adapter *adapter = netdev_priv(netdev);
689         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
690         struct be_queue_info *txq = &txo->q;
691         u32 wrb_cnt = 0, copied = 0;
692         u32 start = txq->head;
693         bool dummy_wrb, stopped = false;
694
695         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
696
697         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
698         if (copied) {
699                 /* record the sent skb in the sent_skb table */
700                 BUG_ON(txo->sent_skb_list[start]);
701                 txo->sent_skb_list[start] = skb;
702
703                 /* Ensure txq has space for the next skb; Else stop the queue
704                  * *BEFORE* ringing the tx doorbell, so that we serialize the
705                  * tx compls of the current transmit which'll wake up the queue
706                  */
707                 atomic_add(wrb_cnt, &txq->used);
708                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
709                                                                 txq->len) {
710                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
711                         stopped = true;
712                 }
713
714                 be_txq_notify(adapter, txq->id, wrb_cnt);
715
716                 be_tx_stats_update(txo, wrb_cnt, copied,
717                                 skb_shinfo(skb)->gso_segs, stopped);
718         } else {
719                 txq->head = start;
720                 dev_kfree_skb_any(skb);
721         }
722         return NETDEV_TX_OK;
723 }
724
725 static int be_change_mtu(struct net_device *netdev, int new_mtu)
726 {
727         struct be_adapter *adapter = netdev_priv(netdev);
728         if (new_mtu < BE_MIN_MTU ||
729                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
730                                         (ETH_HLEN + ETH_FCS_LEN))) {
731                 dev_info(&adapter->pdev->dev,
732                         "MTU must be between %d and %d bytes\n",
733                         BE_MIN_MTU,
734                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
735                 return -EINVAL;
736         }
737         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
738                         netdev->mtu, new_mtu);
739         netdev->mtu = new_mtu;
740         return 0;
741 }
742
743 /*
744  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
745  * If the user configures more, place BE in vlan promiscuous mode.
746  */
747 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
748 {
749         u16 vtag[BE_NUM_VLANS_SUPPORTED];
750         u16 ntags = 0, i;
751         int status = 0;
752         u32 if_handle;
753
754         if (vf) {
755                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
756                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
757                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
758         }
759
760         /* No need to further configure vids if in promiscuous mode */
761         if (adapter->promiscuous)
762                 return 0;
763
764         if (adapter->vlans_added <= adapter->max_vlans) {
765                 /* Construct VLAN Table to give to HW */
766                 for (i = 0; i < VLAN_N_VID; i++) {
767                         if (adapter->vlan_tag[i]) {
768                                 vtag[ntags] = cpu_to_le16(i);
769                                 ntags++;
770                         }
771                 }
772                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
773                                         vtag, ntags, 1, 0);
774         } else {
775                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
776                                         NULL, 0, 1, 1);
777         }
778
779         return status;
780 }
781
782 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
783 {
784         struct be_adapter *adapter = netdev_priv(netdev);
785
786         adapter->vlans_added++;
787         if (!be_physfn(adapter))
788                 return;
789
790         adapter->vlan_tag[vid] = 1;
791         if (adapter->vlans_added <= (adapter->max_vlans + 1))
792                 be_vid_config(adapter, false, 0);
793 }
794
795 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
796 {
797         struct be_adapter *adapter = netdev_priv(netdev);
798
799         adapter->vlans_added--;
800
801         if (!be_physfn(adapter))
802                 return;
803
804         adapter->vlan_tag[vid] = 0;
805         if (adapter->vlans_added <= adapter->max_vlans)
806                 be_vid_config(adapter, false, 0);
807 }
808
809 static void be_set_rx_mode(struct net_device *netdev)
810 {
811         struct be_adapter *adapter = netdev_priv(netdev);
812
813         if (netdev->flags & IFF_PROMISC) {
814                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
815                 adapter->promiscuous = true;
816                 goto done;
817         }
818
819         /* BE was previously in promiscuous mode; disable it */
820         if (adapter->promiscuous) {
821                 adapter->promiscuous = false;
822                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
823
824                 if (adapter->vlans_added)
825                         be_vid_config(adapter, false, 0);
826         }
827
828         /* Enable multicast promisc if num configured exceeds what we support */
829         if (netdev->flags & IFF_ALLMULTI ||
830                         netdev_mc_count(netdev) > BE_MAX_MC) {
831                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
832                 goto done;
833         }
834
835         be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
836 done:
837         return;
838 }
839
840 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
841 {
842         struct be_adapter *adapter = netdev_priv(netdev);
843         int status;
844
845         if (!adapter->sriov_enabled)
846                 return -EPERM;
847
848         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
849                 return -EINVAL;
850
851         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
852                 status = be_cmd_pmac_del(adapter,
853                                         adapter->vf_cfg[vf].vf_if_handle,
854                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
855
856         status = be_cmd_pmac_add(adapter, mac,
857                                 adapter->vf_cfg[vf].vf_if_handle,
858                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
859
860         if (status)
861                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
862                                 mac, vf);
863         else
864                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
865
866         return status;
867 }
868
869 static int be_get_vf_config(struct net_device *netdev, int vf,
870                         struct ifla_vf_info *vi)
871 {
872         struct be_adapter *adapter = netdev_priv(netdev);
873
874         if (!adapter->sriov_enabled)
875                 return -EPERM;
876
877         if (vf >= num_vfs)
878                 return -EINVAL;
879
880         vi->vf = vf;
881         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
882         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
883         vi->qos = 0;
884         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
885
886         return 0;
887 }
888
889 static int be_set_vf_vlan(struct net_device *netdev,
890                         int vf, u16 vlan, u8 qos)
891 {
892         struct be_adapter *adapter = netdev_priv(netdev);
893         int status = 0;
894
895         if (!adapter->sriov_enabled)
896                 return -EPERM;
897
898         if ((vf >= num_vfs) || (vlan > 4095))
899                 return -EINVAL;
900
901         if (vlan) {
902                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
903                 adapter->vlans_added++;
904         } else {
905                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
906                 adapter->vlans_added--;
907         }
908
909         status = be_vid_config(adapter, true, vf);
910
911         if (status)
912                 dev_info(&adapter->pdev->dev,
913                                 "VLAN %d config on VF %d failed\n", vlan, vf);
914         return status;
915 }
916
917 static int be_set_vf_tx_rate(struct net_device *netdev,
918                         int vf, int rate)
919 {
920         struct be_adapter *adapter = netdev_priv(netdev);
921         int status = 0;
922
923         if (!adapter->sriov_enabled)
924                 return -EPERM;
925
926         if ((vf >= num_vfs) || (rate < 0))
927                 return -EINVAL;
928
929         if (rate > 10000)
930                 rate = 10000;
931
932         adapter->vf_cfg[vf].vf_tx_rate = rate;
933         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
934
935         if (status)
936                 dev_info(&adapter->pdev->dev,
937                                 "tx rate %d on VF %d failed\n", rate, vf);
938         return status;
939 }
940
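/*
 * Adaptive interrupt coalescing: roughly once a second, derive a new EQ
 * delay from the observed RX packet rate (eqd = pps / 110000, scaled by
 * 8) and clamp it to [min_eqd, max_eqd]; e.g. ~880K pps yields eqd = 64.
 * Rates below ~220K pps (eqd < 10) disable the delay entirely.
 */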
941 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
942 {
943         struct be_eq_obj *rx_eq = &rxo->rx_eq;
944         struct be_rx_stats *stats = rx_stats(rxo);
945         ulong now = jiffies;
946         ulong delta = now - stats->rx_jiffies;
947         u64 pkts;
948         unsigned int start, eqd;
949
950         if (!rx_eq->enable_aic)
951                 return;
952
953         /* Wrapped around */
954         if (time_before(now, stats->rx_jiffies)) {
955                 stats->rx_jiffies = now;
956                 return;
957         }
958
959         /* Update once a second */
960         if (delta < HZ)
961                 return;
962
963         do {
964                 start = u64_stats_fetch_begin_bh(&stats->sync);
965                 pkts = stats->rx_pkts;
966         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
967
968         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
969         stats->rx_pkts_prev = pkts;
970         stats->rx_jiffies = now;
971         eqd = stats->rx_pps / 110000;
972         eqd = eqd << 3;
973         if (eqd > rx_eq->max_eqd)
974                 eqd = rx_eq->max_eqd;
975         if (eqd < rx_eq->min_eqd)
976                 eqd = rx_eq->min_eqd;
977         if (eqd < 10)
978                 eqd = 0;
979         if (eqd != rx_eq->cur_eqd) {
980                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
981                 rx_eq->cur_eqd = eqd;
982         }
983 }
984
985 static void be_rx_stats_update(struct be_rx_obj *rxo,
986                 struct be_rx_compl_info *rxcp)
987 {
988         struct be_rx_stats *stats = rx_stats(rxo);
989
990         u64_stats_update_begin(&stats->sync);
991         stats->rx_compl++;
992         stats->rx_bytes += rxcp->pkt_size;
993         stats->rx_pkts++;
994         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
995                 stats->rx_mcast_pkts++;
996         if (rxcp->err)
997                 stats->rx_compl_err++;
998         u64_stats_update_end(&stats->sync);
999 }
1000
1001 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1002 {
1003         /* L4 checksum is not reliable for non TCP/UDP packets.
1004          * Also ignore ipcksm for ipv6 pkts */
1005         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1006                                 (rxcp->ip_csum || rxcp->ipv6);
1007 }
1008
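/*
 * Look up the page backing RX fragment "frag_idx". A big page is
 * DMA-mapped once and carved into rx_frag_size chunks; it is unmapped
 * only when the chunk flagged last_page_user is consumed.
 */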
1009 static struct be_rx_page_info *
1010 get_rx_page_info(struct be_adapter *adapter,
1011                 struct be_rx_obj *rxo,
1012                 u16 frag_idx)
1013 {
1014         struct be_rx_page_info *rx_page_info;
1015         struct be_queue_info *rxq = &rxo->q;
1016
1017         rx_page_info = &rxo->page_info_tbl[frag_idx];
1018         BUG_ON(!rx_page_info->page);
1019
1020         if (rx_page_info->last_page_user) {
1021                 dma_unmap_page(&adapter->pdev->dev,
1022                                dma_unmap_addr(rx_page_info, bus),
1023                                adapter->big_page_size, DMA_FROM_DEVICE);
1024                 rx_page_info->last_page_user = false;
1025         }
1026
1027         atomic_dec(&rxq->used);
1028         return rx_page_info;
1029 }
1030
1031 /* Throw away the data in the Rx completion */
1032 static void be_rx_compl_discard(struct be_adapter *adapter,
1033                 struct be_rx_obj *rxo,
1034                 struct be_rx_compl_info *rxcp)
1035 {
1036         struct be_queue_info *rxq = &rxo->q;
1037         struct be_rx_page_info *page_info;
1038         u16 i, num_rcvd = rxcp->num_rcvd;
1039
1040         for (i = 0; i < num_rcvd; i++) {
1041                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1042                 put_page(page_info->page);
1043                 memset(page_info, 0, sizeof(*page_info));
1044                 index_inc(&rxcp->rxq_idx, rxq->len);
1045         }
1046 }
1047
1048 /*
1049  * skb_fill_rx_data forms a complete skb for an ether frame
1050  * indicated by rxcp.
1051  */
1052 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1053                         struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1054 {
1055         struct be_queue_info *rxq = &rxo->q;
1056         struct be_rx_page_info *page_info;
1057         u16 i, j;
1058         u16 hdr_len, curr_frag_len, remaining;
1059         u8 *start;
1060
1061         page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1062         start = page_address(page_info->page) + page_info->page_offset;
1063         prefetch(start);
1064
1065         /* Copy data in the first descriptor of this completion */
1066         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1067
1068         /* Copy the header portion into skb_data */
1069         hdr_len = min(BE_HDR_LEN, curr_frag_len);
1070         memcpy(skb->data, start, hdr_len);
1071         skb->len = curr_frag_len;
1072         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1073                 /* Complete packet has now been moved to data */
1074                 put_page(page_info->page);
1075                 skb->data_len = 0;
1076                 skb->tail += curr_frag_len;
1077         } else {
1078                 skb_shinfo(skb)->nr_frags = 1;
1079                 skb_frag_set_page(skb, 0, page_info->page);
1080                 skb_shinfo(skb)->frags[0].page_offset =
1081                                         page_info->page_offset + hdr_len;
1082                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1083                 skb->data_len = curr_frag_len - hdr_len;
1084                 skb->truesize += rx_frag_size;
1085                 skb->tail += hdr_len;
1086         }
1087         page_info->page = NULL;
1088
1089         if (rxcp->pkt_size <= rx_frag_size) {
1090                 BUG_ON(rxcp->num_rcvd != 1);
1091                 return;
1092         }
1093
1094         /* More frags present for this completion */
1095         index_inc(&rxcp->rxq_idx, rxq->len);
1096         remaining = rxcp->pkt_size - curr_frag_len;
1097         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1098                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1099                 curr_frag_len = min(remaining, rx_frag_size);
1100
1101                 /* Coalesce all frags from the same physical page in one slot */
1102                 if (page_info->page_offset == 0) {
1103                         /* Fresh page */
1104                         j++;
1105                         skb_frag_set_page(skb, j, page_info->page);
1106                         skb_shinfo(skb)->frags[j].page_offset =
1107                                                         page_info->page_offset;
1108                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1109                         skb_shinfo(skb)->nr_frags++;
1110                 } else {
1111                         put_page(page_info->page);
1112                 }
1113
1114                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1115                 skb->len += curr_frag_len;
1116                 skb->data_len += curr_frag_len;
1117                 skb->truesize += rx_frag_size;
1118                 remaining -= curr_frag_len;
1119                 index_inc(&rxcp->rxq_idx, rxq->len);
1120                 page_info->page = NULL;
1121         }
1122         BUG_ON(j > MAX_SKB_FRAGS);
1123 }
1124
1125 /* Process the RX completion indicated by rxcp when GRO is disabled */
1126 static void be_rx_compl_process(struct be_adapter *adapter,
1127                         struct be_rx_obj *rxo,
1128                         struct be_rx_compl_info *rxcp)
1129 {
1130         struct net_device *netdev = adapter->netdev;
1131         struct sk_buff *skb;
1132
1133         skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1134         if (unlikely(!skb)) {
1135                 rx_stats(rxo)->rx_drops_no_skbs++;
1136                 be_rx_compl_discard(adapter, rxo, rxcp);
1137                 return;
1138         }
1139
1140         skb_fill_rx_data(adapter, rxo, skb, rxcp);
1141
1142         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1143                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1144         else
1145                 skb_checksum_none_assert(skb);
1146
1147         skb->protocol = eth_type_trans(skb, netdev);
1148         if (adapter->netdev->features & NETIF_F_RXHASH)
1149                 skb->rxhash = rxcp->rss_hash;
1150
1152         if (rxcp->vlanf)
1153                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1154
1155         netif_receive_skb(skb);
1156 }
1157
1158 /* Process the RX completion indicated by rxcp when GRO is enabled */
1159 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1160                 struct be_rx_obj *rxo,
1161                 struct be_rx_compl_info *rxcp)
1162 {
1163         struct be_rx_page_info *page_info;
1164         struct sk_buff *skb = NULL;
1165         struct be_queue_info *rxq = &rxo->q;
1166         struct be_eq_obj *eq_obj = &rxo->rx_eq;
1167         u16 remaining, curr_frag_len;
1168         u16 i, j;
1169
1170         skb = napi_get_frags(&eq_obj->napi);
1171         if (!skb) {
1172                 be_rx_compl_discard(adapter, rxo, rxcp);
1173                 return;
1174         }
1175
1176         remaining = rxcp->pkt_size;
1177         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1178                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1179
1180                 curr_frag_len = min(remaining, rx_frag_size);
1181
1182                 /* Coalesce all frags from the same physical page in one slot */
1183                 if (i == 0 || page_info->page_offset == 0) {
1184                         /* First frag or Fresh page */
1185                         j++;
1186                         skb_frag_set_page(skb, j, page_info->page);
1187                         skb_shinfo(skb)->frags[j].page_offset =
1188                                                         page_info->page_offset;
1189                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1190                 } else {
1191                         put_page(page_info->page);
1192                 }
1193                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1194                 skb->truesize += rx_frag_size;
1195                 remaining -= curr_frag_len;
1196                 index_inc(&rxcp->rxq_idx, rxq->len);
1197                 memset(page_info, 0, sizeof(*page_info));
1198         }
1199         BUG_ON(j > MAX_SKB_FRAGS);
1200
1201         skb_shinfo(skb)->nr_frags = j + 1;
1202         skb->len = rxcp->pkt_size;
1203         skb->data_len = rxcp->pkt_size;
1204         skb->ip_summed = CHECKSUM_UNNECESSARY;
1205         if (adapter->netdev->features & NETIF_F_RXHASH)
1206                 skb->rxhash = rxcp->rss_hash;
1207
1208         if (rxcp->vlanf)
1209                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1210
1211         napi_gro_frags(&eq_obj->napi);
1212 }
1213
1214 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1215                                 struct be_eth_rx_compl *compl,
1216                                 struct be_rx_compl_info *rxcp)
1217 {
1218         rxcp->pkt_size =
1219                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1220         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1221         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1222         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1223         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1224         rxcp->ip_csum =
1225                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1226         rxcp->l4_csum =
1227                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1228         rxcp->ipv6 =
1229                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1230         rxcp->rxq_idx =
1231                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1232         rxcp->num_rcvd =
1233                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1234         rxcp->pkt_type =
1235                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1236         rxcp->rss_hash =
1237                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1238         if (rxcp->vlanf) {
1239                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1240                                           compl);
1241                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1242                                                compl);
1243         }
1244         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1245 }
1246
1247 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1248                                 struct be_eth_rx_compl *compl,
1249                                 struct be_rx_compl_info *rxcp)
1250 {
1251         rxcp->pkt_size =
1252                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1253         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1254         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1255         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1256         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1257         rxcp->ip_csum =
1258                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1259         rxcp->l4_csum =
1260                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1261         rxcp->ipv6 =
1262                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1263         rxcp->rxq_idx =
1264                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1265         rxcp->num_rcvd =
1266                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1267         rxcp->pkt_type =
1268                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1269         rxcp->rss_hash =
1270                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1271         if (rxcp->vlanf) {
1272                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1273                                           compl);
1274                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1275                                                compl);
1276         }
1277         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1278 }
1279
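/*
 * Pop the next RX completion, if any. The valid bit is tested first and
 * rmb() orders that test before the rest of the entry is read; once
 * parsed, the valid dword is cleared so a recycled CQE is not mistaken
 * for a new one.
 */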
1280 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1281 {
1282         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1283         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1284         struct be_adapter *adapter = rxo->adapter;
1285
1286         /* For checking the valid bit it is Ok to use either definition as the
1287          * valid bit is at the same position in both v0 and v1 Rx compl */
1288         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1289                 return NULL;
1290
1291         rmb();
1292         be_dws_le_to_cpu(compl, sizeof(*compl));
1293
1294         if (adapter->be3_native)
1295                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1296         else
1297                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1298
1299         if (rxcp->vlanf) {
1300                 /* vlanf could be wrongly set in some cards.
1301                  * ignore if vtm is not set */
1302                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1303                         rxcp->vlanf = 0;
1304
1305                 if (!lancer_chip(adapter))
1306                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1307
1308                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1309                     !adapter->vlan_tag[rxcp->vlan_tag])
1310                         rxcp->vlanf = 0;
1311         }
1312
1313         /* As the compl has been parsed, reset it; we won't touch it again */
1314         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1315
1316         queue_tail_inc(&rxo->cq);
1317         return rxcp;
1318 }
1319
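/*
 * For order > 0, __GFP_COMP makes the allocation a compound page so
 * that the get_page()/put_page() references taken per RX fragment
 * operate correctly on the whole high-order block.
 */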
1320 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1321 {
1322         u32 order = get_order(size);
1323
1324         if (order > 0)
1325                 gfp |= __GFP_COMP;
1326         return alloc_pages(gfp, order);
1327 }
1328
1329 /*
1330  * Allocate a page, split it to fragments of size rx_frag_size and post as
1331  * receive buffers to BE
1332  */
1333 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1334 {
1335         struct be_adapter *adapter = rxo->adapter;
1336         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1337         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1338         struct be_queue_info *rxq = &rxo->q;
1339         struct page *pagep = NULL;
1340         struct be_eth_rx_d *rxd;
1341         u64 page_dmaaddr = 0, frag_dmaaddr;
1342         u32 posted, page_offset = 0;
1343
1344         page_info = &rxo->page_info_tbl[rxq->head];
1345         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1346                 if (!pagep) {
1347                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1348                         if (unlikely(!pagep)) {
1349                                 rx_stats(rxo)->rx_post_fail++;
1350                                 break;
1351                         }
1352                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1353                                                     0, adapter->big_page_size,
1354                                                     DMA_FROM_DEVICE);
1355                         page_info->page_offset = 0;
1356                 } else {
1357                         get_page(pagep);
1358                         page_info->page_offset = page_offset + rx_frag_size;
1359                 }
1360                 page_offset = page_info->page_offset;
1361                 page_info->page = pagep;
1362                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1363                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1364
1365                 rxd = queue_head_node(rxq);
1366                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1367                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1368
1369                 /* Any space left in the current big page for another frag? */
1370                 if ((page_offset + rx_frag_size + rx_frag_size) >
1371                                         adapter->big_page_size) {
1372                         pagep = NULL;
1373                         page_info->last_page_user = true;
1374                 }
1375
1376                 prev_page_info = page_info;
1377                 queue_head_inc(rxq);
1378                 page_info = &page_info_tbl[rxq->head];
1379         }
1380         if (pagep)
1381                 prev_page_info->last_page_user = true;
1382
1383         if (posted) {
1384                 atomic_add(posted, &rxq->used);
1385                 be_rxq_notify(adapter, rxq->id, posted);
1386         } else if (atomic_read(&rxq->used) == 0) {
1387                 /* Let be_worker replenish when memory is available */
1388                 rxo->rx_post_starved = true;
1389         }
1390 }
1391
1392 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1393 {
1394         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1395
1396         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1397                 return NULL;
1398
1399         rmb();
1400         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1401
1402         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1403
1404         queue_tail_inc(tx_cq);
1405         return txcp;
1406 }
1407
1408 static u16 be_tx_compl_process(struct be_adapter *adapter,
1409                 struct be_tx_obj *txo, u16 last_index)
1410 {
1411         struct be_queue_info *txq = &txo->q;
1412         struct be_eth_wrb *wrb;
1413         struct sk_buff **sent_skbs = txo->sent_skb_list;
1414         struct sk_buff *sent_skb;
1415         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1416         bool unmap_skb_hdr = true;
1417
1418         sent_skb = sent_skbs[txq->tail];
1419         BUG_ON(!sent_skb);
1420         sent_skbs[txq->tail] = NULL;
1421
1422         /* skip header wrb */
1423         queue_tail_inc(txq);
1424
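             /* walk every wrb of this skb up to last_index, unmapping the
              * buffer each one describes
              */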
1425         do {
1426                 cur_index = txq->tail;
1427                 wrb = queue_tail_node(txq);
1428                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1429                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1430                 unmap_skb_hdr = false;
1431
1432                 num_wrbs++;
1433                 queue_tail_inc(txq);
1434         } while (cur_index != last_index);
1435
1436         kfree_skb(sent_skb);
1437         return num_wrbs;
1438 }
1439
1440 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1441 {
1442         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1443
1444         if (!eqe->evt)
1445                 return NULL;
1446
1447         rmb();
1448         eqe->evt = le32_to_cpu(eqe->evt);
1449         queue_tail_inc(&eq_obj->q);
1450         return eqe;
1451 }
1452
1453 static int event_handle(struct be_adapter *adapter,
1454                         struct be_eq_obj *eq_obj,
1455                         bool rearm)
1456 {
1457         struct be_eq_entry *eqe;
1458         u16 num = 0;
1459
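             /* consume and count all pending entries; the actual work is
              * deferred to NAPI context
              */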
1460         while ((eqe = event_get(eq_obj)) != NULL) {
1461                 eqe->evt = 0;
1462                 num++;
1463         }
1464
1465         /* Deal with any spurious interrupts that come
1466          * without events
1467          */
1468         if (!num)
1469                 rearm = true;
1470
1471         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1472         if (num)
1473                 napi_schedule(&eq_obj->napi);
1474
1475         return num;
1476 }
1477
1478 /* Just read and notify events without processing them.
1479  * Used at the time of destroying event queues */
1480 static void be_eq_clean(struct be_adapter *adapter,
1481                         struct be_eq_obj *eq_obj)
1482 {
1483         struct be_eq_entry *eqe;
1484         u16 num = 0;
1485
1486         while ((eqe = event_get(eq_obj)) != NULL) {
1487                 eqe->evt = 0;
1488                 num++;
1489         }
1490
1491         if (num)
1492                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1493 }
1494
1495 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1496 {
1497         struct be_rx_page_info *page_info;
1498         struct be_queue_info *rxq = &rxo->q;
1499         struct be_queue_info *rx_cq = &rxo->cq;
1500         struct be_rx_compl_info *rxcp;
1501         u16 tail;
1502
1503         /* First cleanup pending rx completions */
1504         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1505                 be_rx_compl_discard(adapter, rxo, rxcp);
1506                 be_cq_notify(adapter, rx_cq->id, false, 1);
1507         }
1508
1509         /* Then free posted rx buffers that were not used */
1510         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1511         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1512                 page_info = get_rx_page_info(adapter, rxo, tail);
1513                 put_page(page_info->page);
1514                 memset(page_info, 0, sizeof(*page_info));
1515         }
1516         BUG_ON(atomic_read(&rxq->used));
1517         rxq->tail = rxq->head = 0;
1518 }
1519
1520 static void be_tx_compl_clean(struct be_adapter *adapter,
1521                                 struct be_tx_obj *txo)
1522 {
1523         struct be_queue_info *tx_cq = &txo->cq;
1524         struct be_queue_info *txq = &txo->q;
1525         struct be_eth_tx_compl *txcp;
1526         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1527         struct sk_buff **sent_skbs = txo->sent_skb_list;
1528         struct sk_buff *sent_skb;
1529         bool dummy_wrb;
1530
1531         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1532         do {
1533                 while ((txcp = be_tx_compl_get(tx_cq))) {
1534                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1535                                         wrb_index, txcp);
1536                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1537                         cmpl++;
1538                 }
1539                 if (cmpl) {
1540                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1541                         atomic_sub(num_wrbs, &txq->used);
1542                         cmpl = 0;
1543                         num_wrbs = 0;
1544                 }
1545
1546                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1547                         break;
1548
1549                 mdelay(1);
1550         } while (true);
1551
1552         if (atomic_read(&txq->used))
1553                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1554                         atomic_read(&txq->used));
1555
1556         /* free posted tx for which compls will never arrive */
1557         while (atomic_read(&txq->used)) {
1558                 sent_skb = sent_skbs[txq->tail];
1559                 end_idx = txq->tail;
1560                 index_adv(&end_idx,
1561                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1562                         txq->len);
1563                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1564                 atomic_sub(num_wrbs, &txq->used);
1565         }
1566 }
1567
1568 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1569 {
1570         struct be_queue_info *q;
1571
1572         q = &adapter->mcc_obj.q;
1573         if (q->created)
1574                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1575         be_queue_free(adapter, q);
1576
1577         q = &adapter->mcc_obj.cq;
1578         if (q->created)
1579                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1580         be_queue_free(adapter, q);
1581 }
1582
1583 /* Must be called only after TX qs are created as MCC shares TX EQ */
1584 static int be_mcc_queues_create(struct be_adapter *adapter)
1585 {
1586         struct be_queue_info *q, *cq;
1587
1588         /* Alloc MCC compl queue */
1589         cq = &adapter->mcc_obj.cq;
1590         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1591                         sizeof(struct be_mcc_compl)))
1592                 goto err;
1593
1594         /* Ask BE to create MCC compl queue; share TX's eq */
1595         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1596                 goto mcc_cq_free;
1597
1598         /* Alloc MCC queue */
1599         q = &adapter->mcc_obj.q;
1600         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1601                 goto mcc_cq_destroy;
1602
1603         /* Ask BE to create MCC queue */
1604         if (be_cmd_mccq_create(adapter, q, cq))
1605                 goto mcc_q_free;
1606
1607         return 0;
1608
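             /* error unwinding: undo the steps in reverse order of creation */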
1609 mcc_q_free:
1610         be_queue_free(adapter, q);
1611 mcc_cq_destroy:
1612         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1613 mcc_cq_free:
1614         be_queue_free(adapter, cq);
1615 err:
1616         return -1;
1617 }
1618
1619 static void be_tx_queues_destroy(struct be_adapter *adapter)
1620 {
1621         struct be_queue_info *q;
1622         struct be_tx_obj *txo;
1623         u8 i;
1624
1625         for_all_tx_queues(adapter, txo, i) {
1626                 q = &txo->q;
1627                 if (q->created)
1628                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1629                 be_queue_free(adapter, q);
1630
1631                 q = &txo->cq;
1632                 if (q->created)
1633                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1634                 be_queue_free(adapter, q);
1635         }
1636
1637         /* Clear any residual events */
1638         be_eq_clean(adapter, &adapter->tx_eq);
1639
1640         q = &adapter->tx_eq.q;
1641         if (q->created)
1642                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1643         be_queue_free(adapter, q);
1644 }
1645
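     /* Multiple TX queues are used only on a BE3 PF with SR-IOV and
      * multi-channel disabled; every other config gets a single TXQ
      */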
1646 static int be_num_txqs_want(struct be_adapter *adapter)
1647 {
1648         if ((num_vfs && adapter->sriov_enabled) ||
1649                 be_is_mc(adapter) ||
1650                 lancer_chip(adapter) || !be_physfn(adapter) ||
1651                 adapter->generation == BE_GEN2)
1652                 return 1;
1653         else
1654                 return MAX_TX_QS;
1655 }
1656
1657 /* One TX event queue is shared by all TX compl qs */
1658 static int be_tx_queues_create(struct be_adapter *adapter)
1659 {
1660         struct be_queue_info *eq, *q, *cq;
1661         struct be_tx_obj *txo;
1662         u8 i;
1663
1664         adapter->num_tx_qs = be_num_txqs_want(adapter);
1665         if (adapter->num_tx_qs != MAX_TX_QS)
1666                 netif_set_real_num_tx_queues(adapter->netdev,
1667                         adapter->num_tx_qs);
1668
1669         adapter->tx_eq.max_eqd = 0;
1670         adapter->tx_eq.min_eqd = 0;
1671         adapter->tx_eq.cur_eqd = 96;
1672         adapter->tx_eq.enable_aic = false;
1673
1674         eq = &adapter->tx_eq.q;
1675         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1676                 sizeof(struct be_eq_entry)))
1677                 return -1;
1678
1679         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1680                 goto err;
1681         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1682
1683         for_all_tx_queues(adapter, txo, i) {
1684                 cq = &txo->cq;
1685                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1686                         sizeof(struct be_eth_tx_compl)))
1687                         goto err;
1688
1689                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1690                         goto err;
1691
1692                 q = &txo->q;
1693                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1694                         sizeof(struct be_eth_wrb)))
1695                         goto err;
1696
1697                 if (be_cmd_txq_create(adapter, q, cq))
1698                         goto err;
1699         }
1700         return 0;
1701
1702 err:
1703         be_tx_queues_destroy(adapter);
1704         return -1;
1705 }
1706
1707 static void be_rx_queues_destroy(struct be_adapter *adapter)
1708 {
1709         struct be_queue_info *q;
1710         struct be_rx_obj *rxo;
1711         int i;
1712
1713         for_all_rx_queues(adapter, rxo, i) {
1714                 be_queue_free(adapter, &rxo->q);
1715
1716                 q = &rxo->cq;
1717                 if (q->created)
1718                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1719                 be_queue_free(adapter, q);
1720
1721                 q = &rxo->rx_eq.q;
1722                 if (q->created)
1723                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1724                 be_queue_free(adapter, q);
1725         }
1726 }
1727
1728 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1729 {
1730         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1731                 !adapter->sriov_enabled && be_physfn(adapter) &&
1732                 !be_is_mc(adapter)) {
1733                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1734         } else {
1735                 dev_warn(&adapter->pdev->dev,
1736                         "No support for multiple RX queues\n");
1737                 return 1;
1738         }
1739 }
1740
1741 static int be_rx_queues_create(struct be_adapter *adapter)
1742 {
1743         struct be_queue_info *eq, *q, *cq;
1744         struct be_rx_obj *rxo;
1745         int rc, i;
1746
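             /* one MSI-X vector is dedicated to the TX/MCC EQ, so at most
              * num_msix_vec - 1 RX queues are usable; without MSI-X only one
              */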
1747         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1748                                 msix_enabled(adapter) ?
1749                                         adapter->num_msix_vec - 1 : 1);
1750         if (adapter->num_rx_qs != MAX_RX_QS)
1751                 dev_warn(&adapter->pdev->dev,
1752                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1753
1754         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1755         for_all_rx_queues(adapter, rxo, i) {
1756                 rxo->adapter = adapter;
1757                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1758                 rxo->rx_eq.enable_aic = true;
1759
1760                 /* EQ */
1761                 eq = &rxo->rx_eq.q;
1762                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1763                                         sizeof(struct be_eq_entry));
1764                 if (rc)
1765                         goto err;
1766
1767                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1768                 if (rc)
1769                         goto err;
1770
1771                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1772
1773                 /* CQ */
1774                 cq = &rxo->cq;
1775                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1776                                 sizeof(struct be_eth_rx_compl));
1777                 if (rc)
1778                         goto err;
1779
1780                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1781                 if (rc)
1782                         goto err;
1783
1784                 /* Rx Q - will be created in be_open() */
1785                 q = &rxo->q;
1786                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1787                                 sizeof(struct be_eth_rx_d));
1788                 if (rc)
1789                         goto err;
1790
1791         }
1792
1793         return 0;
1794 err:
1795         be_rx_queues_destroy(adapter);
1796         return -1;
1797 }
1798
1799 static bool event_peek(struct be_eq_obj *eq_obj)
1800 {
1801         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1802         if (!eqe->evt)
1803                 return false;
1804         else
1805                 return true;
1806 }
1807
1808 static irqreturn_t be_intx(int irq, void *dev)
1809 {
1810         struct be_adapter *adapter = dev;
1811         struct be_rx_obj *rxo;
1812         int isr, i, tx = 0, rx = 0;
1813
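             /* for Lancer, peek each EQ directly; on BE chips the CEV ISR
              * indicates which EQs have fired
              */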
1814         if (lancer_chip(adapter)) {
1815                 if (event_peek(&adapter->tx_eq))
1816                         tx = event_handle(adapter, &adapter->tx_eq, false);
1817                 for_all_rx_queues(adapter, rxo, i) {
1818                         if (event_peek(&rxo->rx_eq))
1819                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1820                 }
1821
1822                 if (!(tx || rx))
1823                         return IRQ_NONE;
1824
1825         } else {
1826                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1827                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1828                 if (!isr)
1829                         return IRQ_NONE;
1830
1831                 if ((1 << adapter->tx_eq.eq_idx & isr))
1832                         event_handle(adapter, &adapter->tx_eq, false);
1833
1834                 for_all_rx_queues(adapter, rxo, i) {
1835                         if ((1 << rxo->rx_eq.eq_idx & isr))
1836                                 event_handle(adapter, &rxo->rx_eq, true);
1837                 }
1838         }
1839
1840         return IRQ_HANDLED;
1841 }
1842
1843 static irqreturn_t be_msix_rx(int irq, void *dev)
1844 {
1845         struct be_rx_obj *rxo = dev;
1846         struct be_adapter *adapter = rxo->adapter;
1847
1848         event_handle(adapter, &rxo->rx_eq, true);
1849
1850         return IRQ_HANDLED;
1851 }
1852
1853 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1854 {
1855         struct be_adapter *adapter = dev;
1856
1857         event_handle(adapter, &adapter->tx_eq, false);
1858
1859         return IRQ_HANDLED;
1860 }
1861
1862 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1863 {
1864         return rxcp->tcpf && !rxcp->err;
1865 }
1866
1867 static int be_poll_rx(struct napi_struct *napi, int budget)
1868 {
1869         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1870         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1871         struct be_adapter *adapter = rxo->adapter;
1872         struct be_queue_info *rx_cq = &rxo->cq;
1873         struct be_rx_compl_info *rxcp;
1874         u32 work_done;
1875
1876         rx_stats(rxo)->rx_polls++;
1877         for (work_done = 0; work_done < budget; work_done++) {
1878                 rxcp = be_rx_compl_get(rxo);
1879                 if (!rxcp)
1880                         break;
1881
1882                 /* Is it a flush compl that has no data */
1883                 if (unlikely(rxcp->num_rcvd == 0))
1884                         goto loop_continue;
1885
1886                 /* Discard compls with partial DMA (seen on Lancer B0) */
1887                 if (unlikely(!rxcp->pkt_size)) {
1888                         be_rx_compl_discard(adapter, rxo, rxcp);
1889                         goto loop_continue;
1890                 }
1891
1892                 /* On BE, drop pkts that arrive due to imperfect filtering in
1893                  * promiscuous mode on some SKUs
1894                  */
1895                 if (unlikely(rxcp->port != adapter->port_num &&
1896                                 !lancer_chip(adapter))) {
1897                         be_rx_compl_discard(adapter, rxo, rxcp);
1898                         goto loop_continue;
1899                 }
1900
1901                 if (do_gro(rxcp))
1902                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1903                 else
1904                         be_rx_compl_process(adapter, rxo, rxcp);
1905 loop_continue:
1906                 be_rx_stats_update(rxo, rxcp);
1907         }
1908
1909         be_cq_notify(adapter, rx_cq->id, false, work_done);
1910
1911         /* Refill the queue */
1912         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1913                 be_post_rx_frags(rxo, GFP_ATOMIC);
1914
1915         /* All consumed */
1916         if (work_done < budget) {
1917                 napi_complete(napi);
1918                 /* Arm CQ */
1919                 be_cq_notify(adapter, rx_cq->id, true, 0);
1920         }
1921         return work_done;
1922 }
1923
1924 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1925  * For TX/MCC we don't honour budget; consume everything
1926  */
1927 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1928 {
1929         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1930         struct be_adapter *adapter =
1931                 container_of(tx_eq, struct be_adapter, tx_eq);
1932         struct be_tx_obj *txo;
1933         struct be_eth_tx_compl *txcp;
1934         int tx_compl, mcc_compl, status = 0;
1935         u8 i;
1936         u16 num_wrbs;
1937
1938         for_all_tx_queues(adapter, txo, i) {
1939                 tx_compl = 0;
1940                 num_wrbs = 0;
1941                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1942                         num_wrbs += be_tx_compl_process(adapter, txo,
1943                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1944                                         wrb_index, txcp));
1945                         tx_compl++;
1946                 }
1947                 if (tx_compl) {
1948                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1949
1950                         atomic_sub(num_wrbs, &txo->q.used);
1951
1952                         /* As Tx wrbs have been freed up, wake up netdev queue
1953                          * if it was stopped due to lack of tx wrbs.  */
1954                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1955                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1956                                 netif_wake_subqueue(adapter->netdev, i);
1957                         }
1958
1959                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1960                         tx_stats(txo)->tx_compl += tx_compl;
1961                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1962                 }
1963         }
1964
1965         mcc_compl = be_process_mcc(adapter, &status);
1966
1967         if (mcc_compl) {
1968                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1969                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1970         }
1971
1972         napi_complete(napi);
1973
1974         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1975         adapter->drv_stats.tx_events++;
1976         return 1;
1977 }
1978
1979 void be_detect_dump_ue(struct be_adapter *adapter)
1980 {
1981         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1982         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1983         u32 i;
1984
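             /* Lancer reports errors via the SLIPORT status/error registers;
              * BE chips report UEs in PCI config space, filtered through the
              * UE mask registers
              */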
1985         if (lancer_chip(adapter)) {
1986                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1987                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1988                         sliport_err1 = ioread32(adapter->db +
1989                                         SLIPORT_ERROR1_OFFSET);
1990                         sliport_err2 = ioread32(adapter->db +
1991                                         SLIPORT_ERROR2_OFFSET);
1992                 }
1993         } else {
1994                 pci_read_config_dword(adapter->pdev,
1995                                 PCICFG_UE_STATUS_LOW, &ue_lo);
1996                 pci_read_config_dword(adapter->pdev,
1997                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
1998                 pci_read_config_dword(adapter->pdev,
1999                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2000                 pci_read_config_dword(adapter->pdev,
2001                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2002
2003                 ue_lo &= ~ue_lo_mask;
2004                 ue_hi &= ~ue_hi_mask;
2005         }
2006
2007         if (ue_lo || ue_hi ||
2008                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2009                 adapter->ue_detected = true;
2010                 adapter->eeh_err = true;
2011                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2012         }
2013
2014         if (ue_lo) {
2015                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2016                         if (ue_lo & 1)
2017                                 dev_err(&adapter->pdev->dev,
2018                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2019                 }
2020         }
2021         if (ue_hi) {
2022                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2023                         if (ue_hi & 1)
2024                                 dev_err(&adapter->pdev->dev,
2025                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2026                 }
2027         }
2028
2029         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2030                 dev_err(&adapter->pdev->dev,
2031                         "sliport status 0x%x\n", sliport_status);
2032                 dev_err(&adapter->pdev->dev,
2033                         "sliport error1 0x%x\n", sliport_err1);
2034                 dev_err(&adapter->pdev->dev,
2035                         "sliport error2 0x%x\n", sliport_err2);
2036         }
2037 }
2038
2039 static void be_worker(struct work_struct *work)
2040 {
2041         struct be_adapter *adapter =
2042                 container_of(work, struct be_adapter, work.work);
2043         struct be_rx_obj *rxo;
2044         int i;
2045
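             /* periodic (1s) housekeeping: check for unrecoverable errors,
              * refresh stats, adapt EQ delays and replenish any RX queues
              * that ran out of buffers
              */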
2046         if (!adapter->ue_detected)
2047                 be_detect_dump_ue(adapter);
2048
2049         /* when interrupts are not yet enabled, just reap any pending
2050          * mcc completions */
2051         if (!netif_running(adapter->netdev)) {
2052                 int mcc_compl, status = 0;
2053
2054                 mcc_compl = be_process_mcc(adapter, &status);
2055
2056                 if (mcc_compl) {
2057                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2058                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2059                 }
2060
2061                 goto reschedule;
2062         }
2063
2064         if (!adapter->stats_cmd_sent) {
2065                 if (lancer_chip(adapter))
2066                         lancer_cmd_get_pport_stats(adapter,
2067                                                 &adapter->stats_cmd);
2068                 else
2069                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2070         }
2071
2072         for_all_rx_queues(adapter, rxo, i) {
2073                 be_rx_eqd_update(adapter, rxo);
2074
2075                 if (rxo->rx_post_starved) {
2076                         rxo->rx_post_starved = false;
2077                         be_post_rx_frags(rxo, GFP_KERNEL);
2078                 }
2079         }
2080
2081 reschedule:
2082         adapter->work_counter++;
2083         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2084 }
2085
2086 static void be_msix_disable(struct be_adapter *adapter)
2087 {
2088         if (msix_enabled(adapter)) {
2089                 pci_disable_msix(adapter->pdev);
2090                 adapter->num_msix_vec = 0;
2091         }
2092 }
2093
2094 static void be_msix_enable(struct be_adapter *adapter)
2095 {
2096 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2097         int i, status, num_vec;
2098
2099         num_vec = be_num_rxqs_want(adapter) + 1;
2100
2101         for (i = 0; i < num_vec; i++)
2102                 adapter->msix_entries[i].entry = i;
2103
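             /* pci_enable_msix() returns the number of vectors available
              * when the request cannot be met; retry with that count if it
              * still meets the minimum
              */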
2104         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2105         if (status == 0) {
2106                 goto done;
2107         } else if (status >= BE_MIN_MSIX_VECTORS) {
2108                 num_vec = status;
2109                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2110                                 num_vec) == 0)
2111                         goto done;
2112         }
2113         return;
2114 done:
2115         adapter->num_msix_vec = num_vec;
2116         return;
2117 }
2118
2119 static int be_sriov_enable(struct be_adapter *adapter)
2120 {
2121         be_check_sriov_fn_type(adapter);
2122 #ifdef CONFIG_PCI_IOV
2123         if (be_physfn(adapter) && num_vfs) {
2124                 int status, pos;
2125                 u16 nvfs;
2126
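                     /* cap the requested VF count at the TotalVFs value
                      * advertised in the device's SR-IOV capability
                      */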
2127                 pos = pci_find_ext_capability(adapter->pdev,
2128                                                 PCI_EXT_CAP_ID_SRIOV);
2129                 pci_read_config_word(adapter->pdev,
2130                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2131
2132                 if (num_vfs > nvfs) {
2133                         dev_info(&adapter->pdev->dev,
2134                                         "Device supports %d VFs and not %d\n",
2135                                         nvfs, num_vfs);
2136                         num_vfs = nvfs;
2137                 }
2138
2139                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2140                 adapter->sriov_enabled = !status;
2141
2142                 if (adapter->sriov_enabled) {
2143                         adapter->vf_cfg = kcalloc(num_vfs,
2144                                                 sizeof(struct be_vf_cfg),
2145                                                 GFP_KERNEL);
2146                         if (!adapter->vf_cfg)
2147                                 return -ENOMEM;
2148                 }
2149         }
2150 #endif
2151         return 0;
2152 }
2153
2154 static void be_sriov_disable(struct be_adapter *adapter)
2155 {
2156 #ifdef CONFIG_PCI_IOV
2157         if (adapter->sriov_enabled) {
2158                 pci_disable_sriov(adapter->pdev);
2159                 kfree(adapter->vf_cfg);
2160                 adapter->sriov_enabled = false;
2161         }
2162 #endif
2163 }
2164
2165 static inline int be_msix_vec_get(struct be_adapter *adapter,
2166                                         struct be_eq_obj *eq_obj)
2167 {
2168         return adapter->msix_entries[eq_obj->eq_idx].vector;
2169 }
2170
2171 static int be_request_irq(struct be_adapter *adapter,
2172                 struct be_eq_obj *eq_obj,
2173                 void *handler, char *desc, void *context)
2174 {
2175         struct net_device *netdev = adapter->netdev;
2176         int vec;
2177
2178         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2179         vec = be_msix_vec_get(adapter, eq_obj);
2180         return request_irq(vec, handler, 0, eq_obj->desc, context);
2181 }
2182
2183 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2184                         void *context)
2185 {
2186         int vec = be_msix_vec_get(adapter, eq_obj);
2187         free_irq(vec, context);
2188 }
2189
2190 static int be_msix_register(struct be_adapter *adapter)
2191 {
2192         struct be_rx_obj *rxo;
2193         int status, i;
2194         char qname[10];
2195
2196         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2197                                 adapter);
2198         if (status)
2199                 goto err;
2200
2201         for_all_rx_queues(adapter, rxo, i) {
2202                 sprintf(qname, "rxq%d", i);
2203                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2204                                 qname, rxo);
2205                 if (status)
2206                         goto err_msix;
2207         }
2208
2209         return 0;
2210
2211 err_msix:
2212         be_free_irq(adapter, &adapter->tx_eq, adapter);
2213
2214         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2215                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2216
2217 err:
2218         dev_warn(&adapter->pdev->dev,
2219                 "MSIX Request IRQ failed - err %d\n", status);
2220         be_msix_disable(adapter);
2221         return status;
2222 }
2223
2224 static int be_irq_register(struct be_adapter *adapter)
2225 {
2226         struct net_device *netdev = adapter->netdev;
2227         int status;
2228
2229         if (msix_enabled(adapter)) {
2230                 status = be_msix_register(adapter);
2231                 if (status == 0)
2232                         goto done;
2233                 /* INTx is not supported for VF */
2234                 if (!be_physfn(adapter))
2235                         return status;
2236         }
2237
2238         /* INTx */
2239         netdev->irq = adapter->pdev->irq;
2240         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2241                         adapter);
2242         if (status) {
2243                 dev_err(&adapter->pdev->dev,
2244                         "INTx request IRQ failed - err %d\n", status);
2245                 return status;
2246         }
2247 done:
2248         adapter->isr_registered = true;
2249         return 0;
2250 }
2251
2252 static void be_irq_unregister(struct be_adapter *adapter)
2253 {
2254         struct net_device *netdev = adapter->netdev;
2255         struct be_rx_obj *rxo;
2256         int i;
2257
2258         if (!adapter->isr_registered)
2259                 return;
2260
2261         /* INTx */
2262         if (!msix_enabled(adapter)) {
2263                 free_irq(netdev->irq, adapter);
2264                 goto done;
2265         }
2266
2267         /* MSIx */
2268         be_free_irq(adapter, &adapter->tx_eq, adapter);
2269
2270         for_all_rx_queues(adapter, rxo, i)
2271                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2272
2273 done:
2274         adapter->isr_registered = false;
2275 }
2276
2277 static void be_rx_queues_clear(struct be_adapter *adapter)
2278 {
2279         struct be_queue_info *q;
2280         struct be_rx_obj *rxo;
2281         int i;
2282
2283         for_all_rx_queues(adapter, rxo, i) {
2284                 q = &rxo->q;
2285                 if (q->created) {
2286                         be_cmd_rxq_destroy(adapter, q);
2287                         /* After the rxq is invalidated, wait for a grace time
2288                          * of 1ms for all dma to end and the flush compl to
2289                          * arrive
2290                          */
2291                         mdelay(1);
2292                         be_rx_q_clean(adapter, rxo);
2293                 }
2294
2295                 /* Clear any residual events */
2296                 q = &rxo->rx_eq.q;
2297                 if (q->created)
2298                         be_eq_clean(adapter, &rxo->rx_eq);
2299         }
2300 }
2301
2302 static int be_close(struct net_device *netdev)
2303 {
2304         struct be_adapter *adapter = netdev_priv(netdev);
2305         struct be_rx_obj *rxo;
2306         struct be_tx_obj *txo;
2307         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2308         int vec, i;
2309
2310         be_async_mcc_disable(adapter);
2311
2312         if (!lancer_chip(adapter))
2313                 be_intr_set(adapter, false);
2314
2315         for_all_rx_queues(adapter, rxo, i)
2316                 napi_disable(&rxo->rx_eq.napi);
2317
2318         napi_disable(&tx_eq->napi);
2319
2320         if (lancer_chip(adapter)) {
2321                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2322                 for_all_rx_queues(adapter, rxo, i)
2323                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2324                 for_all_tx_queues(adapter, txo, i)
2325                         be_cq_notify(adapter, txo->cq.id, false, 0);
2326         }
2327
2328         if (msix_enabled(adapter)) {
2329                 vec = be_msix_vec_get(adapter, tx_eq);
2330                 synchronize_irq(vec);
2331
2332                 for_all_rx_queues(adapter, rxo, i) {
2333                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2334                         synchronize_irq(vec);
2335                 }
2336         } else {
2337                 synchronize_irq(netdev->irq);
2338         }
2339         be_irq_unregister(adapter);
2340
2341         /* Wait for all pending tx completions to arrive so that
2342          * all tx skbs are freed.
2343          */
2344         for_all_tx_queues(adapter, txo, i)
2345                 be_tx_compl_clean(adapter, txo);
2346
2347         be_rx_queues_clear(adapter);
2348         return 0;
2349 }
2350
2351 static int be_rx_queues_setup(struct be_adapter *adapter)
2352 {
2353         struct be_rx_obj *rxo;
2354         int rc, i;
2355         u8 rsstable[MAX_RSS_QS];
2356
2357         for_all_rx_queues(adapter, rxo, i) {
2358                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2359                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2360                         adapter->if_handle,
2361                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2362                 if (rc)
2363                         return rc;
2364         }
2365
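             /* queue 0 is the default non-RSS queue; the rss_ids of the
              * remaining queues are programmed into the RSS table
              */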
2366         if (be_multi_rxq(adapter)) {
2367                 for_all_rss_queues(adapter, rxo, i)
2368                         rsstable[i] = rxo->rss_id;
2369
2370                 rc = be_cmd_rss_config(adapter, rsstable,
2371                         adapter->num_rx_qs - 1);
2372                 if (rc)
2373                         return rc;
2374         }
2375
2376         /* First time posting */
2377         for_all_rx_queues(adapter, rxo, i) {
2378                 be_post_rx_frags(rxo, GFP_KERNEL);
2379                 napi_enable(&rxo->rx_eq.napi);
2380         }
2381         return 0;
2382 }
2383
2384 static int be_open(struct net_device *netdev)
2385 {
2386         struct be_adapter *adapter = netdev_priv(netdev);
2387         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2388         struct be_rx_obj *rxo;
2389         int status, i;
2390
2391         status = be_rx_queues_setup(adapter);
2392         if (status)
2393                 goto err;
2394
2395         napi_enable(&tx_eq->napi);
2396
2397         be_irq_register(adapter);
2398
2399         if (!lancer_chip(adapter))
2400                 be_intr_set(adapter, true);
2401
2402         /* The evt queues are created in unarmed state; arm them */
2403         for_all_rx_queues(adapter, rxo, i) {
2404                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2405                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2406         }
2407         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2408
2409         /* Now that interrupts are on, we can process async mcc */
2410         be_async_mcc_enable(adapter);
2411
2412         return 0;
2413 err:
2414         be_close(adapter->netdev);
2415         return -EIO;
2416 }
2417
2418 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2419 {
2420         struct be_dma_mem cmd;
2421         int status = 0;
2422         u8 mac[ETH_ALEN];
2423
2424         memset(mac, 0, ETH_ALEN);
2425
2426         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2427         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2428                                     GFP_KERNEL);
2429         if (cmd.va == NULL)
2430                 return -1;
2431         memset(cmd.va, 0, cmd.size);
2432
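             /* the enable path writes the PCI PM control register and
              * programs the magic-packet filter with the netdev MAC; the
              * disable path programs a zeroed MAC and disables wake
              */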
2433         if (enable) {
2434                 status = pci_write_config_dword(adapter->pdev,
2435                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2436                 if (status) {
2437                         dev_err(&adapter->pdev->dev,
2438                                 "Could not enable Wake-on-LAN\n");
2439                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2440                                           cmd.dma);
2441                         return status;
2442                 }
2443                 status = be_cmd_enable_magic_wol(adapter,
2444                                 adapter->netdev->dev_addr, &cmd);
2445                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2446                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2447         } else {
2448                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2449                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2450                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2451         }
2452
2453         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2454         return status;
2455 }
2456
2457 /*
2458  * Generate a seed MAC address from the PF MAC Address using jhash.
2459  * MAC Addresses for VFs are assigned incrementally starting from the seed.
2460  * These addresses are programmed in the ASIC by the PF and the VF driver
2461  * queries for the MAC address during its probe.
2462  */
2463 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2464 {
2465         u32 vf;
2466         int status = 0;
2467         u8 mac[ETH_ALEN];
2468
2469         be_vf_eth_addr_generate(adapter, mac);
2470
2471         for (vf = 0; vf < num_vfs; vf++) {
2472                 status = be_cmd_pmac_add(adapter, mac,
2473                                         adapter->vf_cfg[vf].vf_if_handle,
2474                                         &adapter->vf_cfg[vf].vf_pmac_id,
2475                                         vf + 1);
2476                 if (status)
2477                         dev_err(&adapter->pdev->dev,
2478                                 "Mac address add failed for VF %d\n", vf);
2479                 else
2480                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2481
2482                 mac[5] += 1;
2483         }
2484         return status;
2485 }
2486
2487 static void be_vf_clear(struct be_adapter *adapter)
2488 {
2489         u32 vf;
2490
2491         for (vf = 0; vf < num_vfs; vf++) {
2492                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2493                         be_cmd_pmac_del(adapter,
2494                                         adapter->vf_cfg[vf].vf_if_handle,
2495                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2496         }
2497
2498         for (vf = 0; vf < num_vfs; vf++)
2499                 if (adapter->vf_cfg[vf].vf_if_handle)
2500                         be_cmd_if_destroy(adapter,
2501                                 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
2502 }
2503
2504 static int be_clear(struct be_adapter *adapter)
2505 {
2506         if (be_physfn(adapter) && adapter->sriov_enabled)
2507                 be_vf_clear(adapter);
2508
2509         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2510
2511         be_mcc_queues_destroy(adapter);
2512         be_rx_queues_destroy(adapter);
2513         be_tx_queues_destroy(adapter);
2514         adapter->eq_next_idx = 0;
2515
2516         adapter->be3_native = false;
2517         adapter->promiscuous = false;
2518
2519         /* tell fw we're done with firing cmds */
2520         be_cmd_fw_clean(adapter);
2521         return 0;
2522 }
2523
2524 static int be_vf_setup(struct be_adapter *adapter)
2525 {
2526         u32 cap_flags, en_flags, vf;
2527         u16 lnk_speed;
2528         int status;
2529
2530         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2531         for (vf = 0; vf < num_vfs; vf++) {
2532                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2533                                         &adapter->vf_cfg[vf].vf_if_handle,
2534                                         NULL, vf+1);
2535                 if (status)
2536                         goto err;
2537                 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2538         }
2539
2540         if (!lancer_chip(adapter)) {
2541                 status = be_vf_eth_addr_config(adapter);
2542                 if (status)
2543                         goto err;
2544         }
2545
2546         for (vf = 0; vf < num_vfs; vf++) {
2547                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2548                                 vf + 1);
2549                 if (status)
2550                         goto err;
2551                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2552         }
2553         return 0;
2554 err:
2555         return status;
2556 }
2557
2558 static int be_setup(struct be_adapter *adapter)
2559 {
2560         struct net_device *netdev = adapter->netdev;
2561         u32 cap_flags, en_flags;
2562         u32 tx_fc, rx_fc;
2563         int status;
2564         u8 mac[ETH_ALEN];
2565
2566         /* Allow all priorities by default. A GRP5 evt may modify this */
2567         adapter->vlan_prio_bmap = 0xff;
2568         adapter->link_speed = -1;
2569
2570         be_cmd_req_native_mode(adapter);
2571
2572         status = be_tx_queues_create(adapter);
2573         if (status != 0)
2574                 goto err;
2575
2576         status = be_rx_queues_create(adapter);
2577         if (status != 0)
2578                 goto err;
2579
2580         status = be_mcc_queues_create(adapter);
2581         if (status != 0)
2582                 goto err;
2583
2584         memset(mac, 0, ETH_ALEN);
2585         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2586                         true /*permanent */, 0);
2587         if (status)
2588                 return status;
2589         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2590         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2591
2592         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2593                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2594         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2595                         BE_IF_FLAGS_PROMISCUOUS;
2596         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2597                 cap_flags |= BE_IF_FLAGS_RSS;
2598                 en_flags |= BE_IF_FLAGS_RSS;
2599         }
2600         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2601                         netdev->dev_addr, &adapter->if_handle,
2602                         &adapter->pmac_id, 0);
2603         if (status != 0)
2604                 goto err;
2605
2606         /* For BEx, the VF's permanent mac queried from the card is incorrect.
2607          * Query the mac configured by the PF using if_handle
2608          */
2609         if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2610                 status = be_cmd_mac_addr_query(adapter, mac,
2611                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2612                 if (!status) {
2613                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2614                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2615                 }
2616         }
2617
2618         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2619
2620         status = be_vid_config(adapter, false, 0);
2621         if (status)
2622                 goto err;
2623
2624         be_set_rx_mode(adapter->netdev);
2625
2626         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2627         if (status)
2628                 goto err;
2629         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2630                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2631                                         adapter->rx_fc);
2632                 if (status)
2633                         goto err;
2634         }
2635
2636         pcie_set_readrq(adapter->pdev, 4096);
2637
2638         if (be_physfn(adapter) && adapter->sriov_enabled) {
2639                 status = be_vf_setup(adapter);
2640                 if (status)
2641                         goto err;
2642         }
2643
2644         return 0;
2645 err:
2646         be_clear(adapter);
2647         return status;
2648 }
2649
2650 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2651 static bool be_flash_redboot(struct be_adapter *adapter,
2652                         const u8 *p, u32 img_start, int image_size,
2653                         int hdr_size)
2654 {
2655         u32 crc_offset;
2656         u8 flashed_crc[4];
2657         int status;
2658
2659         crc_offset = hdr_size + img_start + image_size - 4;
2660
2661         p += crc_offset;
2662
2663         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2664                         (image_size - 4));
2665         if (status) {
2666                 dev_err(&adapter->pdev->dev,
2667                 "could not get crc from flash, not flashing redboot\n");
2668                 return false;
2669         }
2670
2671         /* update redboot only if crc does not match */
2672         if (!memcmp(flashed_crc, p, 4))
2673                 return false;
2674         else
2675                 return true;
2676 }
2677
2678 static bool phy_flashing_required(struct be_adapter *adapter)
2679 {
2680         int status = 0;
2681         struct be_phy_info phy_info;
2682
2683         status = be_cmd_get_phy_info(adapter, &phy_info);
2684         if (status)
2685                 return false;
2686         if ((phy_info.phy_type == TN_8022) &&
2687                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2688                 return true;
2689         }
2690         return false;
2691 }
2692
2693 static int be_flash_data(struct be_adapter *adapter,
2694                         const struct firmware *fw,
2695                         struct be_dma_mem *flash_cmd, int num_of_images)
2697 {
2698         int status = 0, i, filehdr_size = 0;
2699         u32 total_bytes = 0, flash_op;
2700         int num_bytes;
2701         const u8 *p = fw->data;
2702         struct be_cmd_write_flashrom *req = flash_cmd->va;
2703         const struct flash_comp *pflashcomp;
2704         int num_comp;
2705
2706         static const struct flash_comp gen3_flash_types[10] = {
2707                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2708                         FLASH_IMAGE_MAX_SIZE_g3},
2709                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2710                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2711                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2712                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2713                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2714                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2715                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2716                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2717                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2718                         FLASH_IMAGE_MAX_SIZE_g3},
2719                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2720                         FLASH_IMAGE_MAX_SIZE_g3},
2721                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2722                         FLASH_IMAGE_MAX_SIZE_g3},
2723                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2724                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2725                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2726                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2727         };
2728         static const struct flash_comp gen2_flash_types[8] = {
2729                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2730                         FLASH_IMAGE_MAX_SIZE_g2},
2731                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2732                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2733                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2734                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2735                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2736                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2737                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2738                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2739                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2740                         FLASH_IMAGE_MAX_SIZE_g2},
2741                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2742                         FLASH_IMAGE_MAX_SIZE_g2},
2743                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2744                         FLASH_IMAGE_MAX_SIZE_g2}
2745         };
2746
2747         if (adapter->generation == BE_GEN3) {
2748                 pflashcomp = gen3_flash_types;
2749                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2750                 num_comp = ARRAY_SIZE(gen3_flash_types);
2751         } else {
2752                 pflashcomp = gen2_flash_types;
2753                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2754                 num_comp = ARRAY_SIZE(gen2_flash_types);
2755         }
2756         for (i = 0; i < num_comp; i++) {
2757                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2758                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2759                         continue;
2760                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2761                         if (!phy_flashing_required(adapter))
2762                                 continue;
2763                 }
2764                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2765                         (!be_flash_redboot(adapter, fw->data,
2766                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2767                         (num_of_images * sizeof(struct image_hdr)))))
2768                         continue;
2769                 p = fw->data;
2770                 p += filehdr_size + pflashcomp[i].offset
2771                         + (num_of_images * sizeof(struct image_hdr));
2772                 if (p + pflashcomp[i].size > fw->data + fw->size)
2773                         return -1;
2774                 total_bytes = pflashcomp[i].size;
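                     /* write in 32KB chunks: intermediate chunks use a SAVE
                      * op to stage data; the final chunk uses a FLASH op to
                      * commit the component
                      */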
2775                 while (total_bytes) {
2776                         if (total_bytes > 32*1024)
2777                                 num_bytes = 32*1024;
2778                         else
2779                                 num_bytes = total_bytes;
2780                         total_bytes -= num_bytes;
2781                         if (!total_bytes) {
2782                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2783                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2784                                 else
2785                                         flash_op = FLASHROM_OPER_FLASH;
2786                         } else {
2787                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2788                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2789                                 else
2790                                         flash_op = FLASHROM_OPER_SAVE;
2791                         }
2792                         memcpy(req->params.data_buf, p, num_bytes);
2793                         p += num_bytes;
2794                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2795                                 pflashcomp[i].optype, flash_op, num_bytes);
2796                         if (status) {
2797                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2798                                         (pflashcomp[i].optype ==
2799                                                 IMG_TYPE_PHY_FW))
2800                                         break;
2801                                 dev_err(&adapter->pdev->dev,
2802                                         "cmd to write to flash rom failed.\n");
2803                                 return -1;
2804                         }
2805                 }
2806         }
2807         return 0;
2808 }
2809
2810 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2811 {
2812         if (fhdr == NULL)
2813                 return 0;
2814         if (fhdr->build[0] == '3')
2815                 return BE_GEN3;
2816         else if (fhdr->build[0] == '2')
2817                 return BE_GEN2;
2818         else
2819                 return 0;
2820 }
2821
2822 static int lancer_fw_download(struct be_adapter *adapter,
2823                                 const struct firmware *fw)
2824 {
2825 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2826 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2827         struct be_dma_mem flash_cmd;
2828         const u8 *data_ptr = NULL;
2829         u8 *dest_image_ptr = NULL;
2830         size_t image_size = 0;
2831         u32 chunk_size = 0;
2832         u32 data_written = 0;
2833         u32 offset = 0;
2834         int status = 0;
2835         u8 add_status = 0;
2836
2837         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2838                 dev_err(&adapter->pdev->dev,
2839                         "FW image not properly aligned. "
2840                         "Length must be 4-byte aligned.\n");
2841                 status = -EINVAL;
2842                 goto lancer_fw_exit;
2843         }
2844
2845         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2846                                 + LANCER_FW_DOWNLOAD_CHUNK;
2847         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2848                                                 &flash_cmd.dma, GFP_KERNEL);
2849         if (!flash_cmd.va) {
2850                 status = -ENOMEM;
2851                 dev_err(&adapter->pdev->dev,
2852                         "Memory allocation failure while flashing\n");
2853                 goto lancer_fw_exit;
2854         }
2855
2856         dest_image_ptr = flash_cmd.va +
2857                                 sizeof(struct lancer_cmd_req_write_object);
2858         image_size = fw->size;
2859         data_ptr = fw->data;
2860
2861         while (image_size) {
2862                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2863
2864                 /* Copy the image chunk content. */
2865                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2866
2867                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2868                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2869                                 &data_written, &add_status);
2870
2871                 if (status)
2872                         break;
2873
2874                 offset += data_written;
2875                 data_ptr += data_written;
2876                 image_size -= data_written;
2877         }
2878
2879         if (!status) {
2880                 /* Commit the FW written */
2881                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2882                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2883                                         &data_written, &add_status);
2884         }
2885
2886         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2887                                 flash_cmd.dma);
2888         if (status) {
2889                 dev_err(&adapter->pdev->dev,
2890                         "Firmware load error. "
2891                         "Status code: 0x%x, additional status: 0x%x\n",
2892                         status, add_status);
2893                 goto lancer_fw_exit;
2894         }
2895
2896         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2897 lancer_fw_exit:
2898         return status;
2899 }
2900
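/* BE2/BE3 UFI download: the UFI generation encoded in the file header
 * must match the adapter generation. Gen3 UFIs carry per-component
 * image headers and only the image with id 1 is flashed; Gen2 UFIs are
 * flashed directly.
 */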
2901 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2902 {
2903         struct flash_file_hdr_g2 *fhdr;
2904         struct flash_file_hdr_g3 *fhdr3;
2905         struct image_hdr *img_hdr_ptr = NULL;
2906         struct be_dma_mem flash_cmd;
2907         const u8 *p;
2908         int status = 0, i = 0, num_imgs = 0;
2909
2910         p = fw->data;
2911         fhdr = (struct flash_file_hdr_g2 *) p;
2912
2913         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2914         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2915                                           &flash_cmd.dma, GFP_KERNEL);
2916         if (!flash_cmd.va) {
2917                 status = -ENOMEM;
2918                 dev_err(&adapter->pdev->dev,
2919                         "Memory allocation failure while flashing\n");
2920                 goto be_fw_exit;
2921         }
2922
2923         if ((adapter->generation == BE_GEN3) &&
2924                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2925                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2926                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2927                 for (i = 0; i < num_imgs; i++) {
2928                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2929                                         (sizeof(struct flash_file_hdr_g3) +
2930                                          i * sizeof(struct image_hdr)));
2931                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2932                                 status = be_flash_data(adapter, fw, &flash_cmd,
2933                                                         num_imgs);
2934                 }
2935         } else if ((adapter->generation == BE_GEN2) &&
2936                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2937                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2938         } else {
2939                 dev_err(&adapter->pdev->dev,
2940                         "UFI and Interface are not compatible for flashing\n");
2941                 status = -EINVAL;
2942         }
2943
2944         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2945                           flash_cmd.dma);
2946         if (status) {
2947                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2948                 goto be_fw_exit;
2949         }
2950
2951         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2952
2953 be_fw_exit:
2954         return status;
2955 }
2956
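/* Entry point for firmware flashing, typically reached via ethtool's
 * flash_device hook. The interface must be up; Lancer chips take the
 * object-write path, all other chips the UFI flashrom path.
 */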
2957 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2958 {
2959         const struct firmware *fw;
2960         int status;
2961
2962         if (!netif_running(adapter->netdev)) {
2963                 dev_err(&adapter->pdev->dev,
2964                         "Firmware load not allowed (interface is down)\n");
2965                 return -ENETDOWN;
2966         }
2967
2968         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2969         if (status)
2970                 goto fw_exit;
2971
2972         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2973
2974         if (lancer_chip(adapter))
2975                 status = lancer_fw_download(adapter, fw);
2976         else
2977                 status = be_fw_download(adapter, fw);
2978
2979 fw_exit:
2980         release_firmware(fw);
2981         return status;
2982 }
2983
2984 static const struct net_device_ops be_netdev_ops = {
2985         .ndo_open               = be_open,
2986         .ndo_stop               = be_close,
2987         .ndo_start_xmit         = be_xmit,
2988         .ndo_set_rx_mode        = be_set_rx_mode,
2989         .ndo_set_mac_address    = be_mac_addr_set,
2990         .ndo_change_mtu         = be_change_mtu,
2991         .ndo_get_stats64        = be_get_stats64,
2992         .ndo_validate_addr      = eth_validate_addr,
2993         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2994         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2995         .ndo_set_vf_mac         = be_set_vf_mac,
2996         .ndo_set_vf_vlan        = be_set_vf_vlan,
2997         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2998         .ndo_get_vf_config      = be_get_vf_config
2999 };
3000
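/* Set up netdev features, ops and NAPI contexts. RX hashing is only
 * advertised when multiple RX queues are in use; each RX queue gets its
 * own NAPI instance, while TX and MCC events share a single one.
 */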
3001 static void be_netdev_init(struct net_device *netdev)
3002 {
3003         struct be_adapter *adapter = netdev_priv(netdev);
3004         struct be_rx_obj *rxo;
3005         int i;
3006
3007         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3008                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3009                 NETIF_F_HW_VLAN_TX;
3010         if (be_multi_rxq(adapter))
3011                 netdev->hw_features |= NETIF_F_RXHASH;
3012
3013         netdev->features |= netdev->hw_features |
3014                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3015
3016         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3017                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3018
3019         netdev->flags |= IFF_MULTICAST;
3020
3021         netif_set_gso_max_size(netdev, 65535);
3022
3023         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3024
3025         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3026
3027         for_all_rx_queues(adapter, rxo, i)
3028                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3029                                 BE_NAPI_WEIGHT);
3030
3031         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3032                 BE_NAPI_WEIGHT);
3033 }
3034
3035 static void be_unmap_pci_bars(struct be_adapter *adapter)
3036 {
3037         if (adapter->csr)
3038                 iounmap(adapter->csr);
3039         if (adapter->db)
3040                 iounmap(adapter->db);
3041 }
3042
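/* Map the PCI BARs used by the driver. Lancer exposes its doorbells via
 * BAR 0. On BE2/BE3 the CSR space (BAR 2) is mapped only for the PF;
 * the doorbell BAR is 4, except for Gen3 VFs which use BAR 0.
 */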
3043 static int be_map_pci_bars(struct be_adapter *adapter)
3044 {
3045         u8 __iomem *addr;
3046         int db_reg;
3047
3048         if (lancer_chip(adapter)) {
3049                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3050                         pci_resource_len(adapter->pdev, 0));
3051                 if (addr == NULL)
3052                         return -ENOMEM;
3053                 adapter->db = addr;
3054                 return 0;
3055         }
3056
3057         if (be_physfn(adapter)) {
3058                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3059                                 pci_resource_len(adapter->pdev, 2));
3060                 if (addr == NULL)
3061                         return -ENOMEM;
3062                 adapter->csr = addr;
3063         }
3064
3065         if (adapter->generation == BE_GEN2 || be_physfn(adapter))
3066                 db_reg = 4;
3067         else
3068                 db_reg = 0;
3073         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3074                                 pci_resource_len(adapter->pdev, db_reg));
3075         if (addr == NULL)
3076                 goto pci_map_err;
3077         adapter->db = addr;
3078
3079         return 0;
3080 pci_map_err:
3081         be_unmap_pci_bars(adapter);
3082         return -ENOMEM;
3083 }
3084
3086 static void be_ctrl_cleanup(struct be_adapter *adapter)
3087 {
3088         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3089
3090         be_unmap_pci_bars(adapter);
3091
3092         if (mem->va)
3093                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3094                                   mem->dma);
3095
3096         mem = &adapter->rx_filter;
3097         if (mem->va)
3098                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3099                                   mem->dma);
3100 }
3101
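/* Allocate control structures shared with the firmware: the mailbox is
 * carved out of a slightly oversized DMA allocation so it can be
 * 16-byte aligned, and a separate DMA buffer backs the RX filter cmd.
 */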
3102 static int be_ctrl_init(struct be_adapter *adapter)
3103 {
3104         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3105         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3106         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3107         int status;
3108
3109         status = be_map_pci_bars(adapter);
3110         if (status)
3111                 goto done;
3112
3113         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3114         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3115                                                 mbox_mem_alloc->size,
3116                                                 &mbox_mem_alloc->dma,
3117                                                 GFP_KERNEL);
3118         if (!mbox_mem_alloc->va) {
3119                 status = -ENOMEM;
3120                 goto unmap_pci_bars;
3121         }
3122         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3123         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3124         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3125         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3126
3127         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3128         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3129                                         &rx_filter->dma, GFP_KERNEL);
3130         if (rx_filter->va == NULL) {
3131                 status = -ENOMEM;
3132                 goto free_mbox;
3133         }
3134         memset(rx_filter->va, 0, rx_filter->size);
3135
3136         mutex_init(&adapter->mbox_lock);
3137         spin_lock_init(&adapter->mcc_lock);
3138         spin_lock_init(&adapter->mcc_cq_lock);
3139
3140         init_completion(&adapter->flash_compl);
3141         pci_save_state(adapter->pdev);
3142         return 0;
3143
3144 free_mbox:
3145         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3146                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3147
3148 unmap_pci_bars:
3149         be_unmap_pci_bars(adapter);
3150
3151 done:
3152         return status;
3153 }
3154
3155 static void be_stats_cleanup(struct be_adapter *adapter)
3156 {
3157         struct be_dma_mem *cmd = &adapter->stats_cmd;
3158
3159         if (cmd->va)
3160                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3161                                   cmd->va, cmd->dma);
3162 }
3163
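/* Size the stats DMA buffer for the format the controller reports:
 * v0 for Gen2, the pport format for Lancer, v1 for everything else.
 */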
3164 static int be_stats_init(struct be_adapter *adapter)
3165 {
3166         struct be_dma_mem *cmd = &adapter->stats_cmd;
3167
3168         if (adapter->generation == BE_GEN2)
3169                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3170         else if (lancer_chip(adapter))
3171                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3172         else
3173                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3176         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3177                                      GFP_KERNEL);
3178         if (cmd->va == NULL)
3179                 return -ENOMEM;
3180         memset(cmd->va, 0, cmd->size);
3181         return 0;
3182 }
3183
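/* Device teardown mirrors probe order: stop the worker, unregister the
 * netdev, then release queues, stats and control structures, and
 * disable SR-IOV and MSI-X before turning off the PCI device.
 */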
3184 static void __devexit be_remove(struct pci_dev *pdev)
3185 {
3186         struct be_adapter *adapter = pci_get_drvdata(pdev);
3187
3188         if (!adapter)
3189                 return;
3190
3191         cancel_delayed_work_sync(&adapter->work);
3192
3193         unregister_netdev(adapter->netdev);
3194
3195         be_clear(adapter);
3196
3197         be_stats_cleanup(adapter);
3198
3199         be_ctrl_cleanup(adapter);
3200
3201         be_sriov_disable(adapter);
3202
3203         be_msix_disable(adapter);
3204
3205         pci_set_drvdata(pdev, NULL);
3206         pci_release_regions(pdev);
3207         pci_disable_device(pdev);
3208
3209         free_netdev(adapter->netdev);
3210 }
3211
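/* Query the function mode and capabilities from firmware. In Flex10
 * mode the VLAN table is partitioned, so only a quarter of the usual
 * VLAN count is available to this function.
 */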
3212 static int be_get_config(struct be_adapter *adapter)
3213 {
3214         int status;
3215
3216         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3217                         &adapter->function_mode, &adapter->function_caps);
3218         if (status)
3219                 return status;
3220
3221         if (adapter->function_mode & FLEX10_MODE)
3222                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3223         else
3224                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3225
3226         status = be_cmd_get_cntl_attributes(adapter);
3227         if (status)
3228                 return status;
3229
3230         return 0;
3231 }
3232
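/* Determine the adapter generation from the PCI device id. For
 * OC_DEVICE_ID3/4 the SLI_INTF register is validated as well, and the
 * SLI family is recorded for later use.
 */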
3233 static int be_dev_family_check(struct be_adapter *adapter)
3234 {
3235         struct pci_dev *pdev = adapter->pdev;
3236         u32 sli_intf = 0, if_type;
3237
3238         switch (pdev->device) {
3239         case BE_DEVICE_ID1:
3240         case OC_DEVICE_ID1:
3241                 adapter->generation = BE_GEN2;
3242                 break;
3243         case BE_DEVICE_ID2:
3244         case OC_DEVICE_ID2:
3245                 adapter->generation = BE_GEN3;
3246                 break;
3247         case OC_DEVICE_ID3:
3248         case OC_DEVICE_ID4:
3249                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3250                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3251                                                 SLI_INTF_IF_TYPE_SHIFT;
3252
3253                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3254                         if_type != 0x02) {
3255                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3256                         return -EINVAL;
3257                 }
3258                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3259                                          SLI_INTF_FAMILY_SHIFT);
3260                 adapter->generation = BE_GEN3;
3261                 break;
3262         default:
3263                 adapter->generation = 0;
3264         }
3265         return 0;
3266 }
3267
3268 static int lancer_wait_ready(struct be_adapter *adapter)
3269 {
3270 #define SLIPORT_READY_TIMEOUT 500
3271         u32 sliport_status;
3272         int status = 0, i;
3273
3274         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3275                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3276                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3277                         break;
3278
3279                 msleep(20);
3280         }
3281
3282         if (i == SLIPORT_READY_TIMEOUT)
3283                 status = -ETIMEDOUT;
3284
3285         return status;
3286 }
3287
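/* Check the SLI port state: if an error is reported together with the
 * reset-needed bit, initiate a port reset via SLIPORT_CONTROL and poll
 * again to confirm the error has been cleared.
 */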
3288 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3289 {
3290         int status;
3291         u32 sliport_status, err, reset_needed;
3292         status = lancer_wait_ready(adapter);
3293         if (!status) {
3294                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3295                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3296                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3297                 if (err && reset_needed) {
3298                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3299                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3300
3301                         /* check if the adapter has corrected the error */
3302                         status = lancer_wait_ready(adapter);
3303                         sliport_status = ioread32(adapter->db +
3304                                                         SLIPORT_STATUS_OFFSET);
3305                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3306                                                 SLIPORT_STATUS_RN_MASK);
3307                         if (status || sliport_status)
3308                                 status = -EIO;
3309                 } else if (err || reset_needed) {
3310                         status = -EIO;
3311                 }
3312         }
3313         return status;
3314 }
3315
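/* PCI probe: enable the device and map its BARs, bring the function to
 * a known state (POST, fw init, function reset), then create the rings
 * via be_setup() and register the netdev. Failures unwind via the goto
 * ladder in reverse order of setup.
 */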
3316 static int __devinit be_probe(struct pci_dev *pdev,
3317                         const struct pci_device_id *pdev_id)
3318 {
3319         int status = 0;
3320         struct be_adapter *adapter;
3321         struct net_device *netdev;
3322
3323         status = pci_enable_device(pdev);
3324         if (status)
3325                 goto do_none;
3326
3327         status = pci_request_regions(pdev, DRV_NAME);
3328         if (status)
3329                 goto disable_dev;
3330         pci_set_master(pdev);
3331
3332         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3333         if (netdev == NULL) {
3334                 status = -ENOMEM;
3335                 goto rel_reg;
3336         }
3337         adapter = netdev_priv(netdev);
3338         adapter->pdev = pdev;
3339         pci_set_drvdata(pdev, adapter);
3340
3341         status = be_dev_family_check(adapter);
3342         if (status)
3343                 goto free_netdev;
3344
3345         adapter->netdev = netdev;
3346         SET_NETDEV_DEV(netdev, &pdev->dev);
3347
3348         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3349         if (!status) {
3350                 netdev->features |= NETIF_F_HIGHDMA;
3351         } else {
3352                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3353                 if (status) {
3354                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3355                         goto free_netdev;
3356                 }
3357         }
3358
3359         status = be_sriov_enable(adapter);
3360         if (status)
3361                 goto free_netdev;
3362
3363         status = be_ctrl_init(adapter);
3364         if (status)
3365                 goto disable_sriov;
3366
3367         if (lancer_chip(adapter)) {
3368                 status = lancer_test_and_set_rdy_state(adapter);
3369                 if (status) {
3370                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3371                         goto ctrl_clean;
3372                 }
3373         }
3374
3375         /* sync up with fw's ready state */
3376         if (be_physfn(adapter)) {
3377                 status = be_cmd_POST(adapter);
3378                 if (status)
3379                         goto ctrl_clean;
3380         }
3381
3382         /* tell fw we're ready to fire cmds */
3383         status = be_cmd_fw_init(adapter);
3384         if (status)
3385                 goto ctrl_clean;
3386
3387         status = be_cmd_reset_function(adapter);
3388         if (status)
3389                 goto ctrl_clean;
3390
3391         status = be_stats_init(adapter);
3392         if (status)
3393                 goto ctrl_clean;
3394
3395         status = be_get_config(adapter);
3396         if (status)
3397                 goto stats_clean;
3398
3399         /* The INTR bit may be set in the card when probed by a kdump kernel
3400          * after a crash.
3401          */
3402         if (!lancer_chip(adapter))
3403                 be_intr_set(adapter, false);
3404
3405         be_msix_enable(adapter);
3406
3407         INIT_DELAYED_WORK(&adapter->work, be_worker);
3408         adapter->rx_fc = adapter->tx_fc = true;
3409
3410         status = be_setup(adapter);
3411         if (status)
3412                 goto msix_disable;
3413
3414         be_netdev_init(netdev);
3415         status = register_netdev(netdev);
3416         if (status != 0)
3417                 goto unsetup;
3418
3419         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3420
3421         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3422         return 0;
3423
3424 unsetup:
3425         be_clear(adapter);
3426 msix_disable:
3427         be_msix_disable(adapter);
3428 stats_clean:
3429         be_stats_cleanup(adapter);
3430 ctrl_clean:
3431         be_ctrl_cleanup(adapter);
3432 disable_sriov:
3433         be_sriov_disable(adapter);
3434 free_netdev:
3435         free_netdev(netdev);
3436         pci_set_drvdata(pdev, NULL);
3437 rel_reg:
3438         pci_release_regions(pdev);
3439 disable_dev:
3440         pci_disable_device(pdev);
3441 do_none:
3442         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3443         return status;
3444 }
3445
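/* Suspend quiesces the interface and frees rings and irqs via
 * be_clear(); resume rebuilds them with be_setup(). If WoL is enabled
 * it is armed before powering down and disarmed after resuming.
 */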
3446 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3447 {
3448         struct be_adapter *adapter = pci_get_drvdata(pdev);
3449         struct net_device *netdev =  adapter->netdev;
3450
3451         cancel_delayed_work_sync(&adapter->work);
3452         if (adapter->wol)
3453                 be_setup_wol(adapter, true);
3454
3455         netif_device_detach(netdev);
3456         if (netif_running(netdev)) {
3457                 rtnl_lock();
3458                 be_close(netdev);
3459                 rtnl_unlock();
3460         }
3461         be_clear(adapter);
3462
3463         be_msix_disable(adapter);
3464         pci_save_state(pdev);
3465         pci_disable_device(pdev);
3466         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3467         return 0;
3468 }
3469
3470 static int be_resume(struct pci_dev *pdev)
3471 {
3472         int status = 0;
3473         struct be_adapter *adapter = pci_get_drvdata(pdev);
3474         struct net_device *netdev =  adapter->netdev;
3475
3476         netif_device_detach(netdev);
3477
3478         status = pci_enable_device(pdev);
3479         if (status)
3480                 return status;
3481
3482         pci_set_power_state(pdev, PCI_D0);
3483         pci_restore_state(pdev);
3484
3485         be_msix_enable(adapter);
3486         /* tell fw we're ready to fire cmds */
3487         status = be_cmd_fw_init(adapter);
3488         if (status)
3489                 return status;
3490
3491         be_setup(adapter);
3492         if (netif_running(netdev)) {
3493                 rtnl_lock();
3494                 be_open(netdev);
3495                 rtnl_unlock();
3496         }
3497         netif_device_attach(netdev);
3498
3499         if (adapter->wol)
3500                 be_setup_wol(adapter, false);
3501
3502         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3503         return 0;
3504 }
3505
3506 /*
3507  * An FLR will stop BE from DMAing any data.
3508  */
3509 static void be_shutdown(struct pci_dev *pdev)
3510 {
3511         struct be_adapter *adapter = pci_get_drvdata(pdev);
3512
3513         if (!adapter)
3514                 return;
3515
3516         cancel_delayed_work_sync(&adapter->work);
3517
3518         netif_device_detach(adapter->netdev);
3519
3520         if (adapter->wol)
3521                 be_setup_wol(adapter, true);
3522
3523         be_cmd_reset_function(adapter);
3524
3525         pci_disable_device(pdev);
3526 }
3527
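/* EEH (PCI error) handlers: on error detection detach the netdev and
 * tear down the adapter; on slot reset re-enable the device and wait
 * for POST; on resume re-initialize the firmware interface and restore
 * the adapter with be_setup().
 */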
3528 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3529                                 pci_channel_state_t state)
3530 {
3531         struct be_adapter *adapter = pci_get_drvdata(pdev);
3532         struct net_device *netdev =  adapter->netdev;
3533
3534         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3535
3536         adapter->eeh_err = true;
3537
3538         netif_device_detach(netdev);
3539
3540         if (netif_running(netdev)) {
3541                 rtnl_lock();
3542                 be_close(netdev);
3543                 rtnl_unlock();
3544         }
3545         be_clear(adapter);
3546
3547         if (state == pci_channel_io_perm_failure)
3548                 return PCI_ERS_RESULT_DISCONNECT;
3549
3550         pci_disable_device(pdev);
3551
3552         return PCI_ERS_RESULT_NEED_RESET;
3553 }
3554
3555 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3556 {
3557         struct be_adapter *adapter = pci_get_drvdata(pdev);
3558         int status;
3559
3560         dev_info(&adapter->pdev->dev, "EEH reset\n");
3561         adapter->eeh_err = false;
3562
3563         status = pci_enable_device(pdev);
3564         if (status)
3565                 return PCI_ERS_RESULT_DISCONNECT;
3566
3567         pci_set_master(pdev);
3568         pci_set_power_state(pdev, PCI_D0);
3569         pci_restore_state(pdev);
3570
3571         /* Check if card is ok and fw is ready */
3572         status = be_cmd_POST(adapter);
3573         if (status)
3574                 return PCI_ERS_RESULT_DISCONNECT;
3575
3576         return PCI_ERS_RESULT_RECOVERED;
3577 }
3578
3579 static void be_eeh_resume(struct pci_dev *pdev)
3580 {
3581         int status = 0;
3582         struct be_adapter *adapter = pci_get_drvdata(pdev);
3583         struct net_device *netdev =  adapter->netdev;
3584
3585         dev_info(&adapter->pdev->dev, "EEH resume\n");
3586
3587         pci_save_state(pdev);
3588
3589         /* tell fw we're ready to fire cmds */
3590         status = be_cmd_fw_init(adapter);
3591         if (status)
3592                 goto err;
3593
3594         status = be_setup(adapter);
3595         if (status)
3596                 goto err;
3597
3598         if (netif_running(netdev)) {
3599                 status = be_open(netdev);
3600                 if (status)
3601                         goto err;
3602         }
3603         netif_device_attach(netdev);
3604         return;
3605 err:
3606         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3607 }
3608
3609 static struct pci_error_handlers be_eeh_handlers = {
3610         .error_detected = be_eeh_err_detected,
3611         .slot_reset = be_eeh_reset,
3612         .resume = be_eeh_resume,
3613 };
3614
3615 static struct pci_driver be_driver = {
3616         .name = DRV_NAME,
3617         .id_table = be_dev_ids,
3618         .probe = be_probe,
3619         .remove = be_remove,
3620         .suspend = be_suspend,
3621         .resume = be_resume,
3622         .shutdown = be_shutdown,
3623         .err_handler = &be_eeh_handlers
3624 };
3625
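/* Module entry: rx_frag_size may only be 2048, 4096 or 8192; any other
 * value falls back to 2048 before the PCI driver is registered.
 */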
3626 static int __init be_init_module(void)
3627 {
3628         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3629             rx_frag_size != 2048) {
3630                 printk(KERN_WARNING DRV_NAME
3631                         " : Module param rx_frag_size must be 2048/4096/8192."
3632                         " Using 2048\n");
3633                 rx_frag_size = 2048;
3634         }
3635
3636         return pci_register_driver(&be_driver);
3637 }
3638 module_init(be_init_module);
3639
3640 static void __exit be_exit_module(void)
3641 {
3642         pci_unregister_driver(&be_driver);
3643 }
3644 module_exit(be_exit_module);