/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	/* one cause string per UE status low bit (entries elided) */
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	/* one cause string per UE status high bit (entries elided) */
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

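/* Doorbell helpers: each encodes the ring id in the low bits of a 32-bit
 * doorbell word and a count (buffers posted, events/completions popped)
 * in a shifted field, then writes the word to the doorbell BAR.
 */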
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

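/* MAC update strategy used below: the new pmac is added first and the
 * old one deleted only after the add succeeds, so the interface never
 * ends up without a programmed address.
 */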
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				       MAC_ADDRESS_TYPE_NETWORK, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
					 adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

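/* The FW reports HW stats in a generation-specific layout: v0 for BE2,
 * v1 for BE3 and a pport layout for Lancer. Each populate_* helper below
 * copies one layout into the generation-independent adapter->drv_stats.
 */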
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

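/* accumulate_16bit_val() folds a free-running 16-bit HW counter into a
 * 32-bit accumulator: the low 16 bits track the HW value and the high 16
 * bits count wrap-arounds; e.g. a HW counter going 65530 -> 4 yields an
 * accumulated value of 0x10004.
 */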
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

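/* The per-queue 64-bit counters are read under a u64_stats seqcount:
 * the fetch_begin/fetch_retry loop rereads the counter pair until no
 * writer updated it mid-read, making the reads consistent on 32-bit
 * hosts without locking.
 */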
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;

	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					      tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					      udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

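/* TX fast path: be_xmit() maps the skb into WRBs, records it for
 * completion-time freeing, reserves ring space (stopping the subqueue
 * first if the next max-fragmented skb might not fit) and only then
 * rings the TX doorbell.
 */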
static netdev_tx_t be_xmit(struct sk_buff *skb,
			   struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				   skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
			(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					    vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					    NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

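/* be_set_rx_mode() maps netdev filter state onto the HW RX filter:
 * IFF_PROMISC enables promiscuous mode, too many multicast addresses
 * (or IFF_ALLMULTI) fall back to multicast-promiscuous, and on leaving
 * promiscuous mode the VLAN table that promiscuity bypassed is
 * re-programmed.
 */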
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

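/* Adaptive interrupt coalescing: once a second the RX EQ delay (eqd) is
 * recomputed from the observed packets-per-second, clamped to
 * [min_eqd, max_eqd] and written to HW only when it actually changes.
 */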
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;

	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

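/* RX buffers are rx_frag_size slices of a larger DMA-mapped compound
 * page shared by several ring entries; the page is unmapped only when
 * the entry flagged last_page_user returns its fragment.
 */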
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		 struct be_rx_obj *rxo,
		 u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
				struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			     struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
				struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
				    struct be_rx_obj *rxo,
				    struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				 struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				 struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

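/* TX completion path: each completion carries the index of the last WRB
 * of a transmitted skb; be_tx_compl_process() walks the ring from the
 * header WRB up to that index, unmapping every fragment, and frees the
 * skb.
 */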
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
			      struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			  wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			  txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

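/* Queue teardown mirrors creation: a queue is destroyed in FW only if
 * q->created says FW ever saw it; host memory is freed unconditionally
 * afterwards.
 */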
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
					     adapter->num_tx_qs);
		rtnl_unlock();
	}

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
			   sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
				   sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
				   sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !sriov_enabled(adapter) && be_physfn(adapter) &&
	    !be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			 "No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				 msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			 "Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	return eqe->evt != 0;
}

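/* INTx handling: Lancer has no CEV_ISR register, so pending work is
 * detected by peeking the EQs directly; on BEx the ISR register tells
 * which EQs fired and a zero read means the interrupt was not ours.
 */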
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;
	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			       (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

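/* NAPI RX poll: consume up to 'budget' completions, replenish the RX
 * ring when it falls below the watermark, and re-arm the CQ only after
 * napi_complete() so the next completion raises a fresh event.
 */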
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					      wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
			    atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl)
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);

	napi_complete(napi);

	/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
	if (lancer_chip(adapter) && !msix_enabled(adapter)) {
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, true, 0);

		be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
	}

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

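/* Unrecoverable-error detection: Lancer exposes error state via the
 * SLIPORT status/error registers; BEx reports it via the UE status
 * words in PCI config space, masked with the corresponding mask words
 * so only unmasked bits count as real errors.
 */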
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
	    sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

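/* MSI-X setup: request one vector per desired RX queue plus one shared
 * by TX and MCC; if that fails but at least the minimum is available,
 * retry with the vector count the failed call reported back.
 */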
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
					      PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				     pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				 "Device supports %d VFs and not %d\n",
				 adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						  sizeof(struct be_vf_cfg),
						  GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
			  struct be_eq_obj *eq_obj,
			  void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
					qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		 "MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

2276 static void be_irq_unregister(struct be_adapter *adapter)
2278 struct net_device *netdev = adapter->netdev;
2279 struct be_rx_obj *rxo;
2282 if (!adapter->isr_registered)
2286 if (!msix_enabled(adapter)) {
2287 free_irq(netdev->irq, adapter);
2292 be_free_irq(adapter, &adapter->tx_eq, adapter);
2294 for_all_rx_queues(adapter, rxo, i)
2295 be_free_irq(adapter, &rxo->rx_eq, rxo);
2298 adapter->isr_registered = false;
2301 static void be_rx_queues_clear(struct be_adapter *adapter)
2303 struct be_queue_info *q;
2304 struct be_rx_obj *rxo;
2307 for_all_rx_queues(adapter, rxo, i) {
2310 be_cmd_rxq_destroy(adapter, q);
2311 /* After the rxq is invalidated, wait for a grace time
2312 * of 1ms for all dma to end and the flush compl to
2313 * arrive
2314 */
2315 mdelay(1);
2316 be_rx_q_clean(adapter, rxo);
2319 /* Clear any residual events */
2322 be_eq_clean(adapter, &rxo->rx_eq);
2326 static int be_close(struct net_device *netdev)
2328 struct be_adapter *adapter = netdev_priv(netdev);
2329 struct be_rx_obj *rxo;
2330 struct be_tx_obj *txo;
2331 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2334 be_async_mcc_disable(adapter);
2336 if (!lancer_chip(adapter))
2337 be_intr_set(adapter, false);
2339 for_all_rx_queues(adapter, rxo, i)
2340 napi_disable(&rxo->rx_eq.napi);
2342 napi_disable(&tx_eq->napi);
2344 if (lancer_chip(adapter)) {
2345 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2346 for_all_rx_queues(adapter, rxo, i)
2347 be_cq_notify(adapter, rxo->cq.id, false, 0);
2348 for_all_tx_queues(adapter, txo, i)
2349 be_cq_notify(adapter, txo->cq.id, false, 0);
2352 if (msix_enabled(adapter)) {
2353 vec = be_msix_vec_get(adapter, tx_eq);
2354 synchronize_irq(vec);
2356 for_all_rx_queues(adapter, rxo, i) {
2357 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2358 synchronize_irq(vec);
2361 synchronize_irq(netdev->irq);
2363 be_irq_unregister(adapter);
2365 /* Wait for all pending tx completions to arrive so that
2366 * all tx skbs are freed.
2368 for_all_tx_queues(adapter, txo, i)
2369 be_tx_compl_clean(adapter, txo);
2371 be_rx_queues_clear(adapter);
2375 static int be_rx_queues_setup(struct be_adapter *adapter)
2377 struct be_rx_obj *rxo;
2381 for_all_rx_queues(adapter, rxo, i) {
2382 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2383 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2385 (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2390 if (be_multi_rxq(adapter)) {
2391 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2392 for_all_rss_queues(adapter, rxo, i) {
2395 rsstable[j + i] = rxo->rss_id;
2398 rc = be_cmd_rss_config(adapter, rsstable, 128);
2404 /* First time posting */
2405 for_all_rx_queues(adapter, rxo, i) {
2406 be_post_rx_frags(rxo, GFP_KERNEL);
2407 napi_enable(&rxo->rx_eq.napi);
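/* A minimal sketch of the indirection-table fill in be_rx_queues_setup()
 * above: every one of the 128 RSS slots is mapped round-robin onto the
 * RSS-capable queues, so hashed flows spread evenly. Hypothetical helper.
 */
static void example_fill_rss_table(u8 *table, int table_len,
				   const u8 *rss_ids, int num_rss_qs)
{
	int i;

	for (i = 0; i < table_len; i++)
		table[i] = rss_ids[i % num_rss_qs];
}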
2412 static int be_open(struct net_device *netdev)
2414 struct be_adapter *adapter = netdev_priv(netdev);
2415 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2416 struct be_rx_obj *rxo;
2420 status = be_rx_queues_setup(adapter);
2424 napi_enable(&tx_eq->napi);
2426 be_irq_register(adapter);
2428 if (!lancer_chip(adapter))
2429 be_intr_set(adapter, true);
2431 /* The evt queues are created in unarmed state; arm them */
2432 for_all_rx_queues(adapter, rxo, i) {
2433 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2434 be_cq_notify(adapter, rxo->cq.id, true, 0);
2436 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2438 /* Now that interrupts are on we can process async mcc */
2439 be_async_mcc_enable(adapter);
2441 status = be_cmd_link_status_query(adapter, NULL, NULL,
2444 be_link_status_update(adapter, link_status);
2448 be_close(adapter->netdev);
2452 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2454 struct be_dma_mem cmd;
2458 memset(mac, 0, ETH_ALEN);
2460 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2461 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2465 memset(cmd.va, 0, cmd.size);
2468 status = pci_write_config_dword(adapter->pdev,
2469 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2471 dev_err(&adapter->pdev->dev,
2472 "Could not enable Wake-on-lan\n");
2473 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2477 status = be_cmd_enable_magic_wol(adapter,
2478 adapter->netdev->dev_addr, &cmd);
2479 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2480 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2482 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2483 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2484 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2487 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
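/* A minimal sketch of the PCI wake plumbing used by be_setup_wol() above:
 * the device-specific magic-packet command arms the NIC itself, while
 * pci_enable_wake() tells the PCI core which low-power states may raise
 * wake events. Hypothetical helper.
 */
static void example_set_pci_wake(struct pci_dev *pdev, bool enable)
{
	pci_enable_wake(pdev, PCI_D3hot, enable);
	pci_enable_wake(pdev, PCI_D3cold, enable);
}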
2492 * Generate a seed MAC address from the PF MAC address using jhash.
2493 * MAC addresses for VFs are assigned incrementally starting from the seed.
2494 * These addresses are programmed into the ASIC by the PF, and the VF driver
2495 * queries for its MAC address during probe.
2497 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2502 struct be_vf_cfg *vf_cfg;
2504 be_vf_eth_addr_generate(adapter, mac);
2506 for_all_vfs(adapter, vf_cfg, vf) {
2507 if (lancer_chip(adapter)) {
2508 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2510 status = be_cmd_pmac_add(adapter, mac,
2512 &vf_cfg->pmac_id, vf + 1);
2516 dev_err(&adapter->pdev->dev,
2517 "Mac address assignment failed for VF %d\n", vf);
2519 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
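/* A minimal sketch of the seed-MAC scheme described above (hypothetical
 * helper; be_vf_eth_addr_generate() itself is not shown in this excerpt):
 * jhash the PF's MAC, keep its OUI in the top three bytes, and derive the
 * low three bytes from the hash. Callers then increment the last byte once
 * per VF.
 */
static void example_seed_mac(const u8 *pf_mac, u8 *mac)
{
	u32 h = jhash(pf_mac, ETH_ALEN, 0);	/* from <linux/jhash.h> */

	memcpy(mac, pf_mac, 3);			/* preserve the OUI */
	mac[3] = (h >> 16) & 0xFF;
	mac[4] = (h >> 8) & 0xFF;
	mac[5] = h & 0xFF;
}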
2526 static void be_vf_clear(struct be_adapter *adapter)
2528 struct be_vf_cfg *vf_cfg;
2531 for_all_vfs(adapter, vf_cfg, vf) {
2532 if (lancer_chip(adapter))
2533 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2535 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2536 vf_cfg->pmac_id, vf + 1);
2538 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2542 static int be_clear(struct be_adapter *adapter)
2544 if (sriov_enabled(adapter))
2545 be_vf_clear(adapter);
2547 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2549 be_mcc_queues_destroy(adapter);
2550 be_rx_queues_destroy(adapter);
2551 be_tx_queues_destroy(adapter);
2553 /* tell fw we're done with firing cmds */
2554 be_cmd_fw_clean(adapter);
2558 static void be_vf_setup_init(struct be_adapter *adapter)
2560 struct be_vf_cfg *vf_cfg;
2563 for_all_vfs(adapter, vf_cfg, vf) {
2564 vf_cfg->if_handle = -1;
2565 vf_cfg->pmac_id = -1;
2569 static int be_vf_setup(struct be_adapter *adapter)
2571 struct be_vf_cfg *vf_cfg;
2572 u32 cap_flags, en_flags, vf;
2576 be_vf_setup_init(adapter);
2578 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2579 BE_IF_FLAGS_MULTICAST;
2580 for_all_vfs(adapter, vf_cfg, vf) {
2581 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2582 &vf_cfg->if_handle, NULL, vf + 1);
2587 status = be_vf_eth_addr_config(adapter);
2591 for_all_vfs(adapter, vf_cfg, vf) {
2592 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2596 vf_cfg->tx_rate = lnk_speed * 10;
2603 static void be_setup_init(struct be_adapter *adapter)
2605 adapter->vlan_prio_bmap = 0xff;
2606 adapter->link_speed = -1;
2607 adapter->if_handle = -1;
2608 adapter->be3_native = false;
2609 adapter->promiscuous = false;
2610 adapter->eq_next_idx = 0;
2613 static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2616 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2619 status = be_cmd_mac_addr_query(adapter, mac,
2620 MAC_ADDRESS_TYPE_NETWORK,
2621 false, adapter->if_handle, pmac_id);
2624 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2625 &adapter->pmac_id, 0);
2630 static int be_setup(struct be_adapter *adapter)
2632 struct net_device *netdev = adapter->netdev;
2633 u32 cap_flags, en_flags;
2637 struct be_tx_obj *txo;
2639 be_setup_init(adapter);
2641 be_cmd_req_native_mode(adapter);
2643 status = be_tx_queues_create(adapter);
2647 status = be_rx_queues_create(adapter);
2651 status = be_mcc_queues_create(adapter);
2655 memset(mac, 0, ETH_ALEN);
2656 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2657 true /*permanent */, 0, 0);
2660 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2661 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2663 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2664 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2665 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2666 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2668 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2669 cap_flags |= BE_IF_FLAGS_RSS;
2670 en_flags |= BE_IF_FLAGS_RSS;
2672 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2673 netdev->dev_addr, &adapter->if_handle,
2674 &adapter->pmac_id, 0);
2678 for_all_tx_queues(adapter, txo, i) {
2679 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2684 /* The VF's permanent MAC queried from the card is incorrect.
2685 * For BEx: query the MAC configured by the PF using the if_handle.
2686 * For Lancer: get and use mac_list to obtain the MAC address.
2688 if (!be_physfn(adapter)) {
2689 if (lancer_chip(adapter))
2690 status = be_configure_mac_from_list(adapter, mac);
2692 status = be_cmd_mac_addr_query(adapter, mac,
2693 MAC_ADDRESS_TYPE_NETWORK, false,
2694 adapter->if_handle, 0);
2696 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2697 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2701 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2703 status = be_vid_config(adapter, false, 0);
2707 be_set_rx_mode(adapter->netdev);
2709 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2710 /* For Lancer: It is legal for this cmd to fail on VF */
2711 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2714 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2715 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2717 /* For Lancer: It is legal for this cmd to fail on VF */
2718 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2722 pcie_set_readrq(adapter->pdev, 4096);
2724 if (sriov_enabled(adapter)) {
2725 status = be_vf_setup(adapter);
2736 #ifdef CONFIG_NET_POLL_CONTROLLER
2737 static void be_netpoll(struct net_device *netdev)
2739 struct be_adapter *adapter = netdev_priv(netdev);
2740 struct be_rx_obj *rxo;
2743 event_handle(adapter, &adapter->tx_eq, false);
2744 for_all_rx_queues(adapter, rxo, i)
2745 event_handle(adapter, &rxo->rx_eq, true);
2749 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2750 static bool be_flash_redboot(struct be_adapter *adapter,
2751 const u8 *p, u32 img_start, int image_size,
2758 crc_offset = hdr_size + img_start + image_size - 4;
2762 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2765 dev_err(&adapter->pdev->dev,
2766 "could not get crc from flash, not flashing redboot\n");
2770 /* update redboot only if the CRC does not match */
2771 if (!memcmp(flashed_crc, p, 4))
2777 static bool phy_flashing_required(struct be_adapter *adapter)
2780 struct be_phy_info phy_info;
2782 status = be_cmd_get_phy_info(adapter, &phy_info);
2785 if ((phy_info.phy_type == TN_8022) &&
2786 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2792 static int be_flash_data(struct be_adapter *adapter,
2793 const struct firmware *fw,
2794 struct be_dma_mem *flash_cmd, int num_of_images)
2797 int status = 0, i, filehdr_size = 0;
2798 u32 total_bytes = 0, flash_op;
2800 const u8 *p = fw->data;
2801 struct be_cmd_write_flashrom *req = flash_cmd->va;
2802 const struct flash_comp *pflashcomp;
2805 static const struct flash_comp gen3_flash_types[10] = {
2806 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2807 FLASH_IMAGE_MAX_SIZE_g3},
2808 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2809 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2810 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2811 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2812 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2813 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2814 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2815 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2816 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2817 FLASH_IMAGE_MAX_SIZE_g3},
2818 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2819 FLASH_IMAGE_MAX_SIZE_g3},
2820 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2821 FLASH_IMAGE_MAX_SIZE_g3},
2822 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2823 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2824 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2825 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2827 static const struct flash_comp gen2_flash_types[8] = {
2828 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2829 FLASH_IMAGE_MAX_SIZE_g2},
2830 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2831 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2832 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2833 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2834 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2835 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2836 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2837 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2838 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2839 FLASH_IMAGE_MAX_SIZE_g2},
2840 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2841 FLASH_IMAGE_MAX_SIZE_g2},
2842 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2843 FLASH_IMAGE_MAX_SIZE_g2}
2846 if (adapter->generation == BE_GEN3) {
2847 pflashcomp = gen3_flash_types;
2848 filehdr_size = sizeof(struct flash_file_hdr_g3);
2849 num_comp = ARRAY_SIZE(gen3_flash_types);
2851 pflashcomp = gen2_flash_types;
2852 filehdr_size = sizeof(struct flash_file_hdr_g2);
2853 num_comp = ARRAY_SIZE(gen2_flash_types);
2855 for (i = 0; i < num_comp; i++) {
2856 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2857 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2859 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2860 if (!phy_flashing_required(adapter))
2863 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2864 (!be_flash_redboot(adapter, fw->data,
2865 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2866 (num_of_images * sizeof(struct image_hdr)))))
2869 p += filehdr_size + pflashcomp[i].offset
2870 + (num_of_images * sizeof(struct image_hdr));
2871 if (p + pflashcomp[i].size > fw->data + fw->size)
2873 total_bytes = pflashcomp[i].size;
2874 while (total_bytes) {
2875 if (total_bytes > 32*1024)
2876 num_bytes = 32*1024;
2877 else
2878 num_bytes = total_bytes;
2879 total_bytes -= num_bytes;
2880 if (!total_bytes) {
2881 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2882 flash_op = FLASHROM_OPER_PHY_FLASH;
2883 else
2884 flash_op = FLASHROM_OPER_FLASH;
2885 } else {
2886 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2887 flash_op = FLASHROM_OPER_PHY_SAVE;
2888 else
2889 flash_op = FLASHROM_OPER_SAVE;
2890 }
2891 memcpy(req->params.data_buf, p, num_bytes);
2892 p += num_bytes;
2893 status = be_cmd_write_flashrom(adapter, flash_cmd,
2894 pflashcomp[i].optype, flash_op, num_bytes);
2895 if (status) {
2896 if ((status == ILLEGAL_IOCTL_REQ) &&
2897 (pflashcomp[i].optype ==
2898 IMG_TYPE_PHY_FW))
2899 break;
2900 dev_err(&adapter->pdev->dev,
2901 "cmd to write to flash rom failed.\n");
2909 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2913 if (fhdr->build[0] == '3')
2915 else if (fhdr->build[0] == '2')
2921 static int lancer_fw_download(struct be_adapter *adapter,
2922 const struct firmware *fw)
2924 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2925 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2926 struct be_dma_mem flash_cmd;
2927 const u8 *data_ptr = NULL;
2928 u8 *dest_image_ptr = NULL;
2929 size_t image_size = 0;
2931 u32 data_written = 0;
2936 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2937 dev_err(&adapter->pdev->dev,
2938 "FW Image not properly aligned. "
2939 "Length must be 4 byte aligned.\n");
2941 goto lancer_fw_exit;
2944 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2945 + LANCER_FW_DOWNLOAD_CHUNK;
2946 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2947 &flash_cmd.dma, GFP_KERNEL);
2948 if (!flash_cmd.va) {
2949 status = -ENOMEM;
2950 dev_err(&adapter->pdev->dev,
2951 "Memory allocation failure while flashing\n");
2952 goto lancer_fw_exit;
2955 dest_image_ptr = flash_cmd.va +
2956 sizeof(struct lancer_cmd_req_write_object);
2957 image_size = fw->size;
2958 data_ptr = fw->data;
2960 while (image_size) {
2961 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2963 /* Copy the image chunk content. */
2964 memcpy(dest_image_ptr, data_ptr, chunk_size);
2966 status = lancer_cmd_write_object(adapter, &flash_cmd,
2967 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2968 &data_written, &add_status);
2973 offset += data_written;
2974 data_ptr += data_written;
2975 image_size -= data_written;
2979 /* Commit the firmware that was written */
2980 status = lancer_cmd_write_object(adapter, &flash_cmd,
2981 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2982 &data_written, &add_status);
2985 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2988 dev_err(&adapter->pdev->dev,
2989 "Firmware load error. "
2990 "Status code: 0x%x Additional Status: 0x%x\n",
2991 status, add_status);
2992 goto lancer_fw_exit;
2995 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
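/* A minimal sketch of the download loop in lancer_fw_download() above:
 * the write command reports how many bytes the firmware consumed, so the
 * driver advances by data_written rather than by the chunk it offered,
 * and a final zero-length write at the end offset commits the image.
 * The write helper is hypothetical.
 */
static int example_lancer_download(const u8 *img, size_t size,
				   int (*write_obj)(const u8 *p, u32 len,
						    u32 off, u32 *written))
{
	u32 off = 0, written = 0;
	int rc;

	while (size) {
		u32 chunk = min_t(u32, size, 32 * 1024);

		rc = write_obj(img, chunk, off, &written);
		if (rc)
			return rc;
		off += written;
		img += written;
		size -= written;
	}
	/* a zero-length write commits the flashed image */
	return write_obj(NULL, 0, off, &written);
}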
3000 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3002 struct flash_file_hdr_g2 *fhdr;
3003 struct flash_file_hdr_g3 *fhdr3;
3004 struct image_hdr *img_hdr_ptr = NULL;
3005 struct be_dma_mem flash_cmd;
3007 int status = 0, i = 0, num_imgs = 0;
3010 fhdr = (struct flash_file_hdr_g2 *) p;
3012 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3013 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3014 &flash_cmd.dma, GFP_KERNEL);
3015 if (!flash_cmd.va) {
3016 status = -ENOMEM;
3017 dev_err(&adapter->pdev->dev,
3018 "Memory allocation failure while flashing\n");
3022 if ((adapter->generation == BE_GEN3) &&
3023 (get_ufigen_type(fhdr) == BE_GEN3)) {
3024 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3025 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3026 for (i = 0; i < num_imgs; i++) {
3027 img_hdr_ptr = (struct image_hdr *) (fw->data +
3028 (sizeof(struct flash_file_hdr_g3) +
3029 i * sizeof(struct image_hdr)));
3030 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3031 status = be_flash_data(adapter, fw, &flash_cmd,
3034 } else if ((adapter->generation == BE_GEN2) &&
3035 (get_ufigen_type(fhdr) == BE_GEN2)) {
3036 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3038 dev_err(&adapter->pdev->dev,
3039 "UFI and Interface are not compatible for flashing\n");
3043 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3046 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3050 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3056 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3058 const struct firmware *fw;
3061 if (!netif_running(adapter->netdev)) {
3062 dev_err(&adapter->pdev->dev,
3063 "Firmware load not allowed (interface is down)\n");
3067 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3071 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3073 if (lancer_chip(adapter))
3074 status = lancer_fw_download(adapter, fw);
3076 status = be_fw_download(adapter, fw);
3079 release_firmware(fw);
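/* A minimal sketch of the request_firmware() contract used by be_load_fw()
 * above: the loader hands back a read-only blob that must be released
 * exactly once, whether or not the flash succeeded. The file name is
 * hypothetical.
 */
static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int rc = request_firmware(&fw, "example/fw.bin", dev);

	if (rc)
		return rc;	/* file missing or loader unavailable */
	/* ... validate and flash fw->data, fw->size ... */
	release_firmware(fw);
	return rc;
}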
3083 static const struct net_device_ops be_netdev_ops = {
3084 .ndo_open = be_open,
3085 .ndo_stop = be_close,
3086 .ndo_start_xmit = be_xmit,
3087 .ndo_set_rx_mode = be_set_rx_mode,
3088 .ndo_set_mac_address = be_mac_addr_set,
3089 .ndo_change_mtu = be_change_mtu,
3090 .ndo_get_stats64 = be_get_stats64,
3091 .ndo_validate_addr = eth_validate_addr,
3092 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3093 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3094 .ndo_set_vf_mac = be_set_vf_mac,
3095 .ndo_set_vf_vlan = be_set_vf_vlan,
3096 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3097 .ndo_get_vf_config = be_get_vf_config,
3098 #ifdef CONFIG_NET_POLL_CONTROLLER
3099 .ndo_poll_controller = be_netpoll,
3103 static void be_netdev_init(struct net_device *netdev)
3105 struct be_adapter *adapter = netdev_priv(netdev);
3106 struct be_rx_obj *rxo;
3109 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3110 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3112 if (be_multi_rxq(adapter))
3113 netdev->hw_features |= NETIF_F_RXHASH;
3115 netdev->features |= netdev->hw_features |
3116 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3118 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3119 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3121 netdev->flags |= IFF_MULTICAST;
3123 netif_set_gso_max_size(netdev, 65535);
3125 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3127 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3129 for_all_rx_queues(adapter, rxo, i)
3130 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3133 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3137 static void be_unmap_pci_bars(struct be_adapter *adapter)
3140 iounmap(adapter->csr);
3142 iounmap(adapter->db);
3145 static int be_map_pci_bars(struct be_adapter *adapter)
3150 if (lancer_chip(adapter)) {
3151 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3152 pci_resource_len(adapter->pdev, 0));
3159 if (be_physfn(adapter)) {
3160 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3161 pci_resource_len(adapter->pdev, 2));
3164 adapter->csr = addr;
3167 if (adapter->generation == BE_GEN2) {
3170 if (be_physfn(adapter))
3175 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3176 pci_resource_len(adapter->pdev, db_reg));
3183 be_unmap_pci_bars(adapter);
3188 static void be_ctrl_cleanup(struct be_adapter *adapter)
3190 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3192 be_unmap_pci_bars(adapter);
3195 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3198 mem = &adapter->rx_filter;
3200 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3204 static int be_ctrl_init(struct be_adapter *adapter)
3206 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3207 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3208 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3211 status = be_map_pci_bars(adapter);
3215 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3216 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3217 mbox_mem_alloc->size,
3218 &mbox_mem_alloc->dma,
3220 if (!mbox_mem_alloc->va) {
3222 goto unmap_pci_bars;
3224 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3225 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3226 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3227 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3229 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3230 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3231 &rx_filter->dma, GFP_KERNEL);
3232 if (rx_filter->va == NULL) {
3236 memset(rx_filter->va, 0, rx_filter->size);
3238 mutex_init(&adapter->mbox_lock);
3239 spin_lock_init(&adapter->mcc_lock);
3240 spin_lock_init(&adapter->mcc_cq_lock);
3242 init_completion(&adapter->flash_compl);
3243 pci_save_state(adapter->pdev);
3247 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3248 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3251 be_unmap_pci_bars(adapter);
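/* A minimal sketch of the alignment trick in be_ctrl_init() above: the
 * hardware expects the mailbox on a 16-byte boundary, so the driver
 * over-allocates by 16 bytes and aligns both the CPU and DMA addresses
 * within the same allocation. Hypothetical helper.
 */
static int example_alloc_aligned(struct device *dev, size_t size,
				 struct be_dma_mem *raw,
				 struct be_dma_mem *aligned)
{
	raw->size = size + 16;
	raw->va = dma_alloc_coherent(dev, raw->size, &raw->dma, GFP_KERNEL);
	if (!raw->va)
		return -ENOMEM;
	aligned->size = size;
	aligned->va = PTR_ALIGN(raw->va, 16);
	aligned->dma = PTR_ALIGN(raw->dma, 16);
	return 0;
}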
3257 static void be_stats_cleanup(struct be_adapter *adapter)
3259 struct be_dma_mem *cmd = &adapter->stats_cmd;
3262 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3266 static int be_stats_init(struct be_adapter *adapter)
3268 struct be_dma_mem *cmd = &adapter->stats_cmd;
3270 if (adapter->generation == BE_GEN2) {
3271 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3273 if (lancer_chip(adapter))
3274 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3276 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3278 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3280 if (cmd->va == NULL)
3282 memset(cmd->va, 0, cmd->size);
3286 static void __devexit be_remove(struct pci_dev *pdev)
3288 struct be_adapter *adapter = pci_get_drvdata(pdev);
3293 cancel_delayed_work_sync(&adapter->work);
3295 unregister_netdev(adapter->netdev);
3299 be_stats_cleanup(adapter);
3301 be_ctrl_cleanup(adapter);
3303 be_sriov_disable(adapter);
3305 be_msix_disable(adapter);
3307 pci_set_drvdata(pdev, NULL);
3308 pci_release_regions(pdev);
3309 pci_disable_device(pdev);
3311 free_netdev(adapter->netdev);
3314 static int be_get_config(struct be_adapter *adapter)
3318 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3319 &adapter->function_mode, &adapter->function_caps);
3323 if (adapter->function_mode & FLEX10_MODE)
3324 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3326 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3328 status = be_cmd_get_cntl_attributes(adapter);
3335 static int be_dev_family_check(struct be_adapter *adapter)
3337 struct pci_dev *pdev = adapter->pdev;
3338 u32 sli_intf = 0, if_type;
3340 switch (pdev->device) {
3343 adapter->generation = BE_GEN2;
3348 adapter->generation = BE_GEN3;
3352 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3353 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3354 SLI_INTF_IF_TYPE_SHIFT;
3356 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3357 if_type != 0x02) {
3358 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3361 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3362 SLI_INTF_FAMILY_SHIFT);
3363 adapter->generation = BE_GEN3;
3366 adapter->generation = 0;
3371 static int lancer_wait_ready(struct be_adapter *adapter)
3373 #define SLIPORT_READY_TIMEOUT 30
3377 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3378 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3379 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3385 if (i == SLIPORT_READY_TIMEOUT)
3391 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3394 u32 sliport_status, err, reset_needed;
3395 status = lancer_wait_ready(adapter);
3397 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3398 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3399 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3400 if (err && reset_needed) {
3401 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3402 adapter->db + SLIPORT_CONTROL_OFFSET);
3404 /* check if the adapter has corrected the error */
3405 status = lancer_wait_ready(adapter);
3406 sliport_status = ioread32(adapter->db +
3407 SLIPORT_STATUS_OFFSET);
3408 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3409 SLIPORT_STATUS_RN_MASK);
3410 if (status || sliport_status)
3412 } else if (err || reset_needed) {
3419 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3424 if (adapter->eeh_err || adapter->ue_detected)
3427 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3429 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3430 dev_err(&adapter->pdev->dev,
3431 "Adapter in error state."
3432 "Trying to recover.\n");
3434 status = lancer_test_and_set_rdy_state(adapter);
3438 netif_device_detach(adapter->netdev);
3440 if (netif_running(adapter->netdev))
3441 be_close(adapter->netdev);
3445 adapter->fw_timeout = false;
3447 status = be_setup(adapter);
3451 if (netif_running(adapter->netdev)) {
3452 status = be_open(adapter->netdev);
3457 netif_device_attach(adapter->netdev);
3459 dev_err(&adapter->pdev->dev,
3460 "Adapter error recovery succeeded\n");
3464 dev_err(&adapter->pdev->dev,
3465 "Adapter error recovery failed\n");
3468 static void be_worker(struct work_struct *work)
3470 struct be_adapter *adapter =
3471 container_of(work, struct be_adapter, work.work);
3472 struct be_rx_obj *rxo;
3475 if (lancer_chip(adapter))
3476 lancer_test_and_recover_fn_err(adapter);
3478 be_detect_dump_ue(adapter);
3480 /* when interrupts are not yet enabled, just reap any pending
3481 * mcc completions */
3482 if (!netif_running(adapter->netdev)) {
3483 int mcc_compl, status = 0;
3485 mcc_compl = be_process_mcc(adapter, &status);
3488 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3489 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3495 if (!adapter->stats_cmd_sent) {
3496 if (lancer_chip(adapter))
3497 lancer_cmd_get_pport_stats(adapter,
3498 &adapter->stats_cmd);
3500 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3503 for_all_rx_queues(adapter, rxo, i) {
3504 be_rx_eqd_update(adapter, rxo);
3506 if (rxo->rx_post_starved) {
3507 rxo->rx_post_starved = false;
3508 be_post_rx_frags(rxo, GFP_KERNEL);
3513 adapter->work_counter++;
3514 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
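/* A minimal sketch of the self-rescheduling pattern in be_worker() above:
 * the handler re-queues itself once a second, and teardown paths must use
 * cancel_delayed_work_sync() (as be_remove() does) so no instance is left
 * running when the adapter goes away.
 */
static void example_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	/* ... periodic housekeeping on adapter ... */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}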
3517 static int __devinit be_probe(struct pci_dev *pdev,
3518 const struct pci_device_id *pdev_id)
3521 struct be_adapter *adapter;
3522 struct net_device *netdev;
3524 status = pci_enable_device(pdev);
3528 status = pci_request_regions(pdev, DRV_NAME);
3531 pci_set_master(pdev);
3533 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3534 if (netdev == NULL) {
3538 adapter = netdev_priv(netdev);
3539 adapter->pdev = pdev;
3540 pci_set_drvdata(pdev, adapter);
3542 status = be_dev_family_check(adapter);
3546 adapter->netdev = netdev;
3547 SET_NETDEV_DEV(netdev, &pdev->dev);
3549 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3551 netdev->features |= NETIF_F_HIGHDMA;
3553 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3555 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3560 status = be_sriov_enable(adapter);
3564 status = be_ctrl_init(adapter);
3568 if (lancer_chip(adapter)) {
3569 status = lancer_wait_ready(adapter);
3571 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3572 adapter->db + SLIPORT_CONTROL_OFFSET);
3573 status = lancer_test_and_set_rdy_state(adapter);
3576 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3581 /* sync up with fw's ready state */
3582 if (be_physfn(adapter)) {
3583 status = be_cmd_POST(adapter);
3588 /* tell fw we're ready to fire cmds */
3589 status = be_cmd_fw_init(adapter);
3593 status = be_cmd_reset_function(adapter);
3597 status = be_stats_init(adapter);
3601 status = be_get_config(adapter);
3605 /* The INTR bit may be set in the card when probed by a kdump kernel
3606 * after a crash.
3607 */
3608 if (!lancer_chip(adapter))
3609 be_intr_set(adapter, false);
3611 be_msix_enable(adapter);
3613 INIT_DELAYED_WORK(&adapter->work, be_worker);
3614 adapter->rx_fc = adapter->tx_fc = true;
3616 status = be_setup(adapter);
3620 be_netdev_init(netdev);
3621 status = register_netdev(netdev);
3625 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3627 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3633 be_msix_disable(adapter);
3635 be_stats_cleanup(adapter);
3637 be_ctrl_cleanup(adapter);
3639 be_sriov_disable(adapter);
3641 free_netdev(netdev);
3642 pci_set_drvdata(pdev, NULL);
3644 pci_release_regions(pdev);
3646 pci_disable_device(pdev);
3648 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3652 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3654 struct be_adapter *adapter = pci_get_drvdata(pdev);
3655 struct net_device *netdev = adapter->netdev;
3657 cancel_delayed_work_sync(&adapter->work);
3659 be_setup_wol(adapter, true);
3661 netif_device_detach(netdev);
3662 if (netif_running(netdev)) {
3669 be_msix_disable(adapter);
3670 pci_save_state(pdev);
3671 pci_disable_device(pdev);
3672 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3676 static int be_resume(struct pci_dev *pdev)
3679 struct be_adapter *adapter = pci_get_drvdata(pdev);
3680 struct net_device *netdev = adapter->netdev;
3682 netif_device_detach(netdev);
3684 status = pci_enable_device(pdev);
3688 pci_set_power_state(pdev, PCI_D0);
3689 pci_restore_state(pdev);
3691 be_msix_enable(adapter);
3692 /* tell fw we're ready to fire cmds */
3693 status = be_cmd_fw_init(adapter);
3698 if (netif_running(netdev)) {
3703 netif_device_attach(netdev);
3706 be_setup_wol(adapter, false);
3708 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3713 * An FLR will stop BE from DMAing any data.
3715 static void be_shutdown(struct pci_dev *pdev)
3717 struct be_adapter *adapter = pci_get_drvdata(pdev);
3722 cancel_delayed_work_sync(&adapter->work);
3724 netif_device_detach(adapter->netdev);
3727 be_setup_wol(adapter, true);
3729 be_cmd_reset_function(adapter);
3731 pci_disable_device(pdev);
3734 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3735 pci_channel_state_t state)
3737 struct be_adapter *adapter = pci_get_drvdata(pdev);
3738 struct net_device *netdev = adapter->netdev;
3740 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3742 adapter->eeh_err = true;
3744 netif_device_detach(netdev);
3746 if (netif_running(netdev)) {
3753 if (state == pci_channel_io_perm_failure)
3754 return PCI_ERS_RESULT_DISCONNECT;
3756 pci_disable_device(pdev);
3758 return PCI_ERS_RESULT_NEED_RESET;
3761 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3763 struct be_adapter *adapter = pci_get_drvdata(pdev);
3766 dev_info(&adapter->pdev->dev, "EEH reset\n");
3767 adapter->eeh_err = false;
3768 adapter->ue_detected = false;
3769 adapter->fw_timeout = false;
3771 status = pci_enable_device(pdev);
3773 return PCI_ERS_RESULT_DISCONNECT;
3775 pci_set_master(pdev);
3776 pci_set_power_state(pdev, PCI_D0);
3777 pci_restore_state(pdev);
3779 /* Check if card is ok and fw is ready */
3780 status = be_cmd_POST(adapter);
3782 return PCI_ERS_RESULT_DISCONNECT;
3784 return PCI_ERS_RESULT_RECOVERED;
3787 static void be_eeh_resume(struct pci_dev *pdev)
3790 struct be_adapter *adapter = pci_get_drvdata(pdev);
3791 struct net_device *netdev = adapter->netdev;
3793 dev_info(&adapter->pdev->dev, "EEH resume\n");
3795 pci_save_state(pdev);
3797 /* tell fw we're ready to fire cmds */
3798 status = be_cmd_fw_init(adapter);
3802 status = be_setup(adapter);
3806 if (netif_running(netdev)) {
3807 status = be_open(netdev);
3811 netif_device_attach(netdev);
3814 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3817 static struct pci_error_handlers be_eeh_handlers = {
3818 .error_detected = be_eeh_err_detected,
3819 .slot_reset = be_eeh_reset,
3820 .resume = be_eeh_resume,
3823 static struct pci_driver be_driver = {
3825 .id_table = be_dev_ids,
3827 .remove = be_remove,
3828 .suspend = be_suspend,
3829 .resume = be_resume,
3830 .shutdown = be_shutdown,
3831 .err_handler = &be_eeh_handlers
3834 static int __init be_init_module(void)
3836 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3837 rx_frag_size != 2048) {
3838 printk(KERN_WARNING DRV_NAME
3839 " : Module param rx_frag_size must be 2048/4096/8192."
3841 rx_frag_size = 2048;
3844 return pci_register_driver(&be_driver);
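/* Usage note (illustrative; the module name is assumed from the driver's
 * DRV_NAME): loading with SR-IOV and a larger receive fragment, using the
 * parameters defined at the top of this file:
 *
 *   modprobe be2net num_vfs=4 rx_frag_size=4096
 *
 * An rx_frag_size other than 2048/4096/8192 falls back to 2048, as
 * enforced above.
 */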
3846 module_init(be_init_module);
3848 static void __exit be_exit_module(void)
3850 pci_unregister_driver(&be_driver);
3852 module_exit(be_exit_module);