2 * Copyright (C) 2005 - 2011 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
22 #include <asm/div64.h>
24 MODULE_VERSION(DRV_VER);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, S_IRUGO);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
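/*
 * Loading example (illustrative; "be2net" is the usual module name for this
 * driver):
 *   modprobe be2net num_vfs=4 rx_frag_size=4096
 * rx_frag_size controls how each large RX page is carved into receive
 * buffers; see be_post_rx_frags() and the big_page_size calculation below.
 */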
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
48 MODULE_DEVICE_TABLE(pci, be_dev_ids);
49 /* UE Status Low CSR */
50 static const char * const ue_status_low_desc[] = {
84 /* UE Status High CSR */
85 static const char * const ue_status_hi_desc[] = {
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
127 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129 struct be_dma_mem *mem = &q->dma_mem;
131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
137 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
140 struct be_dma_mem *mem = &q->dma_mem;
142 memset(q, 0, sizeof(*q));
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
150 memset(mem->va, 0, mem->size);
154 static void be_intr_set(struct be_adapter *adapter, bool enable)
158 if (adapter->eeh_error)
161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165 if (!enabled && enable)
166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else if (enabled && !enable)
168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
176 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
186 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
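/*
 * The *_notify() helpers here build a single doorbell word: the ring id is
 * placed in the low bits and the count of newly posted (or popped) entries
 * is shifted into the field the hardware expects, then the word is written
 * to the per-function doorbell BAR (adapter->db).
 */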
196 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
197 bool arm, bool clear_int, u16 num_popped)
200 val |= qid & DB_EQ_RING_ID_MASK;
201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
204 if (adapter->eeh_error)
208 val |= 1 << DB_EQ_REARM_SHIFT;
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
216 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
219 val |= qid & DB_CQ_RING_ID_MASK;
220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
223 if (adapter->eeh_error)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
232 static int be_mac_addr_set(struct net_device *netdev, void *p)
234 struct be_adapter *adapter = netdev_priv(netdev);
235 struct sockaddr *addr = p;
237 u8 current_mac[ETH_ALEN];
238 u32 pmac_id = adapter->pmac_id[0];
240 if (!is_valid_ether_addr(addr->sa_data))
241 return -EADDRNOTAVAIL;
243 status = be_cmd_mac_addr_query(adapter, current_mac,
244 MAC_ADDRESS_TYPE_NETWORK, false,
245 adapter->if_handle, 0);
249 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
251 adapter->if_handle, &adapter->pmac_id[0], 0);
255 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
257 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
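/*
 * Note on the ordering above: the new MAC is programmed with pmac_add before
 * the old pmac_id is deleted, so the interface is never left without a valid
 * unicast filter while the address changes.
 */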
260 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
264 static void populate_be2_stats(struct be_adapter *adapter)
266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
269 struct be_port_rxf_stats_v0 *port_stats =
270 &rxf_stats->port[adapter->port_num];
271 struct be_drv_stats *drvs = &adapter->drv_stats;
273 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
274 drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 drvs->rx_control_frames = port_stats->rx_control_frames;
277 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
288 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
289 drvs->rx_dropped_header_too_small =
290 port_stats->rx_dropped_header_too_small;
291 drvs->rx_address_mismatch_drops =
292 port_stats->rx_address_mismatch_drops +
293 port_stats->rx_vlan_mismatch_drops;
294 drvs->rx_alignment_symbol_errors =
295 port_stats->rx_alignment_symbol_errors;
297 drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 drvs->tx_controlframes = port_stats->tx_controlframes;
300 if (adapter->port_num)
301 drvs->jabber_events = rxf_stats->port1_jabber_events;
303 drvs->jabber_events = rxf_stats->port0_jabber_events;
304 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
305 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
306 drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
308 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
313 static void populate_be3_stats(struct be_adapter *adapter)
315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
318 struct be_port_rxf_stats_v1 *port_stats =
319 &rxf_stats->port[adapter->port_num];
320 struct be_drv_stats *drvs = &adapter->drv_stats;
322 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
323 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
325 drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 drvs->rx_control_frames = port_stats->rx_control_frames;
328 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 drvs->rx_dropped_header_too_small =
339 port_stats->rx_dropped_header_too_small;
340 drvs->rx_input_fifo_overflow_drop =
341 port_stats->rx_input_fifo_overflow_drop;
342 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
343 drvs->rx_alignment_symbol_errors =
344 port_stats->rx_alignment_symbol_errors;
345 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
346 drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 drvs->tx_controlframes = port_stats->tx_controlframes;
348 drvs->jabber_events = port_stats->jabber_events;
349 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
350 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
358 static void populate_lancer_stats(struct be_adapter *adapter)
361 struct be_drv_stats *drvs = &adapter->drv_stats;
362 struct lancer_pport_stats *pport_stats =
363 pport_stats_from_cmd(adapter);
365 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
369 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
370 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
371 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 drvs->rx_dropped_tcp_length =
376 pport_stats->rx_dropped_invalid_tcp_length;
377 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 drvs->rx_dropped_header_too_small =
381 pport_stats->rx_dropped_header_too_small;
382 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 drvs->rx_address_mismatch_drops =
384 pport_stats->rx_address_mismatch_drops +
385 pport_stats->rx_vlan_mismatch_drops;
386 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
387 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
388 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
389 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
390 drvs->jabber_events = pport_stats->rx_jabbers;
391 drvs->forwarded_packets = pport_stats->num_forwards_lo;
392 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
393 drvs->rx_drops_too_many_frags =
394 pport_stats->rx_drops_too_many_frags_lo;
397 static void accumulate_16bit_val(u32 *acc, u16 val)
399 #define lo(x) (x & 0xFFFF)
400 #define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
406 ACCESS_ONCE(*acc) = newacc;
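/*
 * Worked example with hypothetical values: if *acc is 0x0001fff0 (one prior
 * wrap recorded in the high half, last HW reading 0xfff0 in the low half)
 * and the new reading val is 0x0005, then val < lo(*acc) flags a wrap and
 * newacc starts as hi(*acc) + 0x0005; the (elided) wrap handling is expected
 * to add 0x10000, giving 0x00020005. The 32-bit accumulator thus keeps
 * counting past the 16-bit HW limit of 65535.
 */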
409 void be_parse_stats(struct be_adapter *adapter)
411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
419 populate_be3_stats(adapter);
421 populate_be2_stats(adapter);
424 if (lancer_chip(adapter))
427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
428 for_all_rx_queues(adapter, rxo, i) {
429 /* The erx HW counter below can actually wrap around after
430 * 65535; the driver accumulates it into a 32-bit value
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
439 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
440 struct rtnl_link_stats64 *stats)
442 struct be_adapter *adapter = netdev_priv(netdev);
443 struct be_drv_stats *drvs = &adapter->drv_stats;
444 struct be_rx_obj *rxo;
445 struct be_tx_obj *txo;
450 for_all_rx_queues(adapter, rxo, i) {
451 const struct be_rx_stats *rx_stats = rx_stats(rxo);
453 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
454 pkts = rx_stats(rxo)->rx_pkts;
455 bytes = rx_stats(rxo)->rx_bytes;
456 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
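/*
 * The fetch_begin/fetch_retry pair above is the standard u64_stats_sync
 * snapshot: on 32-bit hosts the 64-bit packet/byte counters cannot be read
 * atomically, so the read is retried if a writer updated them in between;
 * on 64-bit hosts it compiles down to plain loads.
 */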
457 stats->rx_packets += pkts;
458 stats->rx_bytes += bytes;
459 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
460 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
461 rx_stats(rxo)->rx_drops_no_frags;
464 for_all_tx_queues(adapter, txo, i) {
465 const struct be_tx_stats *tx_stats = tx_stats(txo);
467 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
468 pkts = tx_stats(txo)->tx_pkts;
469 bytes = tx_stats(txo)->tx_bytes;
470 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
471 stats->tx_packets += pkts;
472 stats->tx_bytes += bytes;
475 /* bad pkts received */
476 stats->rx_errors = drvs->rx_crc_errors +
477 drvs->rx_alignment_symbol_errors +
478 drvs->rx_in_range_errors +
479 drvs->rx_out_range_errors +
480 drvs->rx_frame_too_long +
481 drvs->rx_dropped_too_small +
482 drvs->rx_dropped_too_short +
483 drvs->rx_dropped_header_too_small +
484 drvs->rx_dropped_tcp_length +
485 drvs->rx_dropped_runt;
487 /* detailed rx errors */
488 stats->rx_length_errors = drvs->rx_in_range_errors +
489 drvs->rx_out_range_errors +
490 drvs->rx_frame_too_long;
492 stats->rx_crc_errors = drvs->rx_crc_errors;
494 /* frame alignment errors */
495 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
497 /* receiver fifo overrun */
498 /* drops_no_pbuf is not per i/f, it's per BE card */
499 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
500 drvs->rx_input_fifo_overflow_drop +
501 drvs->rx_drops_no_pbuf;
505 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
507 struct net_device *netdev = adapter->netdev;
509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
510 netif_carrier_off(netdev);
511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
517 netif_carrier_off(netdev);
520 static void be_tx_stats_update(struct be_tx_obj *txo,
521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
523 struct be_tx_stats *stats = tx_stats(txo);
525 u64_stats_update_begin(&stats->sync);
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
532 u64_stats_update_end(&stats->sync);
535 /* Determine number of WRB entries needed to xmit data in an skb */
536 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
539 int cnt = (skb->len > skb->data_len);
541 cnt += skb_shinfo(skb)->nr_frags;
543 /* to account for hdr wrb */
545 if (lancer_chip(adapter) || !(cnt & 1)) {
548 /* add a dummy to make it an even num */
552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
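/*
 * Illustrative count: an skb with a linear head and 3 page frags needs
 * 1 (head) + 3 (frags) + 1 (hdr wrb) = 5 WRBs; on non-Lancer chips a dummy
 * WRB is then added to round the count up to an even 6.
 */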
556 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
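/*
 * For illustration: a bus address of 0x123456000 is split into
 * frag_pa_hi = 0x1 and frag_pa_lo = 0x23456000, and the fragment length is
 * masked to the width allowed by ETH_WRB_FRAG_LEN_MASK.
 */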
564 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
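/*
 * Illustrative case: a tag of 0x2005 (priority 1, VID 5) whose priority bit
 * is clear in vlan_prio_bmap keeps its VID bits but has the priority field
 * replaced with the adapter's recommended priority.
 */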
580 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
582 return vlan_tx_tag_present(skb) || adapter->pvid;
585 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
586 struct sk_buff *skb, u32 wrb_cnt, u32 len)
590 memset(hdr, 0, sizeof(*hdr));
592 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
594 if (skb_is_gso(skb)) {
595 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
596 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
597 hdr, skb_shinfo(skb)->gso_size);
598 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
599 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
600 if (lancer_chip(adapter) && adapter->sli_family ==
601 LANCER_A0_SLI_FAMILY) {
602 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
604 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
606 else if (is_udp_pkt(skb))
607 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
610 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
613 else if (is_udp_pkt(skb))
614 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
617 if (vlan_tx_tag_present(skb)) {
618 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
619 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
620 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
623 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
624 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
625 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
626 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
629 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
634 be_dws_le_to_cpu(wrb, sizeof(*wrb));
636 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
639 dma_unmap_single(dev, dma, wrb->frag_len,
642 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
646 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
647 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
651 struct device *dev = &adapter->pdev->dev;
652 struct sk_buff *first_skb = skb;
653 struct be_eth_wrb *wrb;
654 struct be_eth_hdr_wrb *hdr;
655 bool map_single = false;
658 hdr = queue_head_node(txq);
660 map_head = txq->head;
662 if (skb->len > skb->data_len) {
663 int len = skb_headlen(skb);
664 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
665 if (dma_mapping_error(dev, busaddr))
668 wrb = queue_head_node(txq);
669 wrb_fill(wrb, busaddr, len);
670 be_dws_cpu_to_le(wrb, sizeof(*wrb));
675 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
676 const struct skb_frag_struct *frag =
677 &skb_shinfo(skb)->frags[i];
678 busaddr = skb_frag_dma_map(dev, frag, 0,
679 skb_frag_size(frag), DMA_TO_DEVICE);
680 if (dma_mapping_error(dev, busaddr))
682 wrb = queue_head_node(txq);
683 wrb_fill(wrb, busaddr, skb_frag_size(frag));
684 be_dws_cpu_to_le(wrb, sizeof(*wrb));
686 copied += skb_frag_size(frag);
690 wrb = queue_head_node(txq);
692 be_dws_cpu_to_le(wrb, sizeof(*wrb));
696 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
697 be_dws_cpu_to_le(hdr, sizeof(*hdr));
701 txq->head = map_head;
703 wrb = queue_head_node(txq);
704 unmap_tx_frag(dev, wrb, map_single);
706 copied -= wrb->frag_len;
712 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
717 skb = skb_share_check(skb, GFP_ATOMIC);
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
730 static netdev_tx_t be_xmit(struct sk_buff *skb,
731 struct net_device *netdev)
733 struct be_adapter *adapter = netdev_priv(netdev);
734 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
735 struct be_queue_info *txq = &txo->q;
736 struct iphdr *ip = NULL;
737 u32 wrb_cnt = 0, copied = 0;
738 u32 start = txq->head, eth_hdr_len;
739 bool dummy_wrb, stopped = false;
741 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
742 VLAN_ETH_HLEN : ETH_HLEN;
744 /* HW has a bug which considers padding bytes as legal
745 * and modifies the IPv4 hdr's 'tot_len' field
747 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
749 ip = (struct iphdr *)ip_hdr(skb);
750 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
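/*
 * Illustrative effect of the workaround above: a short IPv4 frame padded up
 * to the 60-byte minimum is trimmed back to eth_hdr_len + tot_len, so the
 * HW never sees the pad bytes and does not rewrite tot_len.
 */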
753 /* HW has a bug wherein it will calculate CSUM for VLAN
754 * pkts even though it is disabled.
755 * Manually insert VLAN in pkt.
757 if (skb->ip_summed != CHECKSUM_PARTIAL &&
758 be_vlan_tag_chk(adapter, skb)) {
759 skb = be_insert_vlan_in_pkt(adapter, skb);
764 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
766 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
768 int gso_segs = skb_shinfo(skb)->gso_segs;
770 /* record the sent skb in the sent_skb table */
771 BUG_ON(txo->sent_skb_list[start]);
772 txo->sent_skb_list[start] = skb;
774 /* Ensure txq has space for the next skb; Else stop the queue
775 * *BEFORE* ringing the tx doorbell, so that we serialize the
776 * tx compls of the current transmit which'll wake up the queue
778 atomic_add(wrb_cnt, &txq->used);
779 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
781 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
785 be_txq_notify(adapter, txq->id, wrb_cnt);
787 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
790 dev_kfree_skb_any(skb);
796 static int be_change_mtu(struct net_device *netdev, int new_mtu)
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
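/*
 * With the usual definitions (BE_MIN_MTU 256 and BE_MAX_JUMBO_FRAME_SIZE
 * 9018 -- assumptions, see be.h), the accepted range works out to MTUs of
 * 256..9000 bytes once the 18 bytes of Ethernet header + FCS are subtracted.
 */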
815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
818 static int be_vid_config(struct be_adapter *adapter)
820 u16 vids[BE_NUM_VLANS_SUPPORTED];
824 /* No need to further configure vids if in promiscuous mode */
825 if (adapter->promiscuous)
828 if (adapter->vlans_added > adapter->max_vlans)
829 goto set_vlan_promisc;
831 /* Construct VLAN Table to give to HW */
832 for (i = 0; i < VLAN_N_VID; i++)
833 if (adapter->vlan_tag[i])
834 vids[num++] = cpu_to_le16(i);
836 status = be_cmd_vlan_config(adapter, adapter->if_handle,
839 /* Set to VLAN promisc mode as setting VLAN filter failed */
841 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
842 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
843 goto set_vlan_promisc;
849 status = be_cmd_vlan_config(adapter, adapter->if_handle,
854 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
856 struct be_adapter *adapter = netdev_priv(netdev);
859 if (!be_physfn(adapter)) {
864 adapter->vlan_tag[vid] = 1;
865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
866 status = be_vid_config(adapter);
869 adapter->vlans_added++;
871 adapter->vlan_tag[vid] = 0;
876 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
878 struct be_adapter *adapter = netdev_priv(netdev);
881 if (!be_physfn(adapter)) {
886 adapter->vlan_tag[vid] = 0;
887 if (adapter->vlans_added <= adapter->max_vlans)
888 status = be_vid_config(adapter);
891 adapter->vlans_added--;
893 adapter->vlan_tag[vid] = 1;
898 static void be_set_rx_mode(struct net_device *netdev)
900 struct be_adapter *adapter = netdev_priv(netdev);
903 if (netdev->flags & IFF_PROMISC) {
904 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
905 adapter->promiscuous = true;
909 /* BE was previously in promiscuous mode; disable it */
910 if (adapter->promiscuous) {
911 adapter->promiscuous = false;
912 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
914 if (adapter->vlans_added)
915 be_vid_config(adapter);
918 /* Enable multicast promisc if num configured exceeds what we support */
919 if (netdev->flags & IFF_ALLMULTI ||
920 netdev_mc_count(netdev) > BE_MAX_MC) {
921 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
925 if (netdev_uc_count(netdev) != adapter->uc_macs) {
926 struct netdev_hw_addr *ha;
927 int i = 1; /* First slot is claimed by the Primary MAC */
929 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
930 be_cmd_pmac_del(adapter, adapter->if_handle,
931 adapter->pmac_id[i], 0);
934 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
935 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
936 adapter->promiscuous = true;
940 netdev_for_each_uc_addr(ha, adapter->netdev) {
941 adapter->uc_macs++; /* First slot is for Primary MAC */
942 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
944 &adapter->pmac_id[adapter->uc_macs], 0);
948 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
950 /* Set to MCAST promisc mode if setting MULTICAST address fails */
952 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
953 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
954 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
960 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
962 struct be_adapter *adapter = netdev_priv(netdev);
963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
966 if (!sriov_enabled(adapter))
969 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
972 if (lancer_chip(adapter)) {
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 vf_cfg->pmac_id, vf + 1);
978 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 &vf_cfg->pmac_id, vf + 1);
983 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
986 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
991 static int be_get_vf_config(struct net_device *netdev, int vf,
992 struct ifla_vf_info *vi)
994 struct be_adapter *adapter = netdev_priv(netdev);
995 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
997 if (!sriov_enabled(adapter))
1000 if (vf >= adapter->num_vfs)
1004 vi->tx_rate = vf_cfg->tx_rate;
1005 vi->vlan = vf_cfg->vlan_tag;
1007 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1012 static int be_set_vf_vlan(struct net_device *netdev,
1013 int vf, u16 vlan, u8 qos)
1015 struct be_adapter *adapter = netdev_priv(netdev);
1018 if (!sriov_enabled(adapter))
1021 if (vf >= adapter->num_vfs || vlan > 4095)
1025 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1026 /* If this is a new value, program it; else skip. */
1027 adapter->vf_cfg[vf].vlan_tag = vlan;
1029 status = be_cmd_set_hsw_config(adapter, vlan,
1030 vf + 1, adapter->vf_cfg[vf].if_handle);
1033 /* Reset Transparent Vlan Tagging. */
1034 adapter->vf_cfg[vf].vlan_tag = 0;
1035 vlan = adapter->vf_cfg[vf].def_vid;
1036 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1037 adapter->vf_cfg[vf].if_handle);
1042 dev_info(&adapter->pdev->dev,
1043 "VLAN %d config on VF %d failed\n", vlan, vf);
1047 static int be_set_vf_tx_rate(struct net_device *netdev,
1050 struct be_adapter *adapter = netdev_priv(netdev);
1053 if (!sriov_enabled(adapter))
1056 if (vf >= adapter->num_vfs)
1059 if (rate < 100 || rate > 10000) {
1060 dev_err(&adapter->pdev->dev,
1061 "tx rate must be between 100 and 10000 Mbps\n");
1065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1068 dev_err(&adapter->pdev->dev,
1069 "tx rate %d on VF %d failed\n", rate, vf);
1071 adapter->vf_cfg[vf].tx_rate = rate;
1075 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1077 struct pci_dev *dev, *pdev = adapter->pdev;
1078 int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1084 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1085 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1089 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1090 if (dev->is_virtfn && dev->devfn == vf_fn &&
1091 dev->bus->number == pdev->bus->number) {
1093 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1096 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1098 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1101 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1103 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1104 ulong now = jiffies;
1105 ulong delta = now - stats->rx_jiffies;
1107 unsigned int start, eqd;
1109 if (!eqo->enable_aic) {
1114 if (eqo->idx >= adapter->num_rx_qs)
1117 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1119 /* Wrapped around */
1120 if (time_before(now, stats->rx_jiffies)) {
1121 stats->rx_jiffies = now;
1125 /* Update once a second */
1130 start = u64_stats_fetch_begin_bh(&stats->sync);
1131 pkts = stats->rx_pkts;
1132 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1134 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1135 stats->rx_pkts_prev = pkts;
1136 stats->rx_jiffies = now;
1137 eqd = (stats->rx_pps / 110000) << 3;
1138 eqd = min(eqd, eqo->max_eqd);
1139 eqd = max(eqd, eqo->min_eqd);
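/*
 * Rough illustration of the adaptive interrupt coalescing above: at about
 * 550,000 pkts/sec, eqd = (550000 / 110000) << 3 = 40, which is then clamped
 * to the EQ's [min_eqd, max_eqd] range before being programmed via
 * be_cmd_modify_eqd().
 */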
1144 if (eqd != eqo->cur_eqd) {
1145 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1150 static void be_rx_stats_update(struct be_rx_obj *rxo,
1151 struct be_rx_compl_info *rxcp)
1153 struct be_rx_stats *stats = rx_stats(rxo);
1155 u64_stats_update_begin(&stats->sync);
1157 stats->rx_bytes += rxcp->pkt_size;
1159 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1160 stats->rx_mcast_pkts++;
1162 stats->rx_compl_err++;
1163 u64_stats_update_end(&stats->sync);
1166 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1168 /* L4 checksum is not reliable for non TCP/UDP packets.
1169 * Also ignore ipcksm for ipv6 pkts */
1170 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1171 (rxcp->ip_csum || rxcp->ipv6);
1174 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1177 struct be_adapter *adapter = rxo->adapter;
1178 struct be_rx_page_info *rx_page_info;
1179 struct be_queue_info *rxq = &rxo->q;
1181 rx_page_info = &rxo->page_info_tbl[frag_idx];
1182 BUG_ON(!rx_page_info->page);
1184 if (rx_page_info->last_page_user) {
1185 dma_unmap_page(&adapter->pdev->dev,
1186 dma_unmap_addr(rx_page_info, bus),
1187 adapter->big_page_size, DMA_FROM_DEVICE);
1188 rx_page_info->last_page_user = false;
1191 atomic_dec(&rxq->used);
1192 return rx_page_info;
1195 /* Throw away the data in the Rx completion */
1196 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1197 struct be_rx_compl_info *rxcp)
1199 struct be_queue_info *rxq = &rxo->q;
1200 struct be_rx_page_info *page_info;
1201 u16 i, num_rcvd = rxcp->num_rcvd;
1203 for (i = 0; i < num_rcvd; i++) {
1204 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1205 put_page(page_info->page);
1206 memset(page_info, 0, sizeof(*page_info));
1207 index_inc(&rxcp->rxq_idx, rxq->len);
1212 * skb_fill_rx_data forms a complete skb for an ether frame
1213 * indicated by rxcp.
1215 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1216 struct be_rx_compl_info *rxcp)
1218 struct be_queue_info *rxq = &rxo->q;
1219 struct be_rx_page_info *page_info;
1221 u16 hdr_len, curr_frag_len, remaining;
1224 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1225 start = page_address(page_info->page) + page_info->page_offset;
1228 /* Copy data in the first descriptor of this completion */
1229 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1231 skb->len = curr_frag_len;
1232 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1233 memcpy(skb->data, start, curr_frag_len);
1234 /* Complete packet has now been moved to data */
1235 put_page(page_info->page);
1237 skb->tail += curr_frag_len;
1240 memcpy(skb->data, start, hdr_len);
1241 skb_shinfo(skb)->nr_frags = 1;
1242 skb_frag_set_page(skb, 0, page_info->page);
1243 skb_shinfo(skb)->frags[0].page_offset =
1244 page_info->page_offset + hdr_len;
1245 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1246 skb->data_len = curr_frag_len - hdr_len;
1247 skb->truesize += rx_frag_size;
1248 skb->tail += hdr_len;
1250 page_info->page = NULL;
1252 if (rxcp->pkt_size <= rx_frag_size) {
1253 BUG_ON(rxcp->num_rcvd != 1);
1257 /* More frags present for this completion */
1258 index_inc(&rxcp->rxq_idx, rxq->len);
1259 remaining = rxcp->pkt_size - curr_frag_len;
1260 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1261 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1262 curr_frag_len = min(remaining, rx_frag_size);
1264 /* Coalesce all frags from the same physical page in one slot */
1265 if (page_info->page_offset == 0) {
1268 skb_frag_set_page(skb, j, page_info->page);
1269 skb_shinfo(skb)->frags[j].page_offset =
1270 page_info->page_offset;
1271 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1272 skb_shinfo(skb)->nr_frags++;
1274 put_page(page_info->page);
1277 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1278 skb->len += curr_frag_len;
1279 skb->data_len += curr_frag_len;
1280 skb->truesize += rx_frag_size;
1281 remaining -= curr_frag_len;
1282 index_inc(&rxcp->rxq_idx, rxq->len);
1283 page_info->page = NULL;
1285 BUG_ON(j > MAX_SKB_FRAGS);
1288 /* Process the RX completion indicated by rxcp when GRO is disabled */
1289 static void be_rx_compl_process(struct be_rx_obj *rxo,
1290 struct be_rx_compl_info *rxcp)
1292 struct be_adapter *adapter = rxo->adapter;
1293 struct net_device *netdev = adapter->netdev;
1294 struct sk_buff *skb;
1296 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1297 if (unlikely(!skb)) {
1298 rx_stats(rxo)->rx_drops_no_skbs++;
1299 be_rx_compl_discard(rxo, rxcp);
1303 skb_fill_rx_data(rxo, skb, rxcp);
1305 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1306 skb->ip_summed = CHECKSUM_UNNECESSARY;
1308 skb_checksum_none_assert(skb);
1310 skb->protocol = eth_type_trans(skb, netdev);
1311 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1312 if (netdev->features & NETIF_F_RXHASH)
1313 skb->rxhash = rxcp->rss_hash;
1317 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1319 netif_receive_skb(skb);
1322 /* Process the RX completion indicated by rxcp when GRO is enabled */
1323 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1324 struct be_rx_compl_info *rxcp)
1326 struct be_adapter *adapter = rxo->adapter;
1327 struct be_rx_page_info *page_info;
1328 struct sk_buff *skb = NULL;
1329 struct be_queue_info *rxq = &rxo->q;
1330 u16 remaining, curr_frag_len;
1333 skb = napi_get_frags(napi);
1335 be_rx_compl_discard(rxo, rxcp);
1339 remaining = rxcp->pkt_size;
1340 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1341 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1343 curr_frag_len = min(remaining, rx_frag_size);
1345 /* Coalesce all frags from the same physical page in one slot */
1346 if (i == 0 || page_info->page_offset == 0) {
1347 /* First frag or Fresh page */
1349 skb_frag_set_page(skb, j, page_info->page);
1350 skb_shinfo(skb)->frags[j].page_offset =
1351 page_info->page_offset;
1352 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1354 put_page(page_info->page);
1356 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1357 skb->truesize += rx_frag_size;
1358 remaining -= curr_frag_len;
1359 index_inc(&rxcp->rxq_idx, rxq->len);
1360 memset(page_info, 0, sizeof(*page_info));
1362 BUG_ON(j > MAX_SKB_FRAGS);
1364 skb_shinfo(skb)->nr_frags = j + 1;
1365 skb->len = rxcp->pkt_size;
1366 skb->data_len = rxcp->pkt_size;
1367 skb->ip_summed = CHECKSUM_UNNECESSARY;
1368 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1369 if (adapter->netdev->features & NETIF_F_RXHASH)
1370 skb->rxhash = rxcp->rss_hash;
1373 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1375 napi_gro_frags(napi);
1378 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1379 struct be_rx_compl_info *rxcp)
1382 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1383 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1384 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1385 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1386 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1388 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1390 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1392 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1394 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1396 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1398 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1400 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1402 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1404 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1407 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1410 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1411 struct be_rx_compl_info *rxcp)
1414 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1415 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1416 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1417 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1418 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1420 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1422 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1424 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1426 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1428 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1430 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1432 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1434 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1436 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1439 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1442 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1444 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1445 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1446 struct be_adapter *adapter = rxo->adapter;
1448 /* For checking the valid bit it is Ok to use either definition as the
1449 * valid bit is at the same position in both v0 and v1 Rx compl */
1450 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1454 be_dws_le_to_cpu(compl, sizeof(*compl));
1456 if (adapter->be3_native)
1457 be_parse_rx_compl_v1(compl, rxcp);
1459 be_parse_rx_compl_v0(compl, rxcp);
1462 /* vlanf could be wrongly set in some cards.
1463 * ignore if vtm is not set */
1464 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1467 if (!lancer_chip(adapter))
1468 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1470 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1471 !adapter->vlan_tag[rxcp->vlan_tag])
1475 /* As the compl has been parsed, reset it; we won't touch it again */
1476 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1478 queue_tail_inc(&rxo->cq);
1482 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1484 u32 order = get_order(size);
1488 return alloc_pages(gfp, order);
1492 * Allocate a page, split it to fragments of size rx_frag_size and post as
1493 * receive buffers to BE
1495 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1497 struct be_adapter *adapter = rxo->adapter;
1498 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1499 struct be_queue_info *rxq = &rxo->q;
1500 struct page *pagep = NULL;
1501 struct be_eth_rx_d *rxd;
1502 u64 page_dmaaddr = 0, frag_dmaaddr;
1503 u32 posted, page_offset = 0;
1505 page_info = &rxo->page_info_tbl[rxq->head];
1506 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1508 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1509 if (unlikely(!pagep)) {
1510 rx_stats(rxo)->rx_post_fail++;
1513 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1514 0, adapter->big_page_size,
1516 page_info->page_offset = 0;
1519 page_info->page_offset = page_offset + rx_frag_size;
1521 page_offset = page_info->page_offset;
1522 page_info->page = pagep;
1523 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1524 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1526 rxd = queue_head_node(rxq);
1527 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1528 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1530 /* Any space left in the current big page for another frag? */
1531 if ((page_offset + rx_frag_size + rx_frag_size) >
1532 adapter->big_page_size) {
1534 page_info->last_page_user = true;
1537 prev_page_info = page_info;
1538 queue_head_inc(rxq);
1539 page_info = &rxo->page_info_tbl[rxq->head];
1542 prev_page_info->last_page_user = true;
1545 atomic_add(posted, &rxq->used);
1546 be_rxq_notify(adapter, rxq->id, posted);
1547 } else if (atomic_read(&rxq->used) == 0) {
1548 /* Let be_worker replenish when memory is available */
1549 rxo->rx_post_starved = true;
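/*
 * Sizing example (illustrative): with 4 KB pages and the default
 * rx_frag_size of 2048, big_page_size is 4096, so each allocated page yields
 * two RX fragments; only the fragment marked last_page_user triggers the
 * dma_unmap of the whole page once it is finally consumed.
 */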
1553 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1555 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1557 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1561 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1563 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1565 queue_tail_inc(tx_cq);
1569 static u16 be_tx_compl_process(struct be_adapter *adapter,
1570 struct be_tx_obj *txo, u16 last_index)
1572 struct be_queue_info *txq = &txo->q;
1573 struct be_eth_wrb *wrb;
1574 struct sk_buff **sent_skbs = txo->sent_skb_list;
1575 struct sk_buff *sent_skb;
1576 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1577 bool unmap_skb_hdr = true;
1579 sent_skb = sent_skbs[txq->tail];
1581 sent_skbs[txq->tail] = NULL;
1583 /* skip header wrb */
1584 queue_tail_inc(txq);
1587 cur_index = txq->tail;
1588 wrb = queue_tail_node(txq);
1589 unmap_tx_frag(&adapter->pdev->dev, wrb,
1590 (unmap_skb_hdr && skb_headlen(sent_skb)));
1591 unmap_skb_hdr = false;
1594 queue_tail_inc(txq);
1595 } while (cur_index != last_index);
1597 kfree_skb(sent_skb);
1601 /* Return the number of events in the event queue */
1602 static inline int events_get(struct be_eq_obj *eqo)
1604 struct be_eq_entry *eqe;
1608 eqe = queue_tail_node(&eqo->q);
1615 queue_tail_inc(&eqo->q);
1621 static int event_handle(struct be_eq_obj *eqo)
1624 int num = events_get(eqo);
1626 /* Deal with any spurious interrupts that come without events */
1630 if (num || msix_enabled(eqo->adapter))
1631 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1634 napi_schedule(&eqo->napi);
1639 /* Leaves the EQ in a disarmed state */
1640 static void be_eq_clean(struct be_eq_obj *eqo)
1642 int num = events_get(eqo);
1644 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1647 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1649 struct be_rx_page_info *page_info;
1650 struct be_queue_info *rxq = &rxo->q;
1651 struct be_queue_info *rx_cq = &rxo->cq;
1652 struct be_rx_compl_info *rxcp;
1655 /* First cleanup pending rx completions */
1656 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1657 be_rx_compl_discard(rxo, rxcp);
1658 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1661 /* Then free posted rx buffers that were not used */
1662 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
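/*
 * The tail computed above is the index of the oldest posted-but-unconsumed
 * buffer, e.g. (hypothetical) head = 10, len = 1024, used = 3 gives
 * tail = 7.
 */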
1663 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1664 page_info = get_rx_page_info(rxo, tail);
1665 put_page(page_info->page);
1666 memset(page_info, 0, sizeof(*page_info));
1668 BUG_ON(atomic_read(&rxq->used));
1669 rxq->tail = rxq->head = 0;
1672 static void be_tx_compl_clean(struct be_adapter *adapter)
1674 struct be_tx_obj *txo;
1675 struct be_queue_info *txq;
1676 struct be_eth_tx_compl *txcp;
1677 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1678 struct sk_buff *sent_skb;
1680 int i, pending_txqs;
1682 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1684 pending_txqs = adapter->num_tx_qs;
1686 for_all_tx_queues(adapter, txo, i) {
1688 while ((txcp = be_tx_compl_get(&txo->cq))) {
1690 AMAP_GET_BITS(struct amap_eth_tx_compl,
1692 num_wrbs += be_tx_compl_process(adapter, txo,
1697 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1698 atomic_sub(num_wrbs, &txq->used);
1702 if (atomic_read(&txq->used) == 0)
1706 if (pending_txqs == 0 || ++timeo > 200)
1712 for_all_tx_queues(adapter, txo, i) {
1714 if (atomic_read(&txq->used))
1715 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1716 atomic_read(&txq->used));
1718 /* free posted tx for which compls will never arrive */
1719 while (atomic_read(&txq->used)) {
1720 sent_skb = txo->sent_skb_list[txq->tail];
1721 end_idx = txq->tail;
1722 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1724 index_adv(&end_idx, num_wrbs - 1, txq->len);
1725 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1726 atomic_sub(num_wrbs, &txq->used);
1731 static void be_evt_queues_destroy(struct be_adapter *adapter)
1733 struct be_eq_obj *eqo;
1736 for_all_evt_queues(adapter, eqo, i) {
1737 if (eqo->q.created) {
1739 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1741 be_queue_free(adapter, &eqo->q);
1745 static int be_evt_queues_create(struct be_adapter *adapter)
1747 struct be_queue_info *eq;
1748 struct be_eq_obj *eqo;
1751 adapter->num_evt_qs = num_irqs(adapter);
1753 for_all_evt_queues(adapter, eqo, i) {
1754 eqo->adapter = adapter;
1755 eqo->tx_budget = BE_TX_BUDGET;
1757 eqo->max_eqd = BE_MAX_EQD;
1758 eqo->enable_aic = true;
1761 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1762 sizeof(struct be_eq_entry));
1766 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1773 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1775 struct be_queue_info *q;
1777 q = &adapter->mcc_obj.q;
1779 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1780 be_queue_free(adapter, q);
1782 q = &adapter->mcc_obj.cq;
1784 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1785 be_queue_free(adapter, q);
1788 /* Must be called only after TX qs are created as MCC shares TX EQ */
1789 static int be_mcc_queues_create(struct be_adapter *adapter)
1791 struct be_queue_info *q, *cq;
1793 cq = &adapter->mcc_obj.cq;
1794 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1795 sizeof(struct be_mcc_compl)))
1798 /* Use the default EQ for MCC completions */
1799 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1802 q = &adapter->mcc_obj.q;
1803 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1804 goto mcc_cq_destroy;
1806 if (be_cmd_mccq_create(adapter, q, cq))
1812 be_queue_free(adapter, q);
1814 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1816 be_queue_free(adapter, cq);
1821 static void be_tx_queues_destroy(struct be_adapter *adapter)
1823 struct be_queue_info *q;
1824 struct be_tx_obj *txo;
1827 for_all_tx_queues(adapter, txo, i) {
1830 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1831 be_queue_free(adapter, q);
1835 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1836 be_queue_free(adapter, q);
1840 static int be_num_txqs_want(struct be_adapter *adapter)
1842 if (sriov_want(adapter) || be_is_mc(adapter) ||
1843 lancer_chip(adapter) || !be_physfn(adapter) ||
1844 adapter->generation == BE_GEN2)
1850 static int be_tx_cqs_create(struct be_adapter *adapter)
1852 struct be_queue_info *cq, *eq;
1854 struct be_tx_obj *txo;
1857 adapter->num_tx_qs = be_num_txqs_want(adapter);
1858 if (adapter->num_tx_qs != MAX_TX_QS) {
1860 netif_set_real_num_tx_queues(adapter->netdev,
1861 adapter->num_tx_qs);
1865 for_all_tx_queues(adapter, txo, i) {
1867 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1868 sizeof(struct be_eth_tx_compl));
1872 /* If num_evt_qs is less than num_tx_qs, then more than
1873 * one txq shares an eq
1875 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1876 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1883 static int be_tx_qs_create(struct be_adapter *adapter)
1885 struct be_tx_obj *txo;
1888 for_all_tx_queues(adapter, txo, i) {
1889 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1890 sizeof(struct be_eth_wrb));
1894 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1902 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1904 struct be_queue_info *q;
1905 struct be_rx_obj *rxo;
1908 for_all_rx_queues(adapter, rxo, i) {
1911 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1912 be_queue_free(adapter, q);
1916 static int be_rx_cqs_create(struct be_adapter *adapter)
1918 struct be_queue_info *eq, *cq;
1919 struct be_rx_obj *rxo;
1922 /* We'll create as many RSS rings as there are irqs.
1923 * But when there's only one irq there's no use creating RSS rings
1925 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1926 num_irqs(adapter) + 1 : 1;
1927 if (adapter->num_rx_qs != MAX_RX_QS) {
1929 netif_set_real_num_rx_queues(adapter->netdev,
1930 adapter->num_rx_qs);
1934 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1935 for_all_rx_queues(adapter, rxo, i) {
1936 rxo->adapter = adapter;
1938 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1939 sizeof(struct be_eth_rx_compl));
1943 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1944 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1949 if (adapter->num_rx_qs != MAX_RX_QS)
1950 dev_info(&adapter->pdev->dev,
1951 "Created only %d receive queues\n", adapter->num_rx_qs);
1956 static irqreturn_t be_intx(int irq, void *dev)
1958 struct be_adapter *adapter = dev;
1961 /* With INTx only one EQ is used */
1962 num_evts = event_handle(&adapter->eq_obj[0]);
1969 static irqreturn_t be_msix(int irq, void *dev)
1971 struct be_eq_obj *eqo = dev;
1977 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1979 return (rxcp->tcpf && !rxcp->err) ? true : false;
1982 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1985 struct be_adapter *adapter = rxo->adapter;
1986 struct be_queue_info *rx_cq = &rxo->cq;
1987 struct be_rx_compl_info *rxcp;
1990 for (work_done = 0; work_done < budget; work_done++) {
1991 rxcp = be_rx_compl_get(rxo);
1995 /* Is it a flush compl that has no data */
1996 if (unlikely(rxcp->num_rcvd == 0))
1999 /* Discard compl with partial DMA Lancer B0 */
2000 if (unlikely(!rxcp->pkt_size)) {
2001 be_rx_compl_discard(rxo, rxcp);
2005 /* On BE drop pkts that arrive due to imperfect filtering in
2006 * promiscuous mode on some SKUs
2008 if (unlikely(rxcp->port != adapter->port_num &&
2009 !lancer_chip(adapter))) {
2010 be_rx_compl_discard(rxo, rxcp);
2015 be_rx_compl_process_gro(rxo, napi, rxcp);
2017 be_rx_compl_process(rxo, rxcp);
2019 be_rx_stats_update(rxo, rxcp);
2023 be_cq_notify(adapter, rx_cq->id, true, work_done);
2025 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2026 be_post_rx_frags(rxo, GFP_ATOMIC);
2032 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2033 int budget, int idx)
2035 struct be_eth_tx_compl *txcp;
2036 int num_wrbs = 0, work_done;
2038 for (work_done = 0; work_done < budget; work_done++) {
2039 txcp = be_tx_compl_get(&txo->cq);
2042 num_wrbs += be_tx_compl_process(adapter, txo,
2043 AMAP_GET_BITS(struct amap_eth_tx_compl,
2048 be_cq_notify(adapter, txo->cq.id, true, work_done);
2049 atomic_sub(num_wrbs, &txo->q.used);
2051 /* As Tx wrbs have been freed up, wake up netdev queue
2052 * if it was stopped due to lack of tx wrbs. */
2053 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2054 atomic_read(&txo->q.used) < txo->q.len / 2) {
2055 netif_wake_subqueue(adapter->netdev, idx);
2058 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2059 tx_stats(txo)->tx_compl += work_done;
2060 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2062 return (work_done < budget); /* Done */
2065 int be_poll(struct napi_struct *napi, int budget)
2067 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2068 struct be_adapter *adapter = eqo->adapter;
2069 int max_work = 0, work, i;
2072 /* Process all TXQs serviced by this EQ */
2073 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2074 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2080 /* This loop will iterate twice for EQ0 in which
2081 * completions of the last RXQ (default one) are also processed.
2082 * For other EQs the loop iterates only once.
2084 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2085 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2086 max_work = max(work, max_work);
2089 if (is_mcc_eqo(eqo))
2090 be_process_mcc(adapter);
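/*
 * Illustration of the striding above: with 4 event queues and 5 RX queues,
 * EQ0 services RXQs 0 and 4 (hence "iterates twice"), while EQ1..EQ3 each
 * service exactly one RXQ; TXQs are spread across the EQs the same way.
 */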
2092 if (max_work < budget) {
2093 napi_complete(napi);
2094 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2096 /* As we'll continue in polling mode, count and clear events */
2097 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2102 void be_detect_error(struct be_adapter *adapter)
2104 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2105 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2108 if (be_crit_error(adapter))
2111 if (lancer_chip(adapter)) {
2112 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2113 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2114 sliport_err1 = ioread32(adapter->db +
2115 SLIPORT_ERROR1_OFFSET);
2116 sliport_err2 = ioread32(adapter->db +
2117 SLIPORT_ERROR2_OFFSET);
2120 pci_read_config_dword(adapter->pdev,
2121 PCICFG_UE_STATUS_LOW, &ue_lo);
2122 pci_read_config_dword(adapter->pdev,
2123 PCICFG_UE_STATUS_HIGH, &ue_hi);
2124 pci_read_config_dword(adapter->pdev,
2125 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2126 pci_read_config_dword(adapter->pdev,
2127 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2129 ue_lo = (ue_lo & ~ue_lo_mask);
2130 ue_hi = (ue_hi & ~ue_hi_mask);
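/*
 * Any bit still set after the masking above denotes an unrecoverable error;
 * its bit position indexes the ue_status_low_desc[] / ue_status_hi_desc[]
 * name tables used in the messages below.
 */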
2133 if (ue_lo || ue_hi ||
2134 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2135 adapter->hw_error = true;
2136 dev_err(&adapter->pdev->dev,
2137 "Error detected in the card\n");
2140 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2141 dev_err(&adapter->pdev->dev,
2142 "ERR: sliport status 0x%x\n", sliport_status);
2143 dev_err(&adapter->pdev->dev,
2144 "ERR: sliport error1 0x%x\n", sliport_err1);
2145 dev_err(&adapter->pdev->dev,
2146 "ERR: sliport error2 0x%x\n", sliport_err2);
2150 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2152 dev_err(&adapter->pdev->dev,
2153 "UE: %s bit set\n", ue_status_low_desc[i]);
2158 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2160 dev_err(&adapter->pdev->dev,
2161 "UE: %s bit set\n", ue_status_hi_desc[i]);
2167 static void be_msix_disable(struct be_adapter *adapter)
2169 if (msix_enabled(adapter)) {
2170 pci_disable_msix(adapter->pdev);
2171 adapter->num_msix_vec = 0;
2175 static uint be_num_rss_want(struct be_adapter *adapter)
2178 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2179 !sriov_want(adapter) && be_physfn(adapter) &&
2180 !be_is_mc(adapter)) {
2181 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2182 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2187 static void be_msix_enable(struct be_adapter *adapter)
2189 #define BE_MIN_MSIX_VECTORS 1
2190 int i, status, num_vec, num_roce_vec = 0;
2192 /* If RSS queues are not used, need a vec for default RX Q */
2193 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2194 if (be_roce_supported(adapter)) {
2195 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2196 (num_online_cpus() + 1));
2197 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2198 num_vec += num_roce_vec;
2199 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2201 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2203 for (i = 0; i < num_vec; i++)
2204 adapter->msix_entries[i].entry = i;
2206 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
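/*
 * Note: this (older) pci_enable_msix() interface returns 0 on success or,
 * on failure, a positive count of vectors that could still be allocated;
 * the retry below re-requests that smaller count so the driver can come up
 * with reduced parallelism instead of falling back to INTx.
 */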
2209 } else if (status >= BE_MIN_MSIX_VECTORS) {
2211 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2217 if (be_roce_supported(adapter)) {
2218 if (num_vec > num_roce_vec) {
2219 adapter->num_msix_vec = num_vec - num_roce_vec;
2220 adapter->num_msix_roce_vec =
2221 num_vec - adapter->num_msix_vec;
2223 adapter->num_msix_vec = num_vec;
2224 adapter->num_msix_roce_vec = 0;
2227 adapter->num_msix_vec = num_vec;
2231 static inline int be_msix_vec_get(struct be_adapter *adapter,
2232 struct be_eq_obj *eqo)
2234 return adapter->msix_entries[eqo->idx].vector;
2237 static int be_msix_register(struct be_adapter *adapter)
2239 struct net_device *netdev = adapter->netdev;
2240 struct be_eq_obj *eqo;
2243 for_all_evt_queues(adapter, eqo, i) {
2244 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2245 vec = be_msix_vec_get(adapter, eqo);
2246 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2253 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2254 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2255 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2257 be_msix_disable(adapter);
2261 static int be_irq_register(struct be_adapter *adapter)
2263 struct net_device *netdev = adapter->netdev;
2266 if (msix_enabled(adapter)) {
2267 status = be_msix_register(adapter);
2270 /* INTx is not supported for VF */
2271 if (!be_physfn(adapter))
2276 netdev->irq = adapter->pdev->irq;
2277 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2280 dev_err(&adapter->pdev->dev,
2281 "INTx request IRQ failed - err %d\n", status);
2285 adapter->isr_registered = true;
2289 static void be_irq_unregister(struct be_adapter *adapter)
2291 struct net_device *netdev = adapter->netdev;
2292 struct be_eq_obj *eqo;
2295 if (!adapter->isr_registered)
2299 if (!msix_enabled(adapter)) {
2300 free_irq(netdev->irq, adapter);
2305 for_all_evt_queues(adapter, eqo, i)
2306 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2309 adapter->isr_registered = false;
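/* Destroy all RX queues: issue the RXQ destroy command, drain the
 * completion queue and free the queue memory for each ring.
 */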
2312 static void be_rx_qs_destroy(struct be_adapter *adapter)
2314 struct be_queue_info *q;
2315 struct be_rx_obj *rxo;
2318 for_all_rx_queues(adapter, rxo, i) {
2321 be_cmd_rxq_destroy(adapter, q);
2322 /* After the rxq is invalidated, wait for a grace time
2323 * of 1ms for all dma to end and the flush compl to arrive */
2327 be_rx_cq_clean(rxo);
2329 be_queue_free(adapter, q);
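/* ndo_stop handler: disable async MCC and interrupts, stop NAPI,
 * unregister IRQs, drain pending TX completions and destroy the RX queues.
 */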
2333 static int be_close(struct net_device *netdev)
2335 struct be_adapter *adapter = netdev_priv(netdev);
2336 struct be_eq_obj *eqo;
2339 be_roce_dev_close(adapter);
2341 be_async_mcc_disable(adapter);
2343 if (!lancer_chip(adapter))
2344 be_intr_set(adapter, false);
2346 for_all_evt_queues(adapter, eqo, i) {
2347 napi_disable(&eqo->napi);
2348 if (msix_enabled(adapter))
2349 synchronize_irq(be_msix_vec_get(adapter, eqo));
2351 synchronize_irq(netdev->irq);
2355 be_irq_unregister(adapter);
2357 /* Wait for all pending tx completions to arrive so that
2358 * all tx skbs are freed. */
2360 be_tx_compl_clean(adapter);
2362 be_rx_qs_destroy(adapter);
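/* Allocate the RX rings, create the default RXQ first (as the FW expects),
 * then the RSS queues, program the RSS indirection table and post the
 * initial receive buffers.
 */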
2366 static int be_rx_qs_create(struct be_adapter *adapter)
2368 struct be_rx_obj *rxo;
2372 for_all_rx_queues(adapter, rxo, i) {
2373 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2374 sizeof(struct be_eth_rx_d));
2379 /* The FW would like the default RXQ to be created first */
2380 rxo = default_rxo(adapter);
2381 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2382 adapter->if_handle, false, &rxo->rss_id);
2386 for_all_rss_queues(adapter, rxo, i) {
2387 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2388 rx_frag_size, adapter->if_handle,
2389 true, &rxo->rss_id);
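/* Build the 128-entry RSS indirection table by striping the RSS queue
 * ids across it in round-robin order, then program it in the FW.
 */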
2394 if (be_multi_rxq(adapter)) {
2395 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2396 for_all_rss_queues(adapter, rxo, i) {
2399 rsstable[j + i] = rxo->rss_id;
2402 rc = be_cmd_rss_config(adapter, rsstable, 128);
2407 /* First time posting */
2408 for_all_rx_queues(adapter, rxo, i)
2409 be_post_rx_frags(rxo, GFP_KERNEL);
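/* ndo_open handler: create the RX queues, register IRQs, arm the RX/TX
 * CQs and EQs, enable NAPI and async MCC, and report the link status.
 */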
2413 static int be_open(struct net_device *netdev)
2415 struct be_adapter *adapter = netdev_priv(netdev);
2416 struct be_eq_obj *eqo;
2417 struct be_rx_obj *rxo;
2418 struct be_tx_obj *txo;
2422 status = be_rx_qs_create(adapter);
2426 be_irq_register(adapter);
2428 if (!lancer_chip(adapter))
2429 be_intr_set(adapter, true);
2431 for_all_rx_queues(adapter, rxo, i)
2432 be_cq_notify(adapter, rxo->cq.id, true, 0);
2434 for_all_tx_queues(adapter, txo, i)
2435 be_cq_notify(adapter, txo->cq.id, true, 0);
2437 be_async_mcc_enable(adapter);
2439 for_all_evt_queues(adapter, eqo, i) {
2440 napi_enable(&eqo->napi);
2441 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2444 status = be_cmd_link_status_query(adapter, NULL, NULL,
2447 be_link_status_update(adapter, link_status);
2449 be_roce_dev_open(adapter);
2452 be_close(adapter->netdev);
2456 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2458 struct be_dma_mem cmd;
2462 memset(mac, 0, ETH_ALEN);
2464 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2465 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2469 memset(cmd.va, 0, cmd.size);
2472 status = pci_write_config_dword(adapter->pdev,
2473 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2475 dev_err(&adapter->pdev->dev,
2476 "Could not enable Wake-on-lan\n");
2477 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2481 status = be_cmd_enable_magic_wol(adapter,
2482 adapter->netdev->dev_addr, &cmd);
2483 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2484 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2486 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2487 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2488 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2491 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2496 * Generate a seed MAC address from the PF MAC Address using jhash.
2497 * MAC Address for VFs are assigned incrementally starting from the seed.
2498 * These addresses are programmed in the ASIC by the PF and the VF driver
2499 * queries for the MAC address during its probe.
2501 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2506 struct be_vf_cfg *vf_cfg;
2508 be_vf_eth_addr_generate(adapter, mac);
2510 for_all_vfs(adapter, vf_cfg, vf) {
2511 if (lancer_chip(adapter)) {
2512 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2514 status = be_cmd_pmac_add(adapter, mac,
2516 &vf_cfg->pmac_id, vf + 1);
2520 dev_err(&adapter->pdev->dev,
2521 "Mac address assignment failed for VF %d\n", vf);
2523 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
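/* Tear down SR-IOV: delete each VF's MAC and interface, disable SR-IOV
 * and free the per-VF configuration (VFs still assigned to VMs are
 * flagged with a warning).
 */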
2530 static void be_vf_clear(struct be_adapter *adapter)
2532 struct be_vf_cfg *vf_cfg;
2535 if (be_find_vfs(adapter, ASSIGNED)) {
2536 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2540 for_all_vfs(adapter, vf_cfg, vf) {
2541 if (lancer_chip(adapter))
2542 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2544 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2545 vf_cfg->pmac_id, vf + 1);
2547 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2549 pci_disable_sriov(adapter->pdev);
2551 kfree(adapter->vf_cfg);
2552 adapter->num_vfs = 0;
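/* Undo be_setup(): stop the worker, clear the VFs, delete the programmed
 * unicast MACs, destroy the interface and all queues, and disable MSI-X.
 */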
2555 static int be_clear(struct be_adapter *adapter)
2559 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2560 cancel_delayed_work_sync(&adapter->work);
2561 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2564 if (sriov_enabled(adapter))
2565 be_vf_clear(adapter);
2567 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2568 be_cmd_pmac_del(adapter, adapter->if_handle,
2569 adapter->pmac_id[i], 0);
2571 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2573 be_mcc_queues_destroy(adapter);
2574 be_rx_cqs_destroy(adapter);
2575 be_tx_queues_destroy(adapter);
2576 be_evt_queues_destroy(adapter);
2578 be_msix_disable(adapter);
2582 static int be_vf_setup_init(struct be_adapter *adapter)
2584 struct be_vf_cfg *vf_cfg;
2587 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2589 if (!adapter->vf_cfg)
2592 for_all_vfs(adapter, vf_cfg, vf) {
2593 vf_cfg->if_handle = -1;
2594 vf_cfg->pmac_id = -1;
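/* Enable SR-IOV (honouring the num_vfs module parameter and the device
 * limit), create an interface per VF, assign MAC addresses and record
 * each VF's link speed and default VLAN.
 */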
2599 static int be_vf_setup(struct be_adapter *adapter)
2601 struct be_vf_cfg *vf_cfg;
2602 struct device *dev = &adapter->pdev->dev;
2603 u32 cap_flags, en_flags, vf;
2604 u16 def_vlan, lnk_speed;
2605 int status, enabled_vfs;
2607 enabled_vfs = be_find_vfs(adapter, ENABLED);
2609 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2610 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2614 if (num_vfs > adapter->dev_num_vfs) {
2615 dev_warn(dev, "Device supports %d VFs and not %d\n",
2616 adapter->dev_num_vfs, num_vfs);
2617 num_vfs = adapter->dev_num_vfs;
2620 status = pci_enable_sriov(adapter->pdev, num_vfs);
2622 adapter->num_vfs = num_vfs;
2624 /* Platform doesn't support SRIOV though device supports it */
2625 dev_warn(dev, "SRIOV enable failed\n");
2629 status = be_vf_setup_init(adapter);
2633 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2634 BE_IF_FLAGS_MULTICAST;
2635 for_all_vfs(adapter, vf_cfg, vf) {
2636 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2637 &vf_cfg->if_handle, vf + 1);
2643 status = be_vf_eth_addr_config(adapter);
2648 for_all_vfs(adapter, vf_cfg, vf) {
2649 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2653 vf_cfg->tx_rate = lnk_speed * 10;
2655 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2656 vf + 1, vf_cfg->if_handle);
2659 vf_cfg->def_vid = def_vlan;
2666 static void be_setup_init(struct be_adapter *adapter)
2668 adapter->vlan_prio_bmap = 0xff;
2669 adapter->phy.link_speed = -1;
2670 adapter->if_handle = -1;
2671 adapter->be3_native = false;
2672 adapter->promiscuous = false;
2673 adapter->eq_next_idx = 0;
2674 adapter->phy.forced_port_speed = -1;
2677 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2678 bool *active_mac, u32 *pmac_id)
2682 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2683 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2684 if (!lancer_chip(adapter) && !be_physfn(adapter))
2687 *active_mac = false;
2692 if (lancer_chip(adapter)) {
2693 status = be_cmd_get_mac_from_list(adapter, mac,
2694 active_mac, pmac_id, 0);
2696 status = be_cmd_mac_addr_query(adapter, mac,
2697 MAC_ADDRESS_TYPE_NETWORK,
2701 } else if (be_physfn(adapter)) {
2702 /* For BE3, for PF get permanent MAC */
2703 status = be_cmd_mac_addr_query(adapter, mac,
2704 MAC_ADDRESS_TYPE_NETWORK, true,
2706 *active_mac = false;
2708 /* For BE3, for VF get soft MAC assigned by PF*/
2709 status = be_cmd_mac_addr_query(adapter, mac,
2710 MAC_ADDRESS_TYPE_NETWORK, false,
2717 /* Routine to query per-function resource limits */
2718 static int be_get_config(struct be_adapter *adapter)
2723 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2725 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2727 adapter->dev_num_vfs = dev_num_vfs;
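/* Bring the function up: enable MSI-X, create the event, completion and
 * MCC queues, create the interface and program its MAC, create the TX
 * queues, apply VLAN/RX-mode/flow-control settings and, on a PF with
 * num_vfs set, set up SR-IOV.
 */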
2732 static int be_setup(struct be_adapter *adapter)
2734 struct device *dev = &adapter->pdev->dev;
2735 u32 cap_flags, en_flags;
2741 be_setup_init(adapter);
2743 be_get_config(adapter);
2745 be_cmd_req_native_mode(adapter);
2747 be_msix_enable(adapter);
2749 status = be_evt_queues_create(adapter);
2753 status = be_tx_cqs_create(adapter);
2757 status = be_rx_cqs_create(adapter);
2761 status = be_mcc_queues_create(adapter);
2765 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2766 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2767 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2768 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2770 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2771 cap_flags |= BE_IF_FLAGS_RSS;
2772 en_flags |= BE_IF_FLAGS_RSS;
2775 if (lancer_chip(adapter) && !be_physfn(adapter)) {
2776 en_flags = BE_IF_FLAGS_UNTAGGED |
2777 BE_IF_FLAGS_BROADCAST |
2778 BE_IF_FLAGS_MULTICAST;
2779 cap_flags = en_flags;
2782 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2783 &adapter->if_handle, 0);
2787 memset(mac, 0, ETH_ALEN);
2789 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2790 &active_mac, &adapter->pmac_id[0]);
2795 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2796 &adapter->pmac_id[0], 0);
2801 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2802 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2803 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2806 status = be_tx_qs_create(adapter);
2810 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2812 if (adapter->vlans_added)
2813 be_vid_config(adapter);
2815 be_set_rx_mode(adapter->netdev);
2817 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2819 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2820 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2823 if (be_physfn(adapter) && num_vfs) {
2824 if (adapter->dev_num_vfs)
2825 be_vf_setup(adapter);
2827 dev_warn(dev, "device doesn't support SRIOV\n");
2830 be_cmd_get_phy_info(adapter);
2831 if (be_pause_supported(adapter))
2832 adapter->phy.fc_autoneg = 1;
2834 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2835 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2842 #ifdef CONFIG_NET_POLL_CONTROLLER
2843 static void be_netpoll(struct net_device *netdev)
2845 struct be_adapter *adapter = netdev_priv(netdev);
2846 struct be_eq_obj *eqo;
2849 for_all_evt_queues(adapter, eqo, i)
2856 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2857 char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2859 static bool be_flash_redboot(struct be_adapter *adapter,
2860 const u8 *p, u32 img_start, int image_size,
2867 crc_offset = hdr_size + img_start + image_size - 4;
2871 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2874 dev_err(&adapter->pdev->dev,
2875 "could not get crc from flash, not flashing redboot\n");
2879 /* update redboot only if crc does not match */
2880 if (!memcmp(flashed_crc, p, 4))
2886 static bool phy_flashing_required(struct be_adapter *adapter)
2888 return (adapter->phy.phy_type == TN_8022 &&
2889 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2892 static bool is_comp_in_ufi(struct be_adapter *adapter,
2893 struct flash_section_info *fsec, int type)
2895 int i = 0, img_type = 0;
2896 struct flash_section_info_g2 *fsec_g2 = NULL;
2898 if (adapter->generation != BE_GEN3)
2899 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2901 for (i = 0; i < MAX_FLASH_COMP; i++) {
2903 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2905 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2907 if (img_type == type)
2914 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2916 const struct firmware *fw)
2918 struct flash_section_info *fsec = NULL;
2919 const u8 *p = fw->data;
2922 while (p < (fw->data + fw->size)) {
2923 fsec = (struct flash_section_info *)p;
2924 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2931 static int be_flash_data(struct be_adapter *adapter,
2932 const struct firmware *fw,
2933 struct be_dma_mem *flash_cmd,
2937 int status = 0, i, filehdr_size = 0;
2938 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2939 u32 total_bytes = 0, flash_op;
2941 const u8 *p = fw->data;
2942 struct be_cmd_write_flashrom *req = flash_cmd->va;
2943 const struct flash_comp *pflashcomp;
2944 int num_comp, hdr_size;
2945 struct flash_section_info *fsec = NULL;
2947 struct flash_comp gen3_flash_types[] = {
2948 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2949 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2950 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2951 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2952 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2953 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2954 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2955 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2956 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2957 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2958 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2959 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2960 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2961 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2962 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2963 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2964 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2965 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2966 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2967 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2970 struct flash_comp gen2_flash_types[] = {
2971 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2972 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2973 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2974 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2975 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2976 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2977 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2978 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2979 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2980 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2981 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2982 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2983 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2984 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2985 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2986 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2989 if (adapter->generation == BE_GEN3) {
2990 pflashcomp = gen3_flash_types;
2991 filehdr_size = sizeof(struct flash_file_hdr_g3);
2992 num_comp = ARRAY_SIZE(gen3_flash_types);
2994 pflashcomp = gen2_flash_types;
2995 filehdr_size = sizeof(struct flash_file_hdr_g2);
2996 num_comp = ARRAY_SIZE(gen2_flash_types);
2998 /* Get flash section info */
2999 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3001 dev_err(&adapter->pdev->dev,
3002 "Invalid Cookie. UFI corrupted ?\n");
3005 for (i = 0; i < num_comp; i++) {
3006 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3009 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3010 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3013 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
3014 if (!phy_flashing_required(adapter))
3018 hdr_size = filehdr_size +
3019 (num_of_images * sizeof(struct image_hdr));
3021 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3022 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3023 pflashcomp[i].size, hdr_size)))
3026 /* Flash the component */
3028 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3029 if (p + pflashcomp[i].size > fw->data + fw->size)
3031 total_bytes = pflashcomp[i].size;
3032 while (total_bytes) {
3033 if (total_bytes > 32*1024)
3034 num_bytes = 32*1024;
3036 num_bytes = total_bytes;
3037 total_bytes -= num_bytes;
3039 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3040 flash_op = FLASHROM_OPER_PHY_FLASH;
3042 flash_op = FLASHROM_OPER_FLASH;
3044 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3045 flash_op = FLASHROM_OPER_PHY_SAVE;
3047 flash_op = FLASHROM_OPER_SAVE;
3049 memcpy(req->params.data_buf, p, num_bytes);
3051 status = be_cmd_write_flashrom(adapter, flash_cmd,
3052 pflashcomp[i].optype, flash_op, num_bytes);
3054 if ((status == ILLEGAL_IOCTL_REQ) &&
3055 (pflashcomp[i].optype ==
3058 dev_err(&adapter->pdev->dev,
3059 "cmd to write to flash rom failed.\n");
3067 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3071 if (fhdr->build[0] == '3')
3073 else if (fhdr->build[0] == '2')
3079 static int lancer_wait_idle(struct be_adapter *adapter)
3081 #define SLIPORT_IDLE_TIMEOUT 30
3085 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3086 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3087 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3093 if (i == SLIPORT_IDLE_TIMEOUT)
3099 static int lancer_fw_reset(struct be_adapter *adapter)
3103 status = lancer_wait_idle(adapter);
3107 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3108 PHYSDEV_CONTROL_OFFSET);
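/* Download a firmware image to a Lancer adapter: stream it to the FW in
 * 32KB chunks via write-object commands, commit it, and issue a FW reset
 * if the new image requires one.
 */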
3113 static int lancer_fw_download(struct be_adapter *adapter,
3114 const struct firmware *fw)
3116 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3117 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3118 struct be_dma_mem flash_cmd;
3119 const u8 *data_ptr = NULL;
3120 u8 *dest_image_ptr = NULL;
3121 size_t image_size = 0;
3123 u32 data_written = 0;
3129 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3130 dev_err(&adapter->pdev->dev,
3131 "FW Image not properly aligned. "
3132 "Length must be 4 byte aligned.\n");
3134 goto lancer_fw_exit;
3137 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3138 + LANCER_FW_DOWNLOAD_CHUNK;
3139 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3140 &flash_cmd.dma, GFP_KERNEL);
3141 if (!flash_cmd.va) {
3143 dev_err(&adapter->pdev->dev,
3144 "Memory allocation failure while flashing\n");
3145 goto lancer_fw_exit;
3148 dest_image_ptr = flash_cmd.va +
3149 sizeof(struct lancer_cmd_req_write_object);
3150 image_size = fw->size;
3151 data_ptr = fw->data;
3153 while (image_size) {
3154 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3156 /* Copy the image chunk content. */
3157 memcpy(dest_image_ptr, data_ptr, chunk_size);
3159 status = lancer_cmd_write_object(adapter, &flash_cmd,
3161 LANCER_FW_DOWNLOAD_LOCATION,
3162 &data_written, &change_status,
3167 offset += data_written;
3168 data_ptr += data_written;
3169 image_size -= data_written;
3173 /* Commit the FW written */
3174 status = lancer_cmd_write_object(adapter, &flash_cmd,
3176 LANCER_FW_DOWNLOAD_LOCATION,
3177 &data_written, &change_status,
3181 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3184 dev_err(&adapter->pdev->dev,
3185 "Firmware load error. "
3186 "Status code: 0x%x Additional Status: 0x%x\n",
3187 status, add_status);
3188 goto lancer_fw_exit;
3191 if (change_status == LANCER_FW_RESET_NEEDED) {
3192 status = lancer_fw_reset(adapter);
3194 dev_err(&adapter->pdev->dev,
3195 "Adapter busy for FW reset.\n"
3196 "New FW will not be active.\n");
3197 goto lancer_fw_exit;
3199 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3200 dev_err(&adapter->pdev->dev,
3201 "System reboot required for new FW"
3205 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
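/* Flash a BE2/BE3 UFI image: verify that the UFI generation matches the
 * adapter generation and flash each component the image contains.
 */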
3210 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3212 struct flash_file_hdr_g2 *fhdr;
3213 struct flash_file_hdr_g3 *fhdr3;
3214 struct image_hdr *img_hdr_ptr = NULL;
3215 struct be_dma_mem flash_cmd;
3217 int status = 0, i = 0, num_imgs = 0;
3220 fhdr = (struct flash_file_hdr_g2 *) p;
3222 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3223 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3224 &flash_cmd.dma, GFP_KERNEL);
3225 if (!flash_cmd.va) {
3227 dev_err(&adapter->pdev->dev,
3228 "Memory allocation failure while flashing\n");
3232 if ((adapter->generation == BE_GEN3) &&
3233 (get_ufigen_type(fhdr) == BE_GEN3)) {
3234 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3235 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3236 for (i = 0; i < num_imgs; i++) {
3237 img_hdr_ptr = (struct image_hdr *) (fw->data +
3238 (sizeof(struct flash_file_hdr_g3) +
3239 i * sizeof(struct image_hdr)));
3240 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3241 status = be_flash_data(adapter, fw, &flash_cmd,
3244 } else if ((adapter->generation == BE_GEN2) &&
3245 (get_ufigen_type(fhdr) == BE_GEN2)) {
3246 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3248 dev_err(&adapter->pdev->dev,
3249 "UFI and Interface are not compatible for flashing\n");
3253 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3256 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3260 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3266 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3268 const struct firmware *fw;
3271 if (!netif_running(adapter->netdev)) {
3272 dev_err(&adapter->pdev->dev,
3273 "Firmware load not allowed (interface is down)\n");
3277 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3281 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3283 if (lancer_chip(adapter))
3284 status = lancer_fw_download(adapter, fw);
3286 status = be_fw_download(adapter, fw);
3289 release_firmware(fw);
3293 static const struct net_device_ops be_netdev_ops = {
3294 .ndo_open = be_open,
3295 .ndo_stop = be_close,
3296 .ndo_start_xmit = be_xmit,
3297 .ndo_set_rx_mode = be_set_rx_mode,
3298 .ndo_set_mac_address = be_mac_addr_set,
3299 .ndo_change_mtu = be_change_mtu,
3300 .ndo_get_stats64 = be_get_stats64,
3301 .ndo_validate_addr = eth_validate_addr,
3302 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3303 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3304 .ndo_set_vf_mac = be_set_vf_mac,
3305 .ndo_set_vf_vlan = be_set_vf_vlan,
3306 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3307 .ndo_get_vf_config = be_get_vf_config,
3308 #ifdef CONFIG_NET_POLL_CONTROLLER
3309 .ndo_poll_controller = be_netpoll,
3313 static void be_netdev_init(struct net_device *netdev)
3315 struct be_adapter *adapter = netdev_priv(netdev);
3316 struct be_eq_obj *eqo;
3319 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3320 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3322 if (be_multi_rxq(adapter))
3323 netdev->hw_features |= NETIF_F_RXHASH;
3325 netdev->features |= netdev->hw_features |
3326 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3328 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3329 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3331 netdev->priv_flags |= IFF_UNICAST_FLT;
3333 netdev->flags |= IFF_MULTICAST;
3335 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3337 netdev->netdev_ops = &be_netdev_ops;
3339 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3341 for_all_evt_queues(adapter, eqo, i)
3342 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3345 static void be_unmap_pci_bars(struct be_adapter *adapter)
3348 iounmap(adapter->csr);
3350 iounmap(adapter->db);
3351 if (adapter->roce_db.base)
3352 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3355 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3357 struct pci_dev *pdev = adapter->pdev;
3360 addr = pci_iomap(pdev, 2, 0);
3364 adapter->roce_db.base = addr;
3365 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3366 adapter->roce_db.size = 8192;
3367 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3371 static int be_map_pci_bars(struct be_adapter *adapter)
3376 if (lancer_chip(adapter)) {
3377 if (be_type_2_3(adapter)) {
3378 addr = ioremap_nocache(
3379 pci_resource_start(adapter->pdev, 0),
3380 pci_resource_len(adapter->pdev, 0));
3385 if (adapter->if_type == SLI_INTF_TYPE_3) {
3386 if (lancer_roce_map_pci_bars(adapter))
3392 if (be_physfn(adapter)) {
3393 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3394 pci_resource_len(adapter->pdev, 2));
3397 adapter->csr = addr;
3400 if (adapter->generation == BE_GEN2) {
3403 if (be_physfn(adapter))
3408 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3409 pci_resource_len(adapter->pdev, db_reg));
3413 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3414 adapter->roce_db.size = 4096;
3415 adapter->roce_db.io_addr =
3416 pci_resource_start(adapter->pdev, db_reg);
3417 adapter->roce_db.total_size =
3418 pci_resource_len(adapter->pdev, db_reg);
3422 be_unmap_pci_bars(adapter);
3426 static void be_ctrl_cleanup(struct be_adapter *adapter)
3428 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3430 be_unmap_pci_bars(adapter);
3433 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3436 mem = &adapter->rx_filter;
3438 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3442 static int be_ctrl_init(struct be_adapter *adapter)
3444 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3445 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3446 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3449 status = be_map_pci_bars(adapter);
3453 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3454 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3455 mbox_mem_alloc->size,
3456 &mbox_mem_alloc->dma,
3458 if (!mbox_mem_alloc->va) {
3460 goto unmap_pci_bars;
3462 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3463 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3464 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3465 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3467 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3468 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3469 &rx_filter->dma, GFP_KERNEL);
3470 if (rx_filter->va == NULL) {
3474 memset(rx_filter->va, 0, rx_filter->size);
3476 mutex_init(&adapter->mbox_lock);
3477 spin_lock_init(&adapter->mcc_lock);
3478 spin_lock_init(&adapter->mcc_cq_lock);
3480 init_completion(&adapter->flash_compl);
3481 pci_save_state(adapter->pdev);
3485 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3486 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3489 be_unmap_pci_bars(adapter);
3495 static void be_stats_cleanup(struct be_adapter *adapter)
3497 struct be_dma_mem *cmd = &adapter->stats_cmd;
3500 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3504 static int be_stats_init(struct be_adapter *adapter)
3506 struct be_dma_mem *cmd = &adapter->stats_cmd;
3508 if (adapter->generation == BE_GEN2) {
3509 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3511 if (lancer_chip(adapter))
3512 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3514 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3516 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3518 if (cmd->va == NULL)
3520 memset(cmd->va, 0, cmd->size);
3524 static void __devexit be_remove(struct pci_dev *pdev)
3526 struct be_adapter *adapter = pci_get_drvdata(pdev);
3531 be_roce_dev_remove(adapter);
3533 cancel_delayed_work_sync(&adapter->func_recovery_work);
3535 unregister_netdev(adapter->netdev);
3539 /* tell fw we're done with firing cmds */
3540 be_cmd_fw_clean(adapter);
3542 be_stats_cleanup(adapter);
3544 be_ctrl_cleanup(adapter);
3546 pci_set_drvdata(pdev, NULL);
3547 pci_release_regions(pdev);
3548 pci_disable_device(pdev);
3550 free_netdev(adapter->netdev);
3553 bool be_is_wol_supported(struct be_adapter *adapter)
3555 return (adapter->wol_cap & BE_WOL_CAP) &&
3556 !be_is_wol_excluded(adapter);
3559 u32 be_get_fw_log_level(struct be_adapter *adapter)
3561 struct be_dma_mem extfat_cmd;
3562 struct be_fat_conf_params *cfgs;
3567 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3568 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3569 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3572 if (!extfat_cmd.va) {
3573 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3578 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3580 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3581 sizeof(struct be_cmd_resp_hdr));
3582 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3583 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3584 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3587 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3592 static int be_get_initial_config(struct be_adapter *adapter)
3597 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3598 &adapter->function_mode, &adapter->function_caps);
3602 if (adapter->function_mode & FLEX10_MODE)
3603 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3605 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3607 if (be_physfn(adapter))
3608 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3610 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3612 /* primary mac needs 1 pmac entry */
3613 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3614 sizeof(u32), GFP_KERNEL);
3615 if (!adapter->pmac_id)
3618 status = be_cmd_get_cntl_attributes(adapter);
3622 status = be_cmd_get_acpi_wol_cap(adapter);
3624 /* in case of a failure to get wol capabilities
3625 * check the exclusion list to determine WOL capability */
3626 if (!be_is_wol_excluded(adapter))
3627 adapter->wol_cap |= BE_WOL_CAP;
3630 if (be_is_wol_supported(adapter))
3631 adapter->wol = true;
3633 /* Must be a power of 2 or else MODULO will BUG_ON */
3634 adapter->be_get_temp_freq = 64;
3636 level = be_get_fw_log_level(adapter);
3637 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3642 static int be_dev_type_check(struct be_adapter *adapter)
3644 struct pci_dev *pdev = adapter->pdev;
3645 u32 sli_intf = 0, if_type;
3647 switch (pdev->device) {
3650 adapter->generation = BE_GEN2;
3654 adapter->generation = BE_GEN3;
3658 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3659 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3660 SLI_INTF_IF_TYPE_SHIFT;
3661 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3662 SLI_INTF_IF_TYPE_SHIFT;
3663 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3664 !be_type_2_3(adapter)) {
3665 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3668 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3669 SLI_INTF_FAMILY_SHIFT);
3670 adapter->generation = BE_GEN3;
3673 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3674 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3675 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3678 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3679 SLI_INTF_FAMILY_SHIFT);
3680 adapter->generation = BE_GEN3;
3683 adapter->generation = 0;
3686 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3687 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3691 static int lancer_recover_func(struct be_adapter *adapter)
3695 status = lancer_test_and_set_rdy_state(adapter);
3699 if (netif_running(adapter->netdev))
3700 be_close(adapter->netdev);
3704 adapter->hw_error = false;
3705 adapter->fw_timeout = false;
3707 status = be_setup(adapter);
3711 if (netif_running(adapter->netdev)) {
3712 status = be_open(adapter->netdev);
3717 dev_err(&adapter->pdev->dev,
3718 "Adapter SLIPORT recovery succeeded\n");
3721 dev_err(&adapter->pdev->dev,
3722 "Adapter SLIPORT recovery failed\n");
3727 static void be_func_recovery_task(struct work_struct *work)
3729 struct be_adapter *adapter =
3730 container_of(work, struct be_adapter, func_recovery_work.work);
3733 be_detect_error(adapter);
3735 if (adapter->hw_error && lancer_chip(adapter)) {
3737 if (adapter->eeh_error)
3741 netif_device_detach(adapter->netdev);
3744 status = lancer_recover_func(adapter);
3747 netif_device_attach(adapter->netdev);
3751 schedule_delayed_work(&adapter->func_recovery_work,
3752 msecs_to_jiffies(1000));
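/* Periodic (1 second) housekeeping: fetch HW stats, poll the die
 * temperature, replenish starved RX rings, update EQ delays and re-arm
 * itself.
 */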
3755 static void be_worker(struct work_struct *work)
3757 struct be_adapter *adapter =
3758 container_of(work, struct be_adapter, work.work);
3759 struct be_rx_obj *rxo;
3760 struct be_eq_obj *eqo;
3763 /* when interrupts are not yet enabled, just reap any pending
3764 * mcc completions */
3765 if (!netif_running(adapter->netdev)) {
3766 be_process_mcc(adapter);
3770 if (!adapter->stats_cmd_sent) {
3771 if (lancer_chip(adapter))
3772 lancer_cmd_get_pport_stats(adapter,
3773 &adapter->stats_cmd);
3775 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3778 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3779 be_cmd_get_die_temperature(adapter);
3781 for_all_rx_queues(adapter, rxo, i) {
3782 if (rxo->rx_post_starved) {
3783 rxo->rx_post_starved = false;
3784 be_post_rx_frags(rxo, GFP_KERNEL);
3788 for_all_evt_queues(adapter, eqo, i)
3789 be_eqd_update(adapter, eqo);
3792 adapter->work_counter++;
3793 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3796 static bool be_reset_required(struct be_adapter *adapter)
3798 return be_find_vfs(adapter, ENABLED) <= 0;
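/* PCI probe: enable and map the device, wait for FW readiness, reset the
 * function if required, query the initial configuration, set up the data
 * path and register the net_device.
 */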
3801 static int __devinit be_probe(struct pci_dev *pdev,
3802 const struct pci_device_id *pdev_id)
3805 struct be_adapter *adapter;
3806 struct net_device *netdev;
3809 status = pci_enable_device(pdev);
3813 status = pci_request_regions(pdev, DRV_NAME);
3816 pci_set_master(pdev);
3818 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3819 if (netdev == NULL) {
3823 adapter = netdev_priv(netdev);
3824 adapter->pdev = pdev;
3825 pci_set_drvdata(pdev, adapter);
3827 status = be_dev_type_check(adapter);
3831 adapter->netdev = netdev;
3832 SET_NETDEV_DEV(netdev, &pdev->dev);
3834 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3836 netdev->features |= NETIF_F_HIGHDMA;
3838 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3840 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3845 status = be_ctrl_init(adapter);
3849 /* sync up with fw's ready state */
3850 if (be_physfn(adapter)) {
3851 status = be_fw_wait_ready(adapter);
3856 /* tell fw we're ready to fire cmds */
3857 status = be_cmd_fw_init(adapter);
3861 if (be_reset_required(adapter)) {
3862 status = be_cmd_reset_function(adapter);
3867 /* The INTR bit may be set in the card when probed by a kdump kernel after a crash */
3870 if (!lancer_chip(adapter))
3871 be_intr_set(adapter, false);
3873 status = be_stats_init(adapter);
3877 status = be_get_initial_config(adapter);
3881 INIT_DELAYED_WORK(&adapter->work, be_worker);
3882 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
3883 adapter->rx_fc = adapter->tx_fc = true;
3885 status = be_setup(adapter);
3889 be_netdev_init(netdev);
3890 status = register_netdev(netdev);
3894 be_roce_dev_add(adapter);
3896 schedule_delayed_work(&adapter->func_recovery_work,
3897 msecs_to_jiffies(1000));
3899 be_cmd_query_port_name(adapter, &port_name);
3901 dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
3909 be_msix_disable(adapter);
3911 be_stats_cleanup(adapter);
3913 be_ctrl_cleanup(adapter);
3915 free_netdev(netdev);
3916 pci_set_drvdata(pdev, NULL);
3918 pci_release_regions(pdev);
3920 pci_disable_device(pdev);
3922 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3926 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3928 struct be_adapter *adapter = pci_get_drvdata(pdev);
3929 struct net_device *netdev = adapter->netdev;
3932 be_setup_wol(adapter, true);
3934 cancel_delayed_work_sync(&adapter->func_recovery_work);
3936 netif_device_detach(netdev);
3937 if (netif_running(netdev)) {
3944 pci_save_state(pdev);
3945 pci_disable_device(pdev);
3946 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3950 static int be_resume(struct pci_dev *pdev)
3953 struct be_adapter *adapter = pci_get_drvdata(pdev);
3954 struct net_device *netdev = adapter->netdev;
3956 netif_device_detach(netdev);
3958 status = pci_enable_device(pdev);
3962 pci_set_power_state(pdev, 0);
3963 pci_restore_state(pdev);
3965 /* tell fw we're ready to fire cmds */
3966 status = be_cmd_fw_init(adapter);
3971 if (netif_running(netdev)) {
3977 schedule_delayed_work(&adapter->func_recovery_work,
3978 msecs_to_jiffies(1000));
3979 netif_device_attach(netdev);
3982 be_setup_wol(adapter, false);
3988 /* An FLR will stop BE from DMAing any data. */
3990 static void be_shutdown(struct pci_dev *pdev)
3992 struct be_adapter *adapter = pci_get_drvdata(pdev);
3997 cancel_delayed_work_sync(&adapter->work);
3998 cancel_delayed_work_sync(&adapter->func_recovery_work);
4000 netif_device_detach(adapter->netdev);
4003 be_setup_wol(adapter, true);
4005 be_cmd_reset_function(adapter);
4007 pci_disable_device(pdev);
4010 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4011 pci_channel_state_t state)
4013 struct be_adapter *adapter = pci_get_drvdata(pdev);
4014 struct net_device *netdev = adapter->netdev;
4016 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4018 adapter->eeh_error = true;
4020 cancel_delayed_work_sync(&adapter->func_recovery_work);
4023 netif_device_detach(netdev);
4026 if (netif_running(netdev)) {
4033 if (state == pci_channel_io_perm_failure)
4034 return PCI_ERS_RESULT_DISCONNECT;
4036 pci_disable_device(pdev);
4038 /* The error could cause the FW to trigger a flash debug dump.
4039 * Resetting the card while flash dump is in progress
4040 * can cause it not to recover; wait for it to finish. */
4043 return PCI_ERS_RESULT_NEED_RESET;
4046 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4048 struct be_adapter *adapter = pci_get_drvdata(pdev);
4051 dev_info(&adapter->pdev->dev, "EEH reset\n");
4052 be_clear_all_error(adapter);
4054 status = pci_enable_device(pdev);
4056 return PCI_ERS_RESULT_DISCONNECT;
4058 pci_set_master(pdev);
4059 pci_set_power_state(pdev, 0);
4060 pci_restore_state(pdev);
4062 /* Check if card is ok and fw is ready */
4063 status = be_fw_wait_ready(adapter);
4065 return PCI_ERS_RESULT_DISCONNECT;
4067 return PCI_ERS_RESULT_RECOVERED;
4070 static void be_eeh_resume(struct pci_dev *pdev)
4073 struct be_adapter *adapter = pci_get_drvdata(pdev);
4074 struct net_device *netdev = adapter->netdev;
4076 dev_info(&adapter->pdev->dev, "EEH resume\n");
4078 pci_save_state(pdev);
4080 /* tell fw we're ready to fire cmds */
4081 status = be_cmd_fw_init(adapter);
4085 status = be_cmd_reset_function(adapter);
4089 status = be_setup(adapter);
4093 if (netif_running(netdev)) {
4094 status = be_open(netdev);
4099 schedule_delayed_work(&adapter->func_recovery_work,
4100 msecs_to_jiffies(1000));
4101 netif_device_attach(netdev);
4104 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4107 static struct pci_error_handlers be_eeh_handlers = {
4108 .error_detected = be_eeh_err_detected,
4109 .slot_reset = be_eeh_reset,
4110 .resume = be_eeh_resume,
4113 static struct pci_driver be_driver = {
4115 .id_table = be_dev_ids,
4117 .remove = be_remove,
4118 .suspend = be_suspend,
4119 .resume = be_resume,
4120 .shutdown = be_shutdown,
4121 .err_handler = &be_eeh_handlers
4124 static int __init be_init_module(void)
4126 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4127 rx_frag_size != 2048) {
4128 printk(KERN_WARNING DRV_NAME
4129 " : Module param rx_frag_size must be 2048/4096/8192."
4131 rx_frag_size = 2048;
4134 return pci_register_driver(&be_driver);
4136 module_init(be_init_module);
4138 static void __exit be_exit_module(void)
4140 pci_unregister_driver(&be_driver);
4142 module_exit(be_exit_module);