/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then memcpying the entire
 * source onto the target.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speed up dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
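	/* new_cons points one past the last BD this packet consumed,
	 * computed from the packet's first BD and its BD count.
	 */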
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	(*pkts_compl)++;
	(*bytes_compl) += skb->len;

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);
		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;
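	/* SUB_S16() does the comparison as a signed 16-bit subtraction,
	 * so "idx is newer than last_max" stays correct across the
	 * 16-bit wraparound of the SGE index.
	 */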
	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
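	/* sge_mask is a bit vector with one bit per SGE, grouped into
	 * 64-bit elements; last_elem/first_elem are element indices.
	 * The producer below only advances over elements whose SGEs
	 * were all consumed, re-arming each element as it passes.
	 */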
	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);

	return 0;
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/* ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
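		/* Round the SGE buffer space down to a whole number of
		 * gro_size segments so every frag filled later stays
		 * MSS-aligned.
		 */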
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	   fp->tpa_queue_used);
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
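/* The 12 bytes break down as: two 1-byte NOPs (alignment), 1 byte
 * kind (8), 1 byte length (10), a 4-byte TSval and a 4-byte TSecr.
 */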
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation, calculated from
 * the aggregation's first packet.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
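/* Illustrative example: for an IPv4 aggregation carrying TCP
 * timestamps, hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12
 * (timestamp option) = 66, so the MSS reported to the stack is
 * len_on_bd - 66.
 */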
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
			     PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		 * (meaning that the "next" element will consume 2 indices)
		 */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		 * where we are and drop the whole packet
		 */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, 0);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	kfree(new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				struct bnx2x_fastpath *fp)
{
	/* Do nothing if no IP/L4 csum validation was done */
	if (cqe->fast_path_cqe.status_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
		return;

	/* If both IP/L4 validation were done, check if an error was found. */
	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		fp->eth_q_stats.hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	 * that's why it's ok here
	 */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speed up eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			fp->eth_q_stats.rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring,
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, 0);
				if (unlikely(!skb)) {
					kfree(data);
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp);

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		DP(NETIF_MSG_RX_STATUS, "rx_next: descr %u bd %u\n",
		   sw_comp_cons, bd_cons);

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata[cos].tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
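		/* maxCfg comes from the MF configuration: a percentage
		 * of the link speed in SI mode, or units of 100 Mbps
		 * in SD mode.
		 */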
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * Uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}
static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM);
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
			unsigned pkts_compl = 0, bytes_compl = 0;

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
						  &pkts_compl, &bytes_compl);
				sw_cons++;
			}
			netdev_tx_reset_queue(
				netdev_get_tx_queue(bp->dev, txdata->txq_index));
		}
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;

#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else
		free_irq(bp->dev->irq, bp->dev);
}
int __devinit bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
		       bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
		       bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
		if (rc) {
			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
			goto no_msix;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	} else if (rc > 0) {
		/* Get by with single vector */
		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
		if (rc) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	rc = request_irq(bp->msix_table[offset++].vector,
			 bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_PRESENT;
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}
#endif
	/* select a non-FCoE queue */
	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_queues = bnx2x_calc_num_queues(bp);

#ifdef BCM_CNIC
	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
		bp->num_queues = 1;
#endif
	/* Add special queues */
	bp->num_queues += NON_ETH_CONTEXT_USE;
}
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, tx, rx;

	tx = MAX_TXQS_PER_COS * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

/* account for fcoe queue */
#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		rx += FCOE_PRESENT;
		tx += FCOE_PRESENT;
	}
#endif

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
	   tx, rx);

	return rc;
}
static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVREHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
	}
}
static int bnx2x_init_rss_pf(struct bnx2x *bp)
{
	int i;
	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/* Prepare the initial contents of the indirection table if RSS is
	 * enabled
	 */
	for (i = 0; i < sizeof(ind_table); i++)
		ind_table[i] =
			bp->fp->cl_id +
			ethtool_rxfh_indir_default(i, num_eth_queues);
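	/* ethtool_rxfh_indir_default(i, n) is simply i % n, so this
	 * spreads RSS traffic round-robin across the ETH queues,
	 * offset by the first client ID.
	 */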
	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_eth(bp, ind_table,
				    bp->port.pmf || !CHIP_IS_E1x(bp));
}
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
			u8 *ind_table, bool config_hash)
{
	struct bnx2x_config_rss_params params = {NULL};
	int i;

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *	bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

	/* RSS configuration */
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

	/* Hash bits */
	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, ind_table, sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		for (i = 0; i < sizeof(params.rss_key) / 4; i++)
			params.rss_key[i] = random32();

		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	return bnx2x_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested commands */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
			  rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
}
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)
#else
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
{
	/* build FW version dword */
	u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
		    (BCM_5710_FW_MINOR_VERSION << 8) +
		    (BCM_5710_FW_REVISION_VERSION << 16) +
		    (BCM_5710_FW_ENGINEERING_VERSION << 24);

	/* read loaded FW from chip */
	u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

	DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);

	if (loaded_fw != my_fw) {
		if (is_err)
			BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
				  loaded_fw, my_fw);
		return false;
	}

	return true;
}
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;
	/* bzero bnx2x_fastpath contents */
	if (bp->stats_init) {
		memset(fp, 0, sizeof(*fp));
	} else {
		/* Keep Queue statistics */
		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;

		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
					  GFP_KERNEL);
		if (tmp_eth_q_stats)
			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));

		tmp_eth_q_stats_old =
			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
				GFP_KERNEL);
		if (tmp_eth_q_stats_old)
			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));

		memset(fp, 0, sizeof(*fp));

		if (tmp_eth_q_stats) {
			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));
			kfree(tmp_eth_q_stats);
		}

		if (tmp_eth_q_stats_old) {
			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));
			kfree(tmp_eth_q_stats_old);
		}
	}

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;

	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/*
	 * set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
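	/* TPA stays enabled when LRO is on, or when GRO is on and
	 * bnx2x_mtu_allows_gro() confirms that a full aggregation
	 * segment still fits into an SGE page at the current MTU.
	 */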
	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
				  (bp->flags & GRO_ENABLE_FLAG &&
				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
	if (bp->flags & TPA_ENABLE_FLAG)
		fp->mode = TPA_MODE_LRO;
	else if (bp->flags & GRO_ENABLE_FLAG)
		fp->mode = TPA_MODE_GRO;

#ifdef BCM_CNIC
	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
#endif
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't load NIC when there is panic\n");
		return -EPERM;
	}
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa.
	 */
	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it.
	 */
	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue discipline
	 * or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	bnx2x_napi_enable(bp);

	/* set pf load just before approaching the MCP */
	bnx2x_set_pf_load(bp);

	/* Send LOAD_REQUEST command to MCP
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized
	 * common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		/* init fw_seq */
		bp->fw_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			 DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

		/* Get current FW pulse sequence */
		bp->fw_drv_pulse_wr_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
			 DRV_PULSE_SEQ_MASK);
		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);

		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			BNX2X_ERR("Driver load refused\n");
			rc = -EBUSY; /* other port in diagnostic mode */
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
		    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
			/* abort nic load if version mismatch */
			if (!bnx2x_test_firmware_version(bp, true)) {
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		}

	} else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
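	/* Without an MCP the load type is derived locally: the first
	 * function on a path initializes the COMMON blocks, the first
	 * one on a port initializes the PORT blocks, and every other
	 * function initializes only its own FUNCTION resources.
	 */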
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/*
		 * We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
	} else {
		bp->port.pmf = 0;
	}

	DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);

	/* Init Function state controlling object */
	bnx2x__init_func_obj(bp);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		BNX2X_ERR("IRQs setup failed\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Init per-function objects */
	bnx2x_init_bp_objs(bp);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
		if (SHMEM2_HAS(bp, afex_driver_support))
			SHMEM2_WR(bp, afex_driver_support,
				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
	}

	/* Set AFEX default VLAN tag to an invalid value */
	bp->afex_def_vlan_tag = -1;

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
		if (rc) {
			BNX2X_ERR("Queue setup failed\n");
			LOAD_ERROR_EXIT(bp, load_error4);
		}
	}

	rc = bnx2x_init_rss_pf(bp);
	if (rc) {
		BNX2X_ERR("PF RSS init failed\n");
		LOAD_ERROR_EXIT(bp, load_error4);
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	rc = bnx2x_set_eth_mac(bp, true);
	if (rc) {
		BNX2X_ERR("Setting Ethernet MAC failed\n");
		LOAD_ERROR_EXIT(bp, load_error4);
	}

	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* Start the Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	/* mark driver is loaded in shmem2 */
	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	/* Wait for all pending SP commands to complete */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
		return -EBUSY;
	}

	bnx2x_dcbx_init(bp);
	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error4:
#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Clean queueable objects */
	bnx2x_squeeze_objects(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
	/* clear pf_load status, as it was already set */
	bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;
	bool global = false;

	/* mark driver is unloaded in shmem2 */
	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
		BNX2X_ERR("Can't unload in closed or error state\n");
		return -EINVAL;
	}

	/*
	 * It's important to set the bp->state to a value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* Stop Tx */
	bnx2x_tx_disable(bp);

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	/* Set ALWAYS_ALIVE bit in shmem */
	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

	bnx2x_drv_pulse(bp);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
	bnx2x_save_statistics(bp);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/*
		 * Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine which leader will perform the recovery
		 * process).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bp->state = BNX2X_STATE_CLOSED;

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	return 0;
}
2386 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2390 /* If there is no power capability, silently succeed */
2392 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2396 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2400 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2401 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2402 PCI_PM_CTRL_PME_STATUS));
2404 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2405 /* delay required during transition out of D3hot */
2406 msleep(20);
2410 /* If there are other clients above, don't
2411 * shut down the power */
2412 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2414 /* Don't shut down the power for emulation and FPGA */
2415 if (CHIP_REV_IS_SLOW(bp))
2418 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2422 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2424 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2425 pmcsr);
2427 /* No more memory access after this point until
2428 * device is brought back to D0.
2429 */
2433 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2434 return -EINVAL;
2435 }
2436 return 0;
2437 }
2439 /*
2440 * net_device service functions
2441 */
2442 int bnx2x_poll(struct napi_struct *napi, int budget)
2446 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2448 struct bnx2x *bp = fp->bp;
2451 #ifdef BNX2X_STOP_ON_ERROR
2452 if (unlikely(bp->panic)) {
2453 napi_complete(napi);
2458 for_each_cos_in_tx_queue(fp, cos)
2459 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2460 bnx2x_tx_int(bp, &fp->txdata[cos]);
2463 if (bnx2x_has_rx_work(fp)) {
2464 work_done += bnx2x_rx_int(fp, budget - work_done);
2466 /* must not complete if we consumed full budget */
2467 if (work_done >= budget)
2468 break;
2469 }
2471 /* Fall out from the NAPI loop if needed */
2472 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2474 /* No need to update SB for FCoE L2 ring as long as
2475 * it's connected to the default SB and the SB
2476 * has been updated when NAPI was scheduled.
2477 */
2478 if (IS_FCOE_FP(fp)) {
2479 napi_complete(napi);
2484 bnx2x_update_fpsb_idx(fp);
2485 /* bnx2x_has_rx_work() reads the status block,
2486 * thus we need to ensure that status block indices
2487 * have been actually read (bnx2x_update_fpsb_idx)
2488 * prior to this check (bnx2x_has_rx_work) so that
2489 * we won't write the "newer" value of the status block
2490 * to IGU (if there was a DMA right after
2491 * bnx2x_has_rx_work and if there is no rmb, the memory
2492 * reading (bnx2x_update_fpsb_idx) may be postponed
2493 * to right before bnx2x_ack_sb). In this case there
2494 * will never be another interrupt until there is
2495 * another update of the status block, while there
2496 * is still unhandled work.
2497 */
2499 rmb();
2500 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2501 napi_complete(napi);
2502 /* Re-enable interrupts */
2503 DP(NETIF_MSG_RX_STATUS,
2504 "Update index to %d\n", fp->fp_hc_idx);
2505 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2506 le16_to_cpu(fp->fp_hc_idx),
2507 IGU_INT_ENABLE, 1);
2508 break;
2509 }
2510 }
2512 return work_done;
2513 }
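/* A note on the budget accounting in bnx2x_poll() (restating the
 * generic NAPI contract, not bnx2x-specific behavior): returning a
 * value smaller than @budget implies napi_complete() was called and
 * the IRQ was re-armed via bnx2x_ack_sb(); returning exactly @budget
 * keeps this instance on the core's poll list with IRQs still masked:
 *
 *	work_done = bnx2x_poll(napi, 64);   // hypothetical budget
 *	// work_done == 64 -> polled again later, no IRQ re-arm
 *	// work_done <  64 -> napi_complete() + IGU re-enabled
 */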
2516 /* we split the first BD into headers and data BDs
2517 * to ease the pain of our fellow microcode engineers
2518 * we use one mapping for both BDs
2519 */
2520 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2521 struct bnx2x_fp_txdata *txdata,
2522 struct sw_tx_bd *tx_buf,
2523 struct eth_tx_start_bd **tx_bd, u16 hlen,
2524 u16 bd_prod, int nbd)
2526 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2527 struct eth_tx_bd *d_tx_bd;
2529 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2531 /* first fix first BD */
2532 h_tx_bd->nbd = cpu_to_le16(nbd);
2533 h_tx_bd->nbytes = cpu_to_le16(hlen);
2535 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2536 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2538 /* now get a new data BD
2539 * (after the pbd) and fill it */
2540 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2541 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2543 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2544 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2546 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2547 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2548 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2550 /* this marks the BD as one that has no individual mapping */
2551 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2553 DP(NETIF_MSG_TX_QUEUED,
2554 "TSO split data size is %d (%x:%x)\n",
2555 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2558 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2563 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2564 {
2565 if (fix > 0)
2566 csum = (u16) ~csum_fold(csum_sub(csum,
2567 csum_partial(t_header - fix, fix, 0)));
2569 else if (fix < 0)
2570 csum = (u16) ~csum_fold(csum_add(csum,
2571 csum_partial(t_header, -fix, 0)));
2573 return swab16(csum);
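/* Worked example with assumed values: if the stack checksummed from 2
 * bytes before the transport header (fix == 2), those stray bytes sum
 * to csum_partial(t_header - 2, 2, 0) and are subtracted; a negative
 * fix adds the missed span back instead. The result is passed through
 * swab16() because the parse BD fields are stored byte-swapped (cf.
 * the swab16() uses in bnx2x_set_pbd_csum() below).
 */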
2576 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2580 if (skb->ip_summed != CHECKSUM_PARTIAL)
2584 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2586 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2587 rc |= XMIT_CSUM_TCP;
2591 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2592 rc |= XMIT_CSUM_TCP;
2596 if (skb_is_gso_v6(skb))
2597 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2598 else if (skb_is_gso(skb))
2599 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2604 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2605 /* check if packet requires linearization (packet is too fragmented);
2606 no need to check fragmentation if page size > 8K (there will be no
2607 violation of FW restrictions) */
2608 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2613 int first_bd_sz = 0;
2615 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2616 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2618 if (xmit_type & XMIT_GSO) {
2619 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2620 /* Check if LSO packet needs to be copied:
2621 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2622 int wnd_size = MAX_FETCH_BD - 3;
2623 /* Number of windows to check */
2624 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2629 /* Headers length */
2630 hlen = (int)(skb_transport_header(skb) - skb->data) +
2631 tcp_hdrlen(skb);
2633 /* Amount of data (w/o headers) on the linear part of the SKB */
2634 first_bd_sz = skb_headlen(skb) - hlen;
2636 wnd_sum = first_bd_sz;
2638 /* Calculate the first sum - it's special */
2639 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2641 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2643 /* If there was data on linear skb data - check it */
2644 if (first_bd_sz > 0) {
2645 if (unlikely(wnd_sum < lso_mss)) {
2650 wnd_sum -= first_bd_sz;
2653 /* Others are easier: run through the frag list and
2654 check all windows */
2655 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2657 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2659 if (unlikely(wnd_sum < lso_mss)) {
2664 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2667 /* in the non-LSO case a too-fragmented packet should always
2668 be linearized */
2674 if (unlikely(to_copy))
2675 DP(NETIF_MSG_TX_QUEUED,
2676 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2677 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2678 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
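/* Numeric sketch of the window rule (MAX_FETCH_BD assumed to be 13 as
 * defined in bnx2x.h, giving wnd_size = 10): every run of wnd_size
 * consecutive BDs must carry at least lso_mss bytes of payload. E.g.
 * an skb with 12 frags of 100 bytes each and lso_mss = 1460 fails
 * every window check above and is linearized before DMA mapping.
 */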
2684 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2687 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2688 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2689 ETH_TX_PARSE_BD_E2_LSO_MSS;
2690 if ((xmit_type & XMIT_GSO_V6) &&
2691 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2692 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2695 /**
2696 * bnx2x_set_pbd_gso - update PBD in GSO case.
2697 *
2698 * @skb: packet skb
2699 * @pbd: parse BD
2700 * @xmit_type: xmit flags
2701 */
2702 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2703 struct eth_tx_parse_bd_e1x *pbd,
2706 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2707 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2708 pbd->tcp_flags = pbd_tcp_flags(skb);
2710 if (xmit_type & XMIT_GSO_V4) {
2711 pbd->ip_id = swab16(ip_hdr(skb)->id);
2712 pbd->tcp_pseudo_csum =
2713 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2714 ip_hdr(skb)->daddr,
2715 0, IPPROTO_TCP, 0));
2718 pbd->tcp_pseudo_csum =
2719 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2720 &ipv6_hdr(skb)->daddr,
2721 0, IPPROTO_TCP, 0));
2723 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
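/* The pseudo checksum above is deliberately computed with a zero
 * length, e.g. for IPv4 (sketch of the call made a few lines up):
 *
 *	~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0)
 *
 * and pairing it with the PSEUDO_CS_WITHOUT_LEN flag signals that the
 * real TCP length still has to be folded in per generated segment
 * (our reading of the flag name).
 */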
2726 /**
2727 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2728 *
2729 * @bp: driver handle
2730 * @skb: packet skb
2731 * @parsing_data: data to be updated
2732 * @xmit_type: xmit flags
2733 *
2734 * 57712 related
2735 */
2736 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2737 u32 *parsing_data, u32 xmit_type)
2740 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2741 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2742 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2744 if (xmit_type & XMIT_CSUM_TCP) {
2745 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2746 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2747 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2749 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2751 /* We support checksum offload for TCP and UDP only.
2752 * No need to pass the UDP header length - it's a constant.
2753 */
2754 return skb_transport_header(skb) +
2755 sizeof(struct udphdr) - skb->data;
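/* Either branch returns a byte count from skb->data to the end of the
 * L4 header. The xmit path below compares it against skb_headlen() and
 * calls bnx2x_tx_split() when the linear area holds payload beyond the
 * headers, so headers and data can land in separate BDs.
 */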
2758 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2759 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2761 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2763 if (xmit_type & XMIT_CSUM_V4)
2764 tx_start_bd->bd_flags.as_bitfield |=
2765 ETH_TX_BD_FLAGS_IP_CSUM;
2766 else
2767 tx_start_bd->bd_flags.as_bitfield |=
2768 ETH_TX_BD_FLAGS_IPV6;
2770 if (!(xmit_type & XMIT_CSUM_TCP))
2771 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
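/* Resulting bd_flags combinations, derived from the branches above:
 *
 *	TCP over IPv4:	L4_CSUM | IP_CSUM
 *	UDP over IPv4:	L4_CSUM | IP_CSUM | IS_UDP
 *	TCP over IPv6:	L4_CSUM | IPV6
 *	UDP over IPv6:	L4_CSUM | IPV6 | IS_UDP
 */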
2774 /**
2775 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2776 *
2777 * @bp: driver handle
2778 * @skb: packet skb
2779 * @pbd: parse BD to be updated
2780 * @xmit_type: xmit flags
2781 */
2782 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2783 struct eth_tx_parse_bd_e1x *pbd,
2786 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2788 /* for now NS flag is not used in Linux */
2790 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2791 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2793 pbd->ip_hlen_w = (skb_transport_header(skb) -
2794 skb_network_header(skb)) >> 1;
2796 hlen += pbd->ip_hlen_w;
2798 /* We support checksum offload for TCP and UDP only */
2799 if (xmit_type & XMIT_CSUM_TCP)
2800 hlen += tcp_hdrlen(skb) / 2;
2802 hlen += sizeof(struct udphdr) / 2;
2804 pbd->total_hlen_w = cpu_to_le16(hlen);
2807 if (xmit_type & XMIT_CSUM_TCP) {
2808 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2810 } else {
2811 s8 fix = SKB_CS_OFF(skb); /* signed! */
2813 DP(NETIF_MSG_TX_QUEUED,
2814 "hlen %d fix %d csum before fix %x\n",
2815 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2817 /* HW bug: fixup the CSUM */
2818 pbd->tcp_pseudo_csum =
2819 bnx2x_csum_fix(skb_transport_header(skb),
2822 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2823 pbd->tcp_pseudo_csum);
2829 /* called with netif_tx_lock
2830 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2831 * netif_wake_queue()
2832 */
2833 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2835 struct bnx2x *bp = netdev_priv(dev);
2837 struct bnx2x_fastpath *fp;
2838 struct netdev_queue *txq;
2839 struct bnx2x_fp_txdata *txdata;
2840 struct sw_tx_bd *tx_buf;
2841 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2842 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2843 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2844 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2845 u32 pbd_e2_parsing_data = 0;
2846 u16 pkt_prod, bd_prod;
2847 int nbd, txq_index, fp_index, txdata_index;
2849 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2852 __le16 pkt_size = 0;
2854 u8 mac_type = UNICAST_ADDRESS;
2856 #ifdef BNX2X_STOP_ON_ERROR
2857 if (unlikely(bp->panic))
2858 return NETDEV_TX_BUSY;
2861 txq_index = skb_get_queue_mapping(skb);
2862 txq = netdev_get_tx_queue(dev, txq_index);
2864 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2866 /* decode the fastpath index and the cos index from the txq */
2867 fp_index = TXQ_TO_FP(txq_index);
2868 txdata_index = TXQ_TO_COS(txq_index);
2871 /*
2872 * Override the above for the FCoE queue:
2873 * - FCoE fp entry is right after the ETH entries.
2874 * - FCoE L2 queue uses bp->txdata[0] only.
2875 */
2876 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2877 bnx2x_fcoe_tx(bp, txq_index)))) {
2878 fp_index = FCOE_IDX;
2883 /* enable this debug print to view the transmission queue being used
2884 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2885 txq_index, fp_index, txdata_index); */
2887 /* locate the fastpath and the txdata */
2888 fp = &bp->fp[fp_index];
2889 txdata = &fp->txdata[txdata_index];
2891 /* enable this debug print to view the transmission details
2892 DP(NETIF_MSG_TX_QUEUED,
2893 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
2894 txdata->cid, fp_index, txdata_index, txdata, fp); */
2896 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2897 (skb_shinfo(skb)->nr_frags + 3))) {
2898 fp->eth_q_stats.driver_xoff++;
2899 netif_tx_stop_queue(txq);
2900 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2901 return NETDEV_TX_BUSY;
2904 DP(NETIF_MSG_TX_QUEUED,
2905 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
2906 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2907 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2909 eth = (struct ethhdr *)skb->data;
2911 /* set flag according to packet type (UNICAST_ADDRESS is default) */
2912 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2913 if (is_broadcast_ether_addr(eth->h_dest))
2914 mac_type = BROADCAST_ADDRESS;
2916 mac_type = MULTICAST_ADDRESS;
2919 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2920 /* First, check if we need to linearize the skb (due to FW
2921 restrictions). No need to check fragmentation if page size > 8K
2922 (there will be no violation of FW restrictions) */
2923 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2924 /* Statistics of linearization */
2926 if (skb_linearize(skb) != 0) {
2927 DP(NETIF_MSG_TX_QUEUED,
2928 "SKB linearization failed - silently dropping this SKB\n");
2929 dev_kfree_skb_any(skb);
2930 return NETDEV_TX_OK;
2934 /* Map skb linear data for DMA */
2935 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2936 skb_headlen(skb), DMA_TO_DEVICE);
2937 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2938 DP(NETIF_MSG_TX_QUEUED,
2939 "SKB mapping failed - silently dropping this SKB\n");
2940 dev_kfree_skb_any(skb);
2941 return NETDEV_TX_OK;
2942 }
2943 /*
2944 Please read carefully. First we use one BD which we mark as start,
2945 then we have a parsing info BD (used for TSO or xsum),
2946 and only then we have the rest of the TSO BDs.
2947 (don't forget to mark the last one as last,
2948 and to unmap only AFTER you write to the BD ...)
2949 And above all, all pbd sizes are in words - NOT DWORDS!
2950 */
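/* Illustrative BD chain for one TSO packet (a sketch, not a layout
 * guarantee):
 *
 *	start BD -> parse BD (E1x or E2) -> [split header BD] -> data BDs
 *
 * with nbd on the start BD counting all of them and only the final BD
 * carrying the "last" marking.
 */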
2952 /* get current pkt produced now - advance it just before sending packet
2953 * since mapping of pages may fail and cause packet to be dropped
2954 */
2955 pkt_prod = txdata->tx_pkt_prod;
2956 bd_prod = TX_BD(txdata->tx_bd_prod);
2958 /* get a tx_buf and first BD
2959 * tx_start_bd may be changed during SPLIT,
2960 * but first_bd will always stay first
2961 */
2962 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2963 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2964 first_bd = tx_start_bd;
2966 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2967 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2971 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2973 /* remember the first BD of the packet */
2974 tx_buf->first_bd = txdata->tx_bd_prod;
2978 DP(NETIF_MSG_TX_QUEUED,
2979 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2980 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2982 if (vlan_tx_tag_present(skb)) {
2983 tx_start_bd->vlan_or_ethertype =
2984 cpu_to_le16(vlan_tx_tag_get(skb));
2985 tx_start_bd->bd_flags.as_bitfield |=
2986 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2988 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2990 /* turn on parsing and get a BD */
2991 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2993 if (xmit_type & XMIT_CSUM)
2994 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2996 if (!CHIP_IS_E1x(bp)) {
2997 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2998 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2999 /* Set PBD in checksum offload case */
3000 if (xmit_type & XMIT_CSUM)
3001 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3002 &pbd_e2_parsing_data,
3006 * fill in the MAC addresses in the PBD - for local
3007 * switching
3008 */
3009 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3010 &pbd_e2->src_mac_addr_mid,
3011 &pbd_e2->src_mac_addr_lo,
3013 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3014 &pbd_e2->dst_mac_addr_mid,
3015 &pbd_e2->dst_mac_addr_lo,
3019 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3020 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3021 /* Set PBD in checksum offload case */
3022 if (xmit_type & XMIT_CSUM)
3023 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3027 /* Setup the data pointer of the first BD of the packet */
3028 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3029 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3030 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3031 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3032 pkt_size = tx_start_bd->nbytes;
3034 DP(NETIF_MSG_TX_QUEUED,
3035 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3036 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3037 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3038 tx_start_bd->bd_flags.as_bitfield,
3039 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3041 if (xmit_type & XMIT_GSO) {
3043 DP(NETIF_MSG_TX_QUEUED,
3044 "TSO packet len %d hlen %d total len %d tso size %d\n",
3045 skb->len, hlen, skb_headlen(skb),
3046 skb_shinfo(skb)->gso_size);
3048 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3050 if (unlikely(skb_headlen(skb) > hlen))
3051 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3054 if (!CHIP_IS_E1x(bp))
3055 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3058 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3061 /* Set the PBD's parsing_data field if not zero
3062 * (for the chips newer than 57711).
3063 */
3064 if (pbd_e2_parsing_data)
3065 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3067 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3069 /* Handle fragmented skb */
3070 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3071 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3073 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3074 skb_frag_size(frag), DMA_TO_DEVICE);
3075 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3076 unsigned int pkts_compl = 0, bytes_compl = 0;
3078 DP(NETIF_MSG_TX_QUEUED,
3079 "Unable to map page - dropping packet...\n");
3081 /* we need to unmap all buffers already mapped
3082 * for this SKB;
3083 * first_bd->nbd needs to be properly updated
3084 * before the call to bnx2x_free_tx_pkt
3085 */
3086 first_bd->nbd = cpu_to_le16(nbd);
3087 bnx2x_free_tx_pkt(bp, txdata,
3088 TX_BD(txdata->tx_pkt_prod),
3089 &pkts_compl, &bytes_compl);
3090 return NETDEV_TX_OK;
3093 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3094 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3095 if (total_pkt_bd == NULL)
3096 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3098 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3099 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3100 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3101 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3104 DP(NETIF_MSG_TX_QUEUED,
3105 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3106 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3107 le16_to_cpu(tx_data_bd->nbytes));
3110 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3112 /* update with actual num BDs */
3113 first_bd->nbd = cpu_to_le16(nbd);
3115 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3117 /* now send a tx doorbell, counting the next BD
3118 * if the packet contains or ends with it
3119 */
3120 if (TX_BD_POFF(bd_prod) < nbd)
3121 nbd++;
3123 /* total_pkt_bytes should be set on the first data BD if
3124 * it's not an LSO packet and there is more than one
3125 * data BD. In this case pkt_size is limited by an MTU value.
3126 * However we prefer to set it for an LSO packet (while we don't
3127 * have to) in order to save some CPU cycles in the non-LSO
3128 * case, which we care much more about.
3129 */
3130 if (total_pkt_bd != NULL)
3131 total_pkt_bd->total_pkt_bytes = pkt_size;
3134 DP(NETIF_MSG_TX_QUEUED,
3135 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3136 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3137 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3138 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3139 le16_to_cpu(pbd_e1x->total_hlen_w));
3141 DP(NETIF_MSG_TX_QUEUED,
3142 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3143 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3144 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3145 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3146 pbd_e2->parsing_data);
3147 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3149 netdev_tx_sent_queue(txq, skb->len);
3151 skb_tx_timestamp(skb);
3153 txdata->tx_pkt_prod++;
3154 /*
3155 * Make sure that the BD data is updated before updating the producer
3156 * since FW might read the BD right after the producer is updated.
3157 * This is only applicable for weak-ordered memory model archs such
3158 * as IA-64. The following barrier is also mandatory since FW will
3159 * assume packets must have BDs.
3160 */
3162 wmb();
3163 txdata->tx_db.data.prod += nbd;
3164 barrier();
3166 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
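/* txdata->tx_db is a producer/raw union (union db_prod in this
 * driver), so bumping .data.prod and then writing .raw is meant to
 * commit the new producer and the doorbell header to the chip in a
 * single 32-bit MMIO write.
 */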
3170 txdata->tx_bd_prod += nbd;
3172 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
3173 netif_tx_stop_queue(txq);
3175 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3176 * ordering of set_bit() in netif_tx_stop_queue() and read of
3177 * txdata->tx_pkt_cons
3178 */
3179 smp_mb();
3180 fp->eth_q_stats.driver_xoff++;
3181 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
3182 netif_tx_wake_queue(txq);
3186 return NETDEV_TX_OK;
3189 /**
3190 * bnx2x_setup_tc - routine to configure net_device for multi tc
3191 *
3192 * @dev: net device to configure
3193 * @num_tc: number of traffic classes to enable
3194 *
3195 * callback connected to the ndo_setup_tc function pointer
3196 */
3197 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3199 int cos, prio, count, offset;
3200 struct bnx2x *bp = netdev_priv(dev);
3202 /* setup tc must be called under rtnl lock */
3205 /* no traffic classes requested. aborting */
3207 netdev_reset_tc(dev);
3211 /* requested to support too many traffic classes */
3212 if (num_tc > bp->max_cos) {
3213 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3214 num_tc, bp->max_cos);
3218 /* declare amount of supported traffic classes */
3219 if (netdev_set_num_tc(dev, num_tc)) {
3220 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3224 /* configure priority to traffic class mapping */
3225 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3226 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3227 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3228 "mapping priority %d to tc %d\n",
3229 prio, bp->prio_to_cos[prio]);
3233 /* Use this configuration to differentiate tc0 from other COSes.
3234 This can be used for ets or pfc, and save the effort of setting
3235 up a multi-class queue disc or negotiating DCBX with a switch:
3236 netdev_set_prio_tc_map(dev, 0, 0);
3237 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3238 for (prio = 1; prio < 16; prio++) {
3239 netdev_set_prio_tc_map(dev, prio, 1);
3240 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3241 } */
3243 /* configure traffic class to transmission queue mapping */
3244 for (cos = 0; cos < bp->max_cos; cos++) {
3245 count = BNX2X_NUM_ETH_QUEUES(bp);
3246 offset = cos * MAX_TXQS_PER_COS;
3247 netdev_set_tc_queue(dev, cos, count, offset);
3248 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3249 "mapping tc %d to offset %d count %d\n",
3250 cos, offset, count);
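/* For reference, this path is typically exercised from userspace via
 * the mqprio qdisc; a hypothetical invocation:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 3 \
 *		map 0 0 0 0 1 1 1 1 2 2 2 2 2 2 2 2
 *
 * which reaches this function through ndo_setup_tc with num_tc = 3.
 */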
3256 /* called with rtnl_lock */
3257 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3259 struct sockaddr *addr = p;
3260 struct bnx2x *bp = netdev_priv(dev);
3263 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3264 BNX2X_ERR("Requested MAC address is not valid\n");
3269 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3270 !is_zero_ether_addr(addr->sa_data)) {
3271 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3276 if (netif_running(dev)) {
3277 rc = bnx2x_set_eth_mac(bp, false);
3282 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3283 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3285 if (netif_running(dev))
3286 rc = bnx2x_set_eth_mac(bp, true);
3291 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3293 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3294 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3299 if (IS_FCOE_IDX(fp_index)) {
3300 memset(sb, 0, sizeof(union host_hc_status_block));
3301 fp->status_blk_mapping = 0;
3306 if (!CHIP_IS_E1x(bp))
3307 BNX2X_PCI_FREE(sb->e2_sb,
3308 bnx2x_fp(bp, fp_index,
3309 status_blk_mapping),
3310 sizeof(struct host_hc_status_block_e2));
3312 BNX2X_PCI_FREE(sb->e1x_sb,
3313 bnx2x_fp(bp, fp_index,
3314 status_blk_mapping),
3315 sizeof(struct host_hc_status_block_e1x));
3320 if (!skip_rx_queue(bp, fp_index)) {
3321 bnx2x_free_rx_bds(fp);
3323 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3324 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3325 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3326 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3327 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3329 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3330 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3331 sizeof(struct eth_fast_path_rx_cqe) *
3335 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3336 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3337 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3338 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3342 if (!skip_tx_queue(bp, fp_index)) {
3343 /* fastpath tx rings: tx_buf tx_desc */
3344 for_each_cos_in_tx_queue(fp, cos) {
3345 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3347 DP(NETIF_MSG_IFDOWN,
3348 "freeing tx memory of fp %d cos %d cid %d\n",
3349 fp_index, cos, txdata->cid);
3351 BNX2X_FREE(txdata->tx_buf_ring);
3352 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3353 txdata->tx_desc_mapping,
3354 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3357 /* end of fastpath */
3360 void bnx2x_free_fp_mem(struct bnx2x *bp)
3363 for_each_queue(bp, i)
3364 bnx2x_free_fp_mem_at(bp, i);
3367 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3369 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3370 if (!CHIP_IS_E1x(bp)) {
3371 bnx2x_fp(bp, index, sb_index_values) =
3372 (__le16 *)status_blk.e2_sb->sb.index_values;
3373 bnx2x_fp(bp, index, sb_running_index) =
3374 (__le16 *)status_blk.e2_sb->sb.running_index;
3376 bnx2x_fp(bp, index, sb_index_values) =
3377 (__le16 *)status_blk.e1x_sb->sb.index_values;
3378 bnx2x_fp(bp, index, sb_running_index) =
3379 (__le16 *)status_blk.e1x_sb->sb.running_index;
3383 /* Returns the number of actually allocated BDs */
3384 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3387 struct bnx2x *bp = fp->bp;
3388 u16 ring_prod, cqe_ring_prod;
3389 int i, failure_cnt = 0;
3391 fp->rx_comp_cons = 0;
3392 cqe_ring_prod = ring_prod = 0;
3394 /* This routine is called only during init, so
3395 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3396 */
3397 for (i = 0; i < rx_ring_size; i++) {
3398 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3402 ring_prod = NEXT_RX_IDX(ring_prod);
3403 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3404 WARN_ON(ring_prod <= (i - failure_cnt));
3408 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3409 i - failure_cnt, fp->index);
3411 fp->rx_bd_prod = ring_prod;
3412 /* Limit the CQE producer by the CQE ring size */
3413 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3415 fp->rx_pkt = fp->rx_calls = 0;
3417 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3419 return i - failure_cnt;
3422 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3426 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3427 struct eth_rx_cqe_next_page *nextpg;
3429 nextpg = (struct eth_rx_cqe_next_page *)
3430 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3432 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3433 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3435 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3436 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
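/* Each page's terminal CQE points at the next page, and the
 * (i % NUM_RCQ_RINGS) wrap makes the final page point back at page 0,
 * closing the completion queue into a ring:
 * page 0 -> page 1 -> ... -> page N-1 -> page 0.
 */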
3440 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3442 union host_hc_status_block *sb;
3443 struct bnx2x_fastpath *fp = &bp->fp[index];
3446 int rx_ring_size = 0;
3449 if (!bp->rx_ring_size &&
3450 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3451 rx_ring_size = MIN_RX_SIZE_NONTPA;
3452 bp->rx_ring_size = rx_ring_size;
3455 if (!bp->rx_ring_size) {
3456 u32 cfg = SHMEM_RD(bp,
3457 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
3459 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3461 /* Decrease ring size for 1G functions */
3462 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3463 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3466 /* allocate at least the number of buffers required by FW */
3467 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3468 MIN_RX_SIZE_TPA, rx_ring_size);
3470 bp->rx_ring_size = rx_ring_size;
3471 } else /* if rx_ring_size specified - use it */
3472 rx_ring_size = bp->rx_ring_size;
3475 sb = &bnx2x_fp(bp, index, status_blk);
3477 if (!IS_FCOE_IDX(index)) {
3480 if (!CHIP_IS_E1x(bp))
3481 BNX2X_PCI_ALLOC(sb->e2_sb,
3482 &bnx2x_fp(bp, index, status_blk_mapping),
3483 sizeof(struct host_hc_status_block_e2));
3485 BNX2X_PCI_ALLOC(sb->e1x_sb,
3486 &bnx2x_fp(bp, index, status_blk_mapping),
3487 sizeof(struct host_hc_status_block_e1x));
3492 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3493 * set shortcuts for it.
3494 */
3495 if (!IS_FCOE_IDX(index))
3496 set_sb_shortcuts(bp, index);
3499 if (!skip_tx_queue(bp, index)) {
3500 /* fastpath tx rings: tx_buf tx_desc */
3501 for_each_cos_in_tx_queue(fp, cos) {
3502 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3505 "allocating tx memory of fp %d cos %d\n",
3508 BNX2X_ALLOC(txdata->tx_buf_ring,
3509 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3510 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3511 &txdata->tx_desc_mapping,
3512 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3517 if (!skip_rx_queue(bp, index)) {
3518 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3519 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3520 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3521 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3522 &bnx2x_fp(bp, index, rx_desc_mapping),
3523 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3525 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3526 &bnx2x_fp(bp, index, rx_comp_mapping),
3527 sizeof(struct eth_fast_path_rx_cqe) *
3531 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3532 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3533 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3534 &bnx2x_fp(bp, index, rx_sge_mapping),
3535 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3537 bnx2x_set_next_page_rx_bd(fp);
3540 bnx2x_set_next_page_rx_cq(fp);
3543 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3544 if (ring_size < rx_ring_size)
3550 /* handles low memory cases */
3552 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3554 /* FW will drop all packets if queue is not big enough.
3555 * In these cases we disable the queue.
3556 * Min size is different for OOO, TPA and non-TPA queues.
3557 */
3558 if (ring_size < (fp->disable_tpa ?
3559 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3560 /* release memory allocated for this queue */
3561 bnx2x_free_fp_mem_at(bp, index);
3567 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3568 {
3569 int i;
3571 /**
3572 * 1. Allocate FP for leading - fatal if error
3573 * 2. {CNIC} Allocate FCoE FP - fatal if error
3574 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3575 * 4. Allocate RSS - fix number of queues if error
3576 */
3579 if (bnx2x_alloc_fp_mem_at(bp, 0))
3585 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3586 /* we will fail the load process instead of marking
3587 * NO_FCOE_FLAG
3588 */
3593 for_each_nondefault_eth_queue(bp, i)
3594 if (bnx2x_alloc_fp_mem_at(bp, i))
3597 /* handle memory failures */
3598 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3599 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3603 /*
3604 * move non eth FPs next to last eth FP
3605 * must be done in that order
3606 * FCOE_IDX < FWD_IDX < OOO_IDX
3607 */
3609 /* move FCoE fp even if NO_FCOE_FLAG is on */
3610 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3612 bp->num_queues -= delta;
3613 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3614 bp->num_queues + delta, bp->num_queues);
3620 void bnx2x_free_mem_bp(struct bnx2x *bp)
3623 kfree(bp->msix_table);
3627 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3629 struct bnx2x_fastpath *fp;
3630 struct msix_entry *tbl;
3631 struct bnx2x_ilt *ilt;
3632 int msix_table_size = 0;
3634 /*
3635 * The biggest MSI-X table we might need is the maximum number of
3636 * fast path IGU SBs plus the default SB (for the PF).
3637 */
3638 msix_table_size = bp->igu_sb_cnt + 1;
3640 /* fp array: RSS plus CNIC related L2 queues */
3641 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
3642 sizeof(*fp), GFP_KERNEL);
3648 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3651 bp->msix_table = tbl;
3654 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3661 bnx2x_free_mem_bp(bp);
3666 int bnx2x_reload_if_running(struct net_device *dev)
3668 struct bnx2x *bp = netdev_priv(dev);
3670 if (unlikely(!netif_running(dev)))
3673 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3674 return bnx2x_nic_load(bp, LOAD_NORMAL);
3677 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3679 u32 sel_phy_idx = 0;
3680 if (bp->link_params.num_phys <= 1)
3683 if (bp->link_vars.link_up) {
3684 sel_phy_idx = EXT_PHY1;
3685 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3686 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3687 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3688 sel_phy_idx = EXT_PHY2;
3691 switch (bnx2x_phy_selection(&bp->link_params)) {
3692 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3693 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3694 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3695 sel_phy_idx = EXT_PHY1;
3697 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3698 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3699 sel_phy_idx = EXT_PHY2;
3707 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3709 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3710 /*
3711 * The selected active PHY is always after swapping (in case PHY
3712 * swapping is enabled). So when swapping is enabled, we need to
3713 * reverse the configuration.
3714 */
3716 if (bp->link_params.multi_phy_config &
3717 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3718 if (sel_phy_idx == EXT_PHY1)
3719 sel_phy_idx = EXT_PHY2;
3720 else if (sel_phy_idx == EXT_PHY2)
3721 sel_phy_idx = EXT_PHY1;
3723 return LINK_CONFIG_IDX(sel_phy_idx);
3726 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3727 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3729 struct bnx2x *bp = netdev_priv(dev);
3730 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3733 case NETDEV_FCOE_WWNN:
3734 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3735 cp->fcoe_wwn_node_name_lo);
3737 case NETDEV_FCOE_WWPN:
3738 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3739 cp->fcoe_wwn_port_name_lo);
3742 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3750 /* called with rtnl_lock */
3751 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3753 struct bnx2x *bp = netdev_priv(dev);
3755 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3756 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
3760 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3761 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3762 BNX2X_ERR("Can't support requested MTU size\n");
3766 /* This does not race with packet allocation
3767 * because the actual alloc size is
3768 * only updated as part of load
3769 */
3772 return bnx2x_reload_if_running(dev);
3775 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3776 netdev_features_t features)
3778 struct bnx2x *bp = netdev_priv(dev);
3780 /* TPA requires Rx CSUM offloading */
3781 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3782 features &= ~NETIF_F_LRO;
3783 features &= ~NETIF_F_GRO;
3789 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3791 struct bnx2x *bp = netdev_priv(dev);
3792 u32 flags = bp->flags;
3793 bool bnx2x_reload = false;
3795 if (features & NETIF_F_LRO)
3796 flags |= TPA_ENABLE_FLAG;
3798 flags &= ~TPA_ENABLE_FLAG;
3800 if (features & NETIF_F_GRO)
3801 flags |= GRO_ENABLE_FLAG;
3803 flags &= ~GRO_ENABLE_FLAG;
3805 if (features & NETIF_F_LOOPBACK) {
3806 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3807 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3808 bnx2x_reload = true;
3811 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3812 bp->link_params.loopback_mode = LOOPBACK_NONE;
3813 bnx2x_reload = true;
3817 if (flags ^ bp->flags) {
3819 bnx2x_reload = true;
3823 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3824 return bnx2x_reload_if_running(dev);
3825 /* else: bnx2x_nic_load() will be called at end of recovery */
3831 void bnx2x_tx_timeout(struct net_device *dev)
3833 struct bnx2x *bp = netdev_priv(dev);
3835 #ifdef BNX2X_STOP_ON_ERROR
3840 smp_mb__before_clear_bit();
3841 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3842 smp_mb__after_clear_bit();
3844 /* This allows the netif to be shut down gracefully before resetting */
3845 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3848 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3850 struct net_device *dev = pci_get_drvdata(pdev);
3854 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3857 bp = netdev_priv(dev);
3861 pci_save_state(pdev);
3863 if (!netif_running(dev)) {
3868 netif_device_detach(dev);
3870 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3872 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3879 int bnx2x_resume(struct pci_dev *pdev)
3881 struct net_device *dev = pci_get_drvdata(pdev);
3886 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3889 bp = netdev_priv(dev);
3891 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3892 BNX2X_ERR("Handling parity error recovery. Try again later\n");
3898 pci_restore_state(pdev);
3900 if (!netif_running(dev)) {
3905 bnx2x_set_power_state(bp, PCI_D0);
3906 netif_device_attach(dev);
3908 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3916 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3919 /* ustorm cxt validation */
3920 cxt->ustorm_ag_context.cdu_usage =
3921 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3922 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3923 /* xcontext validation */
3924 cxt->xstorm_ag_context.cdu_reserved =
3925 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3926 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3929 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3930 u8 fw_sb_id, u8 sb_index,
3934 u32 addr = BAR_CSTRORM_INTMEM +
3935 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3936 REG_WR8(bp, addr, ticks);
3938 "port %x fw_sb_id %d sb_index %d ticks %d\n",
3939 port, fw_sb_id, sb_index, ticks);
3942 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3943 u16 fw_sb_id, u8 sb_index,
3946 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3947 u32 addr = BAR_CSTRORM_INTMEM +
3948 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3949 u16 flags = REG_RD16(bp, addr);
3951 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3952 flags |= enable_flag;
3953 REG_WR16(bp, addr, flags);
3955 "port %x fw_sb_id %d sb_index %d disable %d\n",
3956 port, fw_sb_id, sb_index, disable);
3959 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3960 u8 sb_index, u8 disable, u16 usec)
3962 int port = BP_PORT(bp);
3963 u8 ticks = usec / BNX2X_BTR;
3965 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3967 disable = disable ? 1 : (usec ? 0 : 1);
3968 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
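/* Worked example (BNX2X_BTR assumed to be 4 usec per tick, per its
 * definition elsewhere in the driver): usec = 200 gives ticks = 50;
 * usec = 0 gives ticks = 0, and the expression
 * disable ? 1 : (usec ? 0 : 1) then forces the disable flag on, so a
 * zero coalescing interval can never arm a zero-delay timeout.
 */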