/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;
}
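/* Illustrative (hypothetical) use of the helper above, assuming a
 * queue-compaction loop when an upper fastpath slot is removed:
 *
 *	for (i = removed_idx; i < bp->num_queues - 1; i++)
 *		bnx2x_move_fp(bp, i + 1, i);
 *
 * The napi swap performed before the memcpy() is what guarantees each slot
 * keeps the napi struct that was originally registered for it.
 */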
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
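/* For reference, the BD chain unwound above looks like (a sketch, not an
 * exhaustive list of layouts):
 *
 *	start BD -> parse BD -> [TSO split header BD] -> frag BD * N
 *
 * tx_start_bd->nbd counts all of them plus one. Only the start BD and the
 * frag BDs carry DMA mappings, which is why the parse BD and the optional
 * split header BD are skipped without an unmap.
 */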
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
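/* A sketch of the producer side the smp_mb() above pairs with. This is
 * illustrative only, not a verbatim copy of bnx2x_start_xmit(), and the
 * threshold is a placeholder:
 *
 *	if (unlikely(bnx2x_tx_avail(bp, txdata) < needed)) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();
 *		if (bnx2x_tx_avail(bp, txdata) >= needed)
 *			netif_tx_wake_queue(txq);
 *	}
 *
 * The stop/re-check with a full barrier on each side is what closes the
 * "stopped forever" window described in the comment above.
 */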
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	   fp->tpa_queue_used);
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
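/* The 12 bytes break down as: two 1-byte NOPs for alignment, a 1-byte kind,
 * a 1-byte length, a 4-byte TS value and a 4-byte TS echo reply:
 * 1 + 1 + 1 + 1 + 4 + 4 = 12.
 */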
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
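/* Worked example (assuming the usual 20-byte base IPv4/TCP headers): for an
 * IPv4 aggregation whose first packet has len_on_bd = 1514 and carries TCP
 * timestamps, the estimated MSS is 1514 - (14 + 20 + 20 + 12) = 1448.
 */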
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
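/* Note: each SGE entry covers SGE_PAGE_SIZE * PAGES_PER_SGE bytes, allocated
 * as one physically contiguous order-PAGES_PER_SGE_SHIFT block. E.g. with
 * 4 KiB pages and PAGES_PER_SGE = 2 (illustrative values), every SGE is an
 * order-1, 8 KiB allocation.
 */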
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
						PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
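/* Worked example of the full_page rounding done in bnx2x_tpa_start() and
 * consumed above (illustrative numbers): with an 8192-byte SGE and
 * gro_size = 1448, full_page = 8192 / 1448 * 1448 = 5 * 1448 = 7240, i.e.
 * the largest multiple of the MSS that fits in one SGE, so every GRO frag
 * except possibly the last one is MSS-sized.
 */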
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, 0);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	kfree(new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			fp->eth_q_stats.rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, 0);
				if (unlikely(!skb)) {
					kfree(data);
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM) {

			if (likely(BNX2X_RX_CSUM_OK(cqe)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				fp->eth_q_stats.hw_csum_err++;
		}

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata[cos].tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
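/* Worked example (assuming maxCfg is in units of 100 Mbps, as the SD branch
 * above implies): on a 10000 Mbps link with maxCfg = 30, SI mode yields
 * 10000 * 30 / 100 = 3000 Mbps, while SD mode caps the speed at
 * vn_max_rate = 30 * 100 = 3000 Mbps.
 */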
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}

		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}
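/* The last two entries of each SGE ring page are not real buffer
 * descriptors: they hold the "next page" pointer written above, which is
 * why the element fetched sits at RX_SGE_CNT * i - 2 and why
 * bnx2x_clear_sge_mask_next_elems() keeps the matching mask bits clear.
 */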
static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/*
		 * Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
			unsigned pkts_compl = 0, bytes_compl = 0;

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
						  &pkts_compl, &bytes_compl);
				sw_cons++;
			}
			netdev_tx_reset_queue(
				netdev_get_tx_queue(bp->dev, txdata->txq_index));
		}
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else
		free_irq(bp->dev->irq, bp->dev);
}
int __devinit bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
		       bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
		       bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
			goto no_msix;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	} else if (rc > 0) {
		/* Get by with single vector */
		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
		if (rc) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
		goto no_msix;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}
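/* pci_enable_msix() semantics relied on above: a return of 0 means all
 * req_cnt vectors were allocated, a positive return is the number of vectors
 * that could have been allocated (none actually are), and a negative return
 * is an error. Hence the second call with the reduced count, and the final
 * single-vector fallback.
 */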
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	rc = request_irq(bp->msix_table[offset++].vector,
			 bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_PRESENT;
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}
#endif
	/* select a non-FCoE queue */
	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_queues = bnx2x_calc_num_queues(bp);

#ifdef BCM_CNIC
	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
		bp->num_queues = 1;
#endif
	/* Add special queues */
	bp->num_queues += NON_ETH_CONTEXT_USE;
}
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be the holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, tx, rx;

	tx = MAX_TXQS_PER_COS * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

/* account for fcoe queue */
#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		rx += FCOE_PRESENT;
		tx += FCOE_PRESENT;
	}
#endif

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
	   tx, rx);

	return 0;
}
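/* Worked example (illustrative): with MAX_TXQS_PER_COS = 16, max_cos = 2 and
 * 4 ETH queues, tx = 32 and rx = 4; an FCoE queue, when present, adds one to
 * each. Tx indices 4..15 and 20..31 are then the "holes" described in the
 * comment above.
 */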
static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun on alignment
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVREHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
	}
}
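/* Sketch of the sizing above (macro values are chip/arch dependent and given
 * here only symbolically): rx_buf_size = FW start alignment + IP header
 * alignment padding + ETH_OVREHEAD (L2 header overhead; the macro's spelling
 * comes from bnx2x.h) + MTU + FW end alignment. Callers such as
 * bnx2x_alloc_rx_data() still add NET_SKB_PAD on top when allocating.
 */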
static int bnx2x_init_rss_pf(struct bnx2x *bp)
{
	int i;
	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/* Prepare the initial contents of the indirection table if RSS is
	 * enabled
	 */
	for (i = 0; i < sizeof(ind_table); i++)
		ind_table[i] =
			bp->fp->cl_id +
			ethtool_rxfh_indir_default(i, num_eth_queues);

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_eth(bp, ind_table,
				    bp->port.pmf || !CHIP_IS_E1x(bp));
}
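/* With the ethtool default spread, entry i of the indirection table maps to
 * ETH queue (i % num_eth_queues), offset by the first client ID. E.g. with
 * 4 ETH queues the table cycles cl_id+0, cl_id+1, cl_id+2, cl_id+3,
 * cl_id+0, ... across all T_ETH_INDIRECTION_TABLE_SIZE entries.
 */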
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
			u8 *ind_table, bool config_hash)
{
	struct bnx2x_config_rss_params params = {NULL};
	int i;

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

	/* RSS configuration */
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

	/* Hash bits */
	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, ind_table, sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		for (i = 0; i < sizeof(params.rss_key) / 4; i++)
			params.rss_key[i] = random32();

		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	return bnx2x_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
			  rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
}
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)
#else
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
{
	/* build FW version dword */
	u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
		    (BCM_5710_FW_MINOR_VERSION << 8) +
		    (BCM_5710_FW_REVISION_VERSION << 16) +
		    (BCM_5710_FW_ENGINEERING_VERSION << 24);

	/* read loaded FW from chip */
	u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

	DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);

	if (loaded_fw != my_fw) {
		if (is_err)
			BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
				  loaded_fw, my_fw);
		return false;
	}

	return true;
}
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;
	/* bzero bnx2x_fastpath contents */
	if (bp->stats_init)
		memset(fp, 0, sizeof(*fp));
	else {
		/* Keep Queue statistics */
		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;

		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
					  GFP_KERNEL);
		if (tmp_eth_q_stats)
			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));

		tmp_eth_q_stats_old =
			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
				GFP_KERNEL);
		if (tmp_eth_q_stats_old)
			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));

		memset(fp, 0, sizeof(*fp));

		if (tmp_eth_q_stats) {
			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));
			kfree(tmp_eth_q_stats);
		}

		if (tmp_eth_q_stats_old) {
			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));
			kfree(tmp_eth_q_stats_old);
		}

	}

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;

	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/*
	 * set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
				  (bp->flags & GRO_ENABLE_FLAG &&
				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
	if (bp->flags & TPA_ENABLE_FLAG)
		fp->mode = TPA_MODE_LRO;
	else if (bp->flags & GRO_ENABLE_FLAG)
		fp->mode = TPA_MODE_GRO;

#ifdef BCM_CNIC
	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
#endif
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't load NIC when there is panic\n");
		return -EPERM;
	}
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa.
	 */
	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it.
	 */
	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue discipline
	 * or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	bnx2x_napi_enable(bp);

	/* set pf load just before approaching the MCP */
	bnx2x_set_pf_load(bp);

	/* Send LOAD_REQUEST command to MCP
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized
	 * common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		/* init fw_seq */
		bp->fw_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			 DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

		/* Get current FW pulse sequence */
		bp->fw_drv_pulse_wr_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
			 DRV_PULSE_SEQ_MASK);
		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);

		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			BNX2X_ERR("Driver load refused\n");
			rc = -EBUSY; /* other port in diagnostic mode */
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
		    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
			/* abort nic load if version mismatch */
			if (!bnx2x_test_firmware_version(bp, true)) {
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		}

	} else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/*
		 * We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
	} else
		bp->port.pmf = 0;

	DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);

	/* Init Function state controlling object */
	bnx2x__init_func_obj(bp);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		BNX2X_ERR("IRQs setup failed\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Init per-function objects */
	bnx2x_init_bp_objs(bp);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
		if (SHMEM2_HAS(bp, afex_driver_support))
			SHMEM2_WR(bp, afex_driver_support,
				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
	}

	/* Set AFEX default VLAN tag to an invalid value */
	bp->afex_def_vlan_tag = -1;

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
		if (rc) {
			BNX2X_ERR("Queue setup failed\n");
			LOAD_ERROR_EXIT(bp, load_error4);
		}
	}

	rc = bnx2x_init_rss_pf(bp);
	if (rc) {
		BNX2X_ERR("PF RSS init failed\n");
		LOAD_ERROR_EXIT(bp, load_error4);
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	rc = bnx2x_set_eth_mac(bp, true);
	if (rc) {
		BNX2X_ERR("Setting Ethernet MAC failed\n");
		LOAD_ERROR_EXIT(bp, load_error4);
	}

	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* Start the Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	/* mark driver is loaded in shmem2 */
	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	/* Wait for all pending SP commands to complete */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
		return -EBUSY;
	}

	bnx2x_dcbx_init(bp);
	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error4:
#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Clean queueable objects */
	bnx2x_squeeze_objects(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
	/* clear pf_load status, as it was already set */
	bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;
	bool global = false;

	/* mark driver is unloaded in shmem2 */
	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions to complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
		BNX2X_ERR("Can't unload in closed or error state\n");
		return -EINVAL;
	}

	/*
	 * It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* Stop Tx */
	bnx2x_tx_disable(bp);

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	/* Set ALWAYS_ALIVE bit in shmem */
	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

	bnx2x_drv_pulse(bp);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
	bnx2x_save_statistics(bp);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/*
		 * Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine which leader will perform the recovery
		 * last).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	return 0;
}
2371 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2375 /* If there is no power capability, silently succeed */
2377 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2381 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2385 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2386 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2387 PCI_PM_CTRL_PME_STATUS));
2389 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2390 /* delay required during transition out of D3hot */
2395 /* If there are other clients above, don't
2396 shut down the power */
2397 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2399 /* Don't shut down the power for emulation and FPGA */
2400 if (CHIP_REV_IS_SLOW(bp))
2403 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2407 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2409 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2412 /* No more memory access after this point until
2413 * device is brought back to D0.
2418 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2425 * net_device service functions
2427 int bnx2x_poll(struct napi_struct *napi, int budget)
2431 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2433 struct bnx2x *bp = fp->bp;
2436 #ifdef BNX2X_STOP_ON_ERROR
2437 if (unlikely(bp->panic)) {
2438 napi_complete(napi);
2443 for_each_cos_in_tx_queue(fp, cos)
2444 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2445 bnx2x_tx_int(bp, &fp->txdata[cos]);
2448 if (bnx2x_has_rx_work(fp)) {
2449 work_done += bnx2x_rx_int(fp, budget - work_done);
2451 /* must not complete if we consumed full budget */
2452 if (work_done >= budget)
2456 /* Fall out from the NAPI loop if needed */
2457 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2459 /* No need to update SB for FCoE L2 ring as long as
2460 * it's connected to the default SB and the SB
2461 * has been updated when NAPI was scheduled.
2463 if (IS_FCOE_FP(fp)) {
2464 napi_complete(napi);
2469 bnx2x_update_fpsb_idx(fp);
2470 /* bnx2x_has_rx_work() reads the status block,
2471 * thus we need to ensure that status block indices
2472 * have actually been read (bnx2x_update_fpsb_idx)
2473 * prior to this check (bnx2x_has_rx_work) so that
2474 * we won't write a "newer" value of the status block
2475 * to IGU (if there were a DMA right after
2476 * bnx2x_has_rx_work and no rmb, the memory
2477 * read (bnx2x_update_fpsb_idx) could be postponed
2478 * to right before bnx2x_ack_sb). In that case there
2479 * would never be another interrupt until there is
2480 * another update of the status block, while there
2481 * is still unhandled work.
2485 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2486 napi_complete(napi);
2487 /* Re-enable interrupts */
2488 DP(NETIF_MSG_RX_STATUS,
2489 "Update index to %d\n", fp->fp_hc_idx);
2490 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2491 le16_to_cpu(fp->fp_hc_idx),
2501 /* we split the first BD into header and data BDs
2502 * to ease the pain of our fellow microcode engineers;
2503 * we use one mapping for both BDs.
2504 * So far this has only been observed to happen
2505 * in Other Operating Systems(TM)
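 *
 * A worked illustration (hypothetical numbers, not from a real
 * trace): with hlen = 66 (14 ETH + 20 IP + 32 TCP w/options) and a
 * 1514-byte linear part, the start BD is trimmed to nbytes = 66 and
 * a new data BD is carved out at mapping + 66 with
 * nbytes = 1514 - 66 = 1448, reusing the very same DMA mapping.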
2507 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2508 struct bnx2x_fp_txdata *txdata,
2509 struct sw_tx_bd *tx_buf,
2510 struct eth_tx_start_bd **tx_bd, u16 hlen,
2511 u16 bd_prod, int nbd)
2513 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2514 struct eth_tx_bd *d_tx_bd;
2516 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2518 /* first fix first BD */
2519 h_tx_bd->nbd = cpu_to_le16(nbd);
2520 h_tx_bd->nbytes = cpu_to_le16(hlen);
2522 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2523 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2525 /* now get a new data BD
2526 * (after the pbd) and fill it */
2527 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2528 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2530 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2531 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2533 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2534 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2535 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2537 /* this marks the BD as one that has no individual mapping */
2538 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2540 DP(NETIF_MSG_TX_QUEUED,
2541 "TSO split data size is %d (%x:%x)\n",
2542 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2545 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2550 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2553 csum = (u16) ~csum_fold(csum_sub(csum,
2554 csum_partial(t_header - fix, fix, 0)));
2557 csum = (u16) ~csum_fold(csum_add(csum,
2558 csum_partial(t_header, -fix, 0)));
2560 return swab16(csum);
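
/* Informal reading of the fix-up above (a sketch, not a spec): for
 * fix > 0, csum_partial() of the fix bytes just before t_header is
 * subtracted back out of the checksum; for fix < 0, the first -fix
 * bytes from t_header onward are added in. swab16() then converts
 * the result to the byte order the parsing BD expects.
 */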
2563 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2567 if (skb->ip_summed != CHECKSUM_PARTIAL)
2571 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2573 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2574 rc |= XMIT_CSUM_TCP;
2578 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2579 rc |= XMIT_CSUM_TCP;
2583 if (skb_is_gso_v6(skb))
2584 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2585 else if (skb_is_gso(skb))
2586 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2591 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2592 /* check if a packet requires linearization (packet is too fragmented);
2593 no need to check fragmentation if page size > 8K (there will be no
2594 violation of FW restrictions) */
2595 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2600 int first_bd_sz = 0;
2602 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2603 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2605 if (xmit_type & XMIT_GSO) {
2606 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2607 /* Check if LSO packet needs to be copied:
2608 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2609 int wnd_size = MAX_FETCH_BD - 3;
2610 /* Number of windows to check */
2611 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2616 /* Headers length */
2617 hlen = (int)(skb_transport_header(skb) - skb->data) +
2620 /* Amount of data (w/o headers) on the linear part of the SKB */
2621 first_bd_sz = skb_headlen(skb) - hlen;
2623 wnd_sum = first_bd_sz;
2625 /* Calculate the first sum - it's special */
2626 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2628 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2630 /* If there was data in the linear part of the skb - check it */
2631 if (first_bd_sz > 0) {
2632 if (unlikely(wnd_sum < lso_mss)) {
2637 wnd_sum -= first_bd_sz;
2640 /* Others are easier: run through the frag list and
2641 check all windows */
2642 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2644 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2646 if (unlikely(wnd_sum < lso_mss)) {
2651 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2654 /* in the non-LSO case a too fragmented packet should always be linearized */
2661 if (unlikely(to_copy))
2662 DP(NETIF_MSG_TX_QUEUED,
2663 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2664 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2665 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
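
/* Worked example of the window check above (illustrative numbers;
 * assuming MAX_FETCH_BD is 13 as in contemporary bnx2x headers, so
 * wnd_size = 10): for a TSO skb with gso_size = 1460, every run of
 * 10 consecutive BDs - the linear part counts toward the first
 * window - must carry at least 1460 bytes; any window summing to
 * less forces skb_linearize().
 */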
2671 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2674 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2675 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2676 ETH_TX_PARSE_BD_E2_LSO_MSS;
2677 if ((xmit_type & XMIT_GSO_V6) &&
2678 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2679 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2683 * bnx2x_set_pbd_gso - update PBD in GSO case.
2687 * @xmit_type: xmit flags
2689 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2690 struct eth_tx_parse_bd_e1x *pbd,
2693 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2694 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2695 pbd->tcp_flags = pbd_tcp_flags(skb);
2697 if (xmit_type & XMIT_GSO_V4) {
2698 pbd->ip_id = swab16(ip_hdr(skb)->id);
2699 pbd->tcp_pseudo_csum =
2700 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2702 0, IPPROTO_TCP, 0));
2705 pbd->tcp_pseudo_csum =
2706 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2707 &ipv6_hdr(skb)->daddr,
2708 0, IPPROTO_TCP, 0));
2710 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
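
/* Note: both pseudo checksums above are computed with a zero length
 * argument, and PSEUDO_CS_WITHOUT_LEN advertises exactly that - the
 * FW is expected to fold the per-segment length in by itself when it
 * generates each LSO segment (our reading of the contract, not a
 * quote from the FW spec).
 */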
2714 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2716 * @bp: driver handle
2718 * @parsing_data: data to be updated
2719 * @xmit_type: xmit flags
2723 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2724 u32 *parsing_data, u32 xmit_type)
2727 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2728 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2729 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2731 if (xmit_type & XMIT_CSUM_TCP) {
2732 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2733 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2734 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2736 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2738 /* We support checksum offload for TCP and UDP only.
2739 * No need to pass the UDP header length - it's a constant.
2741 return skb_transport_header(skb) +
2742 sizeof(struct udphdr) - skb->data;
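
/* Arithmetic sketch (hypothetical frame): for plain ETH + IPv4 + TCP
 * the transport header starts at byte offset 34, so the value stored
 * above is 34 >> 1 = 17 words; a 20-byte TCP header contributes
 * 20 / 4 = 5 dwords; and the returned header length is 34 + 20 = 54
 * bytes (34 + 8 = 42 bytes for UDP).
 */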
2745 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2746 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2748 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2750 if (xmit_type & XMIT_CSUM_V4)
2751 tx_start_bd->bd_flags.as_bitfield |=
2752 ETH_TX_BD_FLAGS_IP_CSUM;
2754 tx_start_bd->bd_flags.as_bitfield |=
2755 ETH_TX_BD_FLAGS_IPV6;
2757 if (!(xmit_type & XMIT_CSUM_TCP))
2758 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2762 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2764 * @bp: driver handle
2766 * @pbd: parse BD to be updated
2767 * @xmit_type: xmit flags
2769 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2770 struct eth_tx_parse_bd_e1x *pbd,
2773 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2775 /* for now NS flag is not used in Linux */
2777 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2778 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2780 pbd->ip_hlen_w = (skb_transport_header(skb) -
2781 skb_network_header(skb)) >> 1;
2783 hlen += pbd->ip_hlen_w;
2785 /* We support checksum offload for TCP and UDP only */
2786 if (xmit_type & XMIT_CSUM_TCP)
2787 hlen += tcp_hdrlen(skb) / 2;
2789 hlen += sizeof(struct udphdr) / 2;
2791 pbd->total_hlen_w = cpu_to_le16(hlen);
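
/* Units sketch (hypothetical frame): hlen here is counted in 16-bit
 * words, so ETH (14 bytes) -> 7, IPv4 (20 bytes) -> ip_hlen_w = 10,
 * TCP (20 bytes) -> 10, giving total_hlen_w = 27 words = 54 bytes.
 */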
2794 if (xmit_type & XMIT_CSUM_TCP) {
2795 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2798 s8 fix = SKB_CS_OFF(skb); /* signed! */
2800 DP(NETIF_MSG_TX_QUEUED,
2801 "hlen %d fix %d csum before fix %x\n",
2802 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2804 /* HW bug: fixup the CSUM */
2805 pbd->tcp_pseudo_csum =
2806 bnx2x_csum_fix(skb_transport_header(skb),
2809 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2810 pbd->tcp_pseudo_csum);
2816 /* called with netif_tx_lock
2817 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2818 * netif_wake_queue()
2820 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2822 struct bnx2x *bp = netdev_priv(dev);
2824 struct bnx2x_fastpath *fp;
2825 struct netdev_queue *txq;
2826 struct bnx2x_fp_txdata *txdata;
2827 struct sw_tx_bd *tx_buf;
2828 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2829 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2830 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2831 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2832 u32 pbd_e2_parsing_data = 0;
2833 u16 pkt_prod, bd_prod;
2834 int nbd, txq_index, fp_index, txdata_index;
2836 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2839 __le16 pkt_size = 0;
2841 u8 mac_type = UNICAST_ADDRESS;
2843 #ifdef BNX2X_STOP_ON_ERROR
2844 if (unlikely(bp->panic))
2845 return NETDEV_TX_BUSY;
2848 txq_index = skb_get_queue_mapping(skb);
2849 txq = netdev_get_tx_queue(dev, txq_index);
2851 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2853 /* decode the fastpath index and the cos index from the txq */
2854 fp_index = TXQ_TO_FP(txq_index);
2855 txdata_index = TXQ_TO_COS(txq_index);
2859 * Override the above for the FCoE queue:
2860 * - FCoE fp entry is right after the ETH entries.
2861 * - FCoE L2 queue uses bp->txdata[0] only.
2863 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2864 bnx2x_fcoe_tx(bp, txq_index)))) {
2865 fp_index = FCOE_IDX;
2870 /* enable this debug print to view the transmission queue being used
2871 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2872 txq_index, fp_index, txdata_index); */
2874 /* locate the fastpath and the txdata */
2875 fp = &bp->fp[fp_index];
2876 txdata = &fp->txdata[txdata_index];
2878 /* enable this debug print to view the transmission details
2879 DP(NETIF_MSG_TX_QUEUED,
2880 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
2881 txdata->cid, fp_index, txdata_index, txdata, fp); */
2883 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2884 (skb_shinfo(skb)->nr_frags + 3))) {
2885 fp->eth_q_stats.driver_xoff++;
2886 netif_tx_stop_queue(txq);
2887 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2888 return NETDEV_TX_BUSY;
2891 DP(NETIF_MSG_TX_QUEUED,
2892 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
2893 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2894 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2896 eth = (struct ethhdr *)skb->data;
2898 /* set flag according to packet type (UNICAST_ADDRESS is default) */
2899 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2900 if (is_broadcast_ether_addr(eth->h_dest))
2901 mac_type = BROADCAST_ADDRESS;
2903 mac_type = MULTICAST_ADDRESS;
2906 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2907 /* First, check if we need to linearize the skb (due to FW
2908 restrictions). No need to check fragmentation if page size > 8K
2909 (there will be no violation of FW restrictions) */
2910 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2911 /* Statistics of linearization */
2913 if (skb_linearize(skb) != 0) {
2914 DP(NETIF_MSG_TX_QUEUED,
2915 "SKB linearization failed - silently dropping this SKB\n");
2916 dev_kfree_skb_any(skb);
2917 return NETDEV_TX_OK;
2921 /* Map skb linear data for DMA */
2922 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2923 skb_headlen(skb), DMA_TO_DEVICE);
2924 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2925 DP(NETIF_MSG_TX_QUEUED,
2926 "SKB mapping failed - silently dropping this SKB\n");
2927 dev_kfree_skb_any(skb);
2928 return NETDEV_TX_OK;
2931 Please read carefully. First we use one BD which we mark as start,
2932 then we have a parsing info BD (used for TSO or xsum),
2933 and only then we have the rest of the TSO BDs.
2934 (don't forget to mark the last one as last,
2935 and to unmap only AFTER you write to the BD ...)
2936 And above all, all PBD sizes are in words - NOT DWORDS!
2939 /* get current pkt produced now - advance it just before sending packet
2940 * since mapping of pages may fail and cause packet to be dropped
2942 pkt_prod = txdata->tx_pkt_prod;
2943 bd_prod = TX_BD(txdata->tx_bd_prod);
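
/* Informal sketch of the BD chain built below for one packet:
 *
 *   start BD -> parse BD (e1x or e2) -> data BD(s) -> last BD
 *
 * The start BD and a possible TSO-split data BD share one DMA
 * mapping, and first_bd->nbd is patched only after all frags have
 * been mapped successfully.
 */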
2945 /* get a tx_buf and first BD
2946 * tx_start_bd may be changed during SPLIT,
2947 * but first_bd will always stay first
2949 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2950 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2951 first_bd = tx_start_bd;
2953 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2954 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2958 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2960 /* remember the first BD of the packet */
2961 tx_buf->first_bd = txdata->tx_bd_prod;
2965 DP(NETIF_MSG_TX_QUEUED,
2966 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2967 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2969 if (vlan_tx_tag_present(skb)) {
2970 tx_start_bd->vlan_or_ethertype =
2971 cpu_to_le16(vlan_tx_tag_get(skb));
2972 tx_start_bd->bd_flags.as_bitfield |=
2973 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2975 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2977 /* turn on parsing and get a BD */
2978 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2980 if (xmit_type & XMIT_CSUM)
2981 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2983 if (!CHIP_IS_E1x(bp)) {
2984 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2985 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2986 /* Set PBD in checksum offload case */
2987 if (xmit_type & XMIT_CSUM)
2988 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2989 &pbd_e2_parsing_data,
2993 * fill in the MAC addresses in the PBD - for local
2996 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2997 &pbd_e2->src_mac_addr_mid,
2998 &pbd_e2->src_mac_addr_lo,
3000 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3001 &pbd_e2->dst_mac_addr_mid,
3002 &pbd_e2->dst_mac_addr_lo,
3006 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3007 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3008 /* Set PBD in checksum offload case */
3009 if (xmit_type & XMIT_CSUM)
3010 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3014 /* Setup the data pointer of the first BD of the packet */
3015 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3016 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3017 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3018 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3019 pkt_size = tx_start_bd->nbytes;
3021 DP(NETIF_MSG_TX_QUEUED,
3022 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3023 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3024 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3025 tx_start_bd->bd_flags.as_bitfield,
3026 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3028 if (xmit_type & XMIT_GSO) {
3030 DP(NETIF_MSG_TX_QUEUED,
3031 "TSO packet len %d hlen %d total len %d tso size %d\n",
3032 skb->len, hlen, skb_headlen(skb),
3033 skb_shinfo(skb)->gso_size);
3035 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3037 if (unlikely(skb_headlen(skb) > hlen))
3038 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3041 if (!CHIP_IS_E1x(bp))
3042 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3045 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3048 /* Set the PBD's parsing_data field if not zero
3049 * (for the chips newer than 57711).
3051 if (pbd_e2_parsing_data)
3052 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3054 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3056 /* Handle fragmented skb */
3057 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3058 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3060 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3061 skb_frag_size(frag), DMA_TO_DEVICE);
3062 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3063 unsigned int pkts_compl = 0, bytes_compl = 0;
3065 DP(NETIF_MSG_TX_QUEUED,
3066 "Unable to map page - dropping packet...\n");
3068 /* we need to unmap all buffers already mapped
3070 * first_bd->nbd needs to be properly updated
3071 * before the call to bnx2x_free_tx_pkt
3073 first_bd->nbd = cpu_to_le16(nbd);
3074 bnx2x_free_tx_pkt(bp, txdata,
3075 TX_BD(txdata->tx_pkt_prod),
3076 &pkts_compl, &bytes_compl);
3077 return NETDEV_TX_OK;
3080 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3081 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3082 if (total_pkt_bd == NULL)
3083 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3085 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3086 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3087 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3088 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3091 DP(NETIF_MSG_TX_QUEUED,
3092 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3093 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3094 le16_to_cpu(tx_data_bd->nbytes));
3097 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3099 /* update with actual num BDs */
3100 first_bd->nbd = cpu_to_le16(nbd);
3102 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3104 /* now send a tx doorbell, counting the next BD
3105 * if the packet contains or ends with it
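 * (informal reading of the TX_BD_POFF() test below: if the
 * producer's offset within the current BD page is smaller than nbd,
 * this packet's BDs wrapped past a page boundary, so the "next page"
 * BD inside that range must be counted in nbd as well)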
3107 if (TX_BD_POFF(bd_prod) < nbd)
3110 /* total_pkt_bytes should be set on the first data BD if
3111 * it's not an LSO packet and there is more than one
3112 * data BD. In this case pkt_size is limited by an MTU value.
3113 * However we prefer to set it for an LSO packet (even though we don't
3114 * have to) in order to save some CPU cycles in the non-LSO
3115 * case, where we care about them much more.
3117 if (total_pkt_bd != NULL)
3118 total_pkt_bd->total_pkt_bytes = pkt_size;
3121 DP(NETIF_MSG_TX_QUEUED,
3122 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3123 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3124 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3125 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3126 le16_to_cpu(pbd_e1x->total_hlen_w));
3128 DP(NETIF_MSG_TX_QUEUED,
3129 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3130 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3131 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3132 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3133 pbd_e2->parsing_data);
3134 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3136 netdev_tx_sent_queue(txq, skb->len);
3138 skb_tx_timestamp(skb);
3140 txdata->tx_pkt_prod++;
3142 * Make sure that the BD data is updated before updating the producer
3143 * since FW might read the BD right after the producer is updated.
3144 * This is only applicable for weak-ordered memory model archs such
3145 * as IA-64. The following barrier is also mandatory since the FW
3146 * assumes packets must have BDs.
3150 txdata->tx_db.data.prod += nbd;
3153 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3157 txdata->tx_bd_prod += nbd;
3159 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
3160 netif_tx_stop_queue(txq);
3162 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3163 * ordering of set_bit() in netif_tx_stop_queue() and read of
3167 fp->eth_q_stats.driver_xoff++;
3168 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
3169 netif_tx_wake_queue(txq);
3173 return NETDEV_TX_OK;
3177 * bnx2x_setup_tc - routine to configure net_device for multi tc
3179 * @dev: net device to configure
3180 * @num_tc: number of traffic classes to enable
3182 * callback connected to the ndo_setup_tc function pointer
3184 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3186 int cos, prio, count, offset;
3187 struct bnx2x *bp = netdev_priv(dev);
3189 /* setup tc must be called under rtnl lock */
3192 /* no traffic classes requested; reset the tc mapping */
3194 netdev_reset_tc(dev);
3198 /* requested to support too many traffic classes */
3199 if (num_tc > bp->max_cos) {
3200 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3201 num_tc, bp->max_cos);
3205 /* declare the number of supported traffic classes */
3206 if (netdev_set_num_tc(dev, num_tc)) {
3207 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3211 /* configure priority to traffic class mapping */
3212 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3213 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3214 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3215 "mapping priority %d to tc %d\n",
3216 prio, bp->prio_to_cos[prio]);
3220 /* Use this configuration to differentiate tc0 from other COSes.
3221 This can be used for ets or pfc, and save the effort of setting
3222 up a multi-class queueing discipline or negotiating DCBX with a switch
3223 netdev_set_prio_tc_map(dev, 0, 0);
3224 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3225 for (prio = 1; prio < 16; prio++) {
3226 netdev_set_prio_tc_map(dev, prio, 1);
3227 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3230 /* configure traffic class to transmission queue mapping */
3231 for (cos = 0; cos < bp->max_cos; cos++) {
3232 count = BNX2X_NUM_ETH_QUEUES(bp);
3233 offset = cos * MAX_TXQS_PER_COS;
3234 netdev_set_tc_queue(dev, cos, count, offset);
3235 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3236 "mapping tc %d to offset %d count %d\n",
3237 cos, offset, count);
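
/* Illustration (made-up sizes): with num_tc = 2 and 4 ETH queues,
 * tc0 maps to txqs [0..3] at offset 0 and tc1 to the 4 txqs starting
 * at MAX_TXQS_PER_COS - each COS gets a fixed-stride slice of the
 * TX queue space.
 */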
3243 /* called with rtnl_lock */
3244 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3246 struct sockaddr *addr = p;
3247 struct bnx2x *bp = netdev_priv(dev);
3250 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3251 BNX2X_ERR("Requested MAC address is not valid\n");
3256 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3257 !is_zero_ether_addr(addr->sa_data)) {
3258 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3263 if (netif_running(dev)) {
3264 rc = bnx2x_set_eth_mac(bp, false);
3269 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3270 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3272 if (netif_running(dev))
3273 rc = bnx2x_set_eth_mac(bp, true);
3278 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3280 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3281 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3286 if (IS_FCOE_IDX(fp_index)) {
3287 memset(sb, 0, sizeof(union host_hc_status_block));
3288 fp->status_blk_mapping = 0;
3293 if (!CHIP_IS_E1x(bp))
3294 BNX2X_PCI_FREE(sb->e2_sb,
3295 bnx2x_fp(bp, fp_index,
3296 status_blk_mapping),
3297 sizeof(struct host_hc_status_block_e2));
3299 BNX2X_PCI_FREE(sb->e1x_sb,
3300 bnx2x_fp(bp, fp_index,
3301 status_blk_mapping),
3302 sizeof(struct host_hc_status_block_e1x));
3307 if (!skip_rx_queue(bp, fp_index)) {
3308 bnx2x_free_rx_bds(fp);
3310 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3311 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3312 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3313 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3314 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3316 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3317 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3318 sizeof(struct eth_fast_path_rx_cqe) *
3322 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3323 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3324 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3325 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3329 if (!skip_tx_queue(bp, fp_index)) {
3330 /* fastpath tx rings: tx_buf tx_desc */
3331 for_each_cos_in_tx_queue(fp, cos) {
3332 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3334 DP(NETIF_MSG_IFDOWN,
3335 "freeing tx memory of fp %d cos %d cid %d\n",
3336 fp_index, cos, txdata->cid);
3338 BNX2X_FREE(txdata->tx_buf_ring);
3339 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3340 txdata->tx_desc_mapping,
3341 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3344 /* end of fastpath */
3347 void bnx2x_free_fp_mem(struct bnx2x *bp)
3350 for_each_queue(bp, i)
3351 bnx2x_free_fp_mem_at(bp, i);
3354 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3356 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3357 if (!CHIP_IS_E1x(bp)) {
3358 bnx2x_fp(bp, index, sb_index_values) =
3359 (__le16 *)status_blk.e2_sb->sb.index_values;
3360 bnx2x_fp(bp, index, sb_running_index) =
3361 (__le16 *)status_blk.e2_sb->sb.running_index;
3363 bnx2x_fp(bp, index, sb_index_values) =
3364 (__le16 *)status_blk.e1x_sb->sb.index_values;
3365 bnx2x_fp(bp, index, sb_running_index) =
3366 (__le16 *)status_blk.e1x_sb->sb.running_index;
3370 /* Returns the number of actually allocated BDs */
3371 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3374 struct bnx2x *bp = fp->bp;
3375 u16 ring_prod, cqe_ring_prod;
3376 int i, failure_cnt = 0;
3378 fp->rx_comp_cons = 0;
3379 cqe_ring_prod = ring_prod = 0;
3381 /* This routine is called only during fp init so
3382 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3384 for (i = 0; i < rx_ring_size; i++) {
3385 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3389 ring_prod = NEXT_RX_IDX(ring_prod);
3390 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3391 WARN_ON(ring_prod <= (i - failure_cnt));
3395 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3396 i - failure_cnt, fp->index);
3398 fp->rx_bd_prod = ring_prod;
3399 /* Limit the CQE producer by the CQE ring size */
3400 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3402 fp->rx_pkt = fp->rx_calls = 0;
3404 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3406 return i - failure_cnt;
3409 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3413 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3414 struct eth_rx_cqe_next_page *nextpg;
3416 nextpg = (struct eth_rx_cqe_next_page *)
3417 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3419 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3420 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3422 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3423 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
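
/* The last CQE of each page thus points at the next page (wrapping
 * via i % NUM_RCQ_RINGS), which is what lets the HW walk the
 * completion queue pages as one logical ring.
 */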
3427 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3429 union host_hc_status_block *sb;
3430 struct bnx2x_fastpath *fp = &bp->fp[index];
3433 int rx_ring_size = 0;
3436 if (!bp->rx_ring_size &&
3437 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3438 rx_ring_size = MIN_RX_SIZE_NONTPA;
3439 bp->rx_ring_size = rx_ring_size;
3442 if (!bp->rx_ring_size) {
3443 u32 cfg = SHMEM_RD(bp,
3444 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
3446 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3448 /* Decrease ring size for 1G functions */
3449 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3450 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3453 /* allocate at least the number of buffers required by FW */
3454 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3455 MIN_RX_SIZE_TPA, rx_ring_size);
3457 bp->rx_ring_size = rx_ring_size;
3458 } else /* if rx_ring_size specified - use it */
3459 rx_ring_size = bp->rx_ring_size;
3462 sb = &bnx2x_fp(bp, index, status_blk);
3464 if (!IS_FCOE_IDX(index)) {
3467 if (!CHIP_IS_E1x(bp))
3468 BNX2X_PCI_ALLOC(sb->e2_sb,
3469 &bnx2x_fp(bp, index, status_blk_mapping),
3470 sizeof(struct host_hc_status_block_e2));
3472 BNX2X_PCI_ALLOC(sb->e1x_sb,
3473 &bnx2x_fp(bp, index, status_blk_mapping),
3474 sizeof(struct host_hc_status_block_e1x));
3479 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3480 * set shortcuts for it.
3482 if (!IS_FCOE_IDX(index))
3483 set_sb_shortcuts(bp, index);
3486 if (!skip_tx_queue(bp, index)) {
3487 /* fastpath tx rings: tx_buf tx_desc */
3488 for_each_cos_in_tx_queue(fp, cos) {
3489 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3492 "allocating tx memory of fp %d cos %d\n",
3495 BNX2X_ALLOC(txdata->tx_buf_ring,
3496 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3497 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3498 &txdata->tx_desc_mapping,
3499 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3504 if (!skip_rx_queue(bp, index)) {
3505 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3506 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3507 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3508 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3509 &bnx2x_fp(bp, index, rx_desc_mapping),
3510 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3512 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3513 &bnx2x_fp(bp, index, rx_comp_mapping),
3514 sizeof(struct eth_fast_path_rx_cqe) *
3518 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3519 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3520 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3521 &bnx2x_fp(bp, index, rx_sge_mapping),
3522 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3524 bnx2x_set_next_page_rx_bd(fp);
3527 bnx2x_set_next_page_rx_cq(fp);
3530 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3531 if (ring_size < rx_ring_size)
3537 /* handles low memory cases */
3539 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3541 /* FW will drop all packets if the queue is not big enough;
3542 * in these cases we disable the queue.
3543 * The minimum size differs for OOO, TPA and non-TPA queues.
3545 if (ring_size < (fp->disable_tpa ?
3546 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3547 /* release memory allocated for this queue */
3548 bnx2x_free_fp_mem_at(bp, index);
3554 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3559 * 1. Allocate FP for leading - fatal if error
3560 * 2. {CNIC} Allocate FCoE FP - fatal if error
3561 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3562 * 4. Allocate RSS - fix number of queues if error
3566 if (bnx2x_alloc_fp_mem_at(bp, 0))
3572 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3573 /* we will fail the load process instead of marking NO_FCOE_FLAG */
3580 for_each_nondefault_eth_queue(bp, i)
3581 if (bnx2x_alloc_fp_mem_at(bp, i))
3584 /* handle memory failures */
3585 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3586 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3591 * move non-eth FPs next to the last eth FP;
3592 * must be done in this order:
3593 * FCOE_IDX < FWD_IDX < OOO_IDX
3596 /* move FCoE fp even if NO_FCOE_FLAG is on */
3597 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3599 bp->num_queues -= delta;
3600 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3601 bp->num_queues + delta, bp->num_queues);
3607 void bnx2x_free_mem_bp(struct bnx2x *bp)
3610 kfree(bp->msix_table);
3614 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3616 struct bnx2x_fastpath *fp;
3617 struct msix_entry *tbl;
3618 struct bnx2x_ilt *ilt;
3619 int msix_table_size = 0;
3622 * The biggest MSI-X table we might need is the maximum number of fast
3623 * path IGU SBs plus the default SB (for the PF).
3625 msix_table_size = bp->igu_sb_cnt + 1;
3627 /* fp array: RSS plus CNIC related L2 queues */
3628 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
3629 sizeof(*fp), GFP_KERNEL);
3635 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3638 bp->msix_table = tbl;
3641 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3648 bnx2x_free_mem_bp(bp);
3653 int bnx2x_reload_if_running(struct net_device *dev)
3655 struct bnx2x *bp = netdev_priv(dev);
3657 if (unlikely(!netif_running(dev)))
3660 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3661 return bnx2x_nic_load(bp, LOAD_NORMAL);
3664 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3666 u32 sel_phy_idx = 0;
3667 if (bp->link_params.num_phys <= 1)
3670 if (bp->link_vars.link_up) {
3671 sel_phy_idx = EXT_PHY1;
3672 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3673 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3674 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3675 sel_phy_idx = EXT_PHY2;
3678 switch (bnx2x_phy_selection(&bp->link_params)) {
3679 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3680 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3681 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3682 sel_phy_idx = EXT_PHY1;
3684 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3685 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3686 sel_phy_idx = EXT_PHY2;
3694 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3696 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3698 * The selected active PHY index is always the post-swap one (in case PHY
3699 * swapping is enabled). So when swapping is enabled, we need to reverse
3703 if (bp->link_params.multi_phy_config &
3704 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3705 if (sel_phy_idx == EXT_PHY1)
3706 sel_phy_idx = EXT_PHY2;
3707 else if (sel_phy_idx == EXT_PHY2)
3708 sel_phy_idx = EXT_PHY1;
3710 return LINK_CONFIG_IDX(sel_phy_idx);
3713 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3714 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3716 struct bnx2x *bp = netdev_priv(dev);
3717 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3720 case NETDEV_FCOE_WWNN:
3721 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3722 cp->fcoe_wwn_node_name_lo);
3724 case NETDEV_FCOE_WWPN:
3725 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3726 cp->fcoe_wwn_port_name_lo);
3729 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3737 /* called with rtnl_lock */
3738 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3740 struct bnx2x *bp = netdev_priv(dev);
3742 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3743 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
3747 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3748 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3749 BNX2X_ERR("Can't support requested MTU size\n");
3753 /* This does not race with packet allocation
3754 * because the actual alloc size is
3755 * only updated as part of load
3759 return bnx2x_reload_if_running(dev);
3762 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3763 netdev_features_t features)
3765 struct bnx2x *bp = netdev_priv(dev);
3767 /* TPA requires Rx CSUM offloading */
3768 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3769 features &= ~NETIF_F_LRO;
3770 features &= ~NETIF_F_GRO;
3776 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3778 struct bnx2x *bp = netdev_priv(dev);
3779 u32 flags = bp->flags;
3780 bool bnx2x_reload = false;
3782 if (features & NETIF_F_LRO)
3783 flags |= TPA_ENABLE_FLAG;
3785 flags &= ~TPA_ENABLE_FLAG;
3787 if (features & NETIF_F_GRO)
3788 flags |= GRO_ENABLE_FLAG;
3790 flags &= ~GRO_ENABLE_FLAG;
3792 if (features & NETIF_F_LOOPBACK) {
3793 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3794 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3795 bnx2x_reload = true;
3798 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3799 bp->link_params.loopback_mode = LOOPBACK_NONE;
3800 bnx2x_reload = true;
3804 if (flags ^ bp->flags) {
3806 bnx2x_reload = true;
3810 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3811 return bnx2x_reload_if_running(dev);
3812 /* else: bnx2x_nic_load() will be called at end of recovery */
3818 void bnx2x_tx_timeout(struct net_device *dev)
3820 struct bnx2x *bp = netdev_priv(dev);
3822 #ifdef BNX2X_STOP_ON_ERROR
3827 smp_mb__before_clear_bit();
3828 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3829 smp_mb__after_clear_bit();
3831 /* This allows the netif to be shut down gracefully before resetting */
3832 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3835 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3837 struct net_device *dev = pci_get_drvdata(pdev);
3841 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3844 bp = netdev_priv(dev);
3848 pci_save_state(pdev);
3850 if (!netif_running(dev)) {
3855 netif_device_detach(dev);
3857 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3859 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3866 int bnx2x_resume(struct pci_dev *pdev)
3868 struct net_device *dev = pci_get_drvdata(pdev);
3873 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3876 bp = netdev_priv(dev);
3878 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3879 BNX2X_ERR("Handling parity error recovery. Try again later\n");
3885 pci_restore_state(pdev);
3887 if (!netif_running(dev)) {
3892 bnx2x_set_power_state(bp, PCI_D0);
3893 netif_device_attach(dev);
3895 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3903 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3906 /* ustorm cxt validation */
3907 cxt->ustorm_ag_context.cdu_usage =
3908 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3909 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3910 /* xcontext validation */
3911 cxt->xstorm_ag_context.cdu_reserved =
3912 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3913 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3916 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3917 u8 fw_sb_id, u8 sb_index,
3921 u32 addr = BAR_CSTRORM_INTMEM +
3922 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3923 REG_WR8(bp, addr, ticks);
3925 "port %x fw_sb_id %d sb_index %d ticks %d\n",
3926 port, fw_sb_id, sb_index, ticks);
3929 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3930 u16 fw_sb_id, u8 sb_index,
3933 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3934 u32 addr = BAR_CSTRORM_INTMEM +
3935 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3936 u16 flags = REG_RD16(bp, addr);
3938 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3939 flags |= enable_flag;
3940 REG_WR16(bp, addr, flags);
3942 "port %x fw_sb_id %d sb_index %d disable %d\n",
3943 port, fw_sb_id, sb_index, disable);
3946 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3947 u8 sb_index, u8 disable, u16 usec)
3949 int port = BP_PORT(bp);
3950 u8 ticks = usec / BNX2X_BTR;
3952 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
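/* a usec value of 0 is treated as an implicit disable request below */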
3954 disable = disable ? 1 : (usec ? 0 : 1);
3955 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);