1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2012 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of the bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
43 * source onto the target. Update txdata pointers and related
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
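 * (e.g. with max_cos == 2 and a delta of 1 queue, each txdata index drops by 2)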
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
83 memcpy(&bp->bnx2x_txq[new_txdata_index],
84 &bp->bnx2x_txq[old_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
90 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
93 * @delta: number of eth queues which were not allocated
95 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
97 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
99 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
100 * backward along the array could cause memory to be overridden
102 for (cos = 1; cos < bp->max_cos; cos++) {
103 for (i = 0; i < old_eth_num - delta; i++) {
104 struct bnx2x_fastpath *fp = &bp->fp[i];
105 int new_idx = cos * (old_eth_num - delta) + i;
107 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
108 sizeof(struct bnx2x_fp_txdata));
109 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
114 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
116 /* free skb in the packet ring at pos idx
117 * return idx of last bd freed
119 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
120 u16 idx, unsigned int *pkts_compl,
121 unsigned int *bytes_compl)
123 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
124 struct eth_tx_start_bd *tx_start_bd;
125 struct eth_tx_bd *tx_data_bd;
126 struct sk_buff *skb = tx_buf->skb;
127 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
130 /* prefetch skb end pointer to speed up dev_kfree_skb() */
133 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
134 txdata->txq_index, idx, tx_buf, skb);
137 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
138 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
139 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
142 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
143 #ifdef BNX2X_STOP_ON_ERROR
144 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
145 BNX2X_ERR("BAD nbd!\n");
149 new_cons = nbd + tx_buf->first_bd;
151 /* Get the next bd */
152 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
154 /* Skip a parse bd... */
156 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
158 /* ...and the TSO split header bd since they have no mapping */
159 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
161 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
167 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
168 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
169 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
171 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
178 (*bytes_compl) += skb->len;
181 dev_kfree_skb_any(skb);
182 tx_buf->first_bd = 0;
188 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
190 struct netdev_queue *txq;
191 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
192 unsigned int pkts_compl = 0, bytes_compl = 0;
194 #ifdef BNX2X_STOP_ON_ERROR
195 if (unlikely(bp->panic))
199 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
200 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
201 sw_cons = txdata->tx_pkt_cons;
203 while (sw_cons != hw_cons) {
206 pkt_cons = TX_BD(sw_cons);
208 DP(NETIF_MSG_TX_DONE,
209 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
210 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
212 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
213 &pkts_compl, &bytes_compl);
218 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
220 txdata->tx_pkt_cons = sw_cons;
221 txdata->tx_bd_cons = bd_cons;
223 /* Need to make the tx_bd_cons update visible to start_xmit()
224 * before checking for netif_tx_queue_stopped(). Without the
225 * memory barrier, there is a small possibility that
226 * start_xmit() will miss it and cause the queue to be stopped
228 * On the other hand we need an rmb() here to ensure the proper
229 * ordering of bit testing in the following
230 * netif_tx_queue_stopped(txq) call.
234 if (unlikely(netif_tx_queue_stopped(txq))) {
235 /* Taking tx_lock() is needed to prevent re-enabling the queue
236 * while it's empty. This could have happened if rx_action() gets
237 * suspended in bnx2x_tx_int() after the condition before
238 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
240 * stops the queue->sees fresh tx_bd_cons->releases the queue->
241 * sends some packets consuming the whole queue again->
245 __netif_tx_lock(txq, smp_processor_id());
247 if ((netif_tx_queue_stopped(txq)) &&
248 (bp->state == BNX2X_STATE_OPEN) &&
249 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
250 netif_tx_wake_queue(txq);
252 __netif_tx_unlock(txq);
257 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
260 u16 last_max = fp->last_max_sge;
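/* compare as a signed 16-bit difference so SGE index wrap-around is handled */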
262 if (SUB_S16(idx, last_max) > 0)
263 fp->last_max_sge = idx;
266 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
268 struct eth_end_agg_rx_cqe *cqe)
270 struct bnx2x *bp = fp->bp;
271 u16 last_max, last_elem, first_elem;
278 /* First mark all used pages */
279 for (i = 0; i < sge_len; i++)
280 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
281 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
283 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
284 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
286 /* Here we assume that the last SGE index is the biggest */
287 prefetch((void *)(fp->sge_mask));
288 bnx2x_update_last_max_sge(fp,
289 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
291 last_max = RX_SGE(fp->last_max_sge);
292 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
293 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
295 /* If ring is not full */
296 if (last_elem + 1 != first_elem)
299 /* Now update the prod */
300 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
301 if (likely(fp->sge_mask[i]))
304 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
305 delta += BIT_VEC64_ELEM_SZ;
309 fp->rx_sge_prod += delta;
310 /* clear page-end entries */
311 bnx2x_clear_sge_mask_next_elems(fp);
314 DP(NETIF_MSG_RX_STATUS,
315 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
316 fp->last_max_sge, fp->rx_sge_prod);
319 /* Set Toeplitz hash value in the skb using the value from the
320 * CQE (calculated by HW).
322 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
323 const struct eth_fast_path_rx_cqe *cqe,
326 /* Set Toeplitz hash from CQE */
327 if ((bp->dev->features & NETIF_F_RXHASH) &&
328 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
329 enum eth_rss_hash_type htype;
331 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
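/* only the TCP hash types cover L4 ports, so only then is this a valid l4_rxhash */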
332 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
333 (htype == TCP_IPV6_HASH_TYPE);
334 return le32_to_cpu(cqe->rss_hash_result);
340 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
342 struct eth_fast_path_rx_cqe *cqe)
344 struct bnx2x *bp = fp->bp;
345 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
346 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
347 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
349 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
350 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
352 /* print error if current state != stop */
353 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
354 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
356 /* Try to map an empty data buffer from the aggregation info */
357 mapping = dma_map_single(&bp->pdev->dev,
358 first_buf->data + NET_SKB_PAD,
359 fp->rx_buf_size, DMA_FROM_DEVICE);
361 * ...if it fails - move the skb from the consumer to the producer
362 * and set the current aggregation state as ERROR to drop it
363 * when TPA_STOP arrives.
366 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
367 /* Move the BD from the consumer to the producer */
368 bnx2x_reuse_rx_data(fp, cons, prod);
369 tpa_info->tpa_state = BNX2X_TPA_ERROR;
373 /* move empty data from pool to prod */
374 prod_rx_buf->data = first_buf->data;
375 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
376 /* point prod_bd to new data */
377 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
378 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
380 /* move partial skb from cons to pool (don't unmap yet) */
381 *first_buf = *cons_rx_buf;
383 /* mark bin state as START */
384 tpa_info->parsing_flags =
385 le16_to_cpu(cqe->pars_flags.flags);
386 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
387 tpa_info->tpa_state = BNX2X_TPA_START;
388 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
389 tpa_info->placement_offset = cqe->placement_offset;
390 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
391 if (fp->mode == TPA_MODE_GRO) {
392 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
393 tpa_info->full_page =
394 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
395 tpa_info->gro_size = gro_size;
398 #ifdef BNX2X_STOP_ON_ERROR
399 fp->tpa_queue_used |= (1 << queue);
400 #ifdef _ASM_GENERIC_INT_L64_H
401 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
403 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
409 /* Timestamp option length allowed for TPA aggregation:
411 * nop nop kind length echo val
413 #define TPA_TSTAMP_OPT_LEN 12
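/* 2 NOP bytes + kind (1) + length (1) + TS value (4) + TS echo reply (4) = 12 */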
415 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
418 * @parsing_flags: parsing flags from the START CQE
419 * @len_on_bd: total length of the first packet for the
422 * Approximate value of the MSS for this aggregation, calculated from
423 * its first packet.
425 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
429 * TPA aggregation won't have either IP options or TCP options
430 * other than timestamp or IPv6 extension headers.
432 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
434 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
435 PRS_FLAG_OVERETH_IPV6)
436 hdrs_len += sizeof(struct ipv6hdr);
438 hdrs_len += sizeof(struct iphdr);
441 /* Check if there was a TCP timestamp; if there was, it will
442 * always be 12 bytes long: nop nop kind length echo val.
444 * Otherwise FW would close the aggregation.
446 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
447 hdrs_len += TPA_TSTAMP_OPT_LEN;
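/* e.g. IPv4 with timestamps: 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (TS) = 66
 * header bytes are subtracted from len_on_bd
 */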
449 return len_on_bd - hdrs_len;
452 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
453 struct bnx2x_fastpath *fp, u16 index)
455 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
456 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
457 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
460 if (unlikely(page == NULL)) {
461 BNX2X_ERR("Can't alloc sge\n");
465 mapping = dma_map_page(&bp->pdev->dev, page, 0,
466 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
467 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
468 __free_pages(page, PAGES_PER_SGE_SHIFT);
469 BNX2X_ERR("Can't map sge\n");
474 dma_unmap_addr_set(sw_buf, mapping, mapping);
476 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
477 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
482 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
483 struct bnx2x_agg_info *tpa_info,
486 struct eth_end_agg_rx_cqe *cqe,
489 struct sw_rx_page *rx_pg, old_rx_pg;
490 u32 i, frag_len, frag_size;
491 int err, j, frag_id = 0;
492 u16 len_on_bd = tpa_info->len_on_bd;
493 u16 full_page = 0, gro_size = 0;
495 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
497 if (fp->mode == TPA_MODE_GRO) {
498 gro_size = tpa_info->gro_size;
499 full_page = tpa_info->full_page;
502 /* This is needed in order to enable forwarding support */
504 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
505 tpa_info->parsing_flags, len_on_bd);
507 skb_shinfo(skb)->gso_type =
508 (GET_FLAG(tpa_info->parsing_flags,
509 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
510 PRS_FLAG_OVERETH_IPV6) ?
511 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
515 #ifdef BNX2X_STOP_ON_ERROR
516 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
517 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
519 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
525 /* Run through the SGL and compose the fragmented skb */
526 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
527 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
529 /* FW gives the indices of the SGE as if the ring is an array
530 (meaning that "next" element will consume 2 indices) */
531 if (fp->mode == TPA_MODE_GRO)
532 frag_len = min_t(u32, frag_size, (u32)full_page);
534 frag_len = min_t(u32, frag_size,
535 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
537 rx_pg = &fp->rx_page_ring[sge_idx];
540 /* If we fail to allocate a substitute page, we simply stop
541 where we are and drop the whole packet */
542 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
544 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
548 /* Unmap the page as we are going to pass it to the stack */
549 dma_unmap_page(&bp->pdev->dev,
550 dma_unmap_addr(&old_rx_pg, mapping),
551 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
552 /* Add one frag and update the appropriate fields in the skb */
553 if (fp->mode == TPA_MODE_LRO)
554 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
558 for (rem = frag_len; rem > 0; rem -= gro_size) {
559 int len = rem > gro_size ? gro_size : rem;
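/* each skb frag holds at most one MSS-sized (gro_size) chunk of the page */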
560 skb_fill_page_desc(skb, frag_id++,
561 old_rx_pg.page, offset, len);
563 get_page(old_rx_pg.page);
568 skb->data_len += frag_len;
569 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
570 skb->len += frag_len;
572 frag_size -= frag_len;
578 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
580 if (fp->rx_frag_size)
581 put_page(virt_to_head_page(data));
586 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
588 if (fp->rx_frag_size)
589 return netdev_alloc_frag(fp->rx_frag_size);
591 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
595 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
596 struct bnx2x_agg_info *tpa_info,
598 struct eth_end_agg_rx_cqe *cqe,
601 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
602 u8 pad = tpa_info->placement_offset;
603 u16 len = tpa_info->len_on_bd;
604 struct sk_buff *skb = NULL;
605 u8 *new_data, *data = rx_buf->data;
606 u8 old_tpa_state = tpa_info->tpa_state;
608 tpa_info->tpa_state = BNX2X_TPA_STOP;
610 /* If there was an error during the handling of the TPA_START -
611 * drop this aggregation.
613 if (old_tpa_state == BNX2X_TPA_ERROR)
616 /* Try to allocate the new data */
617 new_data = bnx2x_frag_alloc(fp);
618 /* Unmap skb in the pool anyway, as we are going to change
619 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
621 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
622 fp->rx_buf_size, DMA_FROM_DEVICE);
623 if (likely(new_data))
624 skb = build_skb(data, fp->rx_frag_size);
627 #ifdef BNX2X_STOP_ON_ERROR
628 if (pad + len > fp->rx_buf_size) {
629 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
630 pad, len, fp->rx_buf_size);
636 skb_reserve(skb, pad + NET_SKB_PAD);
638 skb->rxhash = tpa_info->rxhash;
639 skb->l4_rxhash = tpa_info->l4_rxhash;
641 skb->protocol = eth_type_trans(skb, bp->dev);
642 skb->ip_summed = CHECKSUM_UNNECESSARY;
644 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
645 skb, cqe, cqe_idx)) {
646 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
647 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
648 napi_gro_receive(&fp->napi, skb);
650 DP(NETIF_MSG_RX_STATUS,
651 "Failed to allocate new pages - dropping packet!\n");
652 dev_kfree_skb_any(skb);
656 /* put new data in bin */
657 rx_buf->data = new_data;
661 bnx2x_frag_free(fp, new_data);
663 /* drop the packet and keep the buffer in the bin */
664 DP(NETIF_MSG_RX_STATUS,
665 "Failed to allocate or map a new skb - dropping packet!\n");
666 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
669 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
670 struct bnx2x_fastpath *fp, u16 index)
673 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
674 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
677 data = bnx2x_frag_alloc(fp);
678 if (unlikely(data == NULL))
681 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
684 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
685 bnx2x_frag_free(fp, data);
686 BNX2X_ERR("Can't map rx data\n");
691 dma_unmap_addr_set(rx_buf, mapping, mapping);
693 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
694 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
700 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
701 struct bnx2x_fastpath *fp,
702 struct bnx2x_eth_q_stats *qstats)
704 /* Do nothing if no L4 csum validation was done.
705 * We do not check whether IP csum was validated. For IPv4 we assume
706 * that if the card got as far as validating the L4 csum, it also
707 * validated the IP csum. IPv6 has no IP csum.
709 if (cqe->fast_path_cqe.status_flags &
710 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
713 /* If L4 validation was done, check if an error was found. */
715 if (cqe->fast_path_cqe.type_error_flags &
716 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
717 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
718 qstats->hw_csum_err++;
720 skb->ip_summed = CHECKSUM_UNNECESSARY;
723 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
725 struct bnx2x *bp = fp->bp;
726 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
727 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
730 #ifdef BNX2X_STOP_ON_ERROR
731 if (unlikely(bp->panic))
735 /* CQ "next element" is of the size of the regular element,
736 that's why it's ok here */
737 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
738 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
741 bd_cons = fp->rx_bd_cons;
742 bd_prod = fp->rx_bd_prod;
743 bd_prod_fw = bd_prod;
744 sw_comp_cons = fp->rx_comp_cons;
745 sw_comp_prod = fp->rx_comp_prod;
747 /* Memory barrier necessary as speculative reads of the rx
748 * buffer can be ahead of the index in the status block
752 DP(NETIF_MSG_RX_STATUS,
753 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
754 fp->index, hw_comp_cons, sw_comp_cons);
756 while (sw_comp_cons != hw_comp_cons) {
757 struct sw_rx_bd *rx_buf = NULL;
759 union eth_rx_cqe *cqe;
760 struct eth_fast_path_rx_cqe *cqe_fp;
762 enum eth_rx_cqe_type cqe_fp_type;
767 #ifdef BNX2X_STOP_ON_ERROR
768 if (unlikely(bp->panic))
772 comp_ring_cons = RCQ_BD(sw_comp_cons);
773 bd_prod = RX_BD(bd_prod);
774 bd_cons = RX_BD(bd_cons);
776 cqe = &fp->rx_comp_ring[comp_ring_cons];
777 cqe_fp = &cqe->fast_path_cqe;
778 cqe_fp_flags = cqe_fp->type_error_flags;
779 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
781 DP(NETIF_MSG_RX_STATUS,
782 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
783 CQE_TYPE(cqe_fp_flags),
784 cqe_fp_flags, cqe_fp->status_flags,
785 le32_to_cpu(cqe_fp->rss_hash_result),
786 le16_to_cpu(cqe_fp->vlan_tag),
787 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
789 /* is this a slowpath msg? */
790 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
791 bnx2x_sp_event(fp, cqe);
795 rx_buf = &fp->rx_buf_ring[bd_cons];
798 if (!CQE_TYPE_FAST(cqe_fp_type)) {
799 struct bnx2x_agg_info *tpa_info;
800 u16 frag_size, pages;
801 #ifdef BNX2X_STOP_ON_ERROR
803 if (fp->disable_tpa &&
804 (CQE_TYPE_START(cqe_fp_type) ||
805 CQE_TYPE_STOP(cqe_fp_type)))
806 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
807 CQE_TYPE(cqe_fp_type));
810 if (CQE_TYPE_START(cqe_fp_type)) {
811 u16 queue = cqe_fp->queue_index;
812 DP(NETIF_MSG_RX_STATUS,
813 "calling tpa_start on queue %d\n",
816 bnx2x_tpa_start(fp, queue,
823 queue = cqe->end_agg_cqe.queue_index;
824 tpa_info = &fp->tpa_info[queue];
825 DP(NETIF_MSG_RX_STATUS,
826 "calling tpa_stop on queue %d\n",
829 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
832 if (fp->mode == TPA_MODE_GRO)
833 pages = (frag_size + tpa_info->full_page - 1) /
836 pages = SGE_PAGE_ALIGN(frag_size) >>
839 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
840 &cqe->end_agg_cqe, comp_ring_cons);
841 #ifdef BNX2X_STOP_ON_ERROR
846 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
850 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
851 pad = cqe_fp->placement_offset;
852 dma_sync_single_for_cpu(&bp->pdev->dev,
853 dma_unmap_addr(rx_buf, mapping),
854 pad + RX_COPY_THRESH,
857 prefetch(data + pad); /* speedup eth_type_trans() */
858 /* is this an error packet? */
859 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
860 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
861 "ERROR flags %x rx packet %u\n",
862 cqe_fp_flags, sw_comp_cons);
863 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
867 /* Since we don't have a jumbo ring
868 * copy small packets if mtu > 1500
870 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
871 (len <= RX_COPY_THRESH)) {
872 skb = netdev_alloc_skb_ip_align(bp->dev, len);
874 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
875 "ERROR packet dropped because of alloc failure\n");
876 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
879 memcpy(skb->data, data + pad, len);
880 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
882 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
883 dma_unmap_single(&bp->pdev->dev,
884 dma_unmap_addr(rx_buf, mapping),
887 skb = build_skb(data, fp->rx_frag_size);
888 if (unlikely(!skb)) {
889 bnx2x_frag_free(fp, data);
890 bnx2x_fp_qstats(bp, fp)->
891 rx_skb_alloc_failed++;
894 skb_reserve(skb, pad);
896 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
897 "ERROR packet dropped because of alloc failure\n");
898 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
900 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
906 skb->protocol = eth_type_trans(skb, bp->dev);
908 /* Set Toeplitz hash for a non-LRO skb */
909 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
910 skb->l4_rxhash = l4_rxhash;
912 skb_checksum_none_assert(skb);
914 if (bp->dev->features & NETIF_F_RXCSUM)
915 bnx2x_csum_validate(skb, cqe, fp,
916 bnx2x_fp_qstats(bp, fp));
918 skb_record_rx_queue(skb, fp->rx_queue);
920 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
922 __vlan_hwaccel_put_tag(skb,
923 le16_to_cpu(cqe_fp->vlan_tag));
924 napi_gro_receive(&fp->napi, skb);
930 bd_cons = NEXT_RX_IDX(bd_cons);
931 bd_prod = NEXT_RX_IDX(bd_prod);
932 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
935 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
936 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
938 if (rx_pkt == budget)
942 fp->rx_bd_cons = bd_cons;
943 fp->rx_bd_prod = bd_prod_fw;
944 fp->rx_comp_cons = sw_comp_cons;
945 fp->rx_comp_prod = sw_comp_prod;
947 /* Update producers */
948 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
951 fp->rx_pkt += rx_pkt;
957 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
959 struct bnx2x_fastpath *fp = fp_cookie;
960 struct bnx2x *bp = fp->bp;
964 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
965 fp->index, fp->fw_sb_id, fp->igu_sb_id);
966 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
968 #ifdef BNX2X_STOP_ON_ERROR
969 if (unlikely(bp->panic))
973 /* Handle Rx and Tx according to MSI-X vector */
974 prefetch(fp->rx_cons_sb);
976 for_each_cos_in_tx_queue(fp, cos)
977 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
979 prefetch(&fp->sb_running_index[SM_RX_ID]);
980 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
985 /* HW Lock for shared dual port PHYs */
986 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
988 mutex_lock(&bp->port.phy_mutex);
990 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
993 void bnx2x_release_phy_lock(struct bnx2x *bp)
995 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
997 mutex_unlock(&bp->port.phy_mutex);
1000 /* calculates MF speed according to current line speed and MF configuration */
1001 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1003 u16 line_speed = bp->link_vars.line_speed;
1005 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1006 bp->mf_config[BP_VN(bp)]);
1008 /* Calculate the current MAX line speed limit for the MF
1012 line_speed = (line_speed * maxCfg) / 100;
1013 else { /* SD mode */
1014 u16 vn_max_rate = maxCfg * 100;
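/* in SD mode maxCfg is in units of 100 Mbps, e.g. maxCfg == 25 caps the
 * function at 2500 Mbps
 */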
1016 if (vn_max_rate < line_speed)
1017 line_speed = vn_max_rate;
1025 * bnx2x_fill_report_data - fill link report data to report
1027 * @bp: driver handle
1028 * @data: link state to update
1030 * It uses non-atomic bit operations because it is called under the mutex.
1032 static void bnx2x_fill_report_data(struct bnx2x *bp,
1033 struct bnx2x_link_report_data *data)
1035 u16 line_speed = bnx2x_get_mf_speed(bp);
1037 memset(data, 0, sizeof(*data));
1039 /* Fill the report data: effective line speed */
1040 data->line_speed = line_speed;
1043 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1044 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1045 &data->link_report_flags);
1048 if (bp->link_vars.duplex == DUPLEX_FULL)
1049 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1051 /* Rx Flow Control is ON */
1052 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1053 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1055 /* Tx Flow Control is ON */
1056 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1057 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1061 * bnx2x_link_report - report link status to OS.
1063 * @bp: driver handle
1065 * Calls the __bnx2x_link_report() under the same locking scheme
1066 * as a link/PHY state managing code to ensure a consistent link
1070 void bnx2x_link_report(struct bnx2x *bp)
1072 bnx2x_acquire_phy_lock(bp);
1073 __bnx2x_link_report(bp);
1074 bnx2x_release_phy_lock(bp);
1078 * __bnx2x_link_report - report link status to OS.
1080 * @bp: driver handle
1082 * Non-atomic implementation.
1083 * Should be called under the phy_lock.
1085 void __bnx2x_link_report(struct bnx2x *bp)
1087 struct bnx2x_link_report_data cur_data;
1090 if (!CHIP_IS_E1(bp))
1091 bnx2x_read_mf_cfg(bp);
1093 /* Read the current link report info */
1094 bnx2x_fill_report_data(bp, &cur_data);
1096 /* Don't report link down or exactly the same link status twice */
1097 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1098 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1099 &bp->last_reported_link.link_report_flags) &&
1100 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1101 &cur_data.link_report_flags)))
1106 /* We are going to report new link parameters now -
1107 * remember the current data for the next time.
1109 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1111 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1112 &cur_data.link_report_flags)) {
1113 netif_carrier_off(bp->dev);
1114 netdev_err(bp->dev, "NIC Link is Down\n");
1120 netif_carrier_on(bp->dev);
1122 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1123 &cur_data.link_report_flags))
1128 /* Handle the FC at the end so that only these flags would be
1129 * possibly set. This way we may easily check if there is no FC
1132 if (cur_data.link_report_flags) {
1133 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1134 &cur_data.link_report_flags)) {
1135 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1136 &cur_data.link_report_flags))
1137 flow = "ON - receive & transmit";
1139 flow = "ON - receive";
1141 flow = "ON - transmit";
1146 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1147 cur_data.line_speed, duplex, flow);
1151 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1155 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1156 struct eth_rx_sge *sge;
1158 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1160 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1161 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1164 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1165 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1169 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1170 struct bnx2x_fastpath *fp, int last)
1174 for (i = 0; i < last; i++) {
1175 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1176 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1177 u8 *data = first_buf->data;
1180 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1183 if (tpa_info->tpa_state == BNX2X_TPA_START)
1184 dma_unmap_single(&bp->pdev->dev,
1185 dma_unmap_addr(first_buf, mapping),
1186 fp->rx_buf_size, DMA_FROM_DEVICE);
1187 bnx2x_frag_free(fp, data);
1188 first_buf->data = NULL;
1192 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1196 for_each_rx_queue_cnic(bp, j) {
1197 struct bnx2x_fastpath *fp = &bp->fp[j];
1201 /* Activate BD ring */
1203 * this will generate an interrupt (to the TSTORM)
1204 * must only be done after chip is initialized
1206 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1211 void bnx2x_init_rx_rings(struct bnx2x *bp)
1213 int func = BP_FUNC(bp);
1217 /* Allocate TPA resources */
1218 for_each_eth_queue(bp, j) {
1219 struct bnx2x_fastpath *fp = &bp->fp[j];
1222 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1224 if (!fp->disable_tpa) {
1225 /* Fill the per-aggregation pool */
1226 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1227 struct bnx2x_agg_info *tpa_info =
1229 struct sw_rx_bd *first_buf =
1230 &tpa_info->first_buf;
1232 first_buf->data = bnx2x_frag_alloc(fp);
1233 if (!first_buf->data) {
1234 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1236 bnx2x_free_tpa_pool(bp, fp, i);
1237 fp->disable_tpa = 1;
1240 dma_unmap_addr_set(first_buf, mapping, 0);
1241 tpa_info->tpa_state = BNX2X_TPA_STOP;
1244 /* "next page" elements initialization */
1245 bnx2x_set_next_page_sgl(fp);
1247 /* set SGEs bit mask */
1248 bnx2x_init_sge_ring_bit_mask(fp);
1250 /* Allocate SGEs and initialize the ring elements */
1251 for (i = 0, ring_prod = 0;
1252 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1254 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1255 BNX2X_ERR("was only able to allocate %d rx sges\n",
1257 BNX2X_ERR("disabling TPA for queue[%d]\n",
1259 /* Cleanup already allocated elements */
1260 bnx2x_free_rx_sge_range(bp, fp,
1262 bnx2x_free_tpa_pool(bp, fp,
1264 fp->disable_tpa = 1;
1268 ring_prod = NEXT_SGE_IDX(ring_prod);
1271 fp->rx_sge_prod = ring_prod;
1275 for_each_eth_queue(bp, j) {
1276 struct bnx2x_fastpath *fp = &bp->fp[j];
1280 /* Activate BD ring */
1282 * this will generate an interrupt (to the TSTORM)
1283 * must only be done after chip is initialized
1285 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1291 if (CHIP_IS_E1(bp)) {
1292 REG_WR(bp, BAR_USTRORM_INTMEM +
1293 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1294 U64_LO(fp->rx_comp_mapping));
1295 REG_WR(bp, BAR_USTRORM_INTMEM +
1296 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1297 U64_HI(fp->rx_comp_mapping));
1302 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1305 struct bnx2x *bp = fp->bp;
1307 for_each_cos_in_tx_queue(fp, cos) {
1308 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1309 unsigned pkts_compl = 0, bytes_compl = 0;
1311 u16 sw_prod = txdata->tx_pkt_prod;
1312 u16 sw_cons = txdata->tx_pkt_cons;
1314 while (sw_cons != sw_prod) {
1315 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1316 &pkts_compl, &bytes_compl);
1320 netdev_tx_reset_queue(
1321 netdev_get_tx_queue(bp->dev,
1322 txdata->txq_index));
1326 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1330 for_each_tx_queue_cnic(bp, i) {
1331 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1335 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1339 for_each_eth_queue(bp, i) {
1340 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1344 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1346 struct bnx2x *bp = fp->bp;
1349 /* ring wasn't allocated */
1350 if (fp->rx_buf_ring == NULL)
1353 for (i = 0; i < NUM_RX_BD; i++) {
1354 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1355 u8 *data = rx_buf->data;
1359 dma_unmap_single(&bp->pdev->dev,
1360 dma_unmap_addr(rx_buf, mapping),
1361 fp->rx_buf_size, DMA_FROM_DEVICE);
1363 rx_buf->data = NULL;
1364 bnx2x_frag_free(fp, data);
1368 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1372 for_each_rx_queue_cnic(bp, j) {
1373 bnx2x_free_rx_bds(&bp->fp[j]);
1377 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1381 for_each_eth_queue(bp, j) {
1382 struct bnx2x_fastpath *fp = &bp->fp[j];
1384 bnx2x_free_rx_bds(fp);
1386 if (!fp->disable_tpa)
1387 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1391 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1393 bnx2x_free_tx_skbs_cnic(bp);
1394 bnx2x_free_rx_skbs_cnic(bp);
1397 void bnx2x_free_skbs(struct bnx2x *bp)
1399 bnx2x_free_tx_skbs(bp);
1400 bnx2x_free_rx_skbs(bp);
1403 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1405 /* load old values */
1406 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1408 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1409 /* leave all but MAX value */
1410 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1412 /* set new MAX value */
1413 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1414 & FUNC_MF_CFG_MAX_BW_MASK;
1416 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1421 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1423 * @bp: driver handle
1424 * @nvecs: number of vectors to be released
1426 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1430 if (nvecs == offset)
1432 free_irq(bp->msix_table[offset].vector, bp->dev);
1433 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1434 bp->msix_table[offset].vector);
1437 if (CNIC_SUPPORT(bp)) {
1438 if (nvecs == offset)
1443 for_each_eth_queue(bp, i) {
1444 if (nvecs == offset)
1446 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1447 i, bp->msix_table[offset].vector);
1449 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1453 void bnx2x_free_irq(struct bnx2x *bp)
1455 if (bp->flags & USING_MSIX_FLAG &&
1456 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1457 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1458 CNIC_SUPPORT(bp) + 1);
1460 free_irq(bp->dev->irq, bp->dev);
1463 int bnx2x_enable_msix(struct bnx2x *bp)
1465 int msix_vec = 0, i, rc, req_cnt;
1467 bp->msix_table[msix_vec].entry = msix_vec;
1468 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1469 bp->msix_table[0].entry);
1472 /* Cnic requires an msix vector for itself */
1473 if (CNIC_SUPPORT(bp)) {
1474 bp->msix_table[msix_vec].entry = msix_vec;
1475 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1476 msix_vec, bp->msix_table[msix_vec].entry);
1480 /* We need separate vectors for ETH queues only (not FCoE) */
1481 for_each_eth_queue(bp, i) {
1482 bp->msix_table[msix_vec].entry = msix_vec;
1483 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1484 msix_vec, msix_vec, i);
1488 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1490 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1493 * reconfigure number of tx/rx queues according to available
1496 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1497 /* how many fewer vectors will we have? */
1498 int diff = req_cnt - rc;
1500 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1502 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1505 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1509 * decrease number of queues by number of unallocated entries
1511 bp->num_ethernet_queues -= diff;
1512 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1514 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1516 } else if (rc > 0) {
1517 /* Get by with single vector */
1518 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1520 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1525 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1526 bp->flags |= USING_SINGLE_MSIX_FLAG;
1528 BNX2X_DEV_INFO("set number of queues to 1\n");
1529 bp->num_ethernet_queues = 1;
1530 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1531 } else if (rc < 0) {
1532 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1536 bp->flags |= USING_MSIX_FLAG;
1541 /* fall back to INTx if not enough memory */
1543 bp->flags |= DISABLE_MSI_FLAG;
1548 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1550 int i, rc, offset = 0;
1552 rc = request_irq(bp->msix_table[offset++].vector,
1553 bnx2x_msix_sp_int, 0,
1554 bp->dev->name, bp->dev);
1556 BNX2X_ERR("request sp irq failed\n");
1560 if (CNIC_SUPPORT(bp))
1563 for_each_eth_queue(bp, i) {
1564 struct bnx2x_fastpath *fp = &bp->fp[i];
1565 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1568 rc = request_irq(bp->msix_table[offset].vector,
1569 bnx2x_msix_fp_int, 0, fp->name, fp);
1571 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1572 bp->msix_table[offset].vector, rc);
1573 bnx2x_free_msix_irqs(bp, offset);
1580 i = BNX2X_NUM_ETH_QUEUES(bp);
1581 offset = 1 + CNIC_SUPPORT(bp);
1582 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1583 bp->msix_table[0].vector,
1584 0, bp->msix_table[offset].vector,
1585 i - 1, bp->msix_table[offset + i - 1].vector);
1590 int bnx2x_enable_msi(struct bnx2x *bp)
1594 rc = pci_enable_msi(bp->pdev);
1596 BNX2X_DEV_INFO("MSI is not attainable\n");
1599 bp->flags |= USING_MSI_FLAG;
1604 static int bnx2x_req_irq(struct bnx2x *bp)
1606 unsigned long flags;
1609 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1612 flags = IRQF_SHARED;
1614 if (bp->flags & USING_MSIX_FLAG)
1615 irq = bp->msix_table[0].vector;
1617 irq = bp->pdev->irq;
1619 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1622 static int bnx2x_setup_irqs(struct bnx2x *bp)
1625 if (bp->flags & USING_MSIX_FLAG &&
1626 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1627 rc = bnx2x_req_msix_irqs(bp);
1632 rc = bnx2x_req_irq(bp);
1634 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1637 if (bp->flags & USING_MSI_FLAG) {
1638 bp->dev->irq = bp->pdev->irq;
1639 netdev_info(bp->dev, "using MSI IRQ %d\n",
1642 if (bp->flags & USING_MSIX_FLAG) {
1643 bp->dev->irq = bp->msix_table[0].vector;
1644 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1652 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1656 for_each_rx_queue_cnic(bp, i)
1657 napi_enable(&bnx2x_fp(bp, i, napi));
1660 static void bnx2x_napi_enable(struct bnx2x *bp)
1664 for_each_eth_queue(bp, i)
1665 napi_enable(&bnx2x_fp(bp, i, napi));
1668 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1672 for_each_rx_queue_cnic(bp, i)
1673 napi_disable(&bnx2x_fp(bp, i, napi));
1676 static void bnx2x_napi_disable(struct bnx2x *bp)
1680 for_each_eth_queue(bp, i)
1681 napi_disable(&bnx2x_fp(bp, i, napi));
1684 void bnx2x_netif_start(struct bnx2x *bp)
1686 if (netif_running(bp->dev)) {
1687 bnx2x_napi_enable(bp);
1688 if (CNIC_LOADED(bp))
1689 bnx2x_napi_enable_cnic(bp);
1690 bnx2x_int_enable(bp);
1691 if (bp->state == BNX2X_STATE_OPEN)
1692 netif_tx_wake_all_queues(bp->dev);
1696 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1698 bnx2x_int_disable_sync(bp, disable_hw);
1699 bnx2x_napi_disable(bp);
1700 if (CNIC_LOADED(bp))
1701 bnx2x_napi_disable_cnic(bp);
1704 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1706 struct bnx2x *bp = netdev_priv(dev);
1708 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1709 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1710 u16 ether_type = ntohs(hdr->h_proto);
1712 /* Skip VLAN tag if present */
1713 if (ether_type == ETH_P_8021Q) {
1714 struct vlan_ethhdr *vhdr =
1715 (struct vlan_ethhdr *)skb->data;
1717 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1720 /* If ethertype is FCoE or FIP - use FCoE ring */
1721 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1722 return bnx2x_fcoe_tx(bp, txq_index);
1725 /* select a non-FCoE queue */
1726 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1730 void bnx2x_set_num_queues(struct bnx2x *bp)
1733 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1735 /* override in STORAGE SD modes */
1736 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1737 bp->num_ethernet_queues = 1;
1739 /* Add special queues */
1740 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1741 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1743 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1747 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1749 * @bp: Driver handle
1751 * We currently support at most 16 Tx queues for each CoS, thus we will
1752 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1755 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1756 * index after all ETH L2 indices.
1758 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1759 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1760 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1762 * The proper configuration of skb->queue_mapping is handled by
1763 * bnx2x_select_queue() and __skb_tx_hash().
1765 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1766 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1768 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1772 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1773 rx = BNX2X_NUM_ETH_QUEUES(bp);
1775 /* account for fcoe queue */
1776 if (include_cnic && !NO_FCOE(bp)) {
1781 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1783 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1786 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1788 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1792 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1798 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1802 for_each_queue(bp, i) {
1803 struct bnx2x_fastpath *fp = &bp->fp[i];
1806 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1809 * Although there are no IP frames expected to arrive on
1810 * this ring, we still want to add an
1811 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1814 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1817 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1818 IP_HEADER_ALIGNMENT_PADDING +
1821 BNX2X_FW_RX_ALIGN_END;
1822 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
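/* if the whole buffer plus NET_SKB_PAD fits in one page, use the page-frag
 * allocator; otherwise rx_frag_size == 0 makes bnx2x_frag_alloc() fall back
 * to kmalloc()
 */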
1823 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1824 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1826 fp->rx_frag_size = 0;
1830 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1833 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1835 /* Prepare the initial contents of the indirection table if RSS is
1838 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1839 bp->rss_conf_obj.ind_table[i] =
1841 ethtool_rxfh_indir_default(i, num_eth_queues);
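/* the default spreads the table round-robin over the ETH queues,
 * e.g. 0, 1, 2, 3, 0, 1, ... with four queues
 */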
1844 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1845 * per-port, so if explicit configuration is needed, do it only
1848 * For 57712 and newer on the other hand it's a per-function
1851 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1854 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1857 struct bnx2x_config_rss_params params = {NULL};
1859 /* Although RSS is meaningless when there is a single HW queue we
1860 * still need it enabled in order to have HW Rx hash generated.
1862 * if (!is_eth_multi(bp))
1863 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1866 params.rss_obj = rss_obj;
1868 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1870 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1872 /* RSS configuration */
1873 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1874 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1875 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1876 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1877 if (rss_obj->udp_rss_v4)
1878 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
1879 if (rss_obj->udp_rss_v6)
1880 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
1883 params.rss_result_mask = MULTI_MASK;
1885 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1889 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1890 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
1893 return bnx2x_config_rss(bp, ¶ms);
1896 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1898 struct bnx2x_func_state_params func_params = {NULL};
1900 /* Prepare parameters for function state transitions */
1901 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1903 func_params.f_obj = &bp->func_obj;
1904 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1906 func_params.params.hw_init.load_phase = load_code;
1908 return bnx2x_func_state_change(bp, &func_params);
1912 * Cleans the objects that have internal lists without sending
1913 * ramrods. Should be run when interrupts are disabled.
1915 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1918 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1919 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1920 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1922 /***************** Cleanup MACs' object first *************************/
1924 /* Wait for completion of the requested commands */
1925 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1926 /* Perform a dry cleanup */
1927 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1929 /* Clean ETH primary MAC */
1930 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1931 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1934 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1936 /* Cleanup UC list */
1938 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1939 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1942 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1944 /***************** Now clean mcast object *****************************/
1945 rparam.mcast_obj = &bp->mcast_obj;
1946 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1948 /* Add a DEL command... */
1949 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1951 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1954 /* ...and wait until all pending commands are cleared */
1955 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1958 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1963 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1967 #ifndef BNX2X_STOP_ON_ERROR
1968 #define LOAD_ERROR_EXIT(bp, label) \
1970 (bp)->state = BNX2X_STATE_ERROR; \
1974 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1976 bp->cnic_loaded = false; \
1979 #else /*BNX2X_STOP_ON_ERROR*/
1980 #define LOAD_ERROR_EXIT(bp, label) \
1982 (bp)->state = BNX2X_STATE_ERROR; \
1986 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1988 bp->cnic_loaded = false; \
1992 #endif /*BNX2X_STOP_ON_ERROR*/
1994 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1996 /* build FW version dword */
1997 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1998 (BCM_5710_FW_MINOR_VERSION << 8) +
1999 (BCM_5710_FW_REVISION_VERSION << 16) +
2000 (BCM_5710_FW_ENGINEERING_VERSION << 24);
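/* e.g. FW 7.8.2.0 is encoded as 0x00020807 (major version in the low byte) */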
2002 /* read loaded FW from chip */
2003 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2005 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
2007 if (loaded_fw != my_fw) {
2009 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
2018 * bnx2x_bz_fp - zero content of the fastpath structure.
2020 * @bp: driver handle
2021 * @index: fastpath index to be zeroed
2023 * Makes sure the contents of the bp->fp[index].napi are kept
2026 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2028 struct bnx2x_fastpath *fp = &bp->fp[index];
2029 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2032 struct napi_struct orig_napi = fp->napi;
2033 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2034 /* bzero bnx2x_fastpath contents */
2035 if (bp->stats_init) {
2036 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2037 memset(fp, 0, sizeof(*fp));
2039 /* Keep Queue statistics */
2040 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2041 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2043 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2045 if (tmp_eth_q_stats)
2046 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
2047 sizeof(struct bnx2x_eth_q_stats));
2049 tmp_eth_q_stats_old =
2050 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2052 if (tmp_eth_q_stats_old)
2053 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
2054 sizeof(struct bnx2x_eth_q_stats_old));
2056 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2057 memset(fp, 0, sizeof(*fp));
2059 if (tmp_eth_q_stats) {
2060 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2061 sizeof(struct bnx2x_eth_q_stats));
2062 kfree(tmp_eth_q_stats);
2065 if (tmp_eth_q_stats_old) {
2066 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
2067 sizeof(struct bnx2x_eth_q_stats_old));
2068 kfree(tmp_eth_q_stats_old);
2073 /* Restore the NAPI object as it has been already initialized */
2074 fp->napi = orig_napi;
2075 fp->tpa_info = orig_tpa_info;
2079 fp->max_cos = bp->max_cos;
2081 /* Special queues support only one CoS */
2084 /* Init txdata pointers */
2086 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2088 for_each_cos_in_tx_queue(fp, cos)
2089 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2090 BNX2X_NUM_ETH_QUEUES(bp) + index];
2093 * set the tpa flag for each queue. The tpa flag determines the queue
2094 * minimal size so it must be set prior to queue memory allocation
2096 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2097 (bp->flags & GRO_ENABLE_FLAG &&
2098 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2099 if (bp->flags & TPA_ENABLE_FLAG)
2100 fp->mode = TPA_MODE_LRO;
2101 else if (bp->flags & GRO_ENABLE_FLAG)
2102 fp->mode = TPA_MODE_GRO;
2104 /* We don't want TPA on an FCoE L2 ring */
2106 fp->disable_tpa = 1;
2109 int bnx2x_load_cnic(struct bnx2x *bp)
2111 int i, rc, port = BP_PORT(bp);
2113 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2115 mutex_init(&bp->cnic_mutex);
2117 rc = bnx2x_alloc_mem_cnic(bp);
2119 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2120 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2123 rc = bnx2x_alloc_fp_mem_cnic(bp);
2125 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2126 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2129 /* Update the number of queues with the cnic queues */
2130 rc = bnx2x_set_real_num_queues(bp, 1);
2132 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2133 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2136 /* Add all CNIC NAPI objects */
2137 bnx2x_add_all_napi_cnic(bp);
2138 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2139 bnx2x_napi_enable_cnic(bp);
2141 rc = bnx2x_init_hw_func_cnic(bp);
2143 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2145 bnx2x_nic_init_cnic(bp);
2147 /* Enable Timer scan */
2148 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2150 for_each_cnic_queue(bp, i) {
2151 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2153 BNX2X_ERR("Queue setup failed\n");
2154 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2158 /* Initialize Rx filter. */
2159 netif_addr_lock_bh(bp->dev);
2160 bnx2x_set_rx_mode(bp->dev);
2161 netif_addr_unlock_bh(bp->dev);
2163 /* re-read iscsi info */
2164 bnx2x_get_iscsi_info(bp);
2165 bnx2x_setup_cnic_irq_info(bp);
2166 bnx2x_setup_cnic_info(bp);
2167 bp->cnic_loaded = true;
2168 if (bp->state == BNX2X_STATE_OPEN)
2169 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2172 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2176 #ifndef BNX2X_STOP_ON_ERROR
2178 /* Disable Timer scan */
2179 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2182 bnx2x_napi_disable_cnic(bp);
2183 /* Update the number of queues without the cnic queues */
2184 rc = bnx2x_set_real_num_queues(bp, 0);
2186 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2188 BNX2X_ERR("CNIC-related load failed\n");
2189 bnx2x_free_fp_mem_cnic(bp);
2190 bnx2x_free_mem_cnic(bp);
2192 #endif /* ! BNX2X_STOP_ON_ERROR */
2196 /* must be called with rtnl_lock */
2197 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2199 int port = BP_PORT(bp);
2203 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2205 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2207 #ifdef BNX2X_STOP_ON_ERROR
2208 if (unlikely(bp->panic)) {
2209 BNX2X_ERR("Can't load NIC when there is panic\n");
2214 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2216 /* Set the initial link reported state to link down */
2217 bnx2x_acquire_phy_lock(bp);
2218 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2219 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2220 &bp->last_reported_link.link_report_flags);
2221 bnx2x_release_phy_lock(bp);
2223 /* must be called before memory allocation and HW init */
2224 bnx2x_ilt_set_info(bp);
2227 * Zero fastpath structures preserving invariants like napi, which are
2228 * allocated only once, fp index, max_cos, bp pointer.
2229 * Also set fp->disable_tpa and txdata_ptr.
2231 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2232 for_each_queue(bp, i)
2234 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2235 bp->num_cnic_queues) *
2236 sizeof(struct bnx2x_fp_txdata));
2238 bp->fcoe_init = false;
2240 /* Set the receive queues buffer size */
2241 bnx2x_set_rx_buf_size(bp);
2243 if (bnx2x_alloc_mem(bp))
2246 /* As long as bnx2x_alloc_mem() may possibly update
2247 * bp->num_queues, bnx2x_set_real_num_queues() should always
2248 * come after it. At this stage cnic queues are not counted.
2250 rc = bnx2x_set_real_num_queues(bp, 0);
2252 BNX2X_ERR("Unable to set real_num_queues\n");
2253 LOAD_ERROR_EXIT(bp, load_error0);
2256 /* configure multi cos mappings in kernel.
2257 * this configuration may be overridden by a multi class queue discipline
2258 * or by a dcbx negotiation result.
2260 bnx2x_setup_tc(bp->dev, bp->max_cos);
2262 /* Add all NAPI objects */
2263 bnx2x_add_all_napi(bp);
2264 DP(NETIF_MSG_IFUP, "napi added\n");
2265 bnx2x_napi_enable(bp);
2267 /* set pf load just before approaching the MCP */
2268 bnx2x_set_pf_load(bp);
2270 /* Send LOAD_REQUEST command to MCP
2271 * Returns the type of LOAD command:
2272 * if it is the first port to be initialized
2273 * common blocks should be initialized, otherwise - not
2275 if (!BP_NOMCP(bp)) {
2278 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2279 DRV_MSG_SEQ_NUMBER_MASK);
2280 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2282 /* Get current FW pulse sequence */
2283 bp->fw_drv_pulse_wr_seq =
2284 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2285 DRV_PULSE_SEQ_MASK);
2286 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2288 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2289 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2291 BNX2X_ERR("MCP response failure, aborting\n");
2293 LOAD_ERROR_EXIT(bp, load_error1);
2295 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2296 BNX2X_ERR("Driver load refused\n");
2297 rc = -EBUSY; /* other port in diagnostic mode */
2298 LOAD_ERROR_EXIT(bp, load_error1);
2300 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2301 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2302 /* abort nic load if version mismatch */
2303 if (!bnx2x_test_firmware_version(bp, true)) {
2305 LOAD_ERROR_EXIT(bp, load_error2);
2310 int path = BP_PATH(bp);
2312 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2313 path, load_count[path][0], load_count[path][1],
2314 load_count[path][2]);
2315 load_count[path][0]++;
2316 load_count[path][1 + port]++;
2317 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2318 path, load_count[path][0], load_count[path][1],
2319 load_count[path][2]);
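/* Without an MCP the driver derives the load type itself from the per-path
 * counters: the first function on the path does the COMMON init, the first
 * function on the port does the PORT init, and any later function does a
 * FUNCTION-only init.
 */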
2320 if (load_count[path][0] == 1)
2321 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2322 else if (load_count[path][1 + port] == 1)
2323 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2325 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2328 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2329 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2330 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2333 * We need the barrier to ensure the ordering between the
2334 * writing to bp->port.pmf here and reading it from the
2335 * bnx2x_periodic_task().
2341 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2343 /* Init Function state controlling object */
2344 bnx2x__init_func_obj(bp);
2347 rc = bnx2x_init_hw(bp, load_code);
2349 BNX2X_ERR("HW init failed, aborting\n");
2350 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2351 LOAD_ERROR_EXIT(bp, load_error2);
2354 /* Connect to IRQs */
2355 rc = bnx2x_setup_irqs(bp);
2357 BNX2X_ERR("IRQs setup failed\n");
2358 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2359 LOAD_ERROR_EXIT(bp, load_error2);
2362 /* Setup NIC internals and enable interrupts */
2363 bnx2x_nic_init(bp, load_code);
2365 /* Init per-function objects */
2366 bnx2x_init_bp_objs(bp);
2368 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2369 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2370 (bp->common.shmem2_base)) {
2371 if (SHMEM2_HAS(bp, dcc_support))
2372 SHMEM2_WR(bp, dcc_support,
2373 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2374 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2375 if (SHMEM2_HAS(bp, afex_driver_support))
2376 SHMEM2_WR(bp, afex_driver_support,
2377 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2380 /* Set AFEX default VLAN tag to an invalid value */
2381 bp->afex_def_vlan_tag = -1;
2383 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2384 rc = bnx2x_func_start(bp);
2386 BNX2X_ERR("Function start failed!\n");
2387 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2388 LOAD_ERROR_EXIT(bp, load_error3);
2391 /* Send LOAD_DONE command to MCP */
2392 if (!BP_NOMCP(bp)) {
2393 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2395 BNX2X_ERR("MCP response failure, aborting\n");
2397 LOAD_ERROR_EXIT(bp, load_error3);
2401 rc = bnx2x_setup_leading(bp);
2403 BNX2X_ERR("Setup leading failed!\n");
2404 LOAD_ERROR_EXIT(bp, load_error3);
2407 for_each_nondefault_eth_queue(bp, i) {
2408 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2410 BNX2X_ERR("Queue setup failed\n");
2411 LOAD_ERROR_EXIT(bp, load_error3);
2415 rc = bnx2x_init_rss_pf(bp);
2417 BNX2X_ERR("PF RSS init failed\n");
2418 LOAD_ERROR_EXIT(bp, load_error3);
2421 /* Now when Clients are configured we are ready to work */
2422 bp->state = BNX2X_STATE_OPEN;
2424 /* Configure a ucast MAC */
2425 rc = bnx2x_set_eth_mac(bp, true);
2427 BNX2X_ERR("Setting Ethernet MAC failed\n");
2428 LOAD_ERROR_EXIT(bp, load_error3);
2431 if (bp->pending_max) {
2432 bnx2x_update_max_mf_config(bp, bp->pending_max);
2433 bp->pending_max = 0;
2437 bnx2x_initial_phy_init(bp, load_mode);
2438 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2440 /* Start fast path */
2442 /* Initialize Rx filter. */
2443 netif_addr_lock_bh(bp->dev);
2444 bnx2x_set_rx_mode(bp->dev);
2445 netif_addr_unlock_bh(bp->dev);
2448 switch (load_mode) {
2450 /* Tx queue should be only reenabled */
2451 netif_tx_wake_all_queues(bp->dev);
2455 netif_tx_start_all_queues(bp->dev);
2456 smp_mb__after_clear_bit();
2460 case LOAD_LOOPBACK_EXT:
2461 bp->state = BNX2X_STATE_DIAG;
2469 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2471 bnx2x__link_status_update(bp);
2473 /* start the timer */
2474 mod_timer(&bp->timer, jiffies + bp->current_interval);
2476 if (CNIC_ENABLED(bp))
2477 bnx2x_load_cnic(bp);
2479 /* mark driver is loaded in shmem2 */
2480 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2482 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2483 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2484 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2485 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2488 /* Wait for all pending SP commands to complete */
2489 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2490 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2491 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2495 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2496 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2497 bnx2x_dcbx_init(bp, false);
2499 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2503 #ifndef BNX2X_STOP_ON_ERROR
2505 bnx2x_int_disable_sync(bp, 1);
2507 /* Clean queueable objects */
2508 bnx2x_squeeze_objects(bp);
2510 /* Free SKBs, SGEs, TPA pool and driver internals */
2511 bnx2x_free_skbs(bp);
2512 for_each_rx_queue(bp, i)
2513 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2518 if (!BP_NOMCP(bp)) {
2519 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2520 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2525 bnx2x_napi_disable(bp);
2526 /* clear pf_load status, as it was already set */
2527 bnx2x_clear_pf_load(bp);
2532 #endif /* ! BNX2X_STOP_ON_ERROR */
2535 /* must be called with rtnl_lock */
2536 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2539 bool global = false;
2541 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2543 /* mark driver is unloaded in shmem2 */
2544 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2546 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2547 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2548 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2551 if ((bp->state == BNX2X_STATE_CLOSED) ||
2552 (bp->state == BNX2X_STATE_ERROR)) {
2553 /* We can get here if the driver has been unloaded
2554 * during parity error recovery and is either waiting for a
2555 * leader to complete or for other functions to unload and
2556 * then ifdown has been issued. In this case we want to
2557 * unload and let other functions complete the recovery
2560 bp->recovery_state = BNX2X_RECOVERY_DONE;
2562 bnx2x_release_leader_lock(bp);
2565 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2566 BNX2X_ERR("Can't unload in closed or error state\n");
2571 * It's important to set bp->state to a value different from
2572 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2573 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2575 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2578 if (CNIC_LOADED(bp))
2579 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2582 bnx2x_tx_disable(bp);
2583 netdev_reset_tc(bp->dev);
2585 bp->rx_mode = BNX2X_RX_MODE_NONE;
2587 del_timer_sync(&bp->timer);
2589 /* Set ALWAYS_ALIVE bit in shmem */
2590 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2592 bnx2x_drv_pulse(bp);
2594 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2595 bnx2x_save_statistics(bp);
2597 /* Cleanup the chip if needed */
2598 if (unload_mode != UNLOAD_RECOVERY)
2599 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2601 /* Send the UNLOAD_REQUEST to the MCP */
2602 bnx2x_send_unload_req(bp, unload_mode);
2605 * Prevent transactions to host from the functions on the
2606 * engine that doesn't reset global blocks in case of global
2607 * attention once global blocks are reset and gates are opened
2608 * (the engine whose leader will perform the recovery
2611 if (!CHIP_IS_E1x(bp))
2612 bnx2x_pf_disable(bp);
2614 /* Disable HW interrupts, NAPI */
2615 bnx2x_netif_stop(bp, 1);
2616 /* Delete all NAPI objects */
2617 bnx2x_del_all_napi(bp);
2618 if (CNIC_LOADED(bp))
2619 bnx2x_del_all_napi_cnic(bp);
2623 /* Report UNLOAD_DONE to MCP */
2624 bnx2x_send_unload_done(bp, false);
2628 * At this stage no more interrupts will arrive so we may safely clean
2629 * the queueable objects here in case they failed to get cleaned so far.
2631 bnx2x_squeeze_objects(bp);
2633 /* There should be no more pending SP commands at this stage */
2638 /* Free SKBs, SGEs, TPA pool and driver internals */
2639 bnx2x_free_skbs(bp);
2640 if (CNIC_LOADED(bp))
2641 bnx2x_free_skbs_cnic(bp);
2642 for_each_rx_queue(bp, i)
2643 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2645 if (CNIC_LOADED(bp)) {
2646 bnx2x_free_fp_mem_cnic(bp);
2647 bnx2x_free_mem_cnic(bp);
2651 bp->state = BNX2X_STATE_CLOSED;
2652 bp->cnic_loaded = false;
2654 /* Check if there are pending parity attentions. If there are - set
2655 * RECOVERY_IN_PROGRESS.
2657 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2658 bnx2x_set_reset_in_progress(bp);
2660 /* Set RESET_IS_GLOBAL if needed */
2662 bnx2x_set_reset_global(bp);
2666 /* The last driver must disable a "close the gate" if there is no
2667 * parity attention or "process kill" pending.
2669 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2670 bnx2x_disable_close_the_gate(bp);
2672 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2677 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2681 /* If there is no power capability, silently succeed */
2683 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2687 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2691 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2692 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2693 PCI_PM_CTRL_PME_STATUS));
2695 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2696 /* delay required during transition out of D3hot */
2701 /* If there are other clients above don't
2702 shut down the power */
2703 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2705 /* Don't shut down the power for emulation and FPGA */
2706 if (CHIP_REV_IS_SLOW(bp))
2709 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2713 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2715 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2718 /* No more memory access after this point until
2719 * device is brought back to D0.
2724 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2731 * net_device service functions
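/* NAPI poll handler: service Tx completions on every COS queue of this
 * fastpath, then Rx up to the remaining budget, and only re-arm the status
 * block via bnx2x_ack_sb() once both rings are seen empty after refreshing
 * the fastpath SB index.
 */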
2733 int bnx2x_poll(struct napi_struct *napi, int budget)
2737 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2739 struct bnx2x *bp = fp->bp;
2742 #ifdef BNX2X_STOP_ON_ERROR
2743 if (unlikely(bp->panic)) {
2744 napi_complete(napi);
2749 for_each_cos_in_tx_queue(fp, cos)
2750 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2751 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2754 if (bnx2x_has_rx_work(fp)) {
2755 work_done += bnx2x_rx_int(fp, budget - work_done);
2757 /* must not complete if we consumed full budget */
2758 if (work_done >= budget)
2762 /* Fall out from the NAPI loop if needed */
2763 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2765 /* No need to update SB for FCoE L2 ring as long as
2766 * it's connected to the default SB and the SB
2767 * has been updated when NAPI was scheduled.
2769 if (IS_FCOE_FP(fp)) {
2770 napi_complete(napi);
2773 bnx2x_update_fpsb_idx(fp);
2774 /* bnx2x_has_rx_work() reads the status block,
2775 * thus we need to ensure that status block indices
2776 * have been actually read (bnx2x_update_fpsb_idx)
2777 * prior to this check (bnx2x_has_rx_work) so that
2778 * we won't write the "newer" value of the status block
2779 * to IGU (if there was a DMA right after
2780 * bnx2x_has_rx_work and if there is no rmb, the memory
2781 * reading (bnx2x_update_fpsb_idx) may be postponed
2782 * to right before bnx2x_ack_sb). In this case there
2783 * will never be another interrupt until there is
2784 * another update of the status block, while there
2785 * is still unhandled work.
2789 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2790 napi_complete(napi);
2791 /* Re-enable interrupts */
2792 DP(NETIF_MSG_RX_STATUS,
2793 "Update index to %d\n", fp->fp_hc_idx);
2794 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2795 le16_to_cpu(fp->fp_hc_idx),
2805 /* we split the first BD into headers and data BDs
2806 * to ease the pain of our fellow microcode engineers;
2807 * we use one mapping for both BDs
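 * (the header BD keeps its original DMA address with nbytes trimmed to hlen,
 * while the new data BD reuses the same mapping at an hlen byte offset for
 * the remainder and is marked BNX2X_TSO_SPLIT_BD since it has no mapping of
 * its own)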
2809 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2810 struct bnx2x_fp_txdata *txdata,
2811 struct sw_tx_bd *tx_buf,
2812 struct eth_tx_start_bd **tx_bd, u16 hlen,
2813 u16 bd_prod, int nbd)
2815 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2816 struct eth_tx_bd *d_tx_bd;
2818 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2820 /* first fix first BD */
2821 h_tx_bd->nbd = cpu_to_le16(nbd);
2822 h_tx_bd->nbytes = cpu_to_le16(hlen);
2824 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2825 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2827 /* now get a new data BD
2828 * (after the pbd) and fill it */
2829 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2830 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2832 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2833 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2835 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2836 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2837 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2839 /* this marks the BD as one that has no individual mapping */
2840 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2842 DP(NETIF_MSG_TX_QUEUED,
2843 "TSO split data size is %d (%x:%x)\n",
2844 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2847 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
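/* bnx2x_csum_fix - compensate for the HW checksum start-offset bug: remove
 * (fix > 0) or add (fix < 0) the partial sum of the bytes between the point
 * the stack summed from and the transport header, then return the folded
 * result byte-swapped for the parse BD.
 */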
2852 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2855 csum = (u16) ~csum_fold(csum_sub(csum,
2856 csum_partial(t_header - fix, fix, 0)));
2859 csum = (u16) ~csum_fold(csum_add(csum,
2860 csum_partial(t_header, -fix, 0)));
2862 return swab16(csum);
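/* Build the XMIT_* flag word for an skb: the checksum flavour (IPv4/IPv6,
 * TCP) when CHECKSUM_PARTIAL is requested, plus the GSO type for LSO packets.
 */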
2865 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2869 if (skb->ip_summed != CHECKSUM_PARTIAL)
2873 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2875 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2876 rc |= XMIT_CSUM_TCP;
2880 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2881 rc |= XMIT_CSUM_TCP;
2885 if (skb_is_gso_v6(skb))
2886 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2887 else if (skb_is_gso(skb))
2888 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2893 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2894 /* check if packet requires linearization (packet is too fragmented);
2895 no need to check fragmentation if page size > 8K (there will be no
2896 violation of FW restrictions) */
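/* The check below slides a window of (MAX_FETCH_BD - 3) BDs across the frags
 * (the linear part counts as the first BD); if any window carries less than
 * one gso_size worth of data the FW limit would be violated, so the skb has
 * to be linearized.
 */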
2897 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2902 int first_bd_sz = 0;
2904 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2905 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2907 if (xmit_type & XMIT_GSO) {
2908 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2909 /* Check if LSO packet needs to be copied:
2910 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2911 int wnd_size = MAX_FETCH_BD - 3;
2912 /* Number of windows to check */
2913 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2918 /* Headers length */
2919 hlen = (int)(skb_transport_header(skb) - skb->data) +
2922 /* Amount of data (w/o headers) on linear part of SKB */
2923 first_bd_sz = skb_headlen(skb) - hlen;
2925 wnd_sum = first_bd_sz;
2927 /* Calculate the first sum - it's special */
2928 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2930 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2932 /* If there was data in the linear part of the skb - check it */
2933 if (first_bd_sz > 0) {
2934 if (unlikely(wnd_sum < lso_mss)) {
2939 wnd_sum -= first_bd_sz;
2942 /* Others are easier: run through the frag list and
2943 check all windows */
2944 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2946 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2948 if (unlikely(wnd_sum < lso_mss)) {
2953 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2956 /* in the non-LSO case a too-fragmented packet should always be linearized */
2963 if (unlikely(to_copy))
2964 DP(NETIF_MSG_TX_QUEUED,
2965 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2966 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2967 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
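/* Encode the LSO MSS into the E2 parsing data and, for IPv6 GSO packets
 * whose next header is itself an extension header, set the WITH_EXT_HDR flag.
 */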
2973 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2976 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2977 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2978 ETH_TX_PARSE_BD_E2_LSO_MSS;
2979 if ((xmit_type & XMIT_GSO_V6) &&
2980 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2981 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2985 * bnx2x_set_pbd_gso - update PBD in GSO case.
2989 * @xmit_type: xmit flags
2991 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2992 struct eth_tx_parse_bd_e1x *pbd,
2995 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2996 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2997 pbd->tcp_flags = pbd_tcp_flags(skb);
2999 if (xmit_type & XMIT_GSO_V4) {
3000 pbd->ip_id = swab16(ip_hdr(skb)->id);
3001 pbd->tcp_pseudo_csum =
3002 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3004 0, IPPROTO_TCP, 0));
3007 pbd->tcp_pseudo_csum =
3008 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3009 &ipv6_hdr(skb)->daddr,
3010 0, IPPROTO_TCP, 0));
3012 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
3016 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3018 * @bp: driver handle
3020 * @parsing_data: data to be updated
3021 * @xmit_type: xmit flags
3025 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3026 u32 *parsing_data, u32 xmit_type)
3029 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3030 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3031 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
3033 if (xmit_type & XMIT_CSUM_TCP) {
3034 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3035 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3036 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3038 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3040 /* We support checksum offload for TCP and UDP only.
3041 * No need to pass the UDP header length - it's a constant.
3043 return skb_transport_header(skb) +
3044 sizeof(struct udphdr) - skb->data;
3047 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3048 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3050 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3052 if (xmit_type & XMIT_CSUM_V4)
3053 tx_start_bd->bd_flags.as_bitfield |=
3054 ETH_TX_BD_FLAGS_IP_CSUM;
3056 tx_start_bd->bd_flags.as_bitfield |=
3057 ETH_TX_BD_FLAGS_IPV6;
3059 if (!(xmit_type & XMIT_CSUM_TCP))
3060 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3064 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3066 * @bp: driver handle
3068 * @pbd: parse BD to be updated
3069 * @xmit_type: xmit flags
3071 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3072 struct eth_tx_parse_bd_e1x *pbd,
3075 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3077 /* for now NS flag is not used in Linux */
3079 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3080 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3082 pbd->ip_hlen_w = (skb_transport_header(skb) -
3083 skb_network_header(skb)) >> 1;
3085 hlen += pbd->ip_hlen_w;
3087 /* We support checksum offload for TCP and UDP only */
3088 if (xmit_type & XMIT_CSUM_TCP)
3089 hlen += tcp_hdrlen(skb) / 2;
3091 hlen += sizeof(struct udphdr) / 2;
3093 pbd->total_hlen_w = cpu_to_le16(hlen);
3096 if (xmit_type & XMIT_CSUM_TCP) {
3097 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3100 s8 fix = SKB_CS_OFF(skb); /* signed! */
3102 DP(NETIF_MSG_TX_QUEUED,
3103 "hlen %d fix %d csum before fix %x\n",
3104 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3106 /* HW bug: fixup the CSUM */
3107 pbd->tcp_pseudo_csum =
3108 bnx2x_csum_fix(skb_transport_header(skb),
3111 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3112 pbd->tcp_pseudo_csum);
3118 /* called with netif_tx_lock
3119 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3120 * netif_wake_queue()
3122 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3124 struct bnx2x *bp = netdev_priv(dev);
3126 struct netdev_queue *txq;
3127 struct bnx2x_fp_txdata *txdata;
3128 struct sw_tx_bd *tx_buf;
3129 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3130 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3131 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3132 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3133 u32 pbd_e2_parsing_data = 0;
3134 u16 pkt_prod, bd_prod;
3137 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3140 __le16 pkt_size = 0;
3142 u8 mac_type = UNICAST_ADDRESS;
3144 #ifdef BNX2X_STOP_ON_ERROR
3145 if (unlikely(bp->panic))
3146 return NETDEV_TX_BUSY;
3149 txq_index = skb_get_queue_mapping(skb);
3150 txq = netdev_get_tx_queue(dev, txq_index);
3152 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3154 txdata = &bp->bnx2x_txq[txq_index];
3156 /* enable this debug print to view the transmission queue being used
3157 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3158 txq_index, fp_index, txdata_index); */
3160 /* enable this debug print to view the transmission details
3161 DP(NETIF_MSG_TX_QUEUED,
3162 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3163 txdata->cid, fp_index, txdata_index, txdata, fp); */
3165 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3166 skb_shinfo(skb)->nr_frags +
3168 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3169 /* Handle special storage cases separately */
3170 if (txdata->tx_ring_size == 0) {
3171 struct bnx2x_eth_q_stats *q_stats =
3172 bnx2x_fp_qstats(bp, txdata->parent_fp);
3173 q_stats->driver_filtered_tx_pkt++;
3175 return NETDEV_TX_OK;
3177 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3178 netif_tx_stop_queue(txq);
3179 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3181 return NETDEV_TX_BUSY;
3184 DP(NETIF_MSG_TX_QUEUED,
3185 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
3186 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3187 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3189 eth = (struct ethhdr *)skb->data;
3191 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3192 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3193 if (is_broadcast_ether_addr(eth->h_dest))
3194 mac_type = BROADCAST_ADDRESS;
3196 mac_type = MULTICAST_ADDRESS;
3199 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3200 /* First, check if we need to linearize the skb (due to FW
3201 restrictions). No need to check fragmentation if page size > 8K
3202 (there will be no violation to FW restrictions) */
3203 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3204 /* Statistics of linearization */
3206 if (skb_linearize(skb) != 0) {
3207 DP(NETIF_MSG_TX_QUEUED,
3208 "SKB linearization failed - silently dropping this SKB\n");
3209 dev_kfree_skb_any(skb);
3210 return NETDEV_TX_OK;
3214 /* Map skb linear data for DMA */
3215 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3216 skb_headlen(skb), DMA_TO_DEVICE);
3217 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3218 DP(NETIF_MSG_TX_QUEUED,
3219 "SKB mapping failed - silently dropping this SKB\n");
3220 dev_kfree_skb_any(skb);
3221 return NETDEV_TX_OK;
3224 Please read carefully. First we use one BD which we mark as start,
3225 then we have a parsing info BD (used for TSO or xsum),
3226 and only then we have the rest of the TSO BDs.
3227 (don't forget to mark the last one as last,
3228 and to unmap only AFTER you write to the BD ...)
3229 And above all, all pbd sizes are in words - NOT DWORDS!
3232 /* get current pkt produced now - advance it just before sending packet
3233 * since mapping of pages may fail and cause packet to be dropped
3235 pkt_prod = txdata->tx_pkt_prod;
3236 bd_prod = TX_BD(txdata->tx_bd_prod);
3238 /* get a tx_buf and first BD
3239 * tx_start_bd may be changed during SPLIT,
3240 * but first_bd will always stay first
3242 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3243 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3244 first_bd = tx_start_bd;
3246 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3247 SET_FLAG(tx_start_bd->general_data,
3248 ETH_TX_START_BD_PARSE_NBDS,
3252 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3254 /* remember the first BD of the packet */
3255 tx_buf->first_bd = txdata->tx_bd_prod;
3259 DP(NETIF_MSG_TX_QUEUED,
3260 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3261 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3263 if (vlan_tx_tag_present(skb)) {
3264 tx_start_bd->vlan_or_ethertype =
3265 cpu_to_le16(vlan_tx_tag_get(skb));
3266 tx_start_bd->bd_flags.as_bitfield |=
3267 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3269 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3271 /* turn on parsing and get a BD */
3272 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3274 if (xmit_type & XMIT_CSUM)
3275 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3277 if (!CHIP_IS_E1x(bp)) {
3278 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3279 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3280 /* Set PBD in checksum offload case */
3281 if (xmit_type & XMIT_CSUM)
3282 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3283 &pbd_e2_parsing_data,
3287 * fill in the MAC addresses in the PBD - for local
3290 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3291 &pbd_e2->src_mac_addr_mid,
3292 &pbd_e2->src_mac_addr_lo,
3294 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3295 &pbd_e2->dst_mac_addr_mid,
3296 &pbd_e2->dst_mac_addr_lo,
3300 SET_FLAG(pbd_e2_parsing_data,
3301 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3303 u16 global_data = 0;
3304 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3305 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3306 /* Set PBD in checksum offload case */
3307 if (xmit_type & XMIT_CSUM)
3308 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3310 SET_FLAG(global_data,
3311 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3312 pbd_e1x->global_data |= cpu_to_le16(global_data);
3315 /* Setup the data pointer of the first BD of the packet */
3316 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3317 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3318 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3319 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3320 pkt_size = tx_start_bd->nbytes;
3322 DP(NETIF_MSG_TX_QUEUED,
3323 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3324 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3325 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3326 tx_start_bd->bd_flags.as_bitfield,
3327 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3329 if (xmit_type & XMIT_GSO) {
3331 DP(NETIF_MSG_TX_QUEUED,
3332 "TSO packet len %d hlen %d total len %d tso size %d\n",
3333 skb->len, hlen, skb_headlen(skb),
3334 skb_shinfo(skb)->gso_size);
3336 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3338 if (unlikely(skb_headlen(skb) > hlen))
3339 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3342 if (!CHIP_IS_E1x(bp))
3343 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3346 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3349 /* Set the PBD's parsing_data field if not zero
3350 * (for the chips newer than 57711).
3352 if (pbd_e2_parsing_data)
3353 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3355 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3357 /* Handle fragmented skb */
3358 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3359 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3361 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3362 skb_frag_size(frag), DMA_TO_DEVICE);
3363 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3364 unsigned int pkts_compl = 0, bytes_compl = 0;
3366 DP(NETIF_MSG_TX_QUEUED,
3367 "Unable to map page - dropping packet...\n");
3369 /* we need to unmap all buffers already mapped
3371 * first_bd->nbd needs to be properly updated
3372 * before calling bnx2x_free_tx_pkt
3374 first_bd->nbd = cpu_to_le16(nbd);
3375 bnx2x_free_tx_pkt(bp, txdata,
3376 TX_BD(txdata->tx_pkt_prod),
3377 &pkts_compl, &bytes_compl);
3378 return NETDEV_TX_OK;
3381 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3382 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3383 if (total_pkt_bd == NULL)
3384 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3386 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3387 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3388 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3389 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3392 DP(NETIF_MSG_TX_QUEUED,
3393 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3394 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3395 le16_to_cpu(tx_data_bd->nbytes));
3398 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3400 /* update with actual num BDs */
3401 first_bd->nbd = cpu_to_le16(nbd);
3403 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3405 /* now send a tx doorbell, counting the next BD
3406 * if the packet contains or ends with it
3408 if (TX_BD_POFF(bd_prod) < nbd)
3411 /* total_pkt_bytes should be set on the first data BD if
3412 * it's not an LSO packet and there is more than one
3413 * data BD. In this case pkt_size is limited by an MTU value.
3414 * However we prefer to set it for an LSO packet (while we don't
3415 * have to) in order to save some CPU cycles in the non-LSO
3416 * case, where we care about them much more.
3418 if (total_pkt_bd != NULL)
3419 total_pkt_bd->total_pkt_bytes = pkt_size;
3422 DP(NETIF_MSG_TX_QUEUED,
3423 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3424 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3425 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3426 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3427 le16_to_cpu(pbd_e1x->total_hlen_w));
3429 DP(NETIF_MSG_TX_QUEUED,
3430 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3431 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3432 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3433 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3434 pbd_e2->parsing_data);
3435 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3437 netdev_tx_sent_queue(txq, skb->len);
3439 skb_tx_timestamp(skb);
3441 txdata->tx_pkt_prod++;
3443 * Make sure that the BD data is updated before updating the producer
3444 * since FW might read the BD right after the producer is updated.
3445 * This is only applicable for weak-ordered memory model archs such
3446 * as IA-64. The following barrier is also mandatory since FW will
3447 * assume packets must have BDs.
3451 txdata->tx_db.data.prod += nbd;
3454 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3458 txdata->tx_bd_prod += nbd;
3460 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3461 netif_tx_stop_queue(txq);
3463 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3464 * ordering of set_bit() in netif_tx_stop_queue() and read of
3468 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3469 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3470 netif_tx_wake_queue(txq);
3474 return NETDEV_TX_OK;
3478 * bnx2x_setup_tc - routine to configure net_device for multi tc
3480 * @dev: net device to configure
3481 * @num_tc: number of traffic classes to enable
3483 * callback connected to the ndo_setup_tc function pointer
3485 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3487 int cos, prio, count, offset;
3488 struct bnx2x *bp = netdev_priv(dev);
3490 /* setup tc must be called under rtnl lock */
3493 /* no traffic classes requested. aborting */
3495 netdev_reset_tc(dev);
3499 /* requested to support too many traffic classes */
3500 if (num_tc > bp->max_cos) {
3501 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3502 num_tc, bp->max_cos);
3506 /* declare amount of supported traffic classes */
3507 if (netdev_set_num_tc(dev, num_tc)) {
3508 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3512 /* configure priority to traffic class mapping */
3513 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3514 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3515 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3516 "mapping priority %d to tc %d\n",
3517 prio, bp->prio_to_cos[prio]);
3521 /* Use this configuration to differentiate tc0 from other COSes.
3522 This can be used for ets or pfc, and save the effort of setting
3523 up a multi-class queueing discipline or negotiating DCBX with a switch
3524 netdev_set_prio_tc_map(dev, 0, 0);
3525 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3526 for (prio = 1; prio < 16; prio++) {
3527 netdev_set_prio_tc_map(dev, prio, 1);
3528 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3531 /* configure traffic class to transmission queue mapping */
3532 for (cos = 0; cos < bp->max_cos; cos++) {
3533 count = BNX2X_NUM_ETH_QUEUES(bp);
3534 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3535 netdev_set_tc_queue(dev, cos, count, offset);
3536 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3537 "mapping tc %d to offset %d count %d\n",
3538 cos, offset, count);
3544 /* called with rtnl_lock */
3545 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3547 struct sockaddr *addr = p;
3548 struct bnx2x *bp = netdev_priv(dev);
3551 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3552 BNX2X_ERR("Requested MAC address is not valid\n");
3556 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3557 !is_zero_ether_addr(addr->sa_data)) {
3558 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3562 if (netif_running(dev)) {
3563 rc = bnx2x_set_eth_mac(bp, false);
3568 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3569 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3571 if (netif_running(dev))
3572 rc = bnx2x_set_eth_mac(bp, true);
3577 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3579 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3580 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3585 if (IS_FCOE_IDX(fp_index)) {
3586 memset(sb, 0, sizeof(union host_hc_status_block));
3587 fp->status_blk_mapping = 0;
3590 if (!CHIP_IS_E1x(bp))
3591 BNX2X_PCI_FREE(sb->e2_sb,
3592 bnx2x_fp(bp, fp_index,
3593 status_blk_mapping),
3594 sizeof(struct host_hc_status_block_e2));
3596 BNX2X_PCI_FREE(sb->e1x_sb,
3597 bnx2x_fp(bp, fp_index,
3598 status_blk_mapping),
3599 sizeof(struct host_hc_status_block_e1x));
3603 if (!skip_rx_queue(bp, fp_index)) {
3604 bnx2x_free_rx_bds(fp);
3606 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3607 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3608 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3609 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3610 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3612 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3613 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3614 sizeof(struct eth_fast_path_rx_cqe) *
3618 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3619 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3620 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3621 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3625 if (!skip_tx_queue(bp, fp_index)) {
3626 /* fastpath tx rings: tx_buf tx_desc */
3627 for_each_cos_in_tx_queue(fp, cos) {
3628 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3630 DP(NETIF_MSG_IFDOWN,
3631 "freeing tx memory of fp %d cos %d cid %d\n",
3632 fp_index, cos, txdata->cid);
3634 BNX2X_FREE(txdata->tx_buf_ring);
3635 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3636 txdata->tx_desc_mapping,
3637 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3640 /* end of fastpath */
3643 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3646 for_each_cnic_queue(bp, i)
3647 bnx2x_free_fp_mem_at(bp, i);
3650 void bnx2x_free_fp_mem(struct bnx2x *bp)
3653 for_each_eth_queue(bp, i)
3654 bnx2x_free_fp_mem_at(bp, i);
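/* Cache pointers to the status block index arrays (E2 or E1x layout) inside
 * the fastpath structure so the hot path does not have to pick the union
 * member on every access.
 */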
3657 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3659 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3660 if (!CHIP_IS_E1x(bp)) {
3661 bnx2x_fp(bp, index, sb_index_values) =
3662 (__le16 *)status_blk.e2_sb->sb.index_values;
3663 bnx2x_fp(bp, index, sb_running_index) =
3664 (__le16 *)status_blk.e2_sb->sb.running_index;
3666 bnx2x_fp(bp, index, sb_index_values) =
3667 (__le16 *)status_blk.e1x_sb->sb.index_values;
3668 bnx2x_fp(bp, index, sb_running_index) =
3669 (__le16 *)status_blk.e1x_sb->sb.running_index;
3673 /* Returns the number of actually allocated BDs */
3674 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3677 struct bnx2x *bp = fp->bp;
3678 u16 ring_prod, cqe_ring_prod;
3679 int i, failure_cnt = 0;
3681 fp->rx_comp_cons = 0;
3682 cqe_ring_prod = ring_prod = 0;
3684 /* This routine is called only during fp init so
3685 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3687 for (i = 0; i < rx_ring_size; i++) {
3688 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3692 ring_prod = NEXT_RX_IDX(ring_prod);
3693 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3694 WARN_ON(ring_prod <= (i - failure_cnt));
3698 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3699 i - failure_cnt, fp->index);
3701 fp->rx_bd_prod = ring_prod;
3702 /* Limit the CQE producer by the CQE ring size */
3703 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3705 fp->rx_pkt = fp->rx_calls = 0;
3707 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3709 return i - failure_cnt;
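/* Chain the RCQ pages into a ring: the last CQE of every page is turned into
 * a next-page pointer to the following page (wrapping back to the first one).
 */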
3712 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3716 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3717 struct eth_rx_cqe_next_page *nextpg;
3719 nextpg = (struct eth_rx_cqe_next_page *)
3720 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3722 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3723 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3725 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3726 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3730 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3732 union host_hc_status_block *sb;
3733 struct bnx2x_fastpath *fp = &bp->fp[index];
3736 int rx_ring_size = 0;
3738 if (!bp->rx_ring_size &&
3739 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3740 rx_ring_size = MIN_RX_SIZE_NONTPA;
3741 bp->rx_ring_size = rx_ring_size;
3742 } else if (!bp->rx_ring_size) {
3743 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3745 if (CHIP_IS_E3(bp)) {
3746 u32 cfg = SHMEM_RD(bp,
3747 dev_info.port_hw_config[BP_PORT(bp)].
3750 /* Decrease ring size for 1G functions */
3751 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3752 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3756 /* allocate at least number of buffers required by FW */
3757 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3758 MIN_RX_SIZE_TPA, rx_ring_size);
3760 bp->rx_ring_size = rx_ring_size;
3761 } else /* if rx_ring_size specified - use it */
3762 rx_ring_size = bp->rx_ring_size;
3765 sb = &bnx2x_fp(bp, index, status_blk);
3767 if (!IS_FCOE_IDX(index)) {
3769 if (!CHIP_IS_E1x(bp))
3770 BNX2X_PCI_ALLOC(sb->e2_sb,
3771 &bnx2x_fp(bp, index, status_blk_mapping),
3772 sizeof(struct host_hc_status_block_e2));
3774 BNX2X_PCI_ALLOC(sb->e1x_sb,
3775 &bnx2x_fp(bp, index, status_blk_mapping),
3776 sizeof(struct host_hc_status_block_e1x));
3779 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3780 * set shortcuts for it.
3782 if (!IS_FCOE_IDX(index))
3783 set_sb_shortcuts(bp, index);
3786 if (!skip_tx_queue(bp, index)) {
3787 /* fastpath tx rings: tx_buf tx_desc */
3788 for_each_cos_in_tx_queue(fp, cos) {
3789 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3792 "allocating tx memory of fp %d cos %d\n",
3795 BNX2X_ALLOC(txdata->tx_buf_ring,
3796 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3797 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3798 &txdata->tx_desc_mapping,
3799 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3804 if (!skip_rx_queue(bp, index)) {
3805 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3806 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3807 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3808 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3809 &bnx2x_fp(bp, index, rx_desc_mapping),
3810 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3812 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3813 &bnx2x_fp(bp, index, rx_comp_mapping),
3814 sizeof(struct eth_fast_path_rx_cqe) *
3818 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3819 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3820 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3821 &bnx2x_fp(bp, index, rx_sge_mapping),
3822 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3824 bnx2x_set_next_page_rx_bd(fp);
3827 bnx2x_set_next_page_rx_cq(fp);
3830 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3831 if (ring_size < rx_ring_size)
3837 /* handles low memory cases */
3839 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3841 /* FW will drop all packets if the queue is not big enough;
3842 * in these cases we disable the queue.
3843 * Min size is different for OOO, TPA and non-TPA queues
3845 if (ring_size < (fp->disable_tpa ?
3846 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3847 /* release memory allocated for this queue */
3848 bnx2x_free_fp_mem_at(bp, index);
3854 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3858 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3859 /* we will fail the load process instead of marking
3867 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3871 /* 1. Allocate FP for leading - fatal if error
3872 * 2. Allocate RSS - fix number of queues if error
3876 if (bnx2x_alloc_fp_mem_at(bp, 0))
3880 for_each_nondefault_eth_queue(bp, i)
3881 if (bnx2x_alloc_fp_mem_at(bp, i))
3884 /* handle memory failures */
3885 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3886 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3889 bnx2x_shrink_eth_fp(bp, delta);
3890 if (CNIC_SUPPORT(bp))
3891 /* move non-eth FPs next to the last eth FP;
3892 * must be done in that order:
3893 * FCOE_IDX < FWD_IDX < OOO_IDX
3896 /* move FCoE fp even if NO_FCOE_FLAG is on */
3897 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3898 bp->num_ethernet_queues -= delta;
3899 bp->num_queues = bp->num_ethernet_queues +
3900 bp->num_cnic_queues;
3901 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3902 bp->num_queues + delta, bp->num_queues);
3908 void bnx2x_free_mem_bp(struct bnx2x *bp)
3910 kfree(bp->fp->tpa_info);
3913 kfree(bp->fp_stats);
3914 kfree(bp->bnx2x_txq);
3915 kfree(bp->msix_table);
3919 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
3921 struct bnx2x_fastpath *fp;
3922 struct msix_entry *tbl;
3923 struct bnx2x_ilt *ilt;
3924 int msix_table_size = 0;
3925 int fp_array_size, txq_array_size;
3929 * The biggest MSI-X table we might need is the maximum number of fast
3930 * path IGU SBs plus the default SB (for PF).
3932 msix_table_size = bp->igu_sb_cnt + 1;
3934 /* fp array: RSS plus CNIC related L2 queues */
3935 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3936 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3938 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3941 for (i = 0; i < fp_array_size; i++) {
3943 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3944 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3945 if (!(fp[i].tpa_info))
3951 /* allocate sp objs */
3952 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3957 /* allocate fp_stats */
3958 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3963 /* Allocate memory for the transmission queues array */
3965 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3966 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3968 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3974 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3977 bp->msix_table = tbl;
3980 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3987 bnx2x_free_mem_bp(bp);
3992 int bnx2x_reload_if_running(struct net_device *dev)
3994 struct bnx2x *bp = netdev_priv(dev);
3996 if (unlikely(!netif_running(dev)))
3999 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4000 return bnx2x_nic_load(bp, LOAD_NORMAL);
4003 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4005 u32 sel_phy_idx = 0;
4006 if (bp->link_params.num_phys <= 1)
4009 if (bp->link_vars.link_up) {
4010 sel_phy_idx = EXT_PHY1;
4011 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4012 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4013 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4014 sel_phy_idx = EXT_PHY2;
4017 switch (bnx2x_phy_selection(&bp->link_params)) {
4018 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4019 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4020 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4021 sel_phy_idx = EXT_PHY1;
4023 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4024 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4025 sel_phy_idx = EXT_PHY2;
4033 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4035 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4037 * The selected active PHY index is always the post-swap one (in case PHY
4038 * swapping is enabled), so when swapping is enabled we need to reverse
4042 if (bp->link_params.multi_phy_config &
4043 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4044 if (sel_phy_idx == EXT_PHY1)
4045 sel_phy_idx = EXT_PHY2;
4046 else if (sel_phy_idx == EXT_PHY2)
4047 sel_phy_idx = EXT_PHY1;
4049 return LINK_CONFIG_IDX(sel_phy_idx);
4052 #ifdef NETDEV_FCOE_WWNN
4053 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4055 struct bnx2x *bp = netdev_priv(dev);
4056 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4059 case NETDEV_FCOE_WWNN:
4060 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4061 cp->fcoe_wwn_node_name_lo);
4063 case NETDEV_FCOE_WWPN:
4064 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4065 cp->fcoe_wwn_port_name_lo);
4068 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4076 /* called with rtnl_lock */
4077 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4079 struct bnx2x *bp = netdev_priv(dev);
4081 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4082 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4086 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4087 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4088 BNX2X_ERR("Can't support requested MTU size\n");
4092 /* This does not race with packet allocation
4093 * because the actual alloc size is
4094 * only updated as part of load
4098 return bnx2x_reload_if_running(dev);
4101 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4102 netdev_features_t features)
4104 struct bnx2x *bp = netdev_priv(dev);
4106 /* TPA requires Rx CSUM offloading */
4107 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4108 features &= ~NETIF_F_LRO;
4109 features &= ~NETIF_F_GRO;
4115 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4117 struct bnx2x *bp = netdev_priv(dev);
4118 u32 flags = bp->flags;
4119 bool bnx2x_reload = false;
4121 if (features & NETIF_F_LRO)
4122 flags |= TPA_ENABLE_FLAG;
4124 flags &= ~TPA_ENABLE_FLAG;
4126 if (features & NETIF_F_GRO)
4127 flags |= GRO_ENABLE_FLAG;
4129 flags &= ~GRO_ENABLE_FLAG;
4131 if (features & NETIF_F_LOOPBACK) {
4132 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4133 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4134 bnx2x_reload = true;
4137 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4138 bp->link_params.loopback_mode = LOOPBACK_NONE;
4139 bnx2x_reload = true;
4143 if (flags ^ bp->flags) {
4145 bnx2x_reload = true;
4149 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4150 return bnx2x_reload_if_running(dev);
4151 /* else: bnx2x_nic_load() will be called at end of recovery */
4157 void bnx2x_tx_timeout(struct net_device *dev)
4159 struct bnx2x *bp = netdev_priv(dev);
4161 #ifdef BNX2X_STOP_ON_ERROR
4166 smp_mb__before_clear_bit();
4167 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4168 smp_mb__after_clear_bit();
4170 /* This allows the netif to be shutdown gracefully before resetting */
4171 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4174 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4176 struct net_device *dev = pci_get_drvdata(pdev);
4180 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4183 bp = netdev_priv(dev);
4187 pci_save_state(pdev);
4189 if (!netif_running(dev)) {
4194 netif_device_detach(dev);
4196 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4198 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4205 int bnx2x_resume(struct pci_dev *pdev)
4207 struct net_device *dev = pci_get_drvdata(pdev);
4212 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4215 bp = netdev_priv(dev);
4217 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4218 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4224 pci_restore_state(pdev);
4226 if (!netif_running(dev)) {
4231 bnx2x_set_power_state(bp, PCI_D0);
4232 netif_device_attach(dev);
4234 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4242 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4245 /* ustorm cxt validation */
4246 cxt->ustorm_ag_context.cdu_usage =
4247 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4248 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4249 /* xcontext validation */
4250 cxt->xstorm_ag_context.cdu_reserved =
4251 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4252 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4255 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4256 u8 fw_sb_id, u8 sb_index,
4260 u32 addr = BAR_CSTRORM_INTMEM +
4261 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4262 REG_WR8(bp, addr, ticks);
4264 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4265 port, fw_sb_id, sb_index, ticks);
4268 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4269 u16 fw_sb_id, u8 sb_index,
4272 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4273 u32 addr = BAR_CSTRORM_INTMEM +
4274 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4275 u16 flags = REG_RD16(bp, addr);
4277 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4278 flags |= enable_flag;
4279 REG_WR16(bp, addr, flags);
4281 "port %x fw_sb_id %d sb_index %d disable %d\n",
4282 port, fw_sb_id, sb_index, disable);
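/* Convert the requested coalescing interval from microseconds to BNX2X_BTR
 * ticks, program the HC timeout for the given SB index, and enable or disable
 * that index (a zero interval implies disable).
 */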
4285 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4286 u8 sb_index, u8 disable, u16 usec)
4288 int port = BP_PORT(bp);
4289 u8 ticks = usec / BNX2X_BTR;
4291 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4293 disable = disable ? 1 : (usec ? 0 : 1);
4294 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);