1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of the bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
43 * source onto the target. Update txdata pointers and related
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
56 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
58 /* Copy the NAPI object as it has been already initialized */
59 from_fp->napi = to_fp->napi;
61 /* Move bnx2x_fastpath contents */
62 memcpy(to_fp, from_fp, sizeof(*to_fp));
65 /* Retain the tpa_info of the original `to' version as we don't want
66 * 2 FPs to contain the same tpa_info pointer.
68 to_fp->tpa_info = old_tpa_info;
70 /* move sp_objs contents as well, as their indices match fp ones */
71 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
73 /* move fp_stats contents as well, as their indices match fp ones */
74 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
76 /* Update txdata pointers in fp and move txdata content accordingly:
77 * Each fp consumes 'max_cos' txdata structures, so the index should be
78 * decremented by max_cos x delta.
81 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
82 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
84 if (from == FCOE_IDX(bp)) {
85 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
86 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
89 memcpy(&bp->bnx2x_txq[new_txdata_index],
90 &bp->bnx2x_txq[old_txdata_index],
91 sizeof(struct bnx2x_fp_txdata));
92 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
96 * bnx2x_fill_fw_str - Fill buffer with FW version string.
99 * @buf: character buffer to fill with the fw name
100 * @buf_len: length of the above buffer
103 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
106 u8 phy_fw_ver[PHY_FW_VER_LEN];
108 phy_fw_ver[0] = '\0';
109 bnx2x_get_ext_phy_fw_version(&bp->link_params,
110 phy_fw_ver, PHY_FW_VER_LEN);
111 strlcpy(buf, bp->fw_ver, buf_len);
112 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
114 (bp->common.bc_ver & 0xff0000) >> 16,
115 (bp->common.bc_ver & 0xff00) >> 8,
116 (bp->common.bc_ver & 0xff),
117 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
119 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
124 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
127 * @delta: number of eth queues which were not allocated
129 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
131 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
133 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
134 * backward along the array could cause memory to be overwritten
136 for (cos = 1; cos < bp->max_cos; cos++) {
137 for (i = 0; i < old_eth_num - delta; i++) {
138 struct bnx2x_fastpath *fp = &bp->fp[i];
139 int new_idx = cos * (old_eth_num - delta) + i;
141 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
142 sizeof(struct bnx2x_fp_txdata));
143 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
148 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
150 /* free skb in the packet ring at pos idx
151 * return idx of last bd freed
153 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
154 u16 idx, unsigned int *pkts_compl,
155 unsigned int *bytes_compl)
157 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
158 struct eth_tx_start_bd *tx_start_bd;
159 struct eth_tx_bd *tx_data_bd;
160 struct sk_buff *skb = tx_buf->skb;
161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
164 /* prefetch skb end pointer to speedup dev_kfree_skb() */
167 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
168 txdata->txq_index, idx, tx_buf, skb);
171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
172 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
173 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
175 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
176 #ifdef BNX2X_STOP_ON_ERROR
177 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
178 BNX2X_ERR("BAD nbd!\n");
182 new_cons = nbd + tx_buf->first_bd;
184 /* Get the next bd */
185 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
187 /* Skip a parse bd... */
189 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
191 /* ...and the TSO split header bd since they have no mapping */
192 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
200 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
201 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
202 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
204 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
211 (*bytes_compl) += skb->len;
214 dev_kfree_skb_any(skb);
215 tx_buf->first_bd = 0;
221 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
223 struct netdev_queue *txq;
224 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
225 unsigned int pkts_compl = 0, bytes_compl = 0;
227 #ifdef BNX2X_STOP_ON_ERROR
228 if (unlikely(bp->panic))
232 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
233 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
234 sw_cons = txdata->tx_pkt_cons;
236 while (sw_cons != hw_cons) {
239 pkt_cons = TX_BD(sw_cons);
241 DP(NETIF_MSG_TX_DONE,
242 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
243 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
245 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
246 &pkts_compl, &bytes_compl);
251 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
253 txdata->tx_pkt_cons = sw_cons;
254 txdata->tx_bd_cons = bd_cons;
256 /* Need to make the tx_bd_cons update visible to start_xmit()
257 * before checking for netif_tx_queue_stopped(). Without the
258 * memory barrier, there is a small possibility that
259 * start_xmit() will miss it and cause the queue to be stopped
261 * On the other hand we need an rmb() here to ensure the proper
262 * ordering of bit testing in the following
263 * netif_tx_queue_stopped(txq) call.
267 if (unlikely(netif_tx_queue_stopped(txq))) {
268 /* Taking tx_lock() is needed to prevent re-enabling the queue
269 * while it's empty. This could happen if rx_action() gets
270 * suspended in bnx2x_tx_int() after the condition before
271 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
273 * stops the queue->sees fresh tx_bd_cons->releases the queue->
274 * sends some packets consuming the whole queue again->
278 __netif_tx_lock(txq, smp_processor_id());
280 if ((netif_tx_queue_stopped(txq)) &&
281 (bp->state == BNX2X_STATE_OPEN) &&
282 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
283 netif_tx_wake_queue(txq);
285 __netif_tx_unlock(txq);
290 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
293 u16 last_max = fp->last_max_sge;
295 if (SUB_S16(idx, last_max) > 0)
296 fp->last_max_sge = idx;
299 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
301 struct eth_end_agg_rx_cqe *cqe)
303 struct bnx2x *bp = fp->bp;
304 u16 last_max, last_elem, first_elem;
311 /* First mark all used pages */
312 for (i = 0; i < sge_len; i++)
313 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
314 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
316 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
317 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
319 /* Here we assume that the last SGE index is the biggest */
320 prefetch((void *)(fp->sge_mask));
321 bnx2x_update_last_max_sge(fp,
322 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
324 last_max = RX_SGE(fp->last_max_sge);
325 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
326 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
328 /* If ring is not full */
329 if (last_elem + 1 != first_elem)
332 /* Now update the prod */
333 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
334 if (likely(fp->sge_mask[i]))
337 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
338 delta += BIT_VEC64_ELEM_SZ;
342 fp->rx_sge_prod += delta;
343 /* clear page-end entries */
344 bnx2x_clear_sge_mask_next_elems(fp);
347 DP(NETIF_MSG_RX_STATUS,
348 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
349 fp->last_max_sge, fp->rx_sge_prod);
352 /* Get Toeplitz hash value in the skb using the value from the
353 * CQE (calculated by HW).
355 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
356 const struct eth_fast_path_rx_cqe *cqe,
359 /* Get Toeplitz hash from CQE */
360 if ((bp->dev->features & NETIF_F_RXHASH) &&
361 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
362 enum eth_rss_hash_type htype;
364 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
365 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
366 (htype == TCP_IPV6_HASH_TYPE);
367 return le32_to_cpu(cqe->rss_hash_result);
373 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
375 struct eth_fast_path_rx_cqe *cqe)
377 struct bnx2x *bp = fp->bp;
378 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
379 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
380 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
382 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
383 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
385 /* print error if current state != stop */
386 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
387 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
389 /* Try to map an empty data buffer from the aggregation info */
390 mapping = dma_map_single(&bp->pdev->dev,
391 first_buf->data + NET_SKB_PAD,
392 fp->rx_buf_size, DMA_FROM_DEVICE);
394 * ...if it fails - move the skb from the consumer to the producer
395 * and set the current aggregation state as ERROR to drop it
396 * when TPA_STOP arrives.
399 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
400 /* Move the BD from the consumer to the producer */
401 bnx2x_reuse_rx_data(fp, cons, prod);
402 tpa_info->tpa_state = BNX2X_TPA_ERROR;
406 /* move empty data from pool to prod */
407 prod_rx_buf->data = first_buf->data;
408 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
409 /* point prod_bd to new data */
410 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
411 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
413 /* move partial skb from cons to pool (don't unmap yet) */
414 *first_buf = *cons_rx_buf;
416 /* mark bin state as START */
417 tpa_info->parsing_flags =
418 le16_to_cpu(cqe->pars_flags.flags);
419 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
420 tpa_info->tpa_state = BNX2X_TPA_START;
421 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
422 tpa_info->placement_offset = cqe->placement_offset;
423 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
424 if (fp->mode == TPA_MODE_GRO) {
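/* full_page below is SGE_PAGES rounded down to a whole number of
 * MSS-sized (gro_size) chunks - the largest portion of an SGE page
 * set that can later be split on MSS boundaries for GRO.
 */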
425 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
426 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
427 tpa_info->gro_size = gro_size;
430 #ifdef BNX2X_STOP_ON_ERROR
431 fp->tpa_queue_used |= (1 << queue);
432 #ifdef _ASM_GENERIC_INT_L64_H
433 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
435 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
441 /* Timestamp option length allowed for TPA aggregation:
443 * nop nop kind length echo val
445 #define TPA_TSTAMP_OPT_LEN 12
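/* The TCP timestamp option itself is 10 bytes (kind, length, 4-byte
 * TSval, 4-byte TSecr); the two leading NOPs pad it to the 12-byte,
 * 32-bit aligned form assumed above.
 */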
447 * bnx2x_set_gro_params - compute GRO values
450 * @parsing_flags: parsing flags from the START CQE
451 * @len_on_bd: total length of the first packet for the aggregation
453 * @pkt_len: length of all segments
455 * Approximate value of the MSS for this aggregation calculated using
456 * its first packet.
457 * Compute number of aggregated segments, and gso_type.
459 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
460 u16 len_on_bd, unsigned int pkt_len,
461 u16 num_of_coalesced_segs)
463 /* TPA aggregation won't have either IP options or TCP options
464 * other than timestamp or IPv6 extension headers.
466 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
468 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
469 PRS_FLAG_OVERETH_IPV6) {
470 hdrs_len += sizeof(struct ipv6hdr);
471 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
473 hdrs_len += sizeof(struct iphdr);
474 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
477 /* Check if there was a TCP timestamp; if there was, it will
478 * always be 12 bytes long: nop nop kind length echo val.
480 * Otherwise FW would close the aggregation.
482 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
483 hdrs_len += TPA_TSTAMP_OPT_LEN;
485 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
487 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
488 * to skb_shinfo(skb)->gso_segs
490 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
493 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
494 u16 index, gfp_t gfp_mask)
496 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
497 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
498 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
501 if (unlikely(page == NULL)) {
502 BNX2X_ERR("Can't alloc sge\n");
506 mapping = dma_map_page(&bp->pdev->dev, page, 0,
507 SGE_PAGES, DMA_FROM_DEVICE);
508 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
509 __free_pages(page, PAGES_PER_SGE_SHIFT);
510 BNX2X_ERR("Can't map sge\n");
515 dma_unmap_addr_set(sw_buf, mapping, mapping);
517 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
518 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
523 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
524 struct bnx2x_agg_info *tpa_info,
527 struct eth_end_agg_rx_cqe *cqe,
530 struct sw_rx_page *rx_pg, old_rx_pg;
531 u32 i, frag_len, frag_size;
532 int err, j, frag_id = 0;
533 u16 len_on_bd = tpa_info->len_on_bd;
534 u16 full_page = 0, gro_size = 0;
536 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
538 if (fp->mode == TPA_MODE_GRO) {
539 gro_size = tpa_info->gro_size;
540 full_page = tpa_info->full_page;
543 /* This is needed in order to enable forwarding support */
545 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
546 le16_to_cpu(cqe->pkt_len),
547 le16_to_cpu(cqe->num_of_coalesced_segs));
549 #ifdef BNX2X_STOP_ON_ERROR
550 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
551 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
553 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
559 /* Run through the SGL and compose the fragmented skb */
560 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
561 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
563 /* FW gives the indices of the SGE as if the ring is an array
564 (meaning that "next" element will consume 2 indices) */
565 if (fp->mode == TPA_MODE_GRO)
566 frag_len = min_t(u32, frag_size, (u32)full_page);
568 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
570 rx_pg = &fp->rx_page_ring[sge_idx];
573 /* If we fail to allocate a substitute page, we simply stop
574 where we are and drop the whole packet */
575 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
577 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
581 /* Unmap the page as we're going to pass it to the stack */
582 dma_unmap_page(&bp->pdev->dev,
583 dma_unmap_addr(&old_rx_pg, mapping),
584 SGE_PAGES, DMA_FROM_DEVICE);
585 /* Add one frag and update the appropriate fields in the skb */
586 if (fp->mode == TPA_MODE_LRO)
587 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
591 for (rem = frag_len; rem > 0; rem -= gro_size) {
592 int len = rem > gro_size ? gro_size : rem;
593 skb_fill_page_desc(skb, frag_id++,
594 old_rx_pg.page, offset, len);
596 get_page(old_rx_pg.page);
601 skb->data_len += frag_len;
602 skb->truesize += SGE_PAGES;
603 skb->len += frag_len;
605 frag_size -= frag_len;
611 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
613 if (fp->rx_frag_size)
614 put_page(virt_to_head_page(data));
619 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
621 if (fp->rx_frag_size) {
622 /* GFP_KERNEL allocations are used only during initialization */
623 if (unlikely(gfp_mask & __GFP_WAIT))
624 return (void *)__get_free_page(gfp_mask);
626 return netdev_alloc_frag(fp->rx_frag_size);
629 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
633 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
635 const struct iphdr *iph = ip_hdr(skb);
638 skb_set_transport_header(skb, sizeof(struct iphdr));
641 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
642 iph->saddr, iph->daddr, 0);
645 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
647 struct ipv6hdr *iph = ipv6_hdr(skb);
650 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
653 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
654 &iph->saddr, &iph->daddr, 0);
657 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
658 void (*gro_func)(struct bnx2x*, struct sk_buff*))
660 skb_set_network_header(skb, 0);
662 tcp_gro_complete(skb);
666 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
670 if (skb_shinfo(skb)->gso_size) {
671 switch (be16_to_cpu(skb->protocol)) {
673 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
676 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
679 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
680 be16_to_cpu(skb->protocol));
684 napi_gro_receive(&fp->napi, skb);
687 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
688 struct bnx2x_agg_info *tpa_info,
690 struct eth_end_agg_rx_cqe *cqe,
693 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
694 u8 pad = tpa_info->placement_offset;
695 u16 len = tpa_info->len_on_bd;
696 struct sk_buff *skb = NULL;
697 u8 *new_data, *data = rx_buf->data;
698 u8 old_tpa_state = tpa_info->tpa_state;
700 tpa_info->tpa_state = BNX2X_TPA_STOP;
702 /* If there was an error during the handling of the TPA_START -
703 * drop this aggregation.
705 if (old_tpa_state == BNX2X_TPA_ERROR)
708 /* Try to allocate the new data */
709 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
710 /* Unmap skb in the pool anyway, as we are going to change
711 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
713 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
714 fp->rx_buf_size, DMA_FROM_DEVICE);
715 if (likely(new_data))
716 skb = build_skb(data, fp->rx_frag_size);
719 #ifdef BNX2X_STOP_ON_ERROR
720 if (pad + len > fp->rx_buf_size) {
721 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
722 pad, len, fp->rx_buf_size);
728 skb_reserve(skb, pad + NET_SKB_PAD);
730 skb->rxhash = tpa_info->rxhash;
731 skb->l4_rxhash = tpa_info->l4_rxhash;
733 skb->protocol = eth_type_trans(skb, bp->dev);
734 skb->ip_summed = CHECKSUM_UNNECESSARY;
736 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
737 skb, cqe, cqe_idx)) {
738 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
739 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
740 bnx2x_gro_receive(bp, fp, skb);
742 DP(NETIF_MSG_RX_STATUS,
743 "Failed to allocate new pages - dropping packet!\n");
744 dev_kfree_skb_any(skb);
747 /* put new data in bin */
748 rx_buf->data = new_data;
752 bnx2x_frag_free(fp, new_data);
754 /* drop the packet and keep the buffer in the bin */
755 DP(NETIF_MSG_RX_STATUS,
756 "Failed to allocate or map a new skb - dropping packet!\n");
757 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
760 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
761 u16 index, gfp_t gfp_mask)
764 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
765 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
768 data = bnx2x_frag_alloc(fp, gfp_mask);
769 if (unlikely(data == NULL))
772 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
775 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
776 bnx2x_frag_free(fp, data);
777 BNX2X_ERR("Can't map rx data\n");
782 dma_unmap_addr_set(rx_buf, mapping, mapping);
784 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
785 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
791 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
792 struct bnx2x_fastpath *fp,
793 struct bnx2x_eth_q_stats *qstats)
795 /* Do nothing if no L4 csum validation was done.
796 * We do not check whether IP csum was validated. For IPv4 we assume
797 * that if the card got as far as validating the L4 csum, it also
798 * validated the IP csum. IPv6 has no IP csum.
800 if (cqe->fast_path_cqe.status_flags &
801 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
804 /* If L4 validation was done, check if an error was found. */
806 if (cqe->fast_path_cqe.type_error_flags &
807 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
808 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
809 qstats->hw_csum_err++;
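/* CHECKSUM_UNNECESSARY tells the stack that the hardware has already
 * verified the checksum, so no software verification is needed.
 */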
811 skb->ip_summed = CHECKSUM_UNNECESSARY;
814 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
816 struct bnx2x *bp = fp->bp;
817 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
818 u16 sw_comp_cons, sw_comp_prod;
820 union eth_rx_cqe *cqe;
821 struct eth_fast_path_rx_cqe *cqe_fp;
823 #ifdef BNX2X_STOP_ON_ERROR
824 if (unlikely(bp->panic))
828 bd_cons = fp->rx_bd_cons;
829 bd_prod = fp->rx_bd_prod;
830 bd_prod_fw = bd_prod;
831 sw_comp_cons = fp->rx_comp_cons;
832 sw_comp_prod = fp->rx_comp_prod;
834 comp_ring_cons = RCQ_BD(sw_comp_cons);
835 cqe = &fp->rx_comp_ring[comp_ring_cons];
836 cqe_fp = &cqe->fast_path_cqe;
838 DP(NETIF_MSG_RX_STATUS,
839 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
841 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
842 struct sw_rx_bd *rx_buf = NULL;
845 enum eth_rx_cqe_type cqe_fp_type;
850 #ifdef BNX2X_STOP_ON_ERROR
851 if (unlikely(bp->panic))
855 bd_prod = RX_BD(bd_prod);
856 bd_cons = RX_BD(bd_cons);
858 cqe_fp_flags = cqe_fp->type_error_flags;
859 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
861 DP(NETIF_MSG_RX_STATUS,
862 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
863 CQE_TYPE(cqe_fp_flags),
864 cqe_fp_flags, cqe_fp->status_flags,
865 le32_to_cpu(cqe_fp->rss_hash_result),
866 le16_to_cpu(cqe_fp->vlan_tag),
867 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
869 /* is this a slowpath msg? */
870 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
871 bnx2x_sp_event(fp, cqe);
875 rx_buf = &fp->rx_buf_ring[bd_cons];
878 if (!CQE_TYPE_FAST(cqe_fp_type)) {
879 struct bnx2x_agg_info *tpa_info;
880 u16 frag_size, pages;
881 #ifdef BNX2X_STOP_ON_ERROR
883 if (fp->disable_tpa &&
884 (CQE_TYPE_START(cqe_fp_type) ||
885 CQE_TYPE_STOP(cqe_fp_type)))
886 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
887 CQE_TYPE(cqe_fp_type));
890 if (CQE_TYPE_START(cqe_fp_type)) {
891 u16 queue = cqe_fp->queue_index;
892 DP(NETIF_MSG_RX_STATUS,
893 "calling tpa_start on queue %d\n",
896 bnx2x_tpa_start(fp, queue,
902 queue = cqe->end_agg_cqe.queue_index;
903 tpa_info = &fp->tpa_info[queue];
904 DP(NETIF_MSG_RX_STATUS,
905 "calling tpa_stop on queue %d\n",
908 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
911 if (fp->mode == TPA_MODE_GRO)
912 pages = (frag_size + tpa_info->full_page - 1) /
915 pages = SGE_PAGE_ALIGN(frag_size) >>
918 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
919 &cqe->end_agg_cqe, comp_ring_cons);
920 #ifdef BNX2X_STOP_ON_ERROR
925 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
929 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
930 pad = cqe_fp->placement_offset;
931 dma_sync_single_for_cpu(&bp->pdev->dev,
932 dma_unmap_addr(rx_buf, mapping),
933 pad + RX_COPY_THRESH,
936 prefetch(data + pad); /* speedup eth_type_trans() */
937 /* is this an error packet? */
938 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
939 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
940 "ERROR flags %x rx packet %u\n",
941 cqe_fp_flags, sw_comp_cons);
942 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
946 /* Since we don't have a jumbo ring
947 * copy small packets if mtu > 1500
949 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
950 (len <= RX_COPY_THRESH)) {
951 skb = netdev_alloc_skb_ip_align(bp->dev, len);
953 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
954 "ERROR packet dropped because of alloc failure\n");
955 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
958 memcpy(skb->data, data + pad, len);
959 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
961 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
963 dma_unmap_single(&bp->pdev->dev,
964 dma_unmap_addr(rx_buf, mapping),
967 skb = build_skb(data, fp->rx_frag_size);
968 if (unlikely(!skb)) {
969 bnx2x_frag_free(fp, data);
970 bnx2x_fp_qstats(bp, fp)->
971 rx_skb_alloc_failed++;
974 skb_reserve(skb, pad);
976 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
977 "ERROR packet dropped because of alloc failure\n");
978 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
980 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
986 skb->protocol = eth_type_trans(skb, bp->dev);
988 /* Set Toeplitz hash for a non-LRO skb */
989 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
990 skb->l4_rxhash = l4_rxhash;
992 skb_checksum_none_assert(skb);
994 if (bp->dev->features & NETIF_F_RXCSUM)
995 bnx2x_csum_validate(skb, cqe, fp,
996 bnx2x_fp_qstats(bp, fp));
998 skb_record_rx_queue(skb, fp->rx_queue);
1000 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1002 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1003 le16_to_cpu(cqe_fp->vlan_tag));
1005 skb_mark_napi_id(skb, &fp->napi);
1007 if (bnx2x_fp_ll_polling(fp))
1008 netif_receive_skb(skb);
1010 napi_gro_receive(&fp->napi, skb);
1012 rx_buf->data = NULL;
1014 bd_cons = NEXT_RX_IDX(bd_cons);
1015 bd_prod = NEXT_RX_IDX(bd_prod);
1016 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1019 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1020 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1022 /* mark CQE as free */
1023 BNX2X_SEED_CQE(cqe_fp);
1025 if (rx_pkt == budget)
1028 comp_ring_cons = RCQ_BD(sw_comp_cons);
1029 cqe = &fp->rx_comp_ring[comp_ring_cons];
1030 cqe_fp = &cqe->fast_path_cqe;
1033 fp->rx_bd_cons = bd_cons;
1034 fp->rx_bd_prod = bd_prod_fw;
1035 fp->rx_comp_cons = sw_comp_cons;
1036 fp->rx_comp_prod = sw_comp_prod;
1038 /* Update producers */
1039 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1042 fp->rx_pkt += rx_pkt;
1048 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1050 struct bnx2x_fastpath *fp = fp_cookie;
1051 struct bnx2x *bp = fp->bp;
1055 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1056 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1058 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1060 #ifdef BNX2X_STOP_ON_ERROR
1061 if (unlikely(bp->panic))
1065 /* Handle Rx and Tx according to MSI-X vector */
1066 for_each_cos_in_tx_queue(fp, cos)
1067 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1069 prefetch(&fp->sb_running_index[SM_RX_ID]);
1070 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1075 /* HW Lock for shared dual port PHYs */
1076 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1078 mutex_lock(&bp->port.phy_mutex);
1080 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1083 void bnx2x_release_phy_lock(struct bnx2x *bp)
1085 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1087 mutex_unlock(&bp->port.phy_mutex);
1090 /* calculates MF speed according to current line speed and MF configuration */
1091 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1093 u16 line_speed = bp->link_vars.line_speed;
1095 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1096 bp->mf_config[BP_VN(bp)]);
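/* Note: maxCfg is interpreted as a percentage of the line speed in
 * SI mode and in units of 100 Mbps in SD mode, as reflected in the
 * calculations below.
 */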
1098 /* Calculate the current MAX line speed limit for the MF
1102 line_speed = (line_speed * maxCfg) / 100;
1103 else { /* SD mode */
1104 u16 vn_max_rate = maxCfg * 100;
1106 if (vn_max_rate < line_speed)
1107 line_speed = vn_max_rate;
1115 * bnx2x_fill_report_data - fill link report data to report
1117 * @bp: driver handle
1118 * @data: link state to update
1120 * It uses non-atomic bit operations because it is called under the mutex.
1122 static void bnx2x_fill_report_data(struct bnx2x *bp,
1123 struct bnx2x_link_report_data *data)
1125 u16 line_speed = bnx2x_get_mf_speed(bp);
1127 memset(data, 0, sizeof(*data));
1129 /* Fill the report data: effective line speed */
1130 data->line_speed = line_speed;
1133 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1134 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1135 &data->link_report_flags);
1138 if (bp->link_vars.duplex == DUPLEX_FULL)
1139 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1141 /* Rx Flow Control is ON */
1142 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1143 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1145 /* Tx Flow Control is ON */
1146 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1147 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1151 * bnx2x_link_report - report link status to OS.
1153 * @bp: driver handle
1155 * Calls the __bnx2x_link_report() under the same locking scheme
1156 * as the link/PHY state managing code to ensure a consistent link
1160 void bnx2x_link_report(struct bnx2x *bp)
1162 bnx2x_acquire_phy_lock(bp);
1163 __bnx2x_link_report(bp);
1164 bnx2x_release_phy_lock(bp);
1168 * __bnx2x_link_report - report link status to OS.
1170 * @bp: driver handle
1172 * Non-atomic implementation.
1173 * Should be called under the phy_lock.
1175 void __bnx2x_link_report(struct bnx2x *bp)
1177 struct bnx2x_link_report_data cur_data;
1180 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1181 bnx2x_read_mf_cfg(bp);
1183 /* Read the current link report info */
1184 bnx2x_fill_report_data(bp, &cur_data);
1186 /* Don't report link down or exactly the same link status twice */
1187 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1188 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1189 &bp->last_reported_link.link_report_flags) &&
1190 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1191 &cur_data.link_report_flags)))
1196 /* We are going to report new link parameters now -
1197 * remember the current data for the next time.
1199 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1201 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1202 &cur_data.link_report_flags)) {
1203 netif_carrier_off(bp->dev);
1204 netdev_err(bp->dev, "NIC Link is Down\n");
1210 netif_carrier_on(bp->dev);
1212 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1213 &cur_data.link_report_flags))
1218 /* Handle the FC at the end so that only these flags could
1219 * possibly be set. This way we may easily check if there is no FC enabled.
1222 if (cur_data.link_report_flags) {
1223 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1224 &cur_data.link_report_flags)) {
1225 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1226 &cur_data.link_report_flags))
1227 flow = "ON - receive & transmit";
1229 flow = "ON - receive";
1231 flow = "ON - transmit";
1236 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1237 cur_data.line_speed, duplex, flow);
1241 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1245 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1246 struct eth_rx_sge *sge;
1248 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1250 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1251 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1254 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1255 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1259 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1260 struct bnx2x_fastpath *fp, int last)
1264 for (i = 0; i < last; i++) {
1265 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1266 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1267 u8 *data = first_buf->data;
1270 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1273 if (tpa_info->tpa_state == BNX2X_TPA_START)
1274 dma_unmap_single(&bp->pdev->dev,
1275 dma_unmap_addr(first_buf, mapping),
1276 fp->rx_buf_size, DMA_FROM_DEVICE);
1277 bnx2x_frag_free(fp, data);
1278 first_buf->data = NULL;
1282 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1286 for_each_rx_queue_cnic(bp, j) {
1287 struct bnx2x_fastpath *fp = &bp->fp[j];
1291 /* Activate BD ring */
1293 * this will generate an interrupt (to the TSTORM)
1294 * must only be done after chip is initialized
1296 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1301 void bnx2x_init_rx_rings(struct bnx2x *bp)
1303 int func = BP_FUNC(bp);
1307 /* Allocate TPA resources */
1308 for_each_eth_queue(bp, j) {
1309 struct bnx2x_fastpath *fp = &bp->fp[j];
1312 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1314 if (!fp->disable_tpa) {
1315 /* Fill the per-aggregation pool */
1316 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1317 struct bnx2x_agg_info *tpa_info =
1319 struct sw_rx_bd *first_buf =
1320 &tpa_info->first_buf;
1323 bnx2x_frag_alloc(fp, GFP_KERNEL);
1324 if (!first_buf->data) {
1325 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1327 bnx2x_free_tpa_pool(bp, fp, i);
1328 fp->disable_tpa = 1;
1331 dma_unmap_addr_set(first_buf, mapping, 0);
1332 tpa_info->tpa_state = BNX2X_TPA_STOP;
1335 /* "next page" elements initialization */
1336 bnx2x_set_next_page_sgl(fp);
1338 /* set SGEs bit mask */
1339 bnx2x_init_sge_ring_bit_mask(fp);
1341 /* Allocate SGEs and initialize the ring elements */
1342 for (i = 0, ring_prod = 0;
1343 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1345 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1347 BNX2X_ERR("was only able to allocate %d rx sges\n",
1349 BNX2X_ERR("disabling TPA for queue[%d]\n",
1351 /* Cleanup already allocated elements */
1352 bnx2x_free_rx_sge_range(bp, fp,
1354 bnx2x_free_tpa_pool(bp, fp,
1356 fp->disable_tpa = 1;
1360 ring_prod = NEXT_SGE_IDX(ring_prod);
1363 fp->rx_sge_prod = ring_prod;
1367 for_each_eth_queue(bp, j) {
1368 struct bnx2x_fastpath *fp = &bp->fp[j];
1372 /* Activate BD ring */
1374 * this will generate an interrupt (to the TSTORM)
1375 * must only be done after chip is initialized
1377 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1383 if (CHIP_IS_E1(bp)) {
1384 REG_WR(bp, BAR_USTRORM_INTMEM +
1385 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1386 U64_LO(fp->rx_comp_mapping));
1387 REG_WR(bp, BAR_USTRORM_INTMEM +
1388 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1389 U64_HI(fp->rx_comp_mapping));
1394 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1397 struct bnx2x *bp = fp->bp;
1399 for_each_cos_in_tx_queue(fp, cos) {
1400 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1401 unsigned pkts_compl = 0, bytes_compl = 0;
1403 u16 sw_prod = txdata->tx_pkt_prod;
1404 u16 sw_cons = txdata->tx_pkt_cons;
1406 while (sw_cons != sw_prod) {
1407 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1408 &pkts_compl, &bytes_compl);
1412 netdev_tx_reset_queue(
1413 netdev_get_tx_queue(bp->dev,
1414 txdata->txq_index));
1418 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1422 for_each_tx_queue_cnic(bp, i) {
1423 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1427 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1431 for_each_eth_queue(bp, i) {
1432 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1436 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1438 struct bnx2x *bp = fp->bp;
1441 /* ring wasn't allocated */
1442 if (fp->rx_buf_ring == NULL)
1445 for (i = 0; i < NUM_RX_BD; i++) {
1446 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1447 u8 *data = rx_buf->data;
1451 dma_unmap_single(&bp->pdev->dev,
1452 dma_unmap_addr(rx_buf, mapping),
1453 fp->rx_buf_size, DMA_FROM_DEVICE);
1455 rx_buf->data = NULL;
1456 bnx2x_frag_free(fp, data);
1460 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1464 for_each_rx_queue_cnic(bp, j) {
1465 bnx2x_free_rx_bds(&bp->fp[j]);
1469 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1473 for_each_eth_queue(bp, j) {
1474 struct bnx2x_fastpath *fp = &bp->fp[j];
1476 bnx2x_free_rx_bds(fp);
1478 if (!fp->disable_tpa)
1479 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1483 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1485 bnx2x_free_tx_skbs_cnic(bp);
1486 bnx2x_free_rx_skbs_cnic(bp);
1489 void bnx2x_free_skbs(struct bnx2x *bp)
1491 bnx2x_free_tx_skbs(bp);
1492 bnx2x_free_rx_skbs(bp);
1495 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1497 /* load old values */
1498 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1500 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1501 /* leave all but MAX value */
1502 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1504 /* set new MAX value */
1505 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1506 & FUNC_MF_CFG_MAX_BW_MASK;
1508 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1513 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1515 * @bp: driver handle
1516 * @nvecs: number of vectors to be released
1518 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1522 if (nvecs == offset)
1525 /* VFs don't have a default SB */
1527 free_irq(bp->msix_table[offset].vector, bp->dev);
1528 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1529 bp->msix_table[offset].vector);
1533 if (CNIC_SUPPORT(bp)) {
1534 if (nvecs == offset)
1539 for_each_eth_queue(bp, i) {
1540 if (nvecs == offset)
1542 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1543 i, bp->msix_table[offset].vector);
1545 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1549 void bnx2x_free_irq(struct bnx2x *bp)
1551 if (bp->flags & USING_MSIX_FLAG &&
1552 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1553 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1555 /* vfs don't have a default status block */
1559 bnx2x_free_msix_irqs(bp, nvecs);
1561 free_irq(bp->dev->irq, bp->dev);
1565 int bnx2x_enable_msix(struct bnx2x *bp)
1567 int msix_vec = 0, i, rc;
1569 /* VFs don't have a default status block */
1571 bp->msix_table[msix_vec].entry = msix_vec;
1572 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1573 bp->msix_table[0].entry);
1577 /* Cnic requires an msix vector for itself */
1578 if (CNIC_SUPPORT(bp)) {
1579 bp->msix_table[msix_vec].entry = msix_vec;
1580 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1581 msix_vec, bp->msix_table[msix_vec].entry);
1585 /* We need separate vectors for ETH queues only (not FCoE) */
1586 for_each_eth_queue(bp, i) {
1587 bp->msix_table[msix_vec].entry = msix_vec;
1588 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1589 msix_vec, msix_vec, i);
1593 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1596 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1599 * reconfigure number of tx/rx queues according to available MSI-X vectors
1602 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1603 /* how many fewer vectors will we have? */
1604 int diff = msix_vec - rc;
1606 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1608 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1611 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1615 * decrease number of queues by number of unallocated entries
1617 bp->num_ethernet_queues -= diff;
1618 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1620 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1622 } else if (rc > 0) {
1623 /* Get by with single vector */
1624 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1626 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1631 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1632 bp->flags |= USING_SINGLE_MSIX_FLAG;
1634 BNX2X_DEV_INFO("set number of queues to 1\n");
1635 bp->num_ethernet_queues = 1;
1636 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1637 } else if (rc < 0) {
1638 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1642 bp->flags |= USING_MSIX_FLAG;
1647 /* fall to INTx if not enough memory */
1649 bp->flags |= DISABLE_MSI_FLAG;
1654 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1656 int i, rc, offset = 0;
1658 /* no default status block for vf */
1660 rc = request_irq(bp->msix_table[offset++].vector,
1661 bnx2x_msix_sp_int, 0,
1662 bp->dev->name, bp->dev);
1664 BNX2X_ERR("request sp irq failed\n");
1669 if (CNIC_SUPPORT(bp))
1672 for_each_eth_queue(bp, i) {
1673 struct bnx2x_fastpath *fp = &bp->fp[i];
1674 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1677 rc = request_irq(bp->msix_table[offset].vector,
1678 bnx2x_msix_fp_int, 0, fp->name, fp);
1680 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1681 bp->msix_table[offset].vector, rc);
1682 bnx2x_free_msix_irqs(bp, offset);
1689 i = BNX2X_NUM_ETH_QUEUES(bp);
1691 offset = 1 + CNIC_SUPPORT(bp);
1692 netdev_info(bp->dev,
1693 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1694 bp->msix_table[0].vector,
1695 0, bp->msix_table[offset].vector,
1696 i - 1, bp->msix_table[offset + i - 1].vector);
1698 offset = CNIC_SUPPORT(bp);
1699 netdev_info(bp->dev,
1700 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1701 0, bp->msix_table[offset].vector,
1702 i - 1, bp->msix_table[offset + i - 1].vector);
1707 int bnx2x_enable_msi(struct bnx2x *bp)
1711 rc = pci_enable_msi(bp->pdev);
1713 BNX2X_DEV_INFO("MSI is not attainable\n");
1716 bp->flags |= USING_MSI_FLAG;
1721 static int bnx2x_req_irq(struct bnx2x *bp)
1723 unsigned long flags;
1726 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1729 flags = IRQF_SHARED;
1731 if (bp->flags & USING_MSIX_FLAG)
1732 irq = bp->msix_table[0].vector;
1734 irq = bp->pdev->irq;
1736 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1739 static int bnx2x_setup_irqs(struct bnx2x *bp)
1742 if (bp->flags & USING_MSIX_FLAG &&
1743 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1744 rc = bnx2x_req_msix_irqs(bp);
1748 rc = bnx2x_req_irq(bp);
1750 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1753 if (bp->flags & USING_MSI_FLAG) {
1754 bp->dev->irq = bp->pdev->irq;
1755 netdev_info(bp->dev, "using MSI IRQ %d\n",
1758 if (bp->flags & USING_MSIX_FLAG) {
1759 bp->dev->irq = bp->msix_table[0].vector;
1760 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1768 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1772 for_each_rx_queue_cnic(bp, i) {
1773 bnx2x_fp_init_lock(&bp->fp[i]);
1774 napi_enable(&bnx2x_fp(bp, i, napi));
1778 static void bnx2x_napi_enable(struct bnx2x *bp)
1782 for_each_eth_queue(bp, i) {
1783 bnx2x_fp_init_lock(&bp->fp[i]);
1784 napi_enable(&bnx2x_fp(bp, i, napi));
1788 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1793 for_each_rx_queue_cnic(bp, i) {
1794 napi_disable(&bnx2x_fp(bp, i, napi));
1795 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1801 static void bnx2x_napi_disable(struct bnx2x *bp)
1806 for_each_eth_queue(bp, i) {
1807 napi_disable(&bnx2x_fp(bp, i, napi));
1808 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1814 void bnx2x_netif_start(struct bnx2x *bp)
1816 if (netif_running(bp->dev)) {
1817 bnx2x_napi_enable(bp);
1818 if (CNIC_LOADED(bp))
1819 bnx2x_napi_enable_cnic(bp);
1820 bnx2x_int_enable(bp);
1821 if (bp->state == BNX2X_STATE_OPEN)
1822 netif_tx_wake_all_queues(bp->dev);
1826 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1828 bnx2x_int_disable_sync(bp, disable_hw);
1829 bnx2x_napi_disable(bp);
1830 if (CNIC_LOADED(bp))
1831 bnx2x_napi_disable_cnic(bp);
1834 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1836 struct bnx2x *bp = netdev_priv(dev);
1838 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1839 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1840 u16 ether_type = ntohs(hdr->h_proto);
1842 /* Skip VLAN tag if present */
1843 if (ether_type == ETH_P_8021Q) {
1844 struct vlan_ethhdr *vhdr =
1845 (struct vlan_ethhdr *)skb->data;
1847 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1850 /* If ethertype is FCoE or FIP - use FCoE ring */
1851 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1852 return bnx2x_fcoe_tx(bp, txq_index);
1855 /* select a non-FCoE queue */
1856 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1859 void bnx2x_set_num_queues(struct bnx2x *bp)
1862 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1864 /* override in STORAGE SD modes */
1865 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1866 bp->num_ethernet_queues = 1;
1868 /* Add special queues */
1869 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1870 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1872 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1876 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1878 * @bp: Driver handle
1880 * We currently support at most 16 Tx queues for each CoS, thus we will
1881 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1884 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1885 * index after all ETH L2 indices.
1887 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1888 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1889 * 16..31,...) with indices that are not coupled with any real Tx queue.
1891 * The proper configuration of skb->queue_mapping is handled by
1892 * bnx2x_select_queue() and __skb_tx_hash().
1894 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1895 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1897 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1901 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1902 rx = BNX2X_NUM_ETH_QUEUES(bp);
1904 /* account for fcoe queue */
1905 if (include_cnic && !NO_FCOE(bp)) {
1910 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1912 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1915 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1917 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1921 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1927 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1931 for_each_queue(bp, i) {
1932 struct bnx2x_fastpath *fp = &bp->fp[i];
1935 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1938 * Although there are no IP frames expected to arrive to
1939 * this ring we still want to add an
1940 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun
1943 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1946 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1947 IP_HEADER_ALIGNMENT_PADDING +
1950 BNX2X_FW_RX_ALIGN_END;
1951 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
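/* If the buffer plus NET_SKB_PAD does not fit in a single page,
 * rx_frag_size is left at zero, which makes bnx2x_frag_alloc() fall
 * back to kmalloc() instead of the page-fragment allocator.
 */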
1952 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1953 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1955 fp->rx_frag_size = 0;
1959 static int bnx2x_init_rss(struct bnx2x *bp)
1962 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1964 /* Prepare the initial contents for the indirection table if RSS is enabled
1967 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1968 bp->rss_conf_obj.ind_table[i] =
1970 ethtool_rxfh_indir_default(i, num_eth_queues);
1973 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1974 * per-port, so if explicit configuration is needed, do it only
1977 * For 57712 and newer on the other hand it's a per-function
1980 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1983 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1984 bool config_hash, bool enable)
1986 struct bnx2x_config_rss_params params = {NULL};
1988 /* Although RSS is meaningless when there is a single HW queue we
1989 * still need it enabled in order to have HW Rx hash generated.
1991 * if (!is_eth_multi(bp))
1992 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1995 params.rss_obj = rss_obj;
1997 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2000 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2002 /* RSS configuration */
2003 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2004 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2005 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2006 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2007 if (rss_obj->udp_rss_v4)
2008 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2009 if (rss_obj->udp_rss_v6)
2010 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2012 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2016 params.rss_result_mask = MULTI_MASK;
2018 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
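/* The RSS key is T_ETH_RSS_KEY 32-bit words long, so the byte count
 * passed below is T_ETH_RSS_KEY * 4.
 */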
2022 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2023 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2027 return bnx2x_config_rss(bp, ¶ms);
2029 return bnx2x_vfpf_config_rss(bp, ¶ms);
2032 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2034 struct bnx2x_func_state_params func_params = {NULL};
2036 /* Prepare parameters for function state transitions */
2037 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2039 func_params.f_obj = &bp->func_obj;
2040 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2042 func_params.params.hw_init.load_phase = load_code;
2044 return bnx2x_func_state_change(bp, &func_params);
2048 * Cleans the objects that have internal lists without sending
2049 * ramrods. Should be run when interrupts are disabled.
2051 void bnx2x_squeeze_objects(struct bnx2x *bp)
2054 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2055 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2056 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2058 /***************** Cleanup MACs' object first *************************/
2060 /* Wait for completion of requested */
2061 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2062 /* Perform a dry cleanup */
2063 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2065 /* Clean ETH primary MAC */
2066 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2067 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2070 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2072 /* Cleanup UC list */
2074 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2075 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2078 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2080 /***************** Now clean mcast object *****************************/
2081 rparam.mcast_obj = &bp->mcast_obj;
2082 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2084 /* Add a DEL command... - Since we're doing a driver cleanup only,
2085 * we take a lock surrounding both the initial send and the CONTs,
2086 * as we don't want a true completion to disrupt us in the middle.
2088 netif_addr_lock_bh(bp->dev);
2089 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2091 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2094 /* ...and wait until all pending commands are cleared */
2095 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2098 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2100 netif_addr_unlock_bh(bp->dev);
2104 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2106 netif_addr_unlock_bh(bp->dev);
2109 #ifndef BNX2X_STOP_ON_ERROR
2110 #define LOAD_ERROR_EXIT(bp, label) \
2112 (bp)->state = BNX2X_STATE_ERROR; \
2116 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2118 bp->cnic_loaded = false; \
2121 #else /*BNX2X_STOP_ON_ERROR*/
2122 #define LOAD_ERROR_EXIT(bp, label) \
2124 (bp)->state = BNX2X_STATE_ERROR; \
2128 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2130 bp->cnic_loaded = false; \
2134 #endif /*BNX2X_STOP_ON_ERROR*/
2136 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2138 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2139 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2143 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2145 int num_groups, vf_headroom = 0;
2146 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2148 /* number of queues for statistics is number of eth queues + FCoE */
2149 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2151 /* Total number of FW statistics requests =
2152 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2153 * and fcoe l2 queue) stats + num of queues (which includes another 1
2154 * for fcoe l2 queue if applicable)
2156 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2158 /* vf stats appear in the request list, but their data is allocated by
2159 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2160 * it is used to determine where to place the vf stats queries in the
2164 vf_headroom = bnx2x_vf_headroom(bp);
2166 /* Request is built from stats_query_header and an array of
2167 * stats_query_cmd_group each of which contains
2168 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2169 * configured in the stats_query_header.
2172 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2173 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2176 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2177 bp->fw_stats_num, vf_headroom, num_groups);
2178 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2179 num_groups * sizeof(struct stats_query_cmd_group);
2181 /* Data for statistics requests + stats_counter
2182 * stats_counter holds per-STORM counters that are incremented
2183 * when STORM has finished with the current request.
2184 * memory for FCoE offloaded statistics are counted anyway,
2185 * even if they will not be sent.
2186 * VF stats are not accounted for here as the data of VF stats is stored
2187 * in memory allocated by the VF, not here.
2189 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2190 sizeof(struct per_pf_stats) +
2191 sizeof(struct fcoe_statistics_params) +
2192 sizeof(struct per_queue_stats) * num_queue_stats +
2193 sizeof(struct stats_counter);
2195 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2196 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2199 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2200 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2201 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2202 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2203 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2204 bp->fw_stats_req_sz;
2206 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2207 U64_HI(bp->fw_stats_req_mapping),
2208 U64_LO(bp->fw_stats_req_mapping));
2209 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2210 U64_HI(bp->fw_stats_data_mapping),
2211 U64_LO(bp->fw_stats_data_mapping));
2215 bnx2x_free_fw_stats_mem(bp);
2216 BNX2X_ERR("Can't allocate FW stats memory\n");
2220 /* send load request to mcp and analyze response */
2221 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2227 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2228 DRV_MSG_SEQ_NUMBER_MASK);
2229 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2231 /* Get current FW pulse sequence */
2232 bp->fw_drv_pulse_wr_seq =
2233 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2234 DRV_PULSE_SEQ_MASK);
2235 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2237 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2239 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2240 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2243 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2245 /* if mcp fails to respond we must abort */
2246 if (!(*load_code)) {
2247 BNX2X_ERR("MCP response failure, aborting\n");
2251 /* If mcp refused (e.g. other port is in diagnostic mode) we
2254 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2255 BNX2X_ERR("MCP refused load request, aborting\n");
2261 /* check whether another PF has already loaded FW to chip. In
2262 * virtualized environments a pf from another VM may have already
2263 * initialized the device including loading FW
2265 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2267 /* is another pf loaded on this engine? */
2268 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2269 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2270 /* build my FW version dword */
2271 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2272 (BCM_5710_FW_MINOR_VERSION << 8) +
2273 (BCM_5710_FW_REVISION_VERSION << 16) +
2274 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2276 /* read loaded FW from chip */
2277 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2279 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2282 /* abort nic load if version mismatch */
2283 if (my_fw != loaded_fw) {
2284 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2292 /* returns the "mcp load_code" according to global load_count array */
2293 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2295 int path = BP_PATH(bp);
2297 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2298 path, load_count[path][0], load_count[path][1],
2299 load_count[path][2]);
2300 load_count[path][0]++;
2301 load_count[path][1 + port]++;
2302 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2303 path, load_count[path][0], load_count[path][1],
2304 load_count[path][2]);
2305 if (load_count[path][0] == 1)
2306 return FW_MSG_CODE_DRV_LOAD_COMMON;
2307 else if (load_count[path][1 + port] == 1)
2308 return FW_MSG_CODE_DRV_LOAD_PORT;
2310 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
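/* Illustrative sketch (not driver code): without an MCP the driver keeps
 * per-path counters - one for the whole path plus one per port - and
 * derives the load code the MCP would otherwise return. A hypothetical
 * stand-alone model of that decision, taking the counters after they have
 * been incremented:
 */
static int __maybe_unused no_mcp_load_code(u32 path_count, u32 port_count)
{
	if (path_count == 1)		/* first function on this path */
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	if (port_count == 1)		/* first function on this port */
		return FW_MSG_CODE_DRV_LOAD_PORT;
	return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}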
2313 /* mark PMF if applicable */
2314 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2316 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2317 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2318 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2320 /* We need the barrier to ensure the ordering between the
2321 * writing to bp->port.pmf here and reading it from the
2322 * bnx2x_periodic_task().
2329 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2332 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2334 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2335 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2336 (bp->common.shmem2_base)) {
2337 if (SHMEM2_HAS(bp, dcc_support))
2338 SHMEM2_WR(bp, dcc_support,
2339 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2340 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2341 if (SHMEM2_HAS(bp, afex_driver_support))
2342 SHMEM2_WR(bp, afex_driver_support,
2343 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2346 /* Set AFEX default VLAN tag to an invalid value */
2347 bp->afex_def_vlan_tag = -1;
2351 * bnx2x_bz_fp - zero content of the fastpath structure.
2353 * @bp: driver handle
2354 * @index: fastpath index to be zeroed
2356  * Makes sure the contents of the bp->fp[index].napi is kept intact.
2359 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2361 struct bnx2x_fastpath *fp = &bp->fp[index];
2363 struct napi_struct orig_napi = fp->napi;
2364 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2366 /* bzero bnx2x_fastpath contents */
2368 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2369 sizeof(struct bnx2x_agg_info));
2370 memset(fp, 0, sizeof(*fp));
2372 /* Restore the NAPI object as it has been already initialized */
2373 fp->napi = orig_napi;
2374 fp->tpa_info = orig_tpa_info;
2378 fp->max_cos = bp->max_cos;
2380 /* Special queues support only one CoS */
2383 /* Init txdata pointers */
2385 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2387 for_each_cos_in_tx_queue(fp, cos)
2388 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2389 BNX2X_NUM_ETH_QUEUES(bp) + index];
2391 /* set the tpa flag for each queue. The tpa flag determines the queue
2392 	 * minimum size, so it must be set prior to queue memory allocation
2394 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2395 (bp->flags & GRO_ENABLE_FLAG &&
2396 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2397 if (bp->flags & TPA_ENABLE_FLAG)
2398 fp->mode = TPA_MODE_LRO;
2399 else if (bp->flags & GRO_ENABLE_FLAG)
2400 fp->mode = TPA_MODE_GRO;
2402 /* We don't want TPA on an FCoE L2 ring */
2404 fp->disable_tpa = 1;
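/* Illustrative sketch (not driver code): the aggregation choice above boils
 * down to "LRO wins over GRO, GRO additionally needs an MTU small enough to
 * aggregate, FCoE rings never aggregate". A hypothetical helper with
 * lro/gro/mtu_ok/is_fcoe standing in for the flag tests performed above:
 */
static void __maybe_unused pick_tpa_mode(bool lro, bool gro, bool mtu_ok,
					 bool is_fcoe,
					 int *disable_tpa, int *mode)
{
	*disable_tpa = is_fcoe || !(lro || (gro && mtu_ok));
	*mode = lro ? TPA_MODE_LRO : TPA_MODE_GRO;	/* LRO wins over GRO */
}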
2407 int bnx2x_load_cnic(struct bnx2x *bp)
2409 int i, rc, port = BP_PORT(bp);
2411 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2413 mutex_init(&bp->cnic_mutex);
2416 rc = bnx2x_alloc_mem_cnic(bp);
2418 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2419 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2423 rc = bnx2x_alloc_fp_mem_cnic(bp);
2425 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2426 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2429 /* Update the number of queues with the cnic queues */
2430 rc = bnx2x_set_real_num_queues(bp, 1);
2432 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2433 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2436 /* Add all CNIC NAPI objects */
2437 bnx2x_add_all_napi_cnic(bp);
2438 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2439 bnx2x_napi_enable_cnic(bp);
2441 rc = bnx2x_init_hw_func_cnic(bp);
2443 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2445 bnx2x_nic_init_cnic(bp);
2448 /* Enable Timer scan */
2449 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2451 /* setup cnic queues */
2452 for_each_cnic_queue(bp, i) {
2453 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2455 BNX2X_ERR("Queue setup failed\n");
2456 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2461 /* Initialize Rx filter. */
2462 bnx2x_set_rx_mode_inner(bp);
2464 /* re-read iscsi info */
2465 bnx2x_get_iscsi_info(bp);
2466 bnx2x_setup_cnic_irq_info(bp);
2467 bnx2x_setup_cnic_info(bp);
2468 bp->cnic_loaded = true;
2469 if (bp->state == BNX2X_STATE_OPEN)
2470 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2472 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2476 #ifndef BNX2X_STOP_ON_ERROR
2478 /* Disable Timer scan */
2479 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2482 bnx2x_napi_disable_cnic(bp);
2483 /* Update the number of queues without the cnic queues */
2484 rc = bnx2x_set_real_num_queues(bp, 0);
2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2488 BNX2X_ERR("CNIC-related load failed\n");
2489 bnx2x_free_fp_mem_cnic(bp);
2490 bnx2x_free_mem_cnic(bp);
2492 #endif /* ! BNX2X_STOP_ON_ERROR */
2495 /* must be called with rtnl_lock */
2496 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2498 int port = BP_PORT(bp);
2499 int i, rc = 0, load_code = 0;
2501 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2503 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2505 #ifdef BNX2X_STOP_ON_ERROR
2506 if (unlikely(bp->panic)) {
2507 BNX2X_ERR("Can't load NIC when there is panic\n");
2512 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2514 /* zero the structure w/o any lock, before SP handler is initialized */
2515 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2516 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2517 &bp->last_reported_link.link_report_flags);
2520 /* must be called before memory allocation and HW init */
2521 bnx2x_ilt_set_info(bp);
2524 	 * Zero fastpath structures while preserving invariants that are set up
2525 	 * only once: the napi struct, fp index, max_cos and the bp pointer.
2526 * Also set fp->disable_tpa and txdata_ptr.
2528 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2529 for_each_queue(bp, i)
2531 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2532 bp->num_cnic_queues) *
2533 sizeof(struct bnx2x_fp_txdata));
2535 bp->fcoe_init = false;
2537 /* Set the receive queues buffer size */
2538 bnx2x_set_rx_buf_size(bp);
2541 rc = bnx2x_alloc_mem(bp);
2543 BNX2X_ERR("Unable to allocate bp memory\n");
2548 	/* Allocate memory for FW statistics */
2549 if (bnx2x_alloc_fw_stats_mem(bp))
2550 LOAD_ERROR_EXIT(bp, load_error0);
2552 	/* needs to be done after alloc mem, since it's self-adjusting to the
2553 	 * amount of memory available for RSS queues
2555 rc = bnx2x_alloc_fp_mem(bp);
2557 BNX2X_ERR("Unable to allocate memory for fps\n");
2558 LOAD_ERROR_EXIT(bp, load_error0);
2561 /* request pf to initialize status blocks */
2563 rc = bnx2x_vfpf_init(bp);
2565 LOAD_ERROR_EXIT(bp, load_error0);
2568 	/* Since bnx2x_alloc_mem() may update bp->num_queues,
2569 	 * bnx2x_set_real_num_queues() must always come after it.
2570 	 * At this stage cnic queues are not counted.
2572 rc = bnx2x_set_real_num_queues(bp, 0);
2574 BNX2X_ERR("Unable to set real_num_queues\n");
2575 LOAD_ERROR_EXIT(bp, load_error0);
2578 	/* configure multi-CoS mappings in the kernel.
2579 	 * This configuration may be overridden by a multi-class queue
2580 	 * discipline or by a DCBX negotiation result.
2582 bnx2x_setup_tc(bp->dev, bp->max_cos);
2584 /* Add all NAPI objects */
2585 bnx2x_add_all_napi(bp);
2586 DP(NETIF_MSG_IFUP, "napi added\n");
2587 bnx2x_napi_enable(bp);
2590 /* set pf load just before approaching the MCP */
2591 bnx2x_set_pf_load(bp);
2593 /* if mcp exists send load request and analyze response */
2594 if (!BP_NOMCP(bp)) {
2595 /* attempt to load pf */
2596 rc = bnx2x_nic_load_request(bp, &load_code);
2598 LOAD_ERROR_EXIT(bp, load_error1);
2600 /* what did mcp say? */
2601 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2603 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2604 LOAD_ERROR_EXIT(bp, load_error2);
2607 load_code = bnx2x_nic_load_no_mcp(bp, port);
2610 /* mark pmf if applicable */
2611 bnx2x_nic_load_pmf(bp, load_code);
2613 /* Init Function state controlling object */
2614 bnx2x__init_func_obj(bp);
2617 rc = bnx2x_init_hw(bp, load_code);
2619 BNX2X_ERR("HW init failed, aborting\n");
2620 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2621 LOAD_ERROR_EXIT(bp, load_error2);
2625 bnx2x_pre_irq_nic_init(bp);
2627 /* Connect to IRQs */
2628 rc = bnx2x_setup_irqs(bp);
2630 BNX2X_ERR("setup irqs failed\n");
2632 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2633 LOAD_ERROR_EXIT(bp, load_error2);
2636 /* Init per-function objects */
2638 /* Setup NIC internals and enable interrupts */
2639 bnx2x_post_irq_nic_init(bp, load_code);
2641 bnx2x_init_bp_objs(bp);
2642 bnx2x_iov_nic_init(bp);
2644 /* Set AFEX default VLAN tag to an invalid value */
2645 bp->afex_def_vlan_tag = -1;
2646 bnx2x_nic_load_afex_dcc(bp, load_code);
2647 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2648 rc = bnx2x_func_start(bp);
2650 BNX2X_ERR("Function start failed!\n");
2651 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2653 LOAD_ERROR_EXIT(bp, load_error3);
2656 /* Send LOAD_DONE command to MCP */
2657 if (!BP_NOMCP(bp)) {
2658 load_code = bnx2x_fw_command(bp,
2659 DRV_MSG_CODE_LOAD_DONE, 0);
2661 BNX2X_ERR("MCP response failure, aborting\n");
2663 LOAD_ERROR_EXIT(bp, load_error3);
2667 /* initialize FW coalescing state machines in RAM */
2668 bnx2x_update_coalesce(bp);
2671 /* setup the leading queue */
2672 rc = bnx2x_setup_leading(bp);
2674 BNX2X_ERR("Setup leading failed!\n");
2675 LOAD_ERROR_EXIT(bp, load_error3);
2678 /* set up the rest of the queues */
2679 for_each_nondefault_eth_queue(bp, i) {
2681 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2683 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2685 BNX2X_ERR("Queue %d setup failed\n", i);
2686 LOAD_ERROR_EXIT(bp, load_error3);
2691 rc = bnx2x_init_rss(bp);
2693 BNX2X_ERR("PF RSS init failed\n");
2694 LOAD_ERROR_EXIT(bp, load_error3);
2697 /* Now when Clients are configured we are ready to work */
2698 bp->state = BNX2X_STATE_OPEN;
2700 /* Configure a ucast MAC */
2702 rc = bnx2x_set_eth_mac(bp, true);
2704 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2707 BNX2X_ERR("Setting Ethernet MAC failed\n");
2708 LOAD_ERROR_EXIT(bp, load_error3);
2711 if (IS_PF(bp) && bp->pending_max) {
2712 bnx2x_update_max_mf_config(bp, bp->pending_max);
2713 bp->pending_max = 0;
2717 rc = bnx2x_initial_phy_init(bp, load_mode);
2719 LOAD_ERROR_EXIT(bp, load_error3);
2721 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2723 /* Start fast path */
2725 /* Initialize Rx filter. */
2726 bnx2x_set_rx_mode_inner(bp);
2729 switch (load_mode) {
2731 		/* Tx queues should only be re-enabled */
2732 netif_tx_wake_all_queues(bp->dev);
2736 netif_tx_start_all_queues(bp->dev);
2737 smp_mb__after_clear_bit();
2741 case LOAD_LOOPBACK_EXT:
2742 bp->state = BNX2X_STATE_DIAG;
2750 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2752 bnx2x__link_status_update(bp);
2754 /* start the timer */
2755 mod_timer(&bp->timer, jiffies + bp->current_interval);
2757 if (CNIC_ENABLED(bp))
2758 bnx2x_load_cnic(bp);
2760 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2761 /* mark driver is loaded in shmem2 */
2763 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2764 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2765 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2766 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2769 /* Wait for all pending SP commands to complete */
2770 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2771 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2772 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2776 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2777 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2778 bnx2x_dcbx_init(bp, false);
2780 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2784 #ifndef BNX2X_STOP_ON_ERROR
2787 bnx2x_int_disable_sync(bp, 1);
2789 /* Clean queueable objects */
2790 bnx2x_squeeze_objects(bp);
2793 /* Free SKBs, SGEs, TPA pool and driver internals */
2794 bnx2x_free_skbs(bp);
2795 for_each_rx_queue(bp, i)
2796 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2801 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2802 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2803 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2808 bnx2x_napi_disable(bp);
2809 bnx2x_del_all_napi(bp);
2811 /* clear pf_load status, as it was already set */
2813 bnx2x_clear_pf_load(bp);
2815 bnx2x_free_fp_mem(bp);
2816 bnx2x_free_fw_stats_mem(bp);
2820 #endif /* ! BNX2X_STOP_ON_ERROR */
2823 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2827 /* Wait until tx fastpath tasks complete */
2828 for_each_tx_queue(bp, i) {
2829 struct bnx2x_fastpath *fp = &bp->fp[i];
2831 for_each_cos_in_tx_queue(fp, cos)
2832 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2839 /* must be called with rtnl_lock */
2840 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2843 bool global = false;
2845 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2847 /* mark driver is unloaded in shmem2 */
2848 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2850 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2851 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2852 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2855 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2856 (bp->state == BNX2X_STATE_CLOSED ||
2857 bp->state == BNX2X_STATE_ERROR)) {
2858 /* We can get here if the driver has been unloaded
2859 * during parity error recovery and is either waiting for a
2860 * leader to complete or for other functions to unload and
2861 * then ifdown has been issued. In this case we want to
2862 		 * unload and let other functions complete the recovery process.
2865 bp->recovery_state = BNX2X_RECOVERY_DONE;
2867 bnx2x_release_leader_lock(bp);
2870 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2871 BNX2X_ERR("Can't unload in closed or error state\n");
2875 /* Nothing to do during unload if previous bnx2x_nic_load()
2876 	 * has not completed successfully - all resources are already released.
2878 	 * We can get here only after an unsuccessful ndo_* callback, during which
2879 	 * the dev->IFF_UP flag is still on.
2881 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2884 	/* It's important to set bp->state to a value different from
2885 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2886 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2888 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2891 /* indicate to VFs that the PF is going down */
2892 bnx2x_iov_channel_down(bp);
2894 if (CNIC_LOADED(bp))
2895 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2898 bnx2x_tx_disable(bp);
2899 netdev_reset_tc(bp->dev);
2901 bp->rx_mode = BNX2X_RX_MODE_NONE;
2903 del_timer_sync(&bp->timer);
2906 /* Set ALWAYS_ALIVE bit in shmem */
2907 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2908 bnx2x_drv_pulse(bp);
2909 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2910 bnx2x_save_statistics(bp);
2913 /* wait till consumers catch up with producers in all queues */
2914 bnx2x_drain_tx_queues(bp);
2916 	/* if VF, indicate to the PF that this function is going down (the PF
2917 	 * will delete SP elements and clear initializations
2920 bnx2x_vfpf_close_vf(bp);
2921 else if (unload_mode != UNLOAD_RECOVERY)
2922 /* if this is a normal/close unload need to clean up chip*/
2923 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2925 /* Send the UNLOAD_REQUEST to the MCP */
2926 bnx2x_send_unload_req(bp, unload_mode);
2928 /* Prevent transactions to host from the functions on the
2929 * engine that doesn't reset global blocks in case of global
2930 * attention once global blocks are reset and gates are opened
2931 	 * (i.e. the engine on which the recovery leader was loaded).
2934 if (!CHIP_IS_E1x(bp))
2935 bnx2x_pf_disable(bp);
2937 /* Disable HW interrupts, NAPI */
2938 bnx2x_netif_stop(bp, 1);
2939 /* Delete all NAPI objects */
2940 bnx2x_del_all_napi(bp);
2941 if (CNIC_LOADED(bp))
2942 bnx2x_del_all_napi_cnic(bp);
2946 /* Report UNLOAD_DONE to MCP */
2947 bnx2x_send_unload_done(bp, false);
2951 * At this stage no more interrupts will arrive so we may safely clean
2952 * the queueable objects here in case they failed to get cleaned so far.
2955 bnx2x_squeeze_objects(bp);
2957 /* There should be no more pending SP commands at this stage */
2962 /* Free SKBs, SGEs, TPA pool and driver internals */
2963 bnx2x_free_skbs(bp);
2964 if (CNIC_LOADED(bp))
2965 bnx2x_free_skbs_cnic(bp);
2966 for_each_rx_queue(bp, i)
2967 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2969 bnx2x_free_fp_mem(bp);
2970 if (CNIC_LOADED(bp))
2971 bnx2x_free_fp_mem_cnic(bp);
2974 if (CNIC_LOADED(bp))
2975 bnx2x_free_mem_cnic(bp);
2979 bp->state = BNX2X_STATE_CLOSED;
2980 bp->cnic_loaded = false;
2982 /* Check if there are pending parity attentions. If there are - set
2983 * RECOVERY_IN_PROGRESS.
2985 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2986 bnx2x_set_reset_in_progress(bp);
2988 /* Set RESET_IS_GLOBAL if needed */
2990 bnx2x_set_reset_global(bp);
2993 	/* The last driver must disable the "close the gate" functionality if there is no
2994 * parity attention or "process kill" pending.
2997 !bnx2x_clear_pf_load(bp) &&
2998 bnx2x_reset_is_done(bp, BP_PATH(bp)))
2999 bnx2x_disable_close_the_gate(bp);
3001 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3006 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3010 /* If there is no power capability, silently succeed */
3012 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3016 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3020 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3021 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3022 PCI_PM_CTRL_PME_STATUS));
3024 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3025 /* delay required during transition out of D3hot */
3030 		/* If there are other clients above, don't shut down the power */
3032 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3034 /* Don't shut down the power for emulation and FPGA */
3035 if (CHIP_REV_IS_SLOW(bp))
3038 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3042 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3044 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3047 /* No more memory access after this point until
3048 * device is brought back to D0.
3053 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3060 * net_device service functions
3062 int bnx2x_poll(struct napi_struct *napi, int budget)
3066 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3068 struct bnx2x *bp = fp->bp;
3071 #ifdef BNX2X_STOP_ON_ERROR
3072 if (unlikely(bp->panic)) {
3073 napi_complete(napi);
3077 if (!bnx2x_fp_lock_napi(fp))
3080 for_each_cos_in_tx_queue(fp, cos)
3081 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3082 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3084 if (bnx2x_has_rx_work(fp)) {
3085 work_done += bnx2x_rx_int(fp, budget - work_done);
3087 /* must not complete if we consumed full budget */
3088 if (work_done >= budget) {
3089 bnx2x_fp_unlock_napi(fp);
3094 /* Fall out from the NAPI loop if needed */
3095 if (!bnx2x_fp_unlock_napi(fp) &&
3096 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3098 /* No need to update SB for FCoE L2 ring as long as
3099 * it's connected to the default SB and the SB
3100 * has been updated when NAPI was scheduled.
3102 if (IS_FCOE_FP(fp)) {
3103 napi_complete(napi);
3106 bnx2x_update_fpsb_idx(fp);
3107 /* bnx2x_has_rx_work() reads the status block,
3108 * thus we need to ensure that status block indices
3109 * have been actually read (bnx2x_update_fpsb_idx)
3110 * prior to this check (bnx2x_has_rx_work) so that
3111 * we won't write the "newer" value of the status block
3112 * to IGU (if there was a DMA right after
3113 * bnx2x_has_rx_work and if there is no rmb, the memory
3114 * reading (bnx2x_update_fpsb_idx) may be postponed
3115 * to right before bnx2x_ack_sb). In this case there
3116 * will never be another interrupt until there is
3117 * another update of the status block, while there
3118 * is still unhandled work.
3122 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3123 napi_complete(napi);
3124 /* Re-enable interrupts */
3125 DP(NETIF_MSG_RX_STATUS,
3126 "Update index to %d\n", fp->fp_hc_idx);
3127 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3128 le16_to_cpu(fp->fp_hc_idx),
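/* Illustrative sketch (not driver code): the completion path above is the
 * usual "re-check after refreshing the index" pattern. A hypothetical
 * skeleton of that ordering, with the placeholder calls mapped to the real
 * ones in the comments:
 */
#if 0
	refresh_local_sb_index();	/* bnx2x_update_fpsb_idx() */
	rmb();				/* index read must complete ... */
	if (!work_pending()) {		/* ... before this re-check */
		napi_complete(napi);
		reenable_interrupts();	/* bnx2x_ack_sb() */
	}
#endif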
3138 #ifdef CONFIG_NET_RX_BUSY_POLL
3139 /* must be called with local_bh_disable()d */
3140 int bnx2x_low_latency_recv(struct napi_struct *napi)
3142 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3144 struct bnx2x *bp = fp->bp;
3147 if ((bp->state == BNX2X_STATE_CLOSED) ||
3148 (bp->state == BNX2X_STATE_ERROR) ||
3149 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3150 return LL_FLUSH_FAILED;
3152 if (!bnx2x_fp_lock_poll(fp))
3153 return LL_FLUSH_BUSY;
3155 if (bnx2x_has_rx_work(fp))
3156 found = bnx2x_rx_int(fp, 4);
3158 bnx2x_fp_unlock_poll(fp);
3164 /* We split the first BD into a header BD and a data BD
3165  * to ease the pain of our fellow microcode engineers;
3166  * we use one mapping for both BDs.
3168 static u16 bnx2x_tx_split(struct bnx2x *bp,
3169 struct bnx2x_fp_txdata *txdata,
3170 struct sw_tx_bd *tx_buf,
3171 struct eth_tx_start_bd **tx_bd, u16 hlen,
3174 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3175 struct eth_tx_bd *d_tx_bd;
3177 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3179 /* first fix first BD */
3180 h_tx_bd->nbytes = cpu_to_le16(hlen);
3182 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3183 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3185 /* now get a new data BD
3186 * (after the pbd) and fill it */
3187 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3188 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3190 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3191 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3193 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3194 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3195 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3197 /* this marks the BD as one that has no individual mapping */
3198 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3200 DP(NETIF_MSG_TX_QUEUED,
3201 "TSO split data size is %d (%x:%x)\n",
3202 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3205 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
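/* Illustrative sketch (not driver code): the split above reuses the single
 * DMA mapping of the first buffer - the header BD keeps the original
 * address with its length trimmed to hlen, and the new data BD points hlen
 * bytes further into the same mapping with the remaining length. Modelled
 * on plain values by a hypothetical helper:
 */
static void __maybe_unused tso_split_example(u64 map, u16 len, u16 hlen,
					     u64 *hdr_addr, u16 *hdr_len,
					     u64 *data_addr, u16 *data_len)
{
	*hdr_addr = map;		/* header BD: original address */
	*hdr_len = hlen;		/* ... shortened to the header size */
	*data_addr = map + hlen;	/* data BD: same mapping, offset by hlen */
	*data_len = len - hlen;		/* ... carries the rest of the buffer */
}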
3210 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3211 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3212 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3214 __sum16 tsum = (__force __sum16) csum;
3217 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3218 csum_partial(t_header - fix, fix, 0)));
3221 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3222 csum_partial(t_header, -fix, 0)));
3224 return bswab16(tsum);
3227 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3233 if (skb->ip_summed != CHECKSUM_PARTIAL)
3236 protocol = vlan_get_protocol(skb);
3237 if (protocol == htons(ETH_P_IPV6)) {
3239 prot = ipv6_hdr(skb)->nexthdr;
3242 prot = ip_hdr(skb)->protocol;
3245 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3246 if (inner_ip_hdr(skb)->version == 6) {
3247 rc |= XMIT_CSUM_ENC_V6;
3248 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3249 rc |= XMIT_CSUM_TCP;
3251 rc |= XMIT_CSUM_ENC_V4;
3252 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3253 rc |= XMIT_CSUM_TCP;
3256 if (prot == IPPROTO_TCP)
3257 rc |= XMIT_CSUM_TCP;
3259 if (skb_is_gso_v6(skb)) {
3260 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3261 if (rc & XMIT_CSUM_ENC)
3262 rc |= XMIT_GSO_ENC_V6;
3263 } else if (skb_is_gso(skb)) {
3264 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3265 if (rc & XMIT_CSUM_ENC)
3266 rc |= XMIT_GSO_ENC_V4;
3272 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3273 /* Check if the packet requires linearization (packet is too fragmented).
3274    No need to check fragmentation if page size > 8K (there will be no
3275    violation of FW restrictions). */
3276 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3281 int first_bd_sz = 0;
3283 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3284 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3286 if (xmit_type & XMIT_GSO) {
3287 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3288 /* Check if LSO packet needs to be copied:
3289 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3290 int wnd_size = MAX_FETCH_BD - 3;
3291 /* Number of windows to check */
3292 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3297 /* Headers length */
3298 hlen = (int)(skb_transport_header(skb) - skb->data) +
3301 /* Amount of data (w/o headers) on linear part of SKB*/
3302 first_bd_sz = skb_headlen(skb) - hlen;
3304 wnd_sum = first_bd_sz;
3306 /* Calculate the first sum - it's special */
3307 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3309 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3311 		/* If there was data in the linear part of the skb - check it */
3312 if (first_bd_sz > 0) {
3313 if (unlikely(wnd_sum < lso_mss)) {
3318 wnd_sum -= first_bd_sz;
3321 /* Others are easier: run through the frag list and
3322 check all windows */
3323 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3325 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3327 if (unlikely(wnd_sum < lso_mss)) {
3332 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3335 		/* in the non-LSO case a too fragmented packet should always be linearized */
3342 if (unlikely(to_copy))
3343 DP(NETIF_MSG_TX_QUEUED,
3344 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3345 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3346 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
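/* Illustrative sketch (not driver code): ignoring the special handling of
 * the linear part, the LSO check above slides a window of wnd_size
 * consecutive buffer sizes over the packet and requires every window to
 * cover at least one MSS; otherwise the skb must be linearized. A
 * hypothetical stand-alone version over an array of fragment sizes:
 */
static bool __maybe_unused needs_linearization(const unsigned int *frag_sz,
					       int nr_frags, int wnd_size,
					       unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	if (nr_frags < wnd_size)		/* fits the FW BD limit anyway */
		return false;

	for (i = 0; i < nr_frags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size)
			wnd_sum -= frag_sz[i - wnd_size];
		if (i >= wnd_size - 1 && wnd_sum < mss)
			return true;	/* a window smaller than one MSS */
	}
	return false;
}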
3352 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3355 struct ipv6hdr *ipv6;
3357 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3358 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3359 ETH_TX_PARSE_BD_E2_LSO_MSS;
3361 if (xmit_type & XMIT_GSO_ENC_V6)
3362 ipv6 = inner_ipv6_hdr(skb);
3363 else if (xmit_type & XMIT_GSO_V6)
3364 ipv6 = ipv6_hdr(skb);
3368 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3369 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3373 * bnx2x_set_pbd_gso - update PBD in GSO case.
3377 * @xmit_type: xmit flags
3379 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3380 struct eth_tx_parse_bd_e1x *pbd,
3381 struct eth_tx_start_bd *tx_start_bd,
3384 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3385 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3386 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3388 if (xmit_type & XMIT_GSO_V4) {
3389 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3390 pbd->tcp_pseudo_csum =
3391 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3393 0, IPPROTO_TCP, 0));
3395 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3396 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3398 pbd->tcp_pseudo_csum =
3399 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3400 &ipv6_hdr(skb)->daddr,
3401 0, IPPROTO_TCP, 0));
3405 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3409 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3411 * @bp: driver handle
3413 * @parsing_data: data to be updated
3414 * @xmit_type: xmit flags
3416 * 57712/578xx related, when skb has encapsulation
3418 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3419 u32 *parsing_data, u32 xmit_type)
3422 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3423 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3424 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3426 if (xmit_type & XMIT_CSUM_TCP) {
3427 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3428 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3429 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3431 return skb_inner_transport_header(skb) +
3432 inner_tcp_hdrlen(skb) - skb->data;
3435 /* We support checksum offload for TCP and UDP only.
3436 * No need to pass the UDP header length - it's a constant.
3438 return skb_inner_transport_header(skb) +
3439 sizeof(struct udphdr) - skb->data;
3443 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3445 * @bp: driver handle
3447 * @parsing_data: data to be updated
3448 * @xmit_type: xmit flags
3450 * 57712/578xx related
3452 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3453 u32 *parsing_data, u32 xmit_type)
3456 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3457 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3458 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3460 if (xmit_type & XMIT_CSUM_TCP) {
3461 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3462 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3463 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3465 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3467 /* We support checksum offload for TCP and UDP only.
3468 * No need to pass the UDP header length - it's a constant.
3470 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3473 /* set FW indication according to inner or outer protocols if tunneled */
3474 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3475 struct eth_tx_start_bd *tx_start_bd,
3478 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3480 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3481 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3483 if (!(xmit_type & XMIT_CSUM_TCP))
3484 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3488 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3490 * @bp: driver handle
3492 * @pbd: parse BD to be updated
3493 * @xmit_type: xmit flags
3495 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3496 struct eth_tx_parse_bd_e1x *pbd,
3499 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3501 /* for now NS flag is not used in Linux */
3504 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3505 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3507 pbd->ip_hlen_w = (skb_transport_header(skb) -
3508 skb_network_header(skb)) >> 1;
3510 hlen += pbd->ip_hlen_w;
3512 /* We support checksum offload for TCP and UDP only */
3513 if (xmit_type & XMIT_CSUM_TCP)
3514 hlen += tcp_hdrlen(skb) / 2;
3516 hlen += sizeof(struct udphdr) / 2;
3518 pbd->total_hlen_w = cpu_to_le16(hlen);
3521 if (xmit_type & XMIT_CSUM_TCP) {
3522 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3525 s8 fix = SKB_CS_OFF(skb); /* signed! */
3527 DP(NETIF_MSG_TX_QUEUED,
3528 "hlen %d fix %d csum before fix %x\n",
3529 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3531 /* HW bug: fixup the CSUM */
3532 pbd->tcp_pseudo_csum =
3533 bnx2x_csum_fix(skb_transport_header(skb),
3536 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3537 pbd->tcp_pseudo_csum);
3543 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3544 struct eth_tx_parse_bd_e2 *pbd_e2,
3545 struct eth_tx_parse_2nd_bd *pbd2,
3550 u8 outerip_off, outerip_len = 0;
3552 /* from outer IP to transport */
3553 hlen_w = (skb_inner_transport_header(skb) -
3554 skb_network_header(skb)) >> 1;
3557 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3559 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3561 /* outer IP header info */
3562 if (xmit_type & XMIT_CSUM_V4) {
3563 struct iphdr *iph = ip_hdr(skb);
3564 u32 csum = (__force u32)(~iph->check) -
3565 (__force u32)iph->tot_len -
3566 (__force u32)iph->frag_off;
3568 pbd2->fw_ip_csum_wo_len_flags_frag =
3569 bswab16(csum_fold((__force __wsum)csum));
3571 pbd2->fw_ip_hdr_to_payload_w =
3572 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3575 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3577 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3579 if (xmit_type & XMIT_GSO_V4) {
3580 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3582 pbd_e2->data.tunnel_data.pseudo_csum =
3583 bswab16(~csum_tcpudp_magic(
3584 inner_ip_hdr(skb)->saddr,
3585 inner_ip_hdr(skb)->daddr,
3586 0, IPPROTO_TCP, 0));
3588 outerip_len = ip_hdr(skb)->ihl << 1;
3590 pbd_e2->data.tunnel_data.pseudo_csum =
3591 bswab16(~csum_ipv6_magic(
3592 &inner_ipv6_hdr(skb)->saddr,
3593 &inner_ipv6_hdr(skb)->daddr,
3594 0, IPPROTO_TCP, 0));
3597 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3601 (!!(xmit_type & XMIT_CSUM_V6) <<
3602 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3604 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3605 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3606 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3608 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3609 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3610 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3614 /* called with netif_tx_lock
3615 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3616 * netif_wake_queue()
3618 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3620 struct bnx2x *bp = netdev_priv(dev);
3622 struct netdev_queue *txq;
3623 struct bnx2x_fp_txdata *txdata;
3624 struct sw_tx_bd *tx_buf;
3625 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3626 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3627 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3628 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3629 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3630 u32 pbd_e2_parsing_data = 0;
3631 u16 pkt_prod, bd_prod;
3634 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3637 __le16 pkt_size = 0;
3639 u8 mac_type = UNICAST_ADDRESS;
3641 #ifdef BNX2X_STOP_ON_ERROR
3642 if (unlikely(bp->panic))
3643 return NETDEV_TX_BUSY;
3646 txq_index = skb_get_queue_mapping(skb);
3647 txq = netdev_get_tx_queue(dev, txq_index);
3649 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3651 txdata = &bp->bnx2x_txq[txq_index];
3653 /* enable this debug print to view the transmission queue being used
3654 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3655 txq_index, fp_index, txdata_index); */
3657 /* enable this debug print to view the transmission details
3658 DP(NETIF_MSG_TX_QUEUED,
3659 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3660 txdata->cid, fp_index, txdata_index, txdata, fp); */
3662 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3663 skb_shinfo(skb)->nr_frags +
3665 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3666 /* Handle special storage cases separately */
3667 if (txdata->tx_ring_size == 0) {
3668 struct bnx2x_eth_q_stats *q_stats =
3669 bnx2x_fp_qstats(bp, txdata->parent_fp);
3670 q_stats->driver_filtered_tx_pkt++;
3672 return NETDEV_TX_OK;
3674 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3675 netif_tx_stop_queue(txq);
3676 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3678 return NETDEV_TX_BUSY;
3681 DP(NETIF_MSG_TX_QUEUED,
3682 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3683 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3684 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3687 eth = (struct ethhdr *)skb->data;
3689 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3690 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3691 if (is_broadcast_ether_addr(eth->h_dest))
3692 mac_type = BROADCAST_ADDRESS;
3694 mac_type = MULTICAST_ADDRESS;
3697 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3698 /* First, check if we need to linearize the skb (due to FW
3699 restrictions). No need to check fragmentation if page size > 8K
3700 (there will be no violation to FW restrictions) */
3701 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3702 /* Statistics of linearization */
3704 if (skb_linearize(skb) != 0) {
3705 DP(NETIF_MSG_TX_QUEUED,
3706 "SKB linearization failed - silently dropping this SKB\n");
3707 dev_kfree_skb_any(skb);
3708 return NETDEV_TX_OK;
3712 /* Map skb linear data for DMA */
3713 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3714 skb_headlen(skb), DMA_TO_DEVICE);
3715 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3716 DP(NETIF_MSG_TX_QUEUED,
3717 "SKB mapping failed - silently dropping this SKB\n");
3718 dev_kfree_skb_any(skb);
3719 return NETDEV_TX_OK;
3722 Please read carefully. First we use one BD which we mark as start,
3723 then we have a parsing info BD (used for TSO or xsum),
3724 and only then we have the rest of the TSO BDs.
3725 (don't forget to mark the last one as last,
3726 and to unmap only AFTER you write to the BD ...)
3727    And above all, all PBD sizes are in words - NOT DWORDS!
3730 /* get current pkt produced now - advance it just before sending packet
3731 * since mapping of pages may fail and cause packet to be dropped
3733 pkt_prod = txdata->tx_pkt_prod;
3734 bd_prod = TX_BD(txdata->tx_bd_prod);
3736 /* get a tx_buf and first BD
3737 * tx_start_bd may be changed during SPLIT,
3738 * but first_bd will always stay first
3740 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3741 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3742 first_bd = tx_start_bd;
3744 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3746 /* header nbd: indirectly zero other flags! */
3747 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3749 /* remember the first BD of the packet */
3750 tx_buf->first_bd = txdata->tx_bd_prod;
3754 DP(NETIF_MSG_TX_QUEUED,
3755 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3756 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3758 if (vlan_tx_tag_present(skb)) {
3759 tx_start_bd->vlan_or_ethertype =
3760 cpu_to_le16(vlan_tx_tag_get(skb));
3761 tx_start_bd->bd_flags.as_bitfield |=
3762 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3764 /* when transmitting in a vf, start bd must hold the ethertype
3765 * for fw to enforce it
3768 tx_start_bd->vlan_or_ethertype =
3769 cpu_to_le16(ntohs(eth->h_proto));
3771 /* used by FW for packet accounting */
3772 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3775 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3777 /* turn on parsing and get a BD */
3778 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3780 if (xmit_type & XMIT_CSUM)
3781 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3783 if (!CHIP_IS_E1x(bp)) {
3784 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3785 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3787 if (xmit_type & XMIT_CSUM_ENC) {
3788 u16 global_data = 0;
3790 /* Set PBD in enc checksum offload case */
3791 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3792 &pbd_e2_parsing_data,
3795 /* turn on 2nd parsing and get a BD */
3796 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3798 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3800 memset(pbd2, 0, sizeof(*pbd2));
3802 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3803 (skb_inner_network_header(skb) -
3806 if (xmit_type & XMIT_GSO_ENC)
3807 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3811 pbd2->global_data = cpu_to_le16(global_data);
3813 			/* add additional parse BD indication to start BD */
3814 SET_FLAG(tx_start_bd->general_data,
3815 ETH_TX_START_BD_PARSE_NBDS, 1);
3816 /* set encapsulation flag in start BD */
3817 SET_FLAG(tx_start_bd->general_data,
3818 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3820 } else if (xmit_type & XMIT_CSUM) {
3821 /* Set PBD in checksum offload case w/o encapsulation */
3822 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3823 &pbd_e2_parsing_data,
3827 		/* Add the MACs to the parsing BD if this is a VF */
3829 /* override GRE parameters in BD */
3830 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3831 &pbd_e2->data.mac_addr.src_mid,
3832 &pbd_e2->data.mac_addr.src_lo,
3835 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3836 &pbd_e2->data.mac_addr.dst_mid,
3837 &pbd_e2->data.mac_addr.dst_lo,
3841 SET_FLAG(pbd_e2_parsing_data,
3842 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3844 u16 global_data = 0;
3845 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3846 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3847 /* Set PBD in checksum offload case */
3848 if (xmit_type & XMIT_CSUM)
3849 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3851 SET_FLAG(global_data,
3852 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3853 pbd_e1x->global_data |= cpu_to_le16(global_data);
3856 /* Setup the data pointer of the first BD of the packet */
3857 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3858 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3859 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3860 pkt_size = tx_start_bd->nbytes;
3862 DP(NETIF_MSG_TX_QUEUED,
3863 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3864 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3865 le16_to_cpu(tx_start_bd->nbytes),
3866 tx_start_bd->bd_flags.as_bitfield,
3867 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3869 if (xmit_type & XMIT_GSO) {
3871 DP(NETIF_MSG_TX_QUEUED,
3872 "TSO packet len %d hlen %d total len %d tso size %d\n",
3873 skb->len, hlen, skb_headlen(skb),
3874 skb_shinfo(skb)->gso_size);
3876 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3878 if (unlikely(skb_headlen(skb) > hlen)) {
3880 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3884 if (!CHIP_IS_E1x(bp))
3885 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3888 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3891 /* Set the PBD's parsing_data field if not zero
3892 * (for the chips newer than 57711).
3894 if (pbd_e2_parsing_data)
3895 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3897 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3899 /* Handle fragmented skb */
3900 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3901 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3903 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3904 skb_frag_size(frag), DMA_TO_DEVICE);
3905 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3906 unsigned int pkts_compl = 0, bytes_compl = 0;
3908 DP(NETIF_MSG_TX_QUEUED,
3909 "Unable to map page - dropping packet...\n");
3911 			/* we need to unmap all buffers already mapped
3913 			 * first_bd->nbd needs to be properly updated
3914 			 * before the call to bnx2x_free_tx_pkt
3916 first_bd->nbd = cpu_to_le16(nbd);
3917 bnx2x_free_tx_pkt(bp, txdata,
3918 TX_BD(txdata->tx_pkt_prod),
3919 &pkts_compl, &bytes_compl);
3920 return NETDEV_TX_OK;
3923 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3924 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3925 if (total_pkt_bd == NULL)
3926 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3928 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3929 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3930 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3931 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3934 DP(NETIF_MSG_TX_QUEUED,
3935 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3936 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3937 le16_to_cpu(tx_data_bd->nbytes));
3940 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3942 /* update with actual num BDs */
3943 first_bd->nbd = cpu_to_le16(nbd);
3945 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3947 /* now send a tx doorbell, counting the next BD
3948 * if the packet contains or ends with it
3950 if (TX_BD_POFF(bd_prod) < nbd)
3953 /* total_pkt_bytes should be set on the first data BD if
3954 * it's not an LSO packet and there is more than one
3955 * data BD. In this case pkt_size is limited by an MTU value.
3956 * However we prefer to set it for an LSO packet (while we don't
3957 	 * have to) in order to save some CPU cycles in the non-LSO
3958 	 * case, when we care much more about them.
3960 if (total_pkt_bd != NULL)
3961 total_pkt_bd->total_pkt_bytes = pkt_size;
3964 DP(NETIF_MSG_TX_QUEUED,
3965 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3966 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3967 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3968 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3969 le16_to_cpu(pbd_e1x->total_hlen_w));
3971 DP(NETIF_MSG_TX_QUEUED,
3972 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3974 pbd_e2->data.mac_addr.dst_hi,
3975 pbd_e2->data.mac_addr.dst_mid,
3976 pbd_e2->data.mac_addr.dst_lo,
3977 pbd_e2->data.mac_addr.src_hi,
3978 pbd_e2->data.mac_addr.src_mid,
3979 pbd_e2->data.mac_addr.src_lo,
3980 pbd_e2->parsing_data);
3981 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3983 netdev_tx_sent_queue(txq, skb->len);
3985 skb_tx_timestamp(skb);
3987 txdata->tx_pkt_prod++;
3989 * Make sure that the BD data is updated before updating the producer
3990 * since FW might read the BD right after the producer is updated.
3991 * This is only applicable for weak-ordered memory model archs such
3992 	 * as IA-64. The following barrier is also mandatory since the FW
3993 	 * assumes packets always have BDs.
3997 txdata->tx_db.data.prod += nbd;
4000 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4004 txdata->tx_bd_prod += nbd;
4006 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4007 netif_tx_stop_queue(txq);
4009 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4010 * ordering of set_bit() in netif_tx_stop_queue() and read of
4014 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4015 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4016 netif_tx_wake_queue(txq);
4020 return NETDEV_TX_OK;
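/* Illustrative sketch (not driver code): the transmit tail above follows the
 * standard descriptor-ring publishing order - write all BD fields, issue a
 * write barrier so a weakly ordered CPU cannot reorder the stores, then
 * advance the producer and ring the doorbell. A hypothetical skeleton
 * (fill_tx_bds() is a placeholder for the BD setup done above):
 */
#if 0
	fill_tx_bds(txdata, skb);	/* all BD fields written first */
	wmb();				/* BDs visible before the producer */
	txdata->tx_db.data.prod += nbd;	/* advance the producer */
	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);	/* notify the chip */
#endif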
4024 * bnx2x_setup_tc - routine to configure net_device for multi tc
4026 * @netdev: net device to configure
4027 * @tc: number of traffic classes to enable
4029 * callback connected to the ndo_setup_tc function pointer
4031 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4033 int cos, prio, count, offset;
4034 struct bnx2x *bp = netdev_priv(dev);
4036 /* setup tc must be called under rtnl lock */
4039 /* no traffic classes requested. Aborting */
4041 netdev_reset_tc(dev);
4045 /* requested to support too many traffic classes */
4046 if (num_tc > bp->max_cos) {
4047 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4048 num_tc, bp->max_cos);
4052 /* declare amount of supported traffic classes */
4053 if (netdev_set_num_tc(dev, num_tc)) {
4054 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4058 /* configure priority to traffic class mapping */
4059 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4060 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4061 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4062 "mapping priority %d to tc %d\n",
4063 prio, bp->prio_to_cos[prio]);
4066 	/* Use this configuration to differentiate tc0 from other COSes.
4067 	   This can be used for ETS or PFC, and saves the effort of setting
4068 	   up a multi-class queue disc or negotiating DCBX with a switch:
4069 netdev_set_prio_tc_map(dev, 0, 0);
4070 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4071 for (prio = 1; prio < 16; prio++) {
4072 netdev_set_prio_tc_map(dev, prio, 1);
4073 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4076 /* configure traffic class to transmission queue mapping */
4077 for (cos = 0; cos < bp->max_cos; cos++) {
4078 count = BNX2X_NUM_ETH_QUEUES(bp);
4079 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4080 netdev_set_tc_queue(dev, cos, count, offset);
4081 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4082 "mapping tc %d to offset %d count %d\n",
4083 cos, offset, count);
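/* Illustrative sketch (not driver code): each traffic class is handed the
 * same number of queues, laid out back to back, so class c starts at
 * offset c * count (assuming the ETH and non-CNIC queue counts coincide).
 * A hypothetical helper, e.g. with 4 ETH queues classes 0/1/2 map to
 * offsets 0, 4 and 8:
 */
static void __maybe_unused tc_queue_range(int cos, int num_eth_queues,
					  int *offset, int *count)
{
	*count = num_eth_queues;
	*offset = cos * num_eth_queues;
}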
4089 /* called with rtnl_lock */
4090 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4092 struct sockaddr *addr = p;
4093 struct bnx2x *bp = netdev_priv(dev);
4096 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4097 BNX2X_ERR("Requested MAC address is not valid\n");
4101 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4102 !is_zero_ether_addr(addr->sa_data)) {
4103 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4107 if (netif_running(dev)) {
4108 rc = bnx2x_set_eth_mac(bp, false);
4113 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4115 if (netif_running(dev))
4116 rc = bnx2x_set_eth_mac(bp, true);
4121 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4123 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4124 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4129 if (IS_FCOE_IDX(fp_index)) {
4130 memset(sb, 0, sizeof(union host_hc_status_block));
4131 fp->status_blk_mapping = 0;
4134 if (!CHIP_IS_E1x(bp))
4135 BNX2X_PCI_FREE(sb->e2_sb,
4136 bnx2x_fp(bp, fp_index,
4137 status_blk_mapping),
4138 sizeof(struct host_hc_status_block_e2));
4140 BNX2X_PCI_FREE(sb->e1x_sb,
4141 bnx2x_fp(bp, fp_index,
4142 status_blk_mapping),
4143 sizeof(struct host_hc_status_block_e1x));
4147 if (!skip_rx_queue(bp, fp_index)) {
4148 bnx2x_free_rx_bds(fp);
4150 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4151 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4152 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4153 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4154 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4156 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4157 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4158 sizeof(struct eth_fast_path_rx_cqe) *
4162 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4163 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4164 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4165 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4169 if (!skip_tx_queue(bp, fp_index)) {
4170 /* fastpath tx rings: tx_buf tx_desc */
4171 for_each_cos_in_tx_queue(fp, cos) {
4172 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4174 DP(NETIF_MSG_IFDOWN,
4175 "freeing tx memory of fp %d cos %d cid %d\n",
4176 fp_index, cos, txdata->cid);
4178 BNX2X_FREE(txdata->tx_buf_ring);
4179 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4180 txdata->tx_desc_mapping,
4181 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4184 /* end of fastpath */
4187 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4190 for_each_cnic_queue(bp, i)
4191 bnx2x_free_fp_mem_at(bp, i);
4194 void bnx2x_free_fp_mem(struct bnx2x *bp)
4197 for_each_eth_queue(bp, i)
4198 bnx2x_free_fp_mem_at(bp, i);
4201 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4203 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4204 if (!CHIP_IS_E1x(bp)) {
4205 bnx2x_fp(bp, index, sb_index_values) =
4206 (__le16 *)status_blk.e2_sb->sb.index_values;
4207 bnx2x_fp(bp, index, sb_running_index) =
4208 (__le16 *)status_blk.e2_sb->sb.running_index;
4210 bnx2x_fp(bp, index, sb_index_values) =
4211 (__le16 *)status_blk.e1x_sb->sb.index_values;
4212 bnx2x_fp(bp, index, sb_running_index) =
4213 (__le16 *)status_blk.e1x_sb->sb.running_index;
4217 /* Returns the number of actually allocated BDs */
4218 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4221 struct bnx2x *bp = fp->bp;
4222 u16 ring_prod, cqe_ring_prod;
4223 int i, failure_cnt = 0;
4225 fp->rx_comp_cons = 0;
4226 cqe_ring_prod = ring_prod = 0;
4228 	/* This routine is called only during fp init, so
4229 	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4231 for (i = 0; i < rx_ring_size; i++) {
4232 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4236 ring_prod = NEXT_RX_IDX(ring_prod);
4237 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4238 WARN_ON(ring_prod <= (i - failure_cnt));
4242 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4243 i - failure_cnt, fp->index);
4245 fp->rx_bd_prod = ring_prod;
4246 /* Limit the CQE producer by the CQE ring size */
4247 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4249 fp->rx_pkt = fp->rx_calls = 0;
4251 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4253 return i - failure_cnt;
4256 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4260 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4261 struct eth_rx_cqe_next_page *nextpg;
4263 nextpg = (struct eth_rx_cqe_next_page *)
4264 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4266 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4267 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4269 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4270 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4274 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4276 union host_hc_status_block *sb;
4277 struct bnx2x_fastpath *fp = &bp->fp[index];
4280 int rx_ring_size = 0;
4282 if (!bp->rx_ring_size &&
4283 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4284 rx_ring_size = MIN_RX_SIZE_NONTPA;
4285 bp->rx_ring_size = rx_ring_size;
4286 } else if (!bp->rx_ring_size) {
4287 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4289 if (CHIP_IS_E3(bp)) {
4290 u32 cfg = SHMEM_RD(bp,
4291 dev_info.port_hw_config[BP_PORT(bp)].
4294 /* Decrease ring size for 1G functions */
4295 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4296 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4300 		/* allocate at least the number of buffers required by the FW */
4301 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4302 MIN_RX_SIZE_TPA, rx_ring_size);
4304 bp->rx_ring_size = rx_ring_size;
4305 } else /* if rx_ring_size specified - use it */
4306 rx_ring_size = bp->rx_ring_size;
4308 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
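/* Illustrative sketch (not driver code): when no explicit ring size was
 * requested, the budget is divided across the RX queues and the result is
 * clamped to the FW minimum for the TPA or non-TPA case. A hypothetical
 * stand-alone version of that calculation:
 */
static int __maybe_unused calc_rx_ring_size(int requested, int budget,
					    int num_rx_queues, int fw_min)
{
	int size = requested ? requested : budget / num_rx_queues;

	return max_t(int, size, fw_min);	/* never below the FW minimum */
}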
4311 sb = &bnx2x_fp(bp, index, status_blk);
4313 if (!IS_FCOE_IDX(index)) {
4315 if (!CHIP_IS_E1x(bp))
4316 BNX2X_PCI_ALLOC(sb->e2_sb,
4317 &bnx2x_fp(bp, index, status_blk_mapping),
4318 sizeof(struct host_hc_status_block_e2));
4320 BNX2X_PCI_ALLOC(sb->e1x_sb,
4321 &bnx2x_fp(bp, index, status_blk_mapping),
4322 sizeof(struct host_hc_status_block_e1x));
4325 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4326 * set shortcuts for it.
4328 if (!IS_FCOE_IDX(index))
4329 set_sb_shortcuts(bp, index);
4332 if (!skip_tx_queue(bp, index)) {
4333 /* fastpath tx rings: tx_buf tx_desc */
4334 for_each_cos_in_tx_queue(fp, cos) {
4335 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4338 "allocating tx memory of fp %d cos %d\n",
4341 BNX2X_ALLOC(txdata->tx_buf_ring,
4342 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4343 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4344 &txdata->tx_desc_mapping,
4345 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4350 if (!skip_rx_queue(bp, index)) {
4351 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4352 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4353 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4354 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4355 &bnx2x_fp(bp, index, rx_desc_mapping),
4356 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4358 /* Seed all CQEs by 1s */
4359 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4360 &bnx2x_fp(bp, index, rx_comp_mapping),
4361 sizeof(struct eth_fast_path_rx_cqe) *
4365 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4366 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4367 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4368 &bnx2x_fp(bp, index, rx_sge_mapping),
4369 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4371 bnx2x_set_next_page_rx_bd(fp);
4374 bnx2x_set_next_page_rx_cq(fp);
4377 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4378 if (ring_size < rx_ring_size)
4384 /* handles low memory cases */
4386 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4388 	/* The FW will drop all packets if the queue is not big enough,
4389 	 * so in these cases we disable the queue.
4390 	 * The minimum size is different for OOO, TPA and non-TPA queues.
4392 if (ring_size < (fp->disable_tpa ?
4393 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4394 /* release memory allocated for this queue */
4395 bnx2x_free_fp_mem_at(bp, index);
4401 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4405 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4406 /* we will fail load process instead of mark
4414 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4418 /* 1. Allocate FP for leading - fatal if error
4419 * 2. Allocate RSS - fix number of queues if error
4423 if (bnx2x_alloc_fp_mem_at(bp, 0))
4427 for_each_nondefault_eth_queue(bp, i)
4428 if (bnx2x_alloc_fp_mem_at(bp, i))
4431 /* handle memory failures */
4432 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4433 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4436 bnx2x_shrink_eth_fp(bp, delta);
4437 if (CNIC_SUPPORT(bp))
4438 			/* move non-ETH FPs next to the last ETH FP;
4439 			 * must be done in this order:
4440 * FCOE_IDX < FWD_IDX < OOO_IDX
4443 			/* move the FCoE fp even if NO_FCOE_FLAG is on */
4444 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4445 bp->num_ethernet_queues -= delta;
4446 bp->num_queues = bp->num_ethernet_queues +
4447 bp->num_cnic_queues;
4448 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4449 bp->num_queues + delta, bp->num_queues);
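/* Illustrative sketch (not driver code): when only allocated_eth of the
 * requested ETH queues could be allocated, the shortfall delta is dropped
 * and the CNIC fastpaths slide down by delta so they stay adjacent to the
 * last ETH queue. A hypothetical model of that index bookkeeping:
 */
static void __maybe_unused shrink_queue_counts(int requested_eth,
					       int allocated_eth,
					       int cnic_queues,
					       int *fcoe_idx, int *num_queues)
{
	int delta = requested_eth - allocated_eth;

	*fcoe_idx -= delta;			/* FCoE fp moves down by delta */
	*num_queues = allocated_eth + cnic_queues;
}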
4455 void bnx2x_free_mem_bp(struct bnx2x *bp)
4459 for (i = 0; i < bp->fp_array_size; i++)
4460 kfree(bp->fp[i].tpa_info);
4463 kfree(bp->fp_stats);
4464 kfree(bp->bnx2x_txq);
4465 kfree(bp->msix_table);
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/* The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!fp[i].tpa_info)
			goto alloc_err;
	}
	bp->fp = fp;
	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;
	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;
	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

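/* Unload and reload the NIC if the interface is running; used by the
 * rtnl-protected callbacks below after configuration changes.
 */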
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

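/* Return the index of the currently active PHY: the internal PHY when only
 * one PHY is present, otherwise EXT_PHY1/EXT_PHY2 based on the link state or
 * the configured PHY selection.
 */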
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

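/* Map the active PHY index to its link configuration index, undoing PHY
 * swapping when it is enabled.
 */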
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}

	return LINK_CONFIG_IDX(sel_phy_idx);
}

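/* Report the FCoE World Wide Node/Port Name taken from the CNIC device. */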
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}
	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation because the actual alloc
	 * size is only updated as part of load.
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

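/* Drop LRO and GRO from the requested feature set when Rx checksum offload
 * is off or TPA is disabled, since TPA requires Rx CSUM.
 */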
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

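/* Apply LRO/GRO/loopback feature changes; reload the NIC only if a relevant
 * flag actually changed and the device is not in parity recovery.
 */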
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

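/* Tx watchdog: defer the actual handling to the sp_rtnl task so the netif
 * can be shut down gracefully before the reset.
 */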
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

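/* PCI suspend/resume handlers: quiesce and power down the device on suspend,
 * restore power state and do a full nic_load on resume.
 */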
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
	rtnl_unlock();
	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();
	pci_restore_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	rc = bnx2x_nic_load(bp, LOAD_OPEN);
	rtnl_unlock();
	return rc;
}

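/* Write the CDU validation values for the given CID into the ustorm and
 * xstorm sections of the ETH connection context.
 */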
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

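/* Helpers for the per-index host-coalescing fields kept in CSTORM internal
 * memory: the timeout value (in ticks) and the HC-enabled flag.
 */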
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set the HC_ENABLED bit */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;

	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

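/* Update the coalescing parameters of a single status block index: usec is
 * converted to HC ticks at BNX2X_BTR resolution, and a zero usec value forces
 * the index to be disabled.
 */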
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}